-rw-r--r--.mailmap6
-rw-r--r--Documentation/ABI/testing/sysfs-class-firmware14
-rw-r--r--Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update14
-rw-r--r--Documentation/core-api/workqueue.rst4
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-msm.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml2
-rw-r--r--Documentation/filesystems/files.rst53
-rw-r--r--Documentation/filesystems/fscrypt.rst121
-rw-r--r--Documentation/filesystems/nfs/exporting.rst7
-rw-r--r--Documentation/filesystems/overlayfs.rst12
-rw-r--r--Documentation/filesystems/porting.rst7
-rw-r--r--Documentation/netlink/specs/devlink.yaml18
-rw-r--r--Documentation/netlink/specs/nfsd.yaml89
-rw-r--r--Documentation/networking/representors.rst8
-rw-r--r--Documentation/process/embargoed-hardware-issues.rst19
-rw-r--r--Documentation/rust/general-information.rst2
-rw-r--r--Documentation/trace/fprobe.rst8
-rw-r--r--Documentation/translations/zh_CN/core-api/workqueue.rst2
-rw-r--r--MAINTAINERS44
-rw-r--r--Makefile4
-rw-r--r--arch/arc/kernel/troubleshoot.c6
-rw-r--r--arch/arm/boot/dts/rockchip/rk3128.dtsi18
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi6
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-l4.dtsi2
-rw-r--r--arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi6
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c60
-rw-r--r--arch/arm/mach-omap1/timer32k.c14
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm64/boot/dts/freescale/imx93.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-demo.dts39
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dts32
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi32
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts18
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi10
-rw-r--r--arch/arm64/include/asm/kvm_arm.h4
-rw-r--r--arch/arm64/kvm/arch_timer.c13
-rw-r--r--arch/arm64/kvm/emulate-nested.c2
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c44
-rw-r--r--arch/arm64/kvm/pmu.c4
-rw-r--r--arch/arm64/kvm/sys_regs.c4
-rw-r--r--arch/ia64/include/asm/cpu.h5
-rw-r--r--arch/ia64/kernel/topology.c2
-rw-r--r--arch/loongarch/include/asm/io.h5
-rw-r--r--arch/loongarch/include/asm/linkage.h8
-rw-r--r--arch/loongarch/include/asm/pgtable-bits.h4
-rw-r--r--arch/loongarch/kernel/entry.S4
-rw-r--r--arch/loongarch/kernel/genex.S16
-rw-r--r--arch/loongarch/kernel/setup.c10
-rw-r--r--arch/loongarch/mm/init.c9
-rw-r--r--arch/loongarch/mm/tlbex.S36
-rw-r--r--arch/mips/kvm/mmu.c3
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-8xx.h7
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h2
-rw-r--r--arch/powerpc/kernel/entry_32.S8
-rw-r--r--arch/powerpc/kernel/head_85xx.S2
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/lib/qspinlock.c3
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c9
-rw-r--r--arch/powerpc/mm/mem.c1
-rw-r--r--arch/powerpc/mm/pgtable.c32
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c11
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S8
-rw-r--r--arch/riscv/Kconfig3
-rw-r--r--arch/riscv/Kconfig.errata1
-rw-r--r--arch/riscv/Makefile1
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi2
-rw-r--r--arch/riscv/boot/dts/thead/th1520.dtsi1
-rw-r--r--arch/riscv/errata/andes/Makefile4
-rw-r--r--arch/riscv/include/asm/ftrace.h21
-rw-r--r--arch/riscv/include/asm/kprobes.h11
-rw-r--r--arch/riscv/include/asm/uprobes.h13
-rw-r--r--arch/riscv/kernel/irq.c4
-rw-r--r--arch/riscv/kernel/setup.c13
-rw-r--r--arch/riscv/kernel/signal.c7
-rw-r--r--arch/riscv/kernel/traps.c28
-rw-r--r--arch/riscv/mm/fault.c2
-rw-r--r--arch/riscv/mm/hugetlbpage.c19
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c18
-rw-r--r--arch/s390/boot/vmem.c7
-rw-r--r--arch/s390/hypfs/inode.c4
-rw-r--r--arch/s390/kvm/interrupt.c16
-rw-r--r--arch/s390/net/bpf_jit_comp.c25
-rw-r--r--arch/s390/pci/pci_dma.c15
-rw-r--r--arch/sparc/lib/checksum_32.S2
-rw-r--r--arch/x86/boot/compressed/sev.c10
-rw-r--r--arch/x86/events/utils.c5
-rw-r--r--arch/x86/include/asm/cpu.h2
-rw-r--r--arch/x86/include/asm/fpu/api.h3
-rw-r--r--arch/x86/include/asm/i8259.h2
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/msr-index.h9
-rw-r--r--arch/x86/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/svm.h1
-rw-r--r--arch/x86/include/asm/uaccess.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c3
-rw-r--r--arch/x86/kernel/alternative.c13
-rw-r--r--arch/x86/kernel/cpu/amd.c8
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c10
-rw-r--r--arch/x86/kernel/fpu/core.c5
-rw-r--r--arch/x86/kernel/fpu/xstate.c12
-rw-r--r--arch/x86/kernel/fpu/xstate.h3
-rw-r--r--arch/x86/kernel/i8259.c38
-rw-r--r--arch/x86/kernel/sev-shared.c53
-rw-r--r--arch/x86/kernel/sev.c30
-rw-r--r--arch/x86/kernel/smp.c39
-rw-r--r--arch/x86/kernel/smpboot.c27
-rw-r--r--arch/x86/kernel/topology.c2
-rw-r--r--arch/x86/kernel/tsc_sync.c10
-rw-r--r--arch/x86/kvm/cpuid.c8
-rw-r--r--arch/x86/kvm/lapic.c8
-rw-r--r--arch/x86/kvm/pmu.c27
-rw-r--r--arch/x86/kvm/pmu.h6
-rw-r--r--arch/x86/kvm/svm/avic.c5
-rw-r--r--arch/x86/kvm/svm/nested.c3
-rw-r--r--arch/x86/kvm/svm/pmu.c2
-rw-r--r--arch/x86/kvm/svm/svm.c5
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c4
-rw-r--r--arch/x86/kvm/x86.c40
-rw-r--r--arch/x86/lib/copy_mc.c8
-rw-r--r--block/bdev.c65
-rw-r--r--block/blk-throttle.c6
-rw-r--r--block/disk-events.c18
-rw-r--r--block/fops.c65
-rw-r--r--block/genhd.c19
-rw-r--r--block/ioctl.c11
-rw-r--r--block/partitions/core.c43
-rw-r--r--block/sed-opal.c7
-rw-r--r--crypto/asymmetric_keys/public_key.c5
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c11
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h1
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c9
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h5
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h8
-rw-r--r--drivers/accel/ivpu/ivpu_hw_37xx.c12
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx.c1
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c9
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c3
-rw-r--r--drivers/acpi/acpi_processor.c1
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/ec.c11
-rw-r--r--drivers/acpi/irq.c7
-rw-r--r--drivers/acpi/nfit/core.c22
-rw-r--r--drivers/acpi/resource.c26
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/android/binderfs.c8
-rw-r--r--drivers/ata/libata-scsi.c5
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/block/ataflop.c4
-rw-r--r--drivers/block/drbd/drbd_int.h2
-rw-r--r--drivers/block/drbd/drbd_nl.c65
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/pktcdvd.c76
-rw-r--r--drivers/block/rnbd/rnbd-srv.c27
-rw-r--r--drivers/block/rnbd/rnbd-srv.h2
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkback/common.h4
-rw-r--r--drivers/block/xen-blkback/xenbus.c40
-rw-r--r--drivers/block/zram/zram_drv.c31
-rw-r--r--drivers/block/zram/zram_drv.h2
-rw-r--r--drivers/bluetooth/btrtl.c10
-rw-r--r--drivers/bluetooth/hci_vhci.c3
-rw-r--r--drivers/cache/Kconfig2
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/clk/clk.c21
-rw-r--r--drivers/clk/socfpga/clk-gate.c27
-rw-r--r--drivers/clk/stm32/clk-stm32-core.c2
-rw-r--r--drivers/clk/ti/clk-44xx.c5
-rw-r--r--drivers/clk/ti/clk-54xx.c4
-rw-r--r--drivers/connector/cn_proc.c2
-rw-r--r--drivers/counter/counter-chrdev.c4
-rw-r--r--drivers/counter/microchip-tcb-capture.c2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h3
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c14
-rw-r--r--drivers/dma-buf/dma-fence-unwrap.c13
-rw-r--r--drivers/dma-buf/sync_file.c9
-rw-r--r--drivers/dma/fsl-edma-common.c25
-rw-r--r--drivers/dma/fsl-edma-common.h14
-rw-r--r--drivers/dma/fsl-edma-main.c8
-rw-r--r--drivers/dma/idxd/device.c5
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c3
-rw-r--r--drivers/dma/ste_dma40.c1
-rw-r--r--drivers/dma/stm32-dma.c11
-rw-r--r--drivers/dma/stm32-mdma.c33
-rw-r--r--drivers/firewire/sbp2.c1
-rw-r--r--drivers/firmware/efi/efi.c8
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c7
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.h2
-rw-r--r--drivers/firmware/efi/unaccepted_memory.c64
-rw-r--r--drivers/firmware/imx/imx-dsp.c2
-rw-r--r--drivers/fpga/tests/Kconfig4
-rw-r--r--drivers/fpga/tests/fpga-region-test.c2
-rw-r--r--drivers/gpio/gpio-vf610.c7
-rw-r--r--drivers/gpio/gpiolib-acpi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c3
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c14
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c6
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c17
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/drm_gem.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c24
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c4
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c9
-rw-r--r--drivers/gpu/drm/logicvc/Kconfig2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c27
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c13
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c19
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c14
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c4
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c29
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c35
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c2
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c27
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c9
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c2
-rw-r--r--drivers/iio/adc/ad7192.c29
-rw-r--r--drivers/iio/adc/exynos_adc.c24
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c39
-rw-r--r--drivers/iio/adc/xilinx-xadc.h2
-rw-r--r--drivers/iio/addac/Kconfig2
-rw-r--r--drivers/iio/afe/iio-rescale.c19
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c6
-rw-r--r--drivers/iio/dac/ad3552r.c4
-rw-r--r--drivers/iio/frequency/admv1013.c4
-rw-r--r--drivers/iio/imu/bno055/Kconfig2
-rw-r--r--drivers/iio/light/vcnl4000.c1
-rw-r--r--drivers/iio/pressure/bmp280-core.c2
-rw-r--r--drivers/iio/pressure/dps310.c8
-rw-r--r--drivers/iio/pressure/ms5611_core.c2
-rw-r--r--drivers/iio/proximity/irsd200.c6
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c4
-rw-r--r--drivers/input/joystick/xpad.c4
-rw-r--r--drivers/input/misc/powermate.c1
-rw-r--r--drivers/input/mouse/elantech.c1
-rw-r--r--drivers/input/mouse/psmouse-smbus.c19
-rw-r--r--drivers/input/mouse/synaptics.c2
-rw-r--r--drivers/input/rmi4/rmi_smbus.c50
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h8
-rw-r--r--drivers/input/touchscreen/goodix.c19
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c2
-rw-r--r--drivers/mcb/mcb-core.c10
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/super.c95
-rw-r--r--drivers/md/dm.c20
-rw-r--r--drivers/md/md.c23
-rw-r--r--drivers/md/md.h4
-rw-r--r--drivers/media/i2c/ov8858.c10
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c7
-rw-r--r--drivers/misc/fastrpc.c34
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/misc/ibmvmc.c2
-rw-r--r--drivers/mmc/core/block.c31
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sdio.c8
-rw-r--r--drivers/mmc/host/mtk-sd.c6
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c104
-rw-r--r--drivers/mmc/host/sdhci-sprd.c1
-rw-r--r--drivers/mtd/devices/block2mtd.c51
-rw-r--r--drivers/mtd/maps/physmap-core.c11
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c16
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c23
-rw-r--r--drivers/mtd/nand/raw/nand_base.c3
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c3
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c3
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c9
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c2
-rw-r--r--drivers/mtd/nand/spi/micron.c2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c46
-rw-r--r--drivers/net/can/flexcan/flexcan.h2
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c8
-rw-r--r--drivers/net/dsa/bcm_sf2.c24
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c15
-rw-r--r--drivers/net/ethernet/adi/adin1110.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c36
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c22
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c18
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c5
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c6
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c6
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c12
-rw-r--r--drivers/net/ethernet/sfc/tc.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c17
-rw-r--r--drivers/net/ethernet/ti/Kconfig5
-rw-r--r--drivers/net/ethernet/ti/Makefile7
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c9
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.c10
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/gtp.c5
-rw-r--r--drivers/net/ieee802154/adf7242.c5
-rw-r--r--drivers/net/ieee802154/ca8210.c17
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/mdio/mdio-mux.c47
-rw-r--r--drivers/net/phy/bcm7xxx.c3
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c6
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/dm9601.c7
-rw-r--r--drivers/net/usb/r8152.c303
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c17
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.c17
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c8
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c21
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/nvme/host/auth.c4
-rw-r--r--drivers/nvme/host/ioctl.c10
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/host/rdma.c3
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c9
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c20
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/tcp.c7
-rw-r--r--drivers/nvmem/imx-ocotp.c6
-rw-r--r--drivers/perf/riscv_pmu.c3
-rw-r--r--drivers/perf/riscv_pmu_sbi.c16
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c27
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c38
-rw-r--r--drivers/phy/qualcomm/phy-qcom-apq8064-sata.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c24
-rw-r--r--drivers/phy/realtek/Kconfig5
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c10
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c10
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c6
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c17
-rw-r--r--drivers/pinctrl/renesas/Kconfig1
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c4
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c4
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c42
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c19
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h2
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c21
-rw-r--r--drivers/platform/surface/surface_platform_profile.c3
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c73
-rw-r--r--drivers/platform/x86/apple-gmux.c14
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c15
-rw-r--r--drivers/platform/x86/asus-wmi.h2
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c8
-rw-r--r--drivers/platform/x86/mlx-platform.c5
-rw-r--r--drivers/platform/x86/msi-ec.c3
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/pmdomain/imx/scu-pd.c3
-rw-r--r--drivers/power/supply/qcom_battmgr.c8
-rw-r--r--drivers/s390/block/dasd.c12
-rw-r--r--drivers/s390/block/dasd_genhd.c45
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dasd_ioctl.c2
-rw-r--r--drivers/s390/cio/css.c6
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/sd.c39
-rw-r--r--drivers/soc/renesas/Kconfig11
-rw-r--r--drivers/soundwire/Makefile4
-rw-r--r--drivers/soundwire/bus.c31
-rw-r--r--drivers/soundwire/bus_type.c11
-rw-r--r--drivers/soundwire/irq.c59
-rw-r--r--drivers/soundwire/irq.h43
-rw-r--r--drivers/spi/spi-npcm-fiu.c5
-rw-r--r--drivers/target/target_core_iblock.c19
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_pscsi.c26
-rw-r--r--drivers/target/target_core_pscsi.h2
-rw-r--r--drivers/tee/amdtee/core.c10
-rw-r--r--drivers/thunderbolt/icm.c40
-rw-r--r--drivers/thunderbolt/switch.c7
-rw-r--r--drivers/thunderbolt/tb.c10
-rw-r--r--drivers/thunderbolt/tmu.c2
-rw-r--r--drivers/thunderbolt/xdomain.c58
-rw-r--r--drivers/tty/serial/8250/8250_omap.c25
-rw-r--r--drivers/tty/serial/serial_core.c15
-rw-r--r--drivers/tty/tty_io.c10
-rw-r--r--drivers/ufs/core/ufshcd.c2
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c3
-rw-r--r--drivers/usb/cdns3/core.h3
-rw-r--r--drivers/usb/core/devio.c26
-rw-r--r--drivers/usb/core/hub.c25
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/dwc3/core.c39
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c26
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c20
-rw-r--r--drivers/usb/host/xhci-hub.c19
-rw-r--r--drivers/usb/host/xhci-mem.c4
-rw-r--r--drivers/usb/host/xhci-ring.c16
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c1
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h1
-rw-r--r--drivers/usb/musb/musb_debugfs.c2
-rw-r--r--drivers/usb/musb/musb_host.c9
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/typec/altmodes/displayport.c5
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c12
-rw-r--r--drivers/usb/typec/ucsi/psy.c9
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c2
-rw-r--r--drivers/vdpa/mlx5/net/debug.c5
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c70
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.h11
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c5
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c4
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c2
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.h2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c4
-rw-r--r--drivers/video/fbdev/sa1100fb.c2
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--drivers/virtio/virtio_balloon.c6
-rw-r--r--drivers/virtio/virtio_mmio.c19
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c2
-rw-r--r--fs/9p/vfs_inode.c6
-rw-r--r--fs/9p/vfs_inode_dotl.c16
-rw-r--r--fs/9p/xattr.c8
-rw-r--r--fs/9p/xattr.h2
-rw-r--r--fs/adfs/inode.c13
-rw-r--r--fs/affs/amigaffs.c4
-rw-r--r--fs/affs/inode.c17
-rw-r--r--fs/afs/dynroot.c2
-rw-r--r--fs/afs/inode.c8
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/write.c2
-rw-r--r--fs/afs/xattr.c2
-rw-r--r--fs/attr.c4
-rw-r--r--fs/autofs/autofs_i.h20
-rw-r--r--fs/autofs/init.c9
-rw-r--r--fs/autofs/inode.c437
-rw-r--r--fs/autofs/root.c6
-rw-r--r--fs/bad_inode.c2
-rw-r--r--fs/befs/linuxvfs.c10
-rw-r--r--fs/bfs/dir.c9
-rw-r--r--fs/bfs/inode.c12
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/btrfs/backref.c14
-rw-r--r--fs/btrfs/backref.h3
-rw-r--r--fs/btrfs/ctree.c21
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/delayed-inode.c20
-rw-r--r--fs/btrfs/dev-replace.c14
-rw-r--r--fs/btrfs/file.c18
-rw-r--r--fs/btrfs/inode.c52
-rw-r--r--fs/btrfs/ioctl.c18
-rw-r--r--fs/btrfs/reflink.c2
-rw-r--r--fs/btrfs/relocation.c7
-rw-r--r--fs/btrfs/transaction.c3
-rw-r--r--fs/btrfs/tree-log.c12
-rw-r--r--fs/btrfs/volumes.c109
-rw-r--r--fs/btrfs/volumes.h6
-rw-r--r--fs/btrfs/xattr.c2
-rw-r--r--fs/btrfs/xattr.h2
-rw-r--r--fs/ceph/addr.c10
-rw-r--r--fs/ceph/caps.c4
-rw-r--r--fs/ceph/crypto.c3
-rw-r--r--fs/ceph/file.c4
-rw-r--r--fs/ceph/inode.c68
-rw-r--r--fs/ceph/mds_client.c10
-rw-r--r--fs/ceph/snap.c4
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/ceph/xattr.c2
-rw-r--r--fs/char_dev.c2
-rw-r--r--fs/coda/coda_linux.c6
-rw-r--r--fs/coda/dir.c2
-rw-r--r--fs/coda/file.c2
-rw-r--r--fs/configfs/inode.c8
-rw-r--r--fs/cramfs/inode.c6
-rw-r--r--fs/crypto/bio.c39
-rw-r--r--fs/crypto/crypto.c163
-rw-r--r--fs/crypto/fname.c6
-rw-r--r--fs/crypto/fscrypt_private.h164
-rw-r--r--fs/crypto/hooks.c4
-rw-r--r--fs/crypto/inline_crypt.c32
-rw-r--r--fs/crypto/keyring.c82
-rw-r--r--fs/crypto/keysetup.c62
-rw-r--r--fs/crypto/keysetup_v1.c20
-rw-r--r--fs/crypto/policy.c83
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/devpts/inode.c6
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h2
-rw-r--r--fs/ecryptfs/inode.c2
-rw-r--r--fs/efivarfs/file.c2
-rw-r--r--fs/efivarfs/inode.c2
-rw-r--r--fs/efs/inode.c5
-rw-r--r--fs/erofs/data.c4
-rw-r--r--fs/erofs/inode.c3
-rw-r--r--fs/erofs/internal.h2
-rw-r--r--fs/erofs/super.c20
-rw-r--r--fs/erofs/xattr.c2
-rw-r--r--fs/erofs/xattr.h4
-rw-r--r--fs/exfat/exfat_fs.h1
-rw-r--r--fs/exfat/file.c7
-rw-r--r--fs/exfat/inode.c31
-rw-r--r--fs/exfat/misc.c8
-rw-r--r--fs/exfat/namei.c31
-rw-r--r--fs/exfat/super.c4
-rw-r--r--fs/ext2/dir.c6
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c13
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext2/xattr.c4
-rw-r--r--fs/ext2/xattr.h2
-rw-r--r--fs/ext4/crypto.c13
-rw-r--r--fs/ext4/ext4.h22
-rw-r--r--fs/ext4/extents.c11
-rw-r--r--fs/ext4/fsmap.c9
-rw-r--r--fs/ext4/ialloc.c4
-rw-r--r--fs/ext4/inline.c4
-rw-r--r--fs/ext4/inode.c19
-rw-r--r--fs/ext4/ioctl.c13
-rw-r--r--fs/ext4/namei.c10
-rw-r--r--fs/ext4/super.c54
-rw-r--r--fs/ext4/xattr.c10
-rw-r--r--fs/ext4/xattr.h2
-rw-r--r--fs/f2fs/dir.c6
-rw-r--r--fs/f2fs/f2fs.h11
-rw-r--r--fs/f2fs/file.c14
-rw-r--r--fs/f2fs/inline.c2
-rw-r--r--fs/f2fs/inode.c24
-rw-r--r--fs/f2fs/namei.c4
-rw-r--r--fs/f2fs/recovery.c8
-rw-r--r--fs/f2fs/super.c28
-rw-r--r--fs/f2fs/xattr.c4
-rw-r--r--fs/f2fs/xattr.h2
-rw-r--r--fs/fat/inode.c25
-rw-r--r--fs/fat/misc.c6
-rw-r--r--fs/file.c153
-rw-r--r--fs/file_table.c49
-rw-r--r--fs/freevxfs/vxfs_inode.c6
-rw-r--r--fs/fs-writeback.c41
-rw-r--r--fs/fs_context.c34
-rw-r--r--fs/fsopen.c1
-rw-r--r--fs/fuse/control.c2
-rw-r--r--fs/fuse/dir.c10
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/fuse/inode.c29
-rw-r--r--fs/fuse/readdir.c6
-rw-r--r--fs/fuse/xattr.c2
-rw-r--r--fs/gfs2/bmap.c10
-rw-r--r--fs/gfs2/dir.c10
-rw-r--r--fs/gfs2/glock.c11
-rw-r--r--fs/gfs2/glops.c11
-rw-r--r--fs/gfs2/inode.c7
-rw-r--r--fs/gfs2/quota.c2
-rw-r--r--fs/gfs2/super.c12
-rw-r--r--fs/gfs2/super.h4
-rw-r--r--fs/gfs2/xattr.c4
-rw-r--r--fs/hfs/attr.c2
-rw-r--r--fs/hfs/catalog.c8
-rw-r--r--fs/hfs/hfs_fs.h2
-rw-r--r--fs/hfs/inode.c16
-rw-r--r--fs/hfs/sysdep.c10
-rw-r--r--fs/hfsplus/catalog.c8
-rw-r--r--fs/hfsplus/inode.c22
-rw-r--r--fs/hfsplus/xattr.c2
-rw-r--r--fs/hfsplus/xattr.h2
-rw-r--r--fs/hostfs/hostfs_kern.c12
-rw-r--r--fs/hpfs/dir.c12
-rw-r--r--fs/hpfs/inode.c16
-rw-r--r--fs/hpfs/namei.c22
-rw-r--r--fs/hpfs/super.c10
-rw-r--r--fs/hugetlbfs/inode.c10
-rw-r--r--fs/init.c6
-rw-r--r--fs/inode.c43
-rw-r--r--fs/internal.h22
-rw-r--r--fs/iomap/buffered-io.c10
-rw-r--r--fs/isofs/inode.c4
-rw-r--r--fs/isofs/rock.c18
-rw-r--r--fs/jffs2/dir.c35
-rw-r--r--fs/jffs2/file.c4
-rw-r--r--fs/jffs2/fs.c20
-rw-r--r--fs/jffs2/os-linux.h4
-rw-r--r--fs/jffs2/xattr.c2
-rw-r--r--fs/jffs2/xattr.h2
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_imap.c20
-rw-r--r--fs/jfs/jfs_inode.c4
-rw-r--r--fs/jfs/jfs_logmgr.c33
-rw-r--r--fs/jfs/jfs_logmgr.h2
-rw-r--r--fs/jfs/jfs_mount.c3
-rw-r--r--fs/jfs/jfs_xattr.h2
-rw-r--r--fs/jfs/namei.c20
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/jfs/xattr.c2
-rw-r--r--fs/kernfs/inode.c8
-rw-r--r--fs/kernfs/kernfs-internal.h2
-rw-r--r--fs/libfs.c41
-rw-r--r--fs/lockd/svc.c7
-rw-r--r--fs/lockd/svclock.c43
-rw-r--r--fs/locks.c12
-rw-r--r--fs/minix/bitmap.c2
-rw-r--r--fs/minix/dir.c6
-rw-r--r--fs/minix/inode.c17
-rw-r--r--fs/minix/itree_common.c2
-rw-r--r--fs/namei.c40
-rw-r--r--fs/namespace.c40
-rw-r--r--fs/nfs/blocklayout/blocklayout.h2
-rw-r--r--fs/nfs/blocklayout/dev.c76
-rw-r--r--fs/nfs/callback.c46
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c17
-rw-r--r--fs/nfs/fscache.h4
-rw-r--r--fs/nfs/inode.c30
-rw-r--r--fs/nfs/nfs.h2
-rw-r--r--fs/nfs/nfs42proc.c3
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/pnfs.c33
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/nfsd/Makefile3
-rw-r--r--fs/nfsd/blocklayout.c3
-rw-r--r--fs/nfsd/blocklayoutxdr.c6
-rw-r--r--fs/nfsd/blocklayoutxdr.h4
-rw-r--r--fs/nfsd/export.c32
-rw-r--r--fs/nfsd/export.h4
-rw-r--r--fs/nfsd/filecache.c27
-rw-r--r--fs/nfsd/flexfilelayoutxdr.c6
-rw-r--r--fs/nfsd/flexfilelayoutxdr.h4
-rw-r--r--fs/nfsd/netlink.c32
-rw-r--r--fs/nfsd/netlink.h22
-rw-r--r--fs/nfsd/nfs3proc.c9
-rw-r--r--fs/nfsd/nfs4callback.c97
-rw-r--r--fs/nfsd/nfs4layouts.c6
-rw-r--r--fs/nfsd/nfs4proc.c40
-rw-r--r--fs/nfsd/nfs4state.c156
-rw-r--r--fs/nfsd/nfs4xdr.c2636
-rw-r--r--fs/nfsd/nfsctl.c205
-rw-r--r--fs/nfsd/nfsd.h17
-rw-r--r--fs/nfsd/nfsfh.c2
-rw-r--r--fs/nfsd/nfsfh.h3
-rw-r--r--fs/nfsd/nfssvc.c42
-rw-r--r--fs/nfsd/pnfs.h6
-rw-r--r--fs/nfsd/state.h27
-rw-r--r--fs/nfsd/stats.c4
-rw-r--r--fs/nfsd/stats.h18
-rw-r--r--fs/nfsd/trace.h87
-rw-r--r--fs/nfsd/vfs.c75
-rw-r--r--fs/nfsd/vfs.h4
-rw-r--r--fs/nfsd/xdr4.h154
-rw-r--r--fs/nfsd/xdr4cb.h18
-rw-r--r--fs/nilfs2/dir.c6
-rw-r--r--fs/nilfs2/inode.c20
-rw-r--r--fs/notify/dnotify/dnotify.c6
-rw-r--r--fs/notify/fanotify/fanotify_user.c25
-rw-r--r--fs/nsfs.c2
-rw-r--r--fs/ntfs/inode.c25
-rw-r--r--fs/ntfs/mft.c2
-rw-r--r--fs/ntfs3/attrib.c12
-rw-r--r--fs/ntfs3/attrlist.c15
-rw-r--r--fs/ntfs3/bitmap.c4
-rw-r--r--fs/ntfs3/dir.c6
-rw-r--r--fs/ntfs3/file.c10
-rw-r--r--fs/ntfs3/frecord.c19
-rw-r--r--fs/ntfs3/fslog.c6
-rw-r--r--fs/ntfs3/fsntfs.c19
-rw-r--r--fs/ntfs3/index.c3
-rw-r--r--fs/ntfs3/inode.c24
-rw-r--r--fs/ntfs3/namei.c6
-rw-r--r--fs/ntfs3/ntfs.h2
-rw-r--r--fs/ntfs3/ntfs_fs.h6
-rw-r--r--fs/ntfs3/record.c74
-rw-r--r--fs/ntfs3/super.c104
-rw-r--r--fs/ntfs3/xattr.c9
-rw-r--r--fs/ocfs2/acl.c4
-rw-r--r--fs/ocfs2/alloc.c6
-rw-r--r--fs/ocfs2/aops.c6
-rw-r--r--fs/ocfs2/cluster/heartbeat.c81
-rw-r--r--fs/ocfs2/dir.c9
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/dlmglue.c29
-rw-r--r--fs/ocfs2/file.c30
-rw-r--r--fs/ocfs2/inode.c28
-rw-r--r--fs/ocfs2/move_extents.c4
-rw-r--r--fs/ocfs2/namei.c16
-rw-r--r--fs/ocfs2/refcounttree.c12
-rw-r--r--fs/ocfs2/xattr.c8
-rw-r--r--fs/ocfs2/xattr.h2
-rw-r--r--fs/omfs/inode.c12
-rw-r--r--fs/open.c52
-rw-r--r--fs/openpromfs/inode.c4
-rw-r--r--fs/orangefs/orangefs-kernel.h2
-rw-r--r--fs/orangefs/orangefs-utils.c16
-rw-r--r--fs/orangefs/xattr.c2
-rw-r--r--fs/overlayfs/file.c9
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/overlayfs/params.c119
-rw-r--r--fs/overlayfs/super.c28
-rw-r--r--fs/overlayfs/util.c4
-rw-r--r--fs/pipe.c66
-rw-r--r--fs/proc/base.c4
-rw-r--r--fs/proc/fd.c11
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/nommu.c2
-rw-r--r--fs/proc/proc_sysctl.c2
-rw-r--r--fs/proc/self.c2
-rw-r--r--fs/proc/task_mmu.c4
-rw-r--r--fs/proc/task_nommu.c2
-rw-r--r--fs/proc/thread_self.c2
-rw-r--r--fs/pstore/inode.c5
-rw-r--r--fs/qnx4/inode.c6
-rw-r--r--fs/qnx6/inode.c6
-rw-r--r--fs/ramfs/inode.c7
-rw-r--r--fs/reiserfs/inode.c26
-rw-r--r--fs/reiserfs/journal.c56
-rw-r--r--fs/reiserfs/namei.c8
-rw-r--r--fs/reiserfs/procfs.c2
-rw-r--r--fs/reiserfs/reiserfs.h13
-rw-r--r--fs/reiserfs/stree.c5
-rw-r--r--fs/reiserfs/super.c2
-rw-r--r--fs/reiserfs/xattr.c4
-rw-r--r--fs/romfs/super.c5
-rw-r--r--fs/smb/client/cached_dir.c141
-rw-r--r--fs/smb/client/cached_dir.h2
-rw-r--r--fs/smb/client/cifsfs.h2
-rw-r--r--fs/smb/client/file.c18
-rw-r--r--fs/smb/client/fscache.h6
-rw-r--r--fs/smb/client/inode.c17
-rw-r--r--fs/smb/client/smb2ops.c6
-rw-r--r--fs/smb/client/xattr.c2
-rw-r--r--fs/smb/server/smb2pdu.c19
-rw-r--r--fs/smb/server/vfs_cache.c7
-rw-r--r--fs/squashfs/inode.c6
-rw-r--r--fs/squashfs/squashfs.h2
-rw-r--r--fs/squashfs/xattr.c2
-rw-r--r--fs/stack.c4
-rw-r--r--fs/stat.c4
-rw-r--r--fs/super.c66
-rw-r--r--fs/sysv/dir.c6
-rw-r--r--fs/sysv/ialloc.c2
-rw-r--r--fs/sysv/inode.c12
-rw-r--r--fs/sysv/itree.c2
-rw-r--r--fs/tracefs/inode.c2
-rw-r--r--fs/ubifs/crypto.c3
-rw-r--r--fs/ubifs/debug.c12
-rw-r--r--fs/ubifs/dir.c23
-rw-r--r--fs/ubifs/file.c16
-rw-r--r--fs/ubifs/journal.c12
-rw-r--r--fs/ubifs/super.c8
-rw-r--r--fs/ubifs/ubifs.h2
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--fs/udf/ialloc.c4
-rw-r--r--fs/udf/inode.c38
-rw-r--r--fs/udf/namei.c16
-rw-r--r--fs/ufs/dir.c6
-rw-r--r--fs/ufs/ialloc.c2
-rw-r--r--fs/ufs/inode.c42
-rw-r--r--fs/vboxsf/utils.c15
-rw-r--r--fs/xattr.c6
-rw-r--r--fs/xfs/libxfs/xfs_ag.c6
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c10
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c6
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c2
-rw-r--r--fs/xfs/scrub/xfile.c1
-rw-r--r--fs/xfs/xfs_bmap_util.c7
-rw-r--r--fs/xfs/xfs_buf.c22
-rw-r--r--fs/xfs/xfs_buf.h3
-rw-r--r--fs/xfs/xfs_extent_busy.c3
-rw-r--r--fs/xfs/xfs_inode.c4
-rw-r--r--fs/xfs/xfs_inode_item.c4
-rw-r--r--fs/xfs/xfs_iops.c13
-rw-r--r--fs/xfs/xfs_itable.c12
-rw-r--r--fs/xfs/xfs_notify_failure.c6
-rw-r--r--fs/xfs/xfs_rtalloc.c30
-rw-r--r--fs/xfs/xfs_super.c42
-rw-r--r--fs/xfs/xfs_xattr.c2
-rw-r--r--fs/xfs/xfs_xattr.h2
-rw-r--r--fs/zonefs/super.c10
-rw-r--r--include/acpi/processor.h5
-rw-r--r--include/drm/gpu_scheduler.h3
-rw-r--r--include/kvm/arm_arch_timer.h7
-rw-r--r--include/linux/blkdev.h11
-rw-r--r--include/linux/cgroup-defs.h2
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/dma-fence.h19
-rw-r--r--include/linux/exportfs.h14
-rw-r--r--include/linux/fdtable.h17
-rw-r--r--include/linux/fs.h133
-rw-r--r--include/linux/fs_context.h2
-rw-r--r--include/linux/fs_stack.h6
-rw-r--r--include/linux/fscrypt.h82
-rw-r--r--include/linux/fsnotify.h3
-rw-r--r--include/linux/hugetlb.h41
-rw-r--r--include/linux/ieee80211.h29
-rw-r--r--include/linux/iov_iter.h274
-rw-r--r--include/linux/iversion.h2
-rw-r--r--include/linux/kasan.h6
-rw-r--r--include/linux/llist.h46
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/lwq.h124
-rw-r--r--include/linux/mcb.h1
-rw-r--r--include/linux/mount.h4
-rw-r--r--include/linux/mtd/jedec.h3
-rw-r--r--include/linux/mtd/onfi.h1
-rw-r--r--include/linux/mtd/rawnand.h2
-rw-r--r--include/linux/namei.h26
-rw-r--r--include/linux/nfs4.h262
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/pipe_fs_i.h22
-rw-r--r--include/linux/pktcdvd.h4
-rw-r--r--include/linux/pseudo_fs.h2
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/linux/sunrpc/svc.h45
-rw-r--r--include/linux/sunrpc/svc_xprt.h2
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/linux/swap.h1
-rw-r--r--include/linux/uio.h34
-rw-r--r--include/linux/virtio_net.h19
-rw-r--r--include/linux/watch_queue.h2
-rw-r--r--include/net/bluetooth/hci_mon.h2
-rw-r--r--include/net/macsec.h1
-rw-r--r--include/net/netfilter/nf_flow_table.h1
-rw-r--r--include/net/netns/xfrm.h1
-rw-r--r--include/net/sock.h10
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/scsi/scsi_device.h20
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/trace/events/neigh.h4
-rw-r--r--include/trace/events/rpcrdma.h10
-rw-r--r--include/trace/events/sunrpc.h1
-rw-r--r--include/uapi/drm/nouveau_drm.h4
-rw-r--r--include/uapi/linux/fscrypt.h3
-rw-r--r--include/uapi/linux/gtp.h2
-rw-r--r--include/uapi/linux/if_packet.h6
-rw-r--r--include/uapi/linux/nfsd_netlink.h39
-rw-r--r--include/video/mmp_disp.h2
-rw-r--r--include/video/uvesafb.h2
-rw-r--r--init/do_mounts.c2
-rw-r--r--io_uring/fdinfo.c18
-rw-r--r--io_uring/io_uring.c6
-rw-r--r--io_uring/openclose.c9
-rw-r--r--io_uring/rw.c11
-rw-r--r--ipc/mqueue.c19
-rw-r--r--kernel/acct.c4
-rw-r--r--kernel/auditsc.c8
-rw-r--r--kernel/bpf/inode.c5
-rw-r--r--kernel/bpf/mprog.c10
-rw-r--r--kernel/bpf/syscall.c21
-rw-r--r--kernel/bpf/task_iter.c4
-rw-r--r--kernel/bpf/tcx.c8
-rw-r--r--kernel/bpf/verifier.c6
-rw-r--r--kernel/cgroup/cgroup-v1.c5
-rw-r--r--kernel/dma/swiotlb.c5
-rw-r--r--kernel/events/core.c40
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/kcmp.c4
-rw-r--r--kernel/power/hibernate.c14
-rw-r--r--kernel/power/power.h2
-rw-r--r--kernel/power/swap.c37
-rw-r--r--kernel/sched/fair.c74
-rw-r--r--kernel/trace/fprobe.c6
-rw-r--r--kernel/trace/trace_kprobe.c83
-rw-r--r--kernel/trace/trace_output.c2
-rw-r--r--kernel/trace/trace_probe.h1
-rw-r--r--kernel/workqueue.c24
-rw-r--r--lib/Kconfig5
-rw-r--r--lib/Makefile2
-rw-r--r--lib/iov_iter.c437
-rw-r--r--lib/llist.c28
-rw-r--r--lib/lwq.c158
-rw-r--r--lib/maple_tree.c2
-rw-r--r--lib/test_maple_tree.c35
-rw-r--r--mm/damon/sysfs.c7
-rw-r--r--mm/hugetlb.c82
-rw-r--r--mm/kasan/report.c2
-rw-r--r--mm/memory.c13
-rw-r--r--mm/mempolicy.c4
-rw-r--r--mm/migrate.c14
-rw-r--r--mm/mmap.c46
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/readahead.c3
-rw-r--r--mm/shmem.c22
-rw-r--r--mm/slab_common.c7
-rw-r--r--mm/swapfile.c23
-rw-r--r--mm/zswap.c4
-rw-r--r--net/bluetooth/hci_conn.c9
-rw-r--r--net/bluetooth/hci_event.c48
-rw-r--r--net/bluetooth/hci_sock.c3
-rw-r--r--net/bluetooth/hci_sync.c26
-rw-r--r--net/can/isotp.c19
-rw-r--r--net/ceph/messenger.c4
-rw-r--r--net/core/datagram.c75
-rw-r--r--net/core/dev.c73
-rw-r--r--net/core/dev.h3
-rw-r--r--net/core/neighbour.c67
-rw-r--r--net/core/pktgen.c14
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/core/skbuff.c40
-rw-r--r--net/core/stream.c12
-rw-r--r--net/devlink/health.c30
-rw-r--r--net/handshake/netlink.c30
-rw-r--r--net/ipv4/af_inet.c10
-rw-r--r--net/ipv4/esp4.c6
-rw-r--r--net/ipv4/fib_semantics.c14
-rw-r--r--net/ipv4/inet_connection_sock.c1
-rw-r--r--net/ipv4/inet_hashtables.c24
-rw-r--r--net/ipv4/tcp.c24
-rw-r--r--net/ipv4/tcp_bpf.c12
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv4/tcp_output.c26
-rw-r--r--net/ipv4/tcp_recovery.c2
-rw-r--r--net/ipv6/esp6.c6
-rw-r--r--net/ipv6/xfrm6_policy.c4
-rw-r--r--net/mac80211/key.c3
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mctp/route.c22
-rw-r--r--net/mptcp/protocol.c43
-rw-r--r--net/netfilter/nf_flow_table_core.c14
-rw-r--r--net/netfilter/nf_tables_api.c70
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/nft_inner.c1
-rw-r--r--net/netfilter/nft_payload.c2
-rw-r--r--net/netfilter/nft_set_pipapo.h2
-rw-r--r--net/netfilter/nft_set_rbtree.c2
-rw-r--r--net/nfc/llcp_core.c30
-rw-r--r--net/nfc/nci/core.c5
-rw-r--r--net/nfc/nci/spi.c2
-rw-r--r--net/packet/af_packet.c7
-rw-r--r--net/rfkill/core.c5
-rw-r--r--net/rfkill/rfkill-gpio.c4
-rw-r--r--net/sched/act_ct.c9
-rw-r--r--net/sched/cls_u32.c2
-rw-r--r--net/sched/sch_hfsc.c18
-rw-r--r--net/smc/Kconfig1
-rw-r--r--net/smc/af_smc.c5
-rw-r--r--net/smc/smc_ib.c7
-rw-r--r--net/smc/smc_ib.h2
-rw-r--r--net/smc/smc_stats.h14
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/backchannel_rqst.c13
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/sunrpc/svc.c155
-rw-r--r--net/sunrpc/svc_xprt.c236
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c6
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c3
-rw-r--r--net/tls/tls_main.c10
-rw-r--r--net/tls/tls_sw.c19
-rw-r--r--net/vmw_vsock/virtio_transport.c18
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/mlme.c3
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/xdp/xsk_queue.c10
-rw-r--r--net/xfrm/xfrm_interface_core.c22
-rw-r--r--net/xfrm/xfrm_policy.c27
-rw-r--r--rust/Makefile16
-rw-r--r--rust/kernel/error.rs4
-rw-r--r--scripts/const_structs.checkpatch1
-rw-r--r--security/apparmor/apparmorfs.c7
-rw-r--r--security/apparmor/policy_unpack.c4
-rw-r--r--security/inode.c2
-rw-r--r--security/selinux/selinuxfs.c2
-rw-r--r--sound/core/pcm_native.c4
-rw-r--r--sound/pci/hda/patch_realtek.c27
-rw-r--r--sound/soc/codecs/cs35l56.c8
-rw-r--r--sound/soc/codecs/cs42l42-sdw.c1
-rw-r--r--sound/soc/codecs/cs42l43-jack.c2
-rw-r--r--sound/soc/codecs/da7219-aad.c11
-rw-r--r--sound/soc/codecs/lpass-wsa-macro.c4
-rw-r--r--sound/soc/codecs/rt5645.c2
-rw-r--r--sound/soc/codecs/tas2780.c2
-rw-r--r--sound/soc/codecs/wcd938x-sdw.c27
-rw-r--r--sound/soc/codecs/wcd938x.c76
-rw-r--r--sound/soc/dwc/dwc-i2s.c2
-rw-r--r--sound/soc/pxa/pxa-ssp.c2
-rw-r--r--sound/soc/soc-component.c1
-rw-r--r--sound/soc/soc-dapm.c12
-rw-r--r--sound/soc/ti/ams-delta.c4
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd_32.h2
-rw-r--r--tools/build/feature/test-llvm.cpp14
-rw-r--r--tools/include/linux/rwsem.h40
-rw-r--r--tools/include/nolibc/arch-i386.h4
-rw-r--r--tools/include/nolibc/crt.h1
-rw-r--r--tools/net/ynl/Makefile.deps1
-rw-r--r--tools/net/ynl/generated/Makefile2
-rw-r--r--tools/net/ynl/generated/devlink-user.c54
-rw-r--r--tools/net/ynl/generated/nfsd-user.c95
-rw-r--r--tools/net/ynl/generated/nfsd-user.h33
-rw-r--r--tools/perf/dlfilters/dlfilter-test-api-v0.c12
-rw-r--r--tools/perf/dlfilters/dlfilter-test-api-v2.c12
-rw-r--r--tools/perf/util/dlfilter.c32
-rw-r--r--tools/perf/util/pmu.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_helpers.h16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_links.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_opts.c271
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer.c6
-rw-r--r--tools/testing/selftests/bpf/progs/timer_failure.c47
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc13
-rw-r--r--tools/testing/selftests/kvm/include/ucall_common.h2
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h23
-rw-r--r--tools/testing/selftests/kvm/lib/guest_sprintf.c7
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/apic.c2
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c2
-rwxr-xr-xtools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh1
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c110
-rw-r--r--tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c4
-rw-r--r--tools/testing/selftests/mm/mremap_dontunmap.c1
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh7
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh21
-rwxr-xr-xtools/testing/selftests/net/netns-name.sh87
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh21
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py48
-rwxr-xr-xtools/testing/selftests/netfilter/nft_audit.sh52
-rw-r--r--tools/testing/selftests/riscv/mm/Makefile6
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_bottomup.c (renamed from tools/testing/selftests/riscv/mm/testcases/mmap_bottomup.c)2
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_default.c (renamed from tools/testing/selftests/riscv/mm/testcases/mmap_default.c)2
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_test.h (renamed from tools/testing/selftests/riscv/mm/testcases/mmap_test.h)0
-rwxr-xr-xtools/testing/selftests/riscv/mm/run_mmap.sh (renamed from tools/testing/selftests/riscv/mm/testcases/run_mmap.sh)0
-rw-r--r--tools/testing/selftests/user_events/abi_test.c16
-rw-r--r--tools/virtio/linux/dma-mapping.h12
1116 files changed, 13562 insertions, 7399 deletions
diff --git a/.mailmap b/.mailmap
index c80903efec75..2643b7203a74 100644
--- a/.mailmap
+++ b/.mailmap
@@ -87,6 +87,7 @@ Baolin Wang <[email protected]> <[email protected]>
+Bartosz Golaszewski <[email protected]> <[email protected]>
Ben Gardner <[email protected]>
@@ -450,9 +451,10 @@ Oleksandr Natalenko <[email protected]> <[email protected]>
+Oleksij Rempel <[email protected]>
Paolo 'Blaisorblade' Giarrusso <[email protected]>
diff --git a/Documentation/ABI/testing/sysfs-class-firmware b/Documentation/ABI/testing/sysfs-class-firmware
index 978d3d500400..fba87a55f3ca 100644
--- a/Documentation/ABI/testing/sysfs-class-firmware
+++ b/Documentation/ABI/testing/sysfs-class-firmware
@@ -1,7 +1,7 @@
What: /sys/class/firmware/.../data
Date: July 2022
KernelVersion: 5.19
-Contact: Russ Weight <[email protected]>
+Contact: Russ Weight <[email protected]>
Description: The data sysfs file is used for firmware-fallback and for
firmware uploads. Cat a firmware image to this sysfs file
after you echo 1 to the loading sysfs file. When the firmware
@@ -13,7 +13,7 @@ Description: The data sysfs file is used for firmware-fallback and for
What: /sys/class/firmware/.../cancel
Date: July 2022
KernelVersion: 5.19
-Contact: Russ Weight <[email protected]>
+Contact: Russ Weight <[email protected]>
Description: Write-only. For firmware uploads, write a "1" to this file to
request that the transfer of firmware data to the lower-level
device be canceled. This request will be rejected (EBUSY) if
@@ -23,7 +23,7 @@ Description: Write-only. For firmware uploads, write a "1" to this file to
What: /sys/class/firmware/.../error
Date: July 2022
KernelVersion: 5.19
-Contact: Russ Weight <[email protected]>
+Contact: Russ Weight <[email protected]>
Description: Read-only. Returns a string describing a failed firmware
upload. This string will be in the form of <STATUS>:<ERROR>,
where <STATUS> will be one of the status strings described
@@ -37,7 +37,7 @@ Description: Read-only. Returns a string describing a failed firmware
What: /sys/class/firmware/.../loading
Date: July 2022
KernelVersion: 5.19
-Contact: Russ Weight <[email protected]>
+Contact: Russ Weight <[email protected]>
Description: The loading sysfs file is used for both firmware-fallback and
for firmware uploads. Echo 1 onto the loading file to indicate
you are writing a firmware file to the data sysfs node. Echo
@@ -49,7 +49,7 @@ Description: The loading sysfs file is used for both firmware-fallback and
What: /sys/class/firmware/.../remaining_size
Date: July 2022
KernelVersion: 5.19
-Contact: Russ Weight <[email protected]>
+Contact: Russ Weight <[email protected]>
Description: Read-only. For firmware upload, this file contains the size
of the firmware data that remains to be transferred to the
lower-level device driver. The size value is initialized to
@@ -62,7 +62,7 @@ Description: Read-only. For firmware upload, this file contains the size
What: /sys/class/firmware/.../status
Date: July 2022
KernelVersion: 5.19
-Contact: Russ Weight <[email protected]>
+Contact: Russ Weight <[email protected]>
Description: Read-only. Returns a string describing the current status of
a firmware upload. The string will be one of the following:
idle, "receiving", "preparing", "transferring", "programming".
@@ -70,7 +70,7 @@ Description: Read-only. Returns a string describing the current status of
What: /sys/class/firmware/.../timeout
Date: July 2022
KernelVersion: 5.19
-Contact: Russ Weight <[email protected]>
+Contact: Russ Weight <[email protected]>
Description: This file supports the timeout mechanism for firmware
fallback. This file has no affect on firmware uploads. For
more information on timeouts please see the documentation
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
index 0a41afe0ab4c..9051695d2211 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
+++ b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
@@ -1,7 +1,7 @@
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/sr_root_entry_hash
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <[email protected]>
+Contact: Peter Colberg <[email protected]>
Description: Read only. Returns the root entry hash for the static
region if one is programmed, else it returns the
string: "hash not programmed". This file is only
@@ -11,7 +11,7 @@ Description: Read only. Returns the root entry hash for the static
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/pr_root_entry_hash
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <[email protected]>
+Contact: Peter Colberg <[email protected]>
Description: Read only. Returns the root entry hash for the partial
reconfiguration region if one is programmed, else it
returns the string: "hash not programmed". This file
@@ -21,7 +21,7 @@ Description: Read only. Returns the root entry hash for the partial
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/bmc_root_entry_hash
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <[email protected]>
+Contact: Peter Colberg <[email protected]>
Description: Read only. Returns the root entry hash for the BMC image
if one is programmed, else it returns the string:
"hash not programmed". This file is only visible if the
@@ -31,7 +31,7 @@ Description: Read only. Returns the root entry hash for the BMC image
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/sr_canceled_csks
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <[email protected]>
+Contact: Peter Colberg <[email protected]>
Description: Read only. Returns a list of indices for canceled code
signing keys for the static region. The standard bitmap
list format is used (e.g. "1,2-6,9").
@@ -39,7 +39,7 @@ Description: Read only. Returns a list of indices for canceled code
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/pr_canceled_csks
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <[email protected]>
+Contact: Peter Colberg <[email protected]>
Description: Read only. Returns a list of indices for canceled code
signing keys for the partial reconfiguration region. The
standard bitmap list format is used (e.g. "1,2-6,9").
@@ -47,7 +47,7 @@ Description: Read only. Returns a list of indices for canceled code
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/bmc_canceled_csks
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <[email protected]>
+Contact: Peter Colberg <[email protected]>
Description: Read only. Returns a list of indices for canceled code
signing keys for the BMC. The standard bitmap list format
is used (e.g. "1,2-6,9").
@@ -55,7 +55,7 @@ Description: Read only. Returns a list of indices for canceled code
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/flash_count
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <[email protected]>
+Contact: Peter Colberg <[email protected]>
Description: Read only. Returns number of times the secure update
staging area has been flashed.
Format: "%u".
diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst
index 5d7b01aed1fe..0046af06531a 100644
--- a/Documentation/core-api/workqueue.rst
+++ b/Documentation/core-api/workqueue.rst
@@ -244,7 +244,7 @@ unbound worker-pools and only one work item could be active at any given
time thus achieving the same ordering property as ST wq.
In the current implementation the above configuration only guarantees
-ST behavior within a given NUMA node. Instead ``alloc_ordered_queue()`` should
+ST behavior within a given NUMA node. Instead ``alloc_ordered_workqueue()`` should
be used to achieve system-wide ST behavior.
@@ -390,7 +390,7 @@ The default affinity scope can be changed with the module parameter
scope can be changed using ``apply_workqueue_attrs()``.
If ``WQ_SYSFS`` is set, the workqueue will have the following affinity scope
-related interface files under its ``/sys/devices/virtual/WQ_NAME/``
+related interface files under its ``/sys/devices/virtual/workqueue/WQ_NAME/``
directory.
``affinity_scope``
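As a rough illustration of the ``alloc_ordered_workqueue()`` usage recommended in the hunk above, a caller wanting system-wide single-threaded ordering might do something like the following sketch (the queue name, work function, and module-init placement are only examples and are not taken from this patch)::

    #include <linux/workqueue.h>
    #include <linux/init.h>
    #include <linux/errno.h>

    static struct workqueue_struct *ordered_wq;

    static void my_work_fn(struct work_struct *work)
    {
            /* items queued on ordered_wq run one at a time, in queueing order */
    }

    static DECLARE_WORK(my_work, my_work_fn);

    static int __init my_init(void)
    {
            /* one ordered workqueue gives system-wide ST behavior */
            ordered_wq = alloc_ordered_workqueue("my_ordered_wq", WQ_MEM_RECLAIM);
            if (!ordered_wq)
                    return -ENOMEM;

            queue_work(ordered_wq, &my_work);
            return 0;
    }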
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
index 23ada8f87526..769ce23aaac2 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
+++ b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
@@ -13,6 +13,8 @@ description: |
maintainers:
- Michael Tretter <[email protected]>
+ - Harini Katakam <[email protected]>
+ - Radhey Shyam Pandey <[email protected]>
allOf:
- $ref: ../dma-controller.yaml#
@@ -65,6 +67,7 @@ required:
- interrupts
- clocks
- clock-names
+ - xlnx,bus-width
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
index 7cc4ddc4e9b7..2aa1f4b063eb 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
@@ -61,7 +61,7 @@ patternProperties:
required:
- reg
- additionalProperties: true
+ additionalProperties: false
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
diff --git a/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml b/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml
index 2594fa192f93..2a04906531fb 100644
--- a/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml
+++ b/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml
@@ -32,7 +32,8 @@ properties:
spi-cpol: true
- reset-gpios: true
+ reset-gpios:
+ maxItems: 1
interrupts:
minItems: 1
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
index 4e508bfcc9d8..5121685337b5 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
@@ -78,7 +78,8 @@ properties:
- const: -1000
- const: 22000
- reset-gpios: true
+ reset-gpios:
+ maxItems: 1
adi,dc-dc-ilim-microamp:
enum: [150000, 200000, 250000, 300000, 350000, 400000]
diff --git a/Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml b/Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml
index b9b5beac33b2..5b6cde86b5a5 100644
--- a/Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml
+++ b/Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml
@@ -23,7 +23,8 @@ properties:
maxItems: 1
description: Connected to ADC_RDY pin.
- reset-gpios: true
+ reset-gpios:
+ maxItems: 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml b/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml
index 2958c4ca75b4..167d10bd60af 100644
--- a/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml
+++ b/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml
@@ -23,7 +23,8 @@ properties:
maxItems: 1
description: Connected to ADC_RDY pin.
- reset-gpios: true
+ reset-gpios:
+ maxItems: 1
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml
index 8376d64a641a..bed42d5d0d94 100644
--- a/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml
+++ b/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml
@@ -45,5 +45,6 @@ examples:
light-sensor@38 {
compatible = "rohm,bu27010";
reg = <0x38>;
+ vdd-supply = <&vdd>;
};
};
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
index 80141eb7fc6b..10f34aa8ba8a 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
@@ -69,7 +69,7 @@ properties:
maxItems: 4
clocks:
- minItems: 3
+ minItems: 2
items:
- description: Main peripheral bus clock, PCLK/HCLK - AHB Bus clock
- description: SDC MMC clock, MCLK
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
index 5073007267ad..634cec5d57ea 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
@@ -70,7 +70,7 @@ examples:
phy@84000 {
compatible = "qcom,ipq6018-qmp-pcie-phy";
- reg = <0x0 0x00084000 0x0 0x1000>;
+ reg = <0x00084000 0x1000>;
clocks = <&gcc GCC_PCIE0_AUX_CLK>,
<&gcc GCC_PCIE0_AHB_CLK>,
diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
index 7a6de938b11d..4118aa54bbd5 100644
--- a/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
+++ b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
@@ -82,7 +82,7 @@ properties:
description:
Current at which the headset micbias sense clamp will engage, 0 to
disable.
- enum: [ 0, 14, 23, 41, 50, 60, 68, 86, 95 ]
+ enum: [ 0, 14, 24, 43, 52, 61, 71, 90, 99 ]
default: 0
cirrus,bias-ramp-ms:
diff --git a/Documentation/filesystems/files.rst b/Documentation/filesystems/files.rst
index bcf84459917f..9e38e4c221ca 100644
--- a/Documentation/filesystems/files.rst
+++ b/Documentation/filesystems/files.rst
@@ -62,7 +62,7 @@ the fdtable structure -
be held.
4. To look up the file structure given an fd, a reader
- must use either lookup_fd_rcu() or files_lookup_fd_rcu() APIs. These
+ must use either lookup_fdget_rcu() or files_lookup_fdget_rcu() APIs. These
take care of barrier requirements due to lock-free lookup.
An example::
@@ -70,43 +70,22 @@ the fdtable structure -
struct file *file;
rcu_read_lock();
- file = lookup_fd_rcu(fd);
- if (file) {
- ...
- }
- ....
+ file = lookup_fdget_rcu(fd);
rcu_read_unlock();
-
-5. Handling of the file structures is special. Since the look-up
- of the fd (fget()/fget_light()) are lock-free, it is possible
- that look-up may race with the last put() operation on the
- file structure. This is avoided using atomic_long_inc_not_zero()
- on ->f_count::
-
- rcu_read_lock();
- file = files_lookup_fd_rcu(files, fd);
if (file) {
- if (atomic_long_inc_not_zero(&file->f_count))
- *fput_needed = 1;
- else
- /* Didn't get the reference, someone's freed */
- file = NULL;
+ ...
+ fput(file);
}
- rcu_read_unlock();
....
- return file;
-
- atomic_long_inc_not_zero() detects if refcounts is already zero or
- goes to zero during increment. If it does, we fail
- fget()/fget_light().
-6. Since both fdtable and file structures can be looked up
+5. Since both fdtable and file structures can be looked up
lock-free, they must be installed using rcu_assign_pointer()
API. If they are looked up lock-free, rcu_dereference()
must be used. However it is advisable to use files_fdtable()
- and lookup_fd_rcu()/files_lookup_fd_rcu() which take care of these issues.
+ and lookup_fdget_rcu()/files_lookup_fdget_rcu() which take care of these
+ issues.
-7. While updating, the fdtable pointer must be looked up while
+6. While updating, the fdtable pointer must be looked up while
holding files->file_lock. If ->file_lock is dropped, then
another thread expand the files thereby creating a new
fdtable and making the earlier fdtable pointer stale.
@@ -126,3 +105,19 @@ the fdtable structure -
Since locate_fd() can drop ->file_lock (and reacquire ->file_lock),
the fdtable pointer (fdt) must be loaded after locate_fd().
+On newer kernels RCU-based file lookup has been switched to rely on
+SLAB_TYPESAFE_BY_RCU instead of call_rcu(). It isn't sufficient anymore
+to just acquire a reference to the file in question under rcu using
+atomic_long_inc_not_zero() since the file might have already been
+recycled and someone else might have bumped the reference. In other
+words, callers might see reference count bumps from newer users. For
+this reason it is necessary to verify that the pointer is the same
+before and after the reference count increment. This pattern can be seen
+in get_file_rcu() and __files_get_rcu().
+
+In addition, it isn't possible to access or check fields in struct file
+without first acquiring a reference on it under rcu lookup. Not doing
+that was always very dodgy and it was only usable for non-pointer data
+in struct file. With SLAB_TYPESAFE_BY_RCU, callers must either first
+acquire a reference or hold the files_lock of the fdtable.
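
A minimal sketch of that compare-after-increment pattern, assuming direct
access to the fdtable fields and omitting the additional re-checks the
in-tree get_file_rcu()/__files_get_rcu() helpers perform (the function
name below is invented for illustration)::

    #include <linux/fdtable.h>
    #include <linux/file.h>
    #include <linux/rcupdate.h>

    /* Illustrative only; not the in-tree implementation. */
    static struct file *sketch_lookup_fdget_rcu(struct files_struct *files,
                                                unsigned int fd)
    {
            struct file *file;

            rcu_read_lock();
            for (;;) {
                    struct fdtable *fdt = rcu_dereference(files->fdt);

                    file = NULL;
                    if (fd >= fdt->max_fds)
                            break;

                    file = rcu_dereference(fdt->fd[fd]);
                    if (!file)
                            break;

                    /* The object may be recycled at any time; try to pin it. */
                    if (!atomic_long_inc_not_zero(&file->f_count))
                            continue;       /* freed under us, retry */

                    /*
                     * Re-check that the slot still points at the same file.
                     * If not, the reference just taken belongs to a newer
                     * user of the recycled object: drop it and retry.
                     */
                    if (file == rcu_dereference(fdt->fd[fd]))
                            break;

                    fput(file);
            }
            rcu_read_unlock();

            return file;
    }
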
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index a624e92f2687..1b84f818e574 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -261,9 +261,9 @@ DIRECT_KEY policies
The Adiantum encryption mode (see `Encryption modes and usage`_) is
suitable for both contents and filenames encryption, and it accepts
-long IVs --- long enough to hold both an 8-byte logical block number
-and a 16-byte per-file nonce. Also, the overhead of each Adiantum key
-is greater than that of an AES-256-XTS key.
+long IVs --- long enough to hold both an 8-byte data unit index and a
+16-byte per-file nonce. Also, the overhead of each Adiantum key is
+greater than that of an AES-256-XTS key.
Therefore, to improve performance and save memory, for Adiantum a
"direct key" configuration is supported. When the user has enabled
@@ -300,8 +300,8 @@ IV_INO_LBLK_32 policies
IV_INO_LBLK_32 policies work like IV_INO_LBLK_64, except that for
IV_INO_LBLK_32, the inode number is hashed with SipHash-2-4 (where the
-SipHash key is derived from the master key) and added to the file
-logical block number mod 2^32 to produce a 32-bit IV.
+SipHash key is derived from the master key) and added to the file data
+unit index mod 2^32 to produce a 32-bit IV.
This format is optimized for use with inline encryption hardware
compliant with the eMMC v5.2 standard, which supports only 32 IV bits
@@ -451,31 +451,62 @@ acceleration is recommended:
Contents encryption
-------------------
-For file contents, each filesystem block is encrypted independently.
-Starting from Linux kernel 5.5, encryption of filesystems with block
-size less than system's page size is supported.
-
-Each block's IV is set to the logical block number within the file as
-a little endian number, except that:
-
-- With CBC mode encryption, ESSIV is also used. Specifically, each IV
- is encrypted with AES-256 where the AES-256 key is the SHA-256 hash
- of the file's data encryption key.
-
-- With `DIRECT_KEY policies`_, the file's nonce is appended to the IV.
- Currently this is only allowed with the Adiantum encryption mode.
-
-- With `IV_INO_LBLK_64 policies`_, the logical block number is limited
- to 32 bits and is placed in bits 0-31 of the IV. The inode number
- (which is also limited to 32 bits) is placed in bits 32-63.
-
-- With `IV_INO_LBLK_32 policies`_, the logical block number is limited
- to 32 bits and is placed in bits 0-31 of the IV. The inode number
- is then hashed and added mod 2^32.
-
-Note that because file logical block numbers are included in the IVs,
-filesystems must enforce that blocks are never shifted around within
-encrypted files, e.g. via "collapse range" or "insert range".
+For contents encryption, each file's contents is divided into "data
+units". Each data unit is encrypted independently. The IV for each
+data unit incorporates the zero-based index of the data unit within
+the file. This ensures that each data unit within a file is encrypted
+differently, which is essential to prevent leaking information.
+
+Note: because the encryption depends on the offset into the file,
+operations like "collapse range" and "insert range" that rearrange the
+extent mapping of files are not supported on encrypted files.
+
+There are two cases for the sizes of the data units:
+
+* Fixed-size data units. This is how all filesystems other than UBIFS
+ work. A file's data units are all the same size; the last data unit
+ is zero-padded if needed. By default, the data unit size is equal
+ to the filesystem block size. On some filesystems, users can select
+ a sub-block data unit size via the ``log2_data_unit_size`` field of
+ the encryption policy; see `FS_IOC_SET_ENCRYPTION_POLICY`_.
+
+* Variable-size data units. This is what UBIFS does. Each "UBIFS
+ data node" is treated as a crypto data unit. Each contains variable
+ length, possibly compressed data, zero-padded to the next 16-byte
+ boundary. Users cannot select a sub-block data unit size on UBIFS.
+
+In the case of compression + encryption, the compressed data is
+encrypted. UBIFS compression works as described above. f2fs
+compression works a bit differently; it compresses a number of
+filesystem blocks into a smaller number of filesystem blocks.
+Therefore an f2fs-compressed file still uses fixed-size data units, and
+it is encrypted in a similar way to a file containing holes.
+
+As mentioned in `Key hierarchy`_, the default encryption setting uses
+per-file keys. In this case, the IV for each data unit is simply the
+index of the data unit in the file. However, users can select an
+encryption setting that does not use per-file keys. For these, some
+kind of file identifier is incorporated into the IVs as follows:
+
+- With `DIRECT_KEY policies`_, the data unit index is placed in bits
+ 0-63 of the IV, and the file's nonce is placed in bits 64-191.
+
+- With `IV_INO_LBLK_64 policies`_, the data unit index is placed in
+ bits 0-31 of the IV, and the file's inode number is placed in bits
+ 32-63. This setting is only allowed when data unit indices and
+ inode numbers fit in 32 bits.
+
+- With `IV_INO_LBLK_32 policies`_, the file's inode number is hashed
+ and added to the data unit index. The resulting value is truncated
+ to 32 bits and placed in bits 0-31 of the IV. This setting is only
+ allowed when data unit indices and inode numbers fit in 32 bits.
+
+The byte order of the IV is always little endian.
+
+If the user selects FSCRYPT_MODE_AES_128_CBC for the contents mode, an
+ESSIV layer is automatically included. In this case, before the IV is
+passed to AES-128-CBC, it is encrypted with AES-256 where the AES-256
+key is the SHA-256 hash of the file's contents encryption key.
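
To make the IV layouts above concrete, here is a hedged sketch in plain C
that only restates the bit placements described in this section (every name
is invented for the example; this is not fscrypt's internal code)::

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define EXAMPLE_IV_SIZE 32      /* upper bound; each mode uses what it needs */

    static void put_le64(uint8_t *p, uint64_t v)
    {
            for (int i = 0; i < 8; i++)
                    p[i] = (uint8_t)(v >> (8 * i));         /* little endian */
    }

    static void example_fill_iv(uint8_t iv[EXAMPLE_IV_SIZE], uint64_t du_index,
                                const uint8_t nonce[16], uint32_t inode_nr,
                                uint32_t inode_hash, bool direct_key,
                                bool iv_ino_lblk_64, bool iv_ino_lblk_32)
    {
            memset(iv, 0, EXAMPLE_IV_SIZE);

            if (iv_ino_lblk_32) {
                    /* hashed inode number added to the index, truncated to 32 bits */
                    put_le64(iv, (uint32_t)(inode_hash + du_index));
            } else if (iv_ino_lblk_64) {
                    /* data unit index in bits 0-31, inode number in bits 32-63 */
                    put_le64(iv, du_index | ((uint64_t)inode_nr << 32));
            } else {
                    /* default per-file-key case and DIRECT_KEY: index in bits 0-63 */
                    put_le64(iv, du_index);
                    if (direct_key)
                            memcpy(iv + 8, nonce, 16);      /* nonce in bits 64-191 */
            }
    }
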
Filenames encryption
--------------------
@@ -544,7 +575,8 @@ follows::
__u8 contents_encryption_mode;
__u8 filenames_encryption_mode;
__u8 flags;
- __u8 __reserved[4];
+ __u8 log2_data_unit_size;
+ __u8 __reserved[3];
__u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
};
@@ -586,6 +618,29 @@ This structure must be initialized as follows:
The DIRECT_KEY, IV_INO_LBLK_64, and IV_INO_LBLK_32 flags are
mutually exclusive.
+- ``log2_data_unit_size`` is the log2 of the data unit size in bytes,
+ or 0 to select the default data unit size. The data unit size is
+ the granularity of file contents encryption. For example, setting
+ ``log2_data_unit_size`` to 12 causes file contents to be passed to the
+ underlying encryption algorithm (such as AES-256-XTS) in 4096-byte
+ data units, each with its own IV (a usage sketch follows below).
+
+ Not all filesystems support setting ``log2_data_unit_size``. ext4
+ and f2fs support it since Linux v6.7. On filesystems that support
+ it, the supported nonzero values are 9 through the log2 of the
+ filesystem block size, inclusively. The default value of 0 selects
+ the filesystem block size.
+
+ The main use case for ``log2_data_unit_size`` is for selecting a
+ data unit size smaller than the filesystem block size for
+ compatibility with inline encryption hardware that only supports
+ smaller data unit sizes. ``/sys/block/$disk/queue/crypto/`` may be
+ useful for checking which data unit sizes are supported by a
+ particular system's inline encryption hardware.
+
+ Leave this field zeroed unless you are certain you need it. Using
+ an unnecessarily small data unit size reduces performance.
+
- For v2 encryption policies, ``__reserved`` must be zeroed.
- For v1 encryption policies, ``master_key_descriptor`` specifies how
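
As referenced above, a usage sketch for ``log2_data_unit_size`` follows.
The mode, flag, and 512-byte data unit size are arbitrary example choices,
and a UAPI ``<linux/fscrypt.h>`` recent enough to declare the field is
assumed::

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/fscrypt.h>

    static int set_policy_512(int dirfd,
                              const __u8 key_id[FSCRYPT_KEY_IDENTIFIER_SIZE])
    {
            struct fscrypt_policy_v2 policy = {
                    .version = FSCRYPT_POLICY_V2,
                    .contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS,
                    .filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS,
                    .flags = FSCRYPT_POLICY_FLAGS_PAD_32,
                    .log2_data_unit_size = 9,       /* 2^9 = 512-byte data units */
            };

            memcpy(policy.master_key_identifier, key_id,
                   FSCRYPT_KEY_IDENTIFIER_SIZE);

            /* dirfd refers to the (empty) directory being encrypted. */
            if (ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy) != 0) {
                    perror("FS_IOC_SET_ENCRYPTION_POLICY");
                    return -1;
            }
            return 0;
    }
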
@@ -1079,8 +1134,8 @@ The caller must zero all input fields, then fill in ``key_spec``:
On success, 0 is returned and the kernel fills in the output fields:
- ``status`` indicates whether the key is absent, present, or
- incompletely removed. Incompletely removed means that the master
- secret has been removed, but some files are still in use; i.e.,
+ incompletely removed. Incompletely removed means that removal has
+ been initiated, but some files are still in use; i.e.,
`FS_IOC_REMOVE_ENCRYPTION_KEY`_ returned 0 but set the informational
status flag FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY.
diff --git a/Documentation/filesystems/nfs/exporting.rst b/Documentation/filesystems/nfs/exporting.rst
index 4b30daee399a..198d805d611c 100644
--- a/Documentation/filesystems/nfs/exporting.rst
+++ b/Documentation/filesystems/nfs/exporting.rst
@@ -241,3 +241,10 @@ following flags are defined:
all of an inode's dirty data on last close. Exports that behave this
way should set EXPORT_OP_FLUSH_ON_CLOSE so that NFSD knows to skip
waiting for writeback when closing such files.
+
+ EXPORT_OP_ASYNC_LOCK - Indicates that the filesystem can handle async lock
+ requests from lockd. Only set EXPORT_OP_ASYNC_LOCK if the filesystem has
+ its own ->lock() functionality, as the core posix_lock_file() implementation
+ has no async lock request handling yet. For more information about how to
+ indicate an async lock request from a ->lock() file_operations struct, see
+ fs/locks.c and the comment for the function vfs_lock_file().
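
A very rough sketch of the convention referenced above, from the filesystem
side, under the assumption that asynchronous completion is available
whenever the caller supplies an lm_grant callback; the queueing helper is
purely hypothetical::

    #include <linux/fs.h>
    #include <linux/filelock.h>

    /* hypothetical: hand the request to the filesystem's own lock machinery */
    static void example_queue_lock_request(struct file *filp,
                                           struct file_lock *fl)
    {
    }

    static int example_lock(struct file *filp, int cmd, struct file_lock *fl)
    {
            if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
                    /* Caller supports async completion: queue and defer. */
                    example_queue_lock_request(filp, fl);
                    return FILE_LOCK_DEFERRED;
            }

            /* No async support from the caller: fall back to blocking. */
            return posix_lock_file(filp, fl, NULL);
    }

    /* Later, once the deferred request has been granted or denied: */
    static void example_lock_done(struct file_lock *fl, int result)
    {
            fl->fl_lmops->lm_grant(fl, result);
    }
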
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index cdefbe73d85c..5b93268e400f 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -339,6 +339,18 @@ The specified lower directories will be stacked beginning from the
rightmost one and going left. In the above example lower1 will be the
top, lower2 the middle and lower3 the bottom layer.
+Note: directory names containing colons can be provided as a lower layer
+by escaping the colons with a single backslash. For example:
+
+ mount -t overlay overlay -olowerdir=/a\:lower\:\:dir /merged
+
+Since kernel version v6.5, directory names containing colons can also be
+provided as a lower layer using the fsconfig syscall from the new mount API:
+
+ fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir", "/a:lower::dir", 0);
+
+In the latter case, colons in lower layer directory names will be escaped
+as octal characters (\072) when displayed in /proc/self/mountinfo.
Metadata only copy up
---------------------
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 4d05b9862451..d69f59700a23 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -1045,3 +1045,10 @@ filesystem type is now moved to a later point when the devices are closed:
As this is a VFS level change it has no practical consequences for filesystems
other than that all of them must use one of the provided kill_litter_super(),
kill_anon_super(), or kill_block_super() helpers.
+
+---
+
+**mandatory**
+
+Lock ordering has been changed so that s_umount ranks above open_mutex again.
+All places where s_umount was taken under open_mutex have been fixed up.
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index d1ebcd927149..065661acb878 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -323,7 +323,7 @@ operations:
- dev-name
- sb-index
reply: &sb-get-reply
- value: 11
+ value: 13
attributes: *sb-id-attrs
dump:
request:
@@ -350,7 +350,7 @@ operations:
- sb-index
- sb-pool-index
reply: &sb-pool-get-reply
- value: 15
+ value: 17
attributes: *sb-pool-id-attrs
dump:
request:
@@ -378,7 +378,7 @@ operations:
- sb-index
- sb-pool-index
reply: &sb-port-pool-get-reply
- value: 19
+ value: 21
attributes: *sb-port-pool-id-attrs
dump:
request:
@@ -407,7 +407,7 @@ operations:
- sb-pool-type
- sb-tc-index
reply: &sb-tc-pool-bind-get-reply
- value: 23
+ value: 25
attributes: *sb-tc-pool-bind-id-attrs
dump:
request:
@@ -538,7 +538,7 @@ operations:
- dev-name
- trap-name
reply: &trap-get-reply
- value: 61
+ value: 63
attributes: *trap-id-attrs
dump:
request:
@@ -564,7 +564,7 @@ operations:
- dev-name
- trap-group-name
reply: &trap-group-get-reply
- value: 65
+ value: 67
attributes: *trap-group-id-attrs
dump:
request:
@@ -590,7 +590,7 @@ operations:
- dev-name
- trap-policer-id
reply: &trap-policer-get-reply
- value: 69
+ value: 71
attributes: *trap-policer-id-attrs
dump:
request:
@@ -617,7 +617,7 @@ operations:
- port-index
- rate-node-name
reply: &rate-get-reply
- value: 74
+ value: 76
attributes: *rate-id-attrs
dump:
request:
@@ -643,7 +643,7 @@ operations:
- dev-name
- linecard-index
reply: &linecard-get-reply
- value: 78
+ value: 80
attributes: *linecard-id-attrs
dump:
request:
diff --git a/Documentation/netlink/specs/nfsd.yaml b/Documentation/netlink/specs/nfsd.yaml
new file mode 100644
index 000000000000..05acc73e2e33
--- /dev/null
+++ b/Documentation/netlink/specs/nfsd.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: nfsd
+protocol: genetlink
+uapi-header: linux/nfsd_netlink.h
+
+doc: NFSD configuration over generic netlink.
+
+attribute-sets:
+ -
+ name: rpc-status
+ attributes:
+ -
+ name: xid
+ type: u32
+ byte-order: big-endian
+ -
+ name: flags
+ type: u32
+ -
+ name: prog
+ type: u32
+ -
+ name: version
+ type: u8
+ -
+ name: proc
+ type: u32
+ -
+ name: service_time
+ type: s64
+ -
+ name: pad
+ type: pad
+ -
+ name: saddr4
+ type: u32
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: daddr4
+ type: u32
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: saddr6
+ type: binary
+ display-hint: ipv6
+ -
+ name: daddr6
+ type: binary
+ display-hint: ipv6
+ -
+ name: sport
+ type: u16
+ byte-order: big-endian
+ -
+ name: dport
+ type: u16
+ byte-order: big-endian
+ -
+ name: compound-ops
+ type: u32
+ multi-attr: true
+
+operations:
+ list:
+ -
+ name: rpc-status-get
+ doc: dump pending nfsd rpc
+ attribute-set: rpc-status
+ dump:
+ pre: nfsd-nl-rpc-status-get-start
+ post: nfsd-nl-rpc-status-get-done
+ reply:
+ attributes:
+ - xid
+ - flags
+ - prog
+ - version
+ - proc
+ - service_time
+ - saddr4
+ - daddr4
+ - saddr6
+ - daddr6
+ - sport
+ - dport
+ - compound-ops
diff --git a/Documentation/networking/representors.rst b/Documentation/networking/representors.rst
index ee1f5cd54496..decb39c19b9e 100644
--- a/Documentation/networking/representors.rst
+++ b/Documentation/networking/representors.rst
@@ -162,9 +162,11 @@ How are representors identified?
The representor netdevice should *not* directly refer to a PCIe device (e.g.
through ``net_dev->dev.parent`` / ``SET_NETDEV_DEV()``), either of the
representee or of the switchdev function.
-Instead, it should implement the ``ndo_get_devlink_port()`` netdevice op, which
-the kernel uses to provide the ``phys_switch_id`` and ``phys_port_name`` sysfs
-nodes. (Some legacy drivers implement ``ndo_get_port_parent_id()`` and
+Instead, the driver should use the ``SET_NETDEV_DEVLINK_PORT`` macro to
+assign a devlink port instance to the netdevice before registering the
+netdevice; the kernel uses the devlink port to provide the ``phys_switch_id``
+and ``phys_port_name`` sysfs nodes.
+(Some legacy drivers implement ``ndo_get_port_parent_id()`` and
``ndo_get_phys_port_name()`` directly, but this is deprecated.) See
:ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>` for the
details of this API.
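
A hedged sketch of that registration order (the PCI VF flavour and port
numbering below are assumptions for illustration, not taken from any real
driver)::

    #include <linux/netdevice.h>
    #include <net/devlink.h>

    static int example_register_vf_rep(struct devlink *devlink,
                                       struct devlink_port *dl_port,
                                       u16 pf, u16 vf,
                                       struct net_device *netdev)
    {
            int err;

            devlink_port_attrs_pci_vf_set(dl_port, 0, pf, vf, false);

            err = devlink_port_register(devlink, dl_port, vf);
            if (err)
                    return err;

            /* Link the devlink port to the netdev before it becomes visible. */
            SET_NETDEV_DEVLINK_PORT(netdev, dl_port);

            err = register_netdev(netdev);
            if (err)
                    devlink_port_unregister(dl_port);
            return err;
    }
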
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index ac7c52f130c9..31000f075707 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -25,15 +25,15 @@ Contact
The Linux kernel hardware security team is separate from the regular Linux
kernel security team.
-The team only handles the coordination of embargoed hardware security
-issues. Reports of pure software security bugs in the Linux kernel are not
+The team only handles developing fixes for embargoed hardware security
+issues. Reports of pure software security bugs in the Linux kernel are not
handled by this team and the reporter will be guided to contact the regular
Linux kernel security team (:ref:`Documentation/admin-guide/
<securitybugs>`) instead.
The team can be contacted by email at <[email protected]>. This
-is a private list of security officers who will help you to coordinate an
-issue according to our documented process.
+is a private list of security officers who will help you to coordinate a
+fix according to our documented process.
The list is encrypted and email to the list can be sent by either PGP or
S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME
@@ -132,11 +132,11 @@ other hardware could be affected.
The hardware security team will provide an incident-specific encrypted
mailing-list which will be used for initial discussion with the reporter,
-further disclosure and coordination.
+further disclosure, and coordination of fixes.
The hardware security team will provide the disclosing party a list of
developers (domain experts) who should be informed initially about the
-issue after confirming with the developers that they will adhere to this
+issue after confirming with the developers that they will adhere to this
Memorandum of Understanding and the documented process. These developers
form the initial response team and will be responsible for handling the
issue after initial contact. The hardware security team is supporting the
@@ -209,13 +209,18 @@ five work days this is taken as silent acknowledgement.
After acknowledgement or resolution of an objection the expert is disclosed
by the incident team and brought into the development process.
+List participants may not communicate about the issue outside of the
+private mailing list. List participants may not use any shared resources
+(e.g. employer build farms, CI systems, etc) when working on patches.
+
Coordinated release
"""""""""""""""""""
The involved parties will negotiate the date and time where the embargo
ends. At that point the prepared mitigations are integrated into the
-relevant kernel trees and published.
+relevant kernel trees and published. There is no pre-notification process:
+fixes are published in public and available to everyone at the same time.
While we understand that hardware security issues need coordinated embargo
time, the embargo time should be constrained to the minimum time which is
diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
index 49029ee82e55..081397827a7e 100644
--- a/Documentation/rust/general-information.rst
+++ b/Documentation/rust/general-information.rst
@@ -29,7 +29,7 @@ target with the same invocation used for compilation, e.g.::
To read the docs locally in your web browser, run e.g.::
- xdg-open rust/doc/kernel/index.html
+ xdg-open Documentation/output/rust/rustdoc/kernel/index.html
To learn about how to write the documentation, please see coding-guidelines.rst.
diff --git a/Documentation/trace/fprobe.rst b/Documentation/trace/fprobe.rst
index 7a895514b537..196f52386aaa 100644
--- a/Documentation/trace/fprobe.rst
+++ b/Documentation/trace/fprobe.rst
@@ -91,9 +91,9 @@ The prototype of the entry/exit callback function are as follows:
.. code-block:: c
- int entry_callback(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs, void *entry_data);
+ int entry_callback(struct fprobe *fp, unsigned long entry_ip, unsigned long ret_ip, struct pt_regs *regs, void *entry_data);
- void exit_callback(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs, void *entry_data);
+ void exit_callback(struct fprobe *fp, unsigned long entry_ip, unsigned long ret_ip, struct pt_regs *regs, void *entry_data);
Note that the @entry_ip is saved at function entry and passed to exit handler.
If the entry callback function returns !0, the corresponding exit callback will be cancelled.
@@ -108,6 +108,10 @@ If the entry callback function returns !0, the corresponding exit callback will
Note that this may not be the actual entry address of the function but
the address where the ftrace is instrumented.
+@ret_ip
+ This is the return address that the traced function will return to,
+ somewhere in the caller. This can be used at both entry and exit.
+
@regs
This is the `pt_regs` data structure at the entry and exit. Note that
the instruction pointer of @regs may be different from the @entry_ip
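
A minimal, hedged sketch of wiring these callbacks up in a module; the
traced symbol ``kernel_clone`` is just an example target::

    #include <linux/module.h>
    #include <linux/fprobe.h>

    static int sample_entry_handler(struct fprobe *fp, unsigned long entry_ip,
                                    unsigned long ret_ip, struct pt_regs *regs,
                                    void *entry_data)
    {
            pr_info("entry: %pS, will return to %pS\n",
                    (void *)entry_ip, (void *)ret_ip);
            return 0;       /* non-zero would cancel the exit callback */
    }

    static void sample_exit_handler(struct fprobe *fp, unsigned long entry_ip,
                                    unsigned long ret_ip, struct pt_regs *regs,
                                    void *entry_data)
    {
            pr_info("exit: %pS returned to %pS\n",
                    (void *)entry_ip, (void *)ret_ip);
    }

    static struct fprobe sample_fprobe = {
            .entry_handler = sample_entry_handler,
            .exit_handler = sample_exit_handler,
    };

    static int __init sample_init(void)
    {
            return register_fprobe(&sample_fprobe, "kernel_clone", NULL);
    }

    static void __exit sample_cleanup(void)
    {
            unregister_fprobe(&sample_fprobe);
    }

    module_init(sample_init);
    module_exit(sample_cleanup);
    MODULE_LICENSE("GPL");
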
diff --git a/Documentation/translations/zh_CN/core-api/workqueue.rst b/Documentation/translations/zh_CN/core-api/workqueue.rst
index 6c1b5ec31d75..7fac6f75d078 100644
--- a/Documentation/translations/zh_CN/core-api/workqueue.rst
+++ b/Documentation/translations/zh_CN/core-api/workqueue.rst
@@ -202,7 +202,7 @@ workqueue将自动创建与属性相匹配的后备工作者池。调节并发�
同的排序属性。
在目前的实现中,上述配置只保证了特定NUMA节点内的ST行为。相反,
-``alloc_ordered_queue()`` 应该被用来实现全系统的ST行为。
+``alloc_ordered_workqueue()`` 应该被用来实现全系统的ST行为。
执行场景示例
diff --git a/MAINTAINERS b/MAINTAINERS
index 7608b714653f..dd5de540ec0b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -378,8 +378,9 @@ F: drivers/acpi/viot.c
F: include/linux/acpi_viot.h
ACPI WMI DRIVER
+M: Armin Wolf <[email protected]>
-S: Orphan
+S: Maintained
F: Documentation/driver-api/wmi.rst
F: Documentation/wmi/
F: drivers/platform/x86/wmi.c
@@ -2221,21 +2222,28 @@ F: arch/arm/boot/dts/ti/omap/omap3-igep*
ARM/INTEL IXP4XX ARM ARCHITECTURE
M: Linus Walleij <[email protected]>
M: Imre Kaloz <[email protected]>
-M: Krzysztof Halasa <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
F: Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt
F: Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
F: Documentation/devicetree/bindings/memory-controllers/intel,ixp4xx-expansion*
+F: Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml
F: Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
F: arch/arm/boot/dts/intel/ixp/
F: arch/arm/mach-ixp4xx/
F: drivers/bus/intel-ixp4xx-eb.c
+F: drivers/char/hw_random/ixp4xx-rng.c
F: drivers/clocksource/timer-ixp4xx.c
F: drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
F: drivers/gpio/gpio-ixp4xx.c
F: drivers/irqchip/irq-ixp4xx.c
+F: drivers/net/ethernet/xscale/ixp4xx_eth.c
+F: drivers/net/wan/ixp4xx_hss.c
+F: drivers/soc/ixp4xx/ixp4xx-npe.c
+F: drivers/soc/ixp4xx/ixp4xx-qmgr.c
+F: include/linux/soc/ixp4xx/npe.h
+F: include/linux/soc/ixp4xx/qmgr.h
ARM/INTEL KEEMBAY ARCHITECTURE
M: Paul J. Murphy <[email protected]>
@@ -2337,7 +2345,7 @@ F: drivers/rtc/rtc-mt7622.c
ARM/Mediatek SoC support
M: Matthias Brugger <[email protected]>
-R: AngeloGioacchino Del Regno <[email protected]>
+M: AngeloGioacchino Del Regno <[email protected]>
L: [email protected] (moderated for non-subscribers)
L: [email protected] (moderated for non-subscribers)
@@ -6758,7 +6766,7 @@ F: drivers/gpu/drm/panel/panel-sitronix-st7701.c
DRM DRIVER FOR SITRONIX ST7703 PANELS
M: Guido Günther <[email protected]>
R: Purism Kernel Team <[email protected]>
-R: Ondrej Jirman <[email protected]>
+R: Ondrej Jirman <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
F: drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -8103,7 +8111,7 @@ F: include/linux/arm_ffa.h
FIRMWARE LOADER (request_firmware)
M: Luis Chamberlain <[email protected]>
-M: Russ Weight <[email protected]>
+M: Russ Weight <[email protected]>
S: Maintained
F: Documentation/firmware_class/
@@ -10630,22 +10638,6 @@ L: [email protected]
S: Maintained
F: drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
-INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
-M: Krzysztof Halasa <[email protected]>
-S: Maintained
-F: drivers/net/ethernet/xscale/ixp4xx_eth.c
-F: drivers/net/wan/ixp4xx_hss.c
-F: drivers/soc/ixp4xx/ixp4xx-npe.c
-F: drivers/soc/ixp4xx/ixp4xx-qmgr.c
-F: include/linux/soc/ixp4xx/npe.h
-F: include/linux/soc/ixp4xx/qmgr.h
-
-INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
-M: Deepak Saxena <[email protected]>
-S: Maintained
-F: Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml
-F: drivers/char/hw_random/ixp4xx-rng.c
-
INTEL KEEM BAY DRM DRIVER
M: Anitha Chrisanthus <[email protected]>
M: Edmund Dea <[email protected]>
@@ -10709,7 +10701,7 @@ F: drivers/mfd/intel-m10-bmc*
F: include/linux/mfd/intel-m10-bmc.h
INTEL MAX10 BMC SECURE UPDATES
-M: Russ Weight <[email protected]>
+M: Peter Colberg <[email protected]>
S: Maintained
F: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
@@ -13854,9 +13846,10 @@ F: Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
F: drivers/staging/media/meson/vdec/
METHODE UDPU SUPPORT
-M: Vladimir Vid <[email protected]>
+M: Robert Marko <[email protected]>
S: Maintained
-F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
+F: arch/arm64/boot/dts/marvell/armada-3720-eDPU.dts
+F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.*
MHI BUS
M: Manivannan Sadhasivam <[email protected]>
@@ -15139,7 +15132,7 @@ NOLIBC HEADER FILE
M: Willy Tarreau <[email protected]>
M: Thomas Weißschuh <[email protected]>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nolibc/linux-nolibc.git
F: tools/include/nolibc/
F: tools/testing/selftests/nolibc/
@@ -20493,6 +20486,7 @@ F: include/dt-bindings/clock/starfive?jh71*.h
STARFIVE JH71X0 PINCTRL DRIVERS
M: Emil Renner Berthing <[email protected]>
M: Jianlong Huang <[email protected]>
+M: Hal Feng <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/starfive,jh71*.yaml
diff --git a/Makefile b/Makefile
index 88ebf6547964..5c418efbe89b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -1474,7 +1474,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
- compile_commands.json .thinlto-cache rust/test rust/doc \
+ compile_commands.json .thinlto-cache rust/test \
rust-project.json .vmlinux.objs .vmlinux.export.c
# Directories & files removed with 'make mrproper'
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index d5b3ed2c58f5..c380d8c30704 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -90,10 +90,12 @@ static void show_faulting_vma(unsigned long address)
*/
if (vma) {
char buf[ARC_PATH_MAX];
- char *nm = "?";
+ char *nm = "anon";
if (vma->vm_file) {
- nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
+ /* XXX: can we use %pD below and get rid of buf? */
+ nm = d_path(file_user_path(vma->vm_file), buf,
+ ARC_PATH_MAX-1);
if (IS_ERR(nm))
nm = "?";
}
diff --git a/arch/arm/boot/dts/rockchip/rk3128.dtsi b/arch/arm/boot/dts/rockchip/rk3128.dtsi
index b63bd4ad3143..88a4b0d6d928 100644
--- a/arch/arm/boot/dts/rockchip/rk3128.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3128.dtsi
@@ -64,7 +64,8 @@
compatible = "arm,armv7-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
arm,cpu-registers-not-fw-configured;
clock-frequency = <24000000>;
};
@@ -233,7 +234,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044000 0x20>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER0>;
clock-names = "pclk", "timer";
};
@@ -241,7 +242,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044020 0x20>;
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER1>;
clock-names = "pclk", "timer";
};
@@ -249,7 +250,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044040 0x20>;
interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER2>;
clock-names = "pclk", "timer";
};
@@ -257,7 +258,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044060 0x20>;
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER3>;
clock-names = "pclk", "timer";
};
@@ -265,7 +266,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044080 0x20>;
interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER4>;
clock-names = "pclk", "timer";
};
@@ -273,7 +274,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x200440a0 0x20>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER5>;
clock-names = "pclk", "timer";
};
@@ -426,7 +427,7 @@
i2c0: i2c@20072000 {
compatible = "rockchip,rk3128-i2c", "rockchip,rk3288-i2c";
- reg = <20072000 0x1000>;
+ reg = <0x20072000 0x1000>;
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
clocks = <&cru PCLK_I2C0>;
@@ -458,6 +459,7 @@
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
clocks = <&cru ACLK_DMAC>;
clock-names = "apb_pclk";
#dma-cells = <1>;
diff --git a/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi b/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
index 7ae8b620515c..59f546a278f8 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
@@ -109,6 +109,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49022000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP4_MCBSP1_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
@@ -142,6 +144,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49024000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP4_MCBSP2_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
@@ -175,6 +179,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49026000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP4_MCBSP3_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
diff --git a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
index 46b8f9efd413..3fcef3080eae 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
@@ -2043,6 +2043,8 @@
compatible = "ti,omap4-mcbsp";
reg = <0x0 0xff>; /* L4 Interconnect */
reg-names = "mpu";
+ clocks = <&l4_per_clkctrl OMAP4_MCBSP4_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
diff --git a/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi b/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
index a03bca5a3584..97b0c3b5f573 100644
--- a/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
@@ -109,6 +109,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49022000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP5_MCBSP1_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
@@ -142,6 +144,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49024000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP5_MCBSP2_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
@@ -175,6 +179,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49026000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP5_MCBSP3_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 9808cd27e2cf..67de96c7717d 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -550,6 +550,7 @@ static struct platform_device *ams_delta_devices[] __initdata = {
&ams_delta_nand_device,
&ams_delta_lcd_device,
&cx20442_codec_device,
+ &modem_nreset_device,
};
static struct gpiod_lookup_table *ams_delta_gpio_tables[] __initdata = {
@@ -782,26 +783,28 @@ static struct plat_serial8250_port ams_delta_modem_ports[] = {
{ },
};
+static int ams_delta_modem_pm_activate(struct device *dev)
+{
+ modem_priv.regulator = regulator_get(dev, "RESET#");
+ if (IS_ERR(modem_priv.regulator))
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
+static struct dev_pm_domain ams_delta_modem_pm_domain = {
+ .activate = ams_delta_modem_pm_activate,
+};
+
static struct platform_device ams_delta_modem_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM1,
.dev = {
.platform_data = ams_delta_modem_ports,
+ .pm_domain = &ams_delta_modem_pm_domain,
},
};
-static int __init modem_nreset_init(void)
-{
- int err;
-
- err = platform_device_register(&modem_nreset_device);
- if (err)
- pr_err("Couldn't register the modem regulator device\n");
-
- return err;
-}
-
-
/*
* This function expects MODEM IRQ number already assigned to the port.
* The MODEM device requires its RESET# pin kept high during probe.
@@ -833,37 +836,6 @@ static int __init ams_delta_modem_init(void)
}
arch_initcall_sync(ams_delta_modem_init);
-static int __init late_init(void)
-{
- int err;
-
- err = modem_nreset_init();
- if (err)
- return err;
-
- /*
- * Once the modem device is registered, the modem_nreset
- * regulator can be requested on behalf of that device.
- */
- modem_priv.regulator = regulator_get(&ams_delta_modem_device.dev,
- "RESET#");
- if (IS_ERR(modem_priv.regulator)) {
- err = PTR_ERR(modem_priv.regulator);
- goto unregister;
- }
- return 0;
-
-unregister:
- platform_device_unregister(&ams_delta_modem_device);
- return err;
-}
-
-static void __init ams_delta_init_late(void)
-{
- omap1_init_late();
- late_init();
-}
-
static void __init ams_delta_map_io(void)
{
omap1_map_io();
@@ -877,7 +849,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
.init_early = omap1_init_early,
.init_irq = omap1_init_irq,
.init_machine = ams_delta_init,
- .init_late = ams_delta_init_late,
+ .init_late = omap1_init_late,
.init_time = omap1_timer_init,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 410d17d1d443..f618a6df2938 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -176,17 +176,18 @@ static u64 notrace omap_32k_read_sched_clock(void)
return sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
}
+static struct timespec64 persistent_ts;
+static cycles_t cycles;
+static unsigned int persistent_mult, persistent_shift;
+
/**
* omap_read_persistent_clock64 - Return time from a persistent clock.
+ * @ts: &struct timespec64 for the returned time
*
* Reads the time from a source which isn't disabled during PM, the
* 32k sync timer. Convert the cycles elapsed since last read into
* nsecs and adds to a monotonically increasing timespec64.
*/
-static struct timespec64 persistent_ts;
-static cycles_t cycles;
-static unsigned int persistent_mult, persistent_shift;
-
static void omap_read_persistent_clock64(struct timespec64 *ts)
{
unsigned long long nsecs;
@@ -206,10 +207,9 @@ static void omap_read_persistent_clock64(struct timespec64 *ts)
/**
* omap_init_clocksource_32k - setup and register counter 32k as a
* kernel clocksource
- * @pbase: base addr of counter_32k module
- * @size: size of counter_32k to map
+ * @vbase: base addr of counter_32k module
*
- * Returns 0 upon success or negative error code upon failure.
+ * Returns: %0 upon success or negative error code upon failure.
*
*/
static int __init omap_init_clocksource_32k(void __iomem *vbase)
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 1e17b5f77588..ba71928c0fcb 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2209,7 +2209,7 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh,
return err;
pr_debug("omap_hwmod: %s %pOFn at %pR\n",
- oh->name, np, &res);
+ oh->name, np, res);
if (oh && oh->mpu_rt_idx) {
omap_hwmod_fix_mpu_rt_idx(oh, np, res);
diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
index 6f85a05ee7e1..dcf6e4846ac9 100644
--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
@@ -185,7 +185,7 @@
#size-cells = <1>;
ranges;
- anomix_ns_gpr: syscon@44210000 {
+ aonmix_ns_gpr: syscon@44210000 {
compatible = "fsl,imx93-aonmix-ns-syscfg", "syscon";
reg = <0x44210000 0x1000>;
};
@@ -319,6 +319,7 @@
assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
assigned-clock-rates = <40000000>;
fsl,clk-source = /bits/ 8 <0>;
+ fsl,stop-mode = <&aonmix_ns_gpr 0x14 0>;
status = "disabled";
};
@@ -591,6 +592,7 @@
assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
assigned-clock-rates = <40000000>;
fsl,clk-source = /bits/ 8 <0>;
+ fsl,stop-mode = <&wakeupmix_gpr 0x0c 2>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 36ef2dbe8add..3ee9266fa8e9 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -905,7 +905,7 @@
status = "disabled";
};
- sata_phy: t-phy@1a243000 {
+ sata_phy: t-phy {
compatible = "mediatek,mt7622-tphy",
"mediatek,generic-tphy-v1";
#address-cells = <2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index 68539ea788df..24eda00e320d 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -434,7 +434,7 @@
};
};
- pcie_phy: t-phy@11c00000 {
+ pcie_phy: t-phy {
compatible = "mediatek,mt7986-tphy",
"mediatek,generic-tphy-v2";
#address-cells = <2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
index b2485ddfd33b..5d635085fe3f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
@@ -48,7 +48,7 @@
memory@40000000 {
device_type = "memory";
- reg = <0 0x40000000 0 0x80000000>;
+ reg = <0 0x40000000 0x2 0x00000000>;
};
reserved-memory {
@@ -56,13 +56,8 @@
#size-cells = <2>;
ranges;
- /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
- bl31_secmon_reserved: secmon@54600000 {
- no-map;
- reg = <0 0x54600000 0x0 0x200000>;
- };
-
- /* 12 MiB reserved for OP-TEE (BL32)
+ /*
+ * 12 MiB reserved for OP-TEE (BL32)
* +-----------------------+ 0x43e0_0000
* | SHMEM 2MiB |
* +-----------------------+ 0x43c0_0000
@@ -75,6 +70,34 @@
no-map;
reg = <0 0x43200000 0 0x00c00000>;
};
+
+ scp_mem: memory@50000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x50000000 0 0x2900000>;
+ no-map;
+ };
+
+ vpu_mem: memory@53000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x53000000 0 0x1400000>; /* 20 MB */
+ };
+
+ /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+ bl31_secmon_mem: memory@54600000 {
+ no-map;
+ reg = <0 0x54600000 0x0 0x200000>;
+ };
+
+ snd_dma_mem: memory@60000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x60000000 0 0x1100000>;
+ no-map;
+ };
+
+ apu_mem: memory@62000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x62000000 0 0x1400000>; /* 20 MB */
+ };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index a9e52b50c8c4..54c674c45b49 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -313,6 +313,7 @@
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH 0>;
cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>,
<&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
+ status = "fail";
};
dmic_codec: dmic-codec {
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
index 385b178314db..3067a4091a7a 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
@@ -62,25 +62,23 @@
stdout-path = "serial0:115200n8";
};
- clocks {
- divclk4: divclk4 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "divclk4";
+ div1_mclk: divclk1 {
+ compatible = "gpio-gate-clock";
+ pinctrl-0 = <&audio_mclk>;
+ pinctrl-names = "default";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
+ #clock-cells = <0>;
+ enable-gpios = <&pm8994_gpios 15 0>;
+ };
- pinctrl-names = "default";
- pinctrl-0 = <&divclk4_pin_a>;
- };
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
- div1_mclk: divclk1 {
- compatible = "gpio-gate-clock";
- pinctrl-0 = <&audio_mclk>;
- pinctrl-names = "default";
- clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
- #clock-cells = <0>;
- enable-gpios = <&pm8994_gpios 15 0>;
- };
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
};
gpio-keys {
diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
index bcd2397eb373..06f8ff624181 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
@@ -11,26 +11,24 @@
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
/ {
- clocks {
- divclk1_cdc: divclk1 {
- compatible = "gpio-gate-clock";
- clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
- #clock-cells = <0>;
- enable-gpios = <&pm8994_gpios 15 GPIO_ACTIVE_HIGH>;
+ divclk1_cdc: divclk1 {
+ compatible = "gpio-gate-clock";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
+ #clock-cells = <0>;
+ enable-gpios = <&pm8994_gpios 15 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&divclk1_default>;
- };
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk1_default>;
+ };
- divclk4: divclk4 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "divclk4";
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
- pinctrl-names = "default";
- pinctrl-0 = <&divclk4_pin_a>;
- };
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
};
gpio-keys {
diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
index d1066edaea47..f8e9d90afab0 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
@@ -20,16 +20,14 @@
qcom,pmic-id = <0x20009 0x2000a 0x00 0x00>;
qcom,board-id = <31 0>;
- clocks {
- divclk2_haptics: divclk2 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "divclk2";
-
- pinctrl-names = "default";
- pinctrl-0 = <&divclk2_pin_a>;
- };
+ divclk2_haptics: divclk2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk2";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk2_pin_a>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
index 3c3b6287cd27..eaa43f022a65 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
@@ -173,7 +173,7 @@
compatible = "qcom,pmm8654au-gpio", "qcom,spmi-gpio";
reg = <0x8800>;
gpio-controller;
- gpio-ranges = <&pmm8654au_2_gpios 0 0 12>;
+ gpio-ranges = <&pmm8654au_1_gpios 0 0 12>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
index 08a3ad3e7ae9..de0a1f2af983 100644
--- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
@@ -68,15 +68,17 @@
simple-audio-card,format = "i2s";
simple-audio-card,name = "Haikou,I2S-codec";
simple-audio-card,mclk-fs = <512>;
+ simple-audio-card,frame-master = <&sgtl5000_codec>;
+ simple-audio-card,bitclock-master = <&sgtl5000_codec>;
- simple-audio-card,codec {
- clocks = <&sgtl5000_clk>;
+ sgtl5000_codec: simple-audio-card,codec {
sound-dai = <&sgtl5000>;
+ // Prevent the dai subsystem from overwriting the clock
+ // frequency. We are using a fixed-frequency oscillator.
+ system-clock-fixed;
};
simple-audio-card,cpu {
- bitclock-master;
- frame-master;
sound-dai = <&i2s0_8ch>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index 7dccbe8a9393..f2279aa6ca9e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -492,6 +492,7 @@
&i2s0 {
pinctrl-0 = <&i2s0_2ch_bus>;
+ pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
rockchip,capture-channels = <2>;
rockchip,playback-channels = <2>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 9da0b6d77c8d..5bc2d4faeea6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -2457,6 +2457,16 @@
<4 RK_PA0 1 &pcfg_pull_none>;
};
+ i2s0_2ch_bus_bclk_off: i2s0-2ch-bus-bclk-off {
+ rockchip,pins =
+ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>,
+ <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+
i2s0_8ch_bus: i2s0-8ch-bus {
rockchip,pins =
<3 RK_PD0 1 &pcfg_pull_none>,
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 5882b2415596..1095c6647e96 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -344,14 +344,14 @@
*/
#define __HFGRTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51))
#define __HFGRTR_EL2_MASK GENMASK(49, 0)
-#define __HFGRTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
+#define __HFGRTR_EL2_nMASK (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
#define __HFGWTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51) | \
BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
GENMASK(26, 25) | BIT(21) | BIT(18) | \
GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
#define __HFGWTR_EL2_MASK GENMASK(49, 0)
-#define __HFGWTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
+#define __HFGWTR_EL2_nMASK (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
#define __HFGITR_EL2_RES0 GENMASK(63, 57)
#define __HFGITR_EL2_MASK GENMASK(54, 0)
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 6dcdae4d38cb..a1e24228aaaa 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -55,11 +55,6 @@ static struct irq_ops arch_timer_irq_ops = {
.get_input_level = kvm_arch_timer_get_input_level,
};
-static bool has_cntpoff(void)
-{
- return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
-}
-
static int nr_timers(struct kvm_vcpu *vcpu)
{
if (!vcpu_has_nv(vcpu))
@@ -180,7 +175,7 @@ u64 kvm_phys_timer_read(void)
return timecounter->cc->read(timecounter->cc);
}
-static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
if (vcpu_has_nv(vcpu)) {
if (is_hyp_ctxt(vcpu)) {
@@ -548,8 +543,7 @@ static void timer_save_state(struct arch_timer_context *ctx)
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
cval = read_sysreg_el0(SYS_CNTP_CVAL);
- if (!has_cntpoff())
- cval -= timer_get_offset(ctx);
+ cval -= timer_get_offset(ctx);
timer_set_cval(ctx, cval);
@@ -636,8 +630,7 @@ static void timer_restore_state(struct arch_timer_context *ctx)
cval = timer_get_cval(ctx);
offset = timer_get_offset(ctx);
set_cntpoff(offset);
- if (!has_cntpoff())
- cval += offset;
+ cval += offset;
write_sysreg_el0(cval, SYS_CNTP_CVAL);
isb();
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 9ced1bf0c2b7..ee902ff2a50f 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -977,6 +977,8 @@ enum fg_filter_id {
static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
/* HFGRTR_EL2, HFGWTR_EL2 */
+ SR_FGT(SYS_PIR_EL1, HFGxTR, nPIR_EL1, 0),
+ SR_FGT(SYS_PIRE0_EL1, HFGxTR, nPIRE0_EL1, 0),
SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0),
SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0),
SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0),
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 6537f58b1a8c..448b17080d36 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -39,6 +39,26 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
___activate_traps(vcpu);
+ if (has_cntpoff()) {
+ struct timer_map map;
+
+ get_timer_map(vcpu, &map);
+
+ /*
+	 * We're entering the guest. Reload the correct
+ * values from memory now that TGE is clear.
+ */
+ if (map.direct_ptimer == vcpu_ptimer(vcpu))
+ val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+ if (map.direct_ptimer == vcpu_hptimer(vcpu))
+ val = __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
+
+ if (map.direct_ptimer) {
+ write_sysreg_el0(val, SYS_CNTP_CVAL);
+ isb();
+ }
+ }
+
val = read_sysreg(cpacr_el1);
val |= CPACR_ELx_TTA;
val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
@@ -77,6 +97,30 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ if (has_cntpoff()) {
+ struct timer_map map;
+ u64 val, offset;
+
+ get_timer_map(vcpu, &map);
+
+ /*
+ * We're exiting the guest. Save the latest CVAL value
+ * to memory and apply the offset now that TGE is set.
+ */
+ val = read_sysreg_el0(SYS_CNTP_CVAL);
+ if (map.direct_ptimer == vcpu_ptimer(vcpu))
+ __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
+ if (map.direct_ptimer == vcpu_hptimer(vcpu))
+ __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
+
+ offset = read_sysreg_s(SYS_CNTPOFF_EL2);
+
+ if (map.direct_ptimer && offset) {
+ write_sysreg_el0(val + offset, SYS_CNTP_CVAL);
+ isb();
+ }
+ }
+
/*
* ARM errata 1165522 and 1530923 require the actual execution of the
* above before we can switch to the EL2/EL0 translation regime used by
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 0eea225fd09a..a243934c5568 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -39,7 +39,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
- if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
+ if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
return;
if (!attr->exclude_host)
@@ -55,7 +55,7 @@ void kvm_clr_pmu_events(u32 clr)
{
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
- if (!kvm_arm_support_pmu_v3() || !pmu)
+ if (!kvm_arm_support_pmu_v3())
return;
pmu->events_host &= ~clr;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e92ec810d449..0afd6136e275 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2122,8 +2122,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
- { SYS_DESC(SYS_PIRE0_EL1), access_vm_reg, reset_unknown, PIRE0_EL1 },
- { SYS_DESC(SYS_PIR_EL1), access_vm_reg, reset_unknown, PIR_EL1 },
+ { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
+ { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
diff --git a/arch/ia64/include/asm/cpu.h b/arch/ia64/include/asm/cpu.h
index db125df9e088..642d71675ddb 100644
--- a/arch/ia64/include/asm/cpu.h
+++ b/arch/ia64/include/asm/cpu.h
@@ -15,9 +15,4 @@ DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
DECLARE_PER_CPU(int, cpu_state);
-#ifdef CONFIG_HOTPLUG_CPU
-extern int arch_register_cpu(int num);
-extern void arch_unregister_cpu(int);
-#endif
-
#endif /* _ASM_IA64_CPU_H_ */
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 94a848b06f15..741863a187a6 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -59,7 +59,7 @@ void __ref arch_unregister_cpu(int num)
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
-static int __init arch_register_cpu(int num)
+int __init arch_register_cpu(int num)
{
return register_cpu(&sysfs_cpus[num].cpu, num);
}
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index 0dcb36b32cb2..c486c2341b66 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -52,10 +52,9 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
* @offset: bus address of the memory
* @size: size of the resource to map
*/
-extern pgprot_t pgprot_wc;
-
#define ioremap_wc(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(pgprot_wc))
+ ioremap_prot((offset), (size), \
+ pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))
#define ioremap_cache(offset, size) \
ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
diff --git a/arch/loongarch/include/asm/linkage.h b/arch/loongarch/include/asm/linkage.h
index 81b0c4cfbf4f..e2eca1a25b4e 100644
--- a/arch/loongarch/include/asm/linkage.h
+++ b/arch/loongarch/include/asm/linkage.h
@@ -33,4 +33,12 @@
.cfi_endproc; \
SYM_END(name, SYM_T_FUNC)
+#define SYM_CODE_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
+ .cfi_startproc;
+
+#define SYM_CODE_END(name) \
+ .cfi_endproc; \
+ SYM_END(name, SYM_T_NONE)
+
#endif
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 35348d4c4209..21319c1e045c 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -105,13 +105,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
return __pgprot(prot);
}
+extern bool wc_enabled;
+
#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
unsigned long prot = pgprot_val(_prot);
- prot = (prot & ~_CACHE_MASK) | _CACHE_WUC;
+ prot = (prot & ~_CACHE_MASK) | (wc_enabled ? _CACHE_WUC : _CACHE_SUC);
return __pgprot(prot);
}
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index 65518bb8f472..1ec8e4c4cc2b 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -18,7 +18,7 @@
.text
.cfi_sections .debug_frame
.align 5
-SYM_FUNC_START(handle_syscall)
+SYM_CODE_START(handle_syscall)
csrrd t0, PERCPU_BASE_KS
la.pcrel t1, kernelsp
add.d t1, t1, t0
@@ -71,7 +71,7 @@ SYM_FUNC_START(handle_syscall)
bl do_syscall
RESTORE_ALL_AND_RET
-SYM_FUNC_END(handle_syscall)
+SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)
SYM_CODE_START(ret_from_fork)
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 78f066384657..2bb3aa2dcfcb 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -31,7 +31,7 @@ SYM_FUNC_START(__arch_cpu_idle)
1: jr ra
SYM_FUNC_END(__arch_cpu_idle)
-SYM_FUNC_START(handle_vint)
+SYM_CODE_START(handle_vint)
BACKUP_T0T1
SAVE_ALL
la_abs t1, __arch_cpu_idle
@@ -46,11 +46,11 @@ SYM_FUNC_START(handle_vint)
la_abs t0, do_vint
jirl ra, t0, 0
RESTORE_ALL_AND_RET
-SYM_FUNC_END(handle_vint)
+SYM_CODE_END(handle_vint)
-SYM_FUNC_START(except_vec_cex)
+SYM_CODE_START(except_vec_cex)
b cache_parity_error
-SYM_FUNC_END(except_vec_cex)
+SYM_CODE_END(except_vec_cex)
.macro build_prep_badv
csrrd t0, LOONGARCH_CSR_BADV
@@ -66,7 +66,7 @@ SYM_FUNC_END(except_vec_cex)
.macro BUILD_HANDLER exception handler prep
.align 5
- SYM_FUNC_START(handle_\exception)
+ SYM_CODE_START(handle_\exception)
666:
BACKUP_T0T1
SAVE_ALL
@@ -76,7 +76,7 @@ SYM_FUNC_END(except_vec_cex)
jirl ra, t0, 0
668:
RESTORE_ALL_AND_RET
- SYM_FUNC_END(handle_\exception)
+ SYM_CODE_END(handle_\exception)
SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
.endm
@@ -93,7 +93,7 @@ SYM_FUNC_END(except_vec_cex)
BUILD_HANDLER watch watch none
BUILD_HANDLER reserved reserved none /* others */
-SYM_FUNC_START(handle_sys)
+SYM_CODE_START(handle_sys)
la_abs t0, handle_syscall
jr t0
-SYM_FUNC_END(handle_sys)
+SYM_CODE_END(handle_sys)
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 7783f0a3d742..aed65915e932 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -161,19 +161,19 @@ static void __init smbios_parse(void)
}
#ifdef CONFIG_ARCH_WRITECOMBINE
-pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
+bool wc_enabled = true;
#else
-pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
+bool wc_enabled = false;
#endif
-EXPORT_SYMBOL(pgprot_wc);
+EXPORT_SYMBOL(wc_enabled);
static int __init setup_writecombine(char *p)
{
if (!strcmp(p, "on"))
- pgprot_wc = PAGE_KERNEL_WUC;
+ wc_enabled = true;
else if (!strcmp(p, "off"))
- pgprot_wc = PAGE_KERNEL_SUC;
+ wc_enabled = false;
else
pr_warn("Unknown writecombine setting \"%s\".\n", p);
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index f3fe8c06ba4d..4dd53427f657 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -43,11 +43,11 @@ void copy_user_highpage(struct page *to, struct page *from,
{
void *vfrom, *vto;
- vto = kmap_atomic(to);
- vfrom = kmap_atomic(from);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_page(vto, vfrom);
- kunmap_atomic(vfrom);
- kunmap_atomic(vto);
+ kunmap_local(vfrom);
+ kunmap_local(vto);
/* Make sure this page is cleared on other CPU's too before using it */
smp_wmb();
}
@@ -240,6 +240,7 @@ pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index ca17dd3a1915..d5d682f3d29f 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -17,7 +17,7 @@
#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
.macro tlb_do_page_fault, write
- SYM_FUNC_START(tlb_do_page_fault_\write)
+ SYM_CODE_START(tlb_do_page_fault_\write)
SAVE_ALL
csrrd a2, LOONGARCH_CSR_BADV
move a0, sp
@@ -25,13 +25,13 @@
li.w a1, \write
bl do_page_fault
RESTORE_ALL_AND_RET
- SYM_FUNC_END(tlb_do_page_fault_\write)
+ SYM_CODE_END(tlb_do_page_fault_\write)
.endm
tlb_do_page_fault 0
tlb_do_page_fault 1
-SYM_FUNC_START(handle_tlb_protect)
+SYM_CODE_START(handle_tlb_protect)
BACKUP_T0T1
SAVE_ALL
move a0, sp
@@ -41,9 +41,9 @@ SYM_FUNC_START(handle_tlb_protect)
la_abs t0, do_page_fault
jirl ra, t0, 0
RESTORE_ALL_AND_RET
-SYM_FUNC_END(handle_tlb_protect)
+SYM_CODE_END(handle_tlb_protect)
-SYM_FUNC_START(handle_tlb_load)
+SYM_CODE_START(handle_tlb_load)
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -187,16 +187,16 @@ nopage_tlb_load:
csrrd ra, EXCEPTION_KS2
la_abs t0, tlb_do_page_fault_0
jr t0
-SYM_FUNC_END(handle_tlb_load)
+SYM_CODE_END(handle_tlb_load)
-SYM_FUNC_START(handle_tlb_load_ptw)
+SYM_CODE_START(handle_tlb_load_ptw)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_0
jr t0
-SYM_FUNC_END(handle_tlb_load_ptw)
+SYM_CODE_END(handle_tlb_load_ptw)
-SYM_FUNC_START(handle_tlb_store)
+SYM_CODE_START(handle_tlb_store)
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -343,16 +343,16 @@ nopage_tlb_store:
csrrd ra, EXCEPTION_KS2
la_abs t0, tlb_do_page_fault_1
jr t0
-SYM_FUNC_END(handle_tlb_store)
+SYM_CODE_END(handle_tlb_store)
-SYM_FUNC_START(handle_tlb_store_ptw)
+SYM_CODE_START(handle_tlb_store_ptw)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
jr t0
-SYM_FUNC_END(handle_tlb_store_ptw)
+SYM_CODE_END(handle_tlb_store_ptw)
-SYM_FUNC_START(handle_tlb_modify)
+SYM_CODE_START(handle_tlb_modify)
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -497,16 +497,16 @@ nopage_tlb_modify:
csrrd ra, EXCEPTION_KS2
la_abs t0, tlb_do_page_fault_1
jr t0
-SYM_FUNC_END(handle_tlb_modify)
+SYM_CODE_END(handle_tlb_modify)
-SYM_FUNC_START(handle_tlb_modify_ptw)
+SYM_CODE_START(handle_tlb_modify_ptw)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
jr t0
-SYM_FUNC_END(handle_tlb_modify_ptw)
+SYM_CODE_END(handle_tlb_modify_ptw)
-SYM_FUNC_START(handle_tlb_refill)
+SYM_CODE_START(handle_tlb_refill)
csrwr t0, LOONGARCH_CSR_TLBRSAVE
csrrd t0, LOONGARCH_CSR_PGD
lddir t0, t0, 3
@@ -521,4 +521,4 @@ SYM_FUNC_START(handle_tlb_refill)
tlbfill
csrrd t0, LOONGARCH_CSR_TLBRSAVE
ertn
-SYM_FUNC_END(handle_tlb_refill)
+SYM_CODE_END(handle_tlb_refill)
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7b2ac1319d70..467ee6b95ae1 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -592,7 +592,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
- pte_t *ptep, entry, old_pte;
+ pte_t *ptep, entry;
bool writeable;
unsigned long prot_bits;
unsigned long mmu_seq;
@@ -664,7 +664,6 @@ retry:
entry = pfn_pte(pfn, __pgprot(prot_bits));
/* Write the PTE */
- old_pte = *ptep;
set_pte(ptep, entry);
err = 0;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3aaadfd2c8eb..d5d5388973ac 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -910,7 +910,7 @@ config ARCH_FORCE_MAX_ORDER
default "6" if PPC32 && PPC_64K_PAGES
range 4 10 if PPC32 && PPC_256K_PAGES
default "4" if PPC32 && PPC_256K_PAGES
- range 10 10
+ range 10 12
default "10"
help
The kernel page allocator limits the size of maximal physically
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 21f681ee535a..e6fe1d5731f2 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
#define pte_wrprotect pte_wrprotect
+static inline int pte_read(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
+}
+
+#define pte_read pte_read
+
static inline int pte_write(pte_t pte)
{
return !(pte_val(pte) & _PAGE_RO);
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 5cd9acf58a7d..eb6891e34cbd 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -197,7 +197,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if (pte_young(*ptep))
+ if (!pte_young(*ptep))
return 0;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 56ea48276356..c721478c5934 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -25,7 +25,9 @@ static inline int pte_write(pte_t pte)
return pte_val(pte) & _PAGE_RW;
}
#endif
+#ifndef pte_read
static inline int pte_read(pte_t pte) { return 1; }
+#endif
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
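
The two hunks above let the 8xx header provide its own pte_read() and announce it with a same-named define, while the generic header now supplies the fallback only when that define is absent. A minimal standalone sketch of this ifndef-guarded default pattern, using hypothetical platform_pte_read()/EX_NO_ACCESS names rather than the kernel helpers:

#include <stdio.h>

#define EX_NO_ACCESS 0x1UL	/* hypothetical "no access" PTE bit */

/* Platform override, announced with a same-named macro
 * (mirrors "#define pte_read pte_read" in pte-8xx.h). */
#define platform_pte_read platform_pte_read
static inline int platform_pte_read(unsigned long pte)
{
	return (pte & EX_NO_ACCESS) == 0;
}

/* Generic fallback, compiled only when no override was announced
 * (mirrors the new "#ifndef pte_read" guard in nohash/pgtable.h). */
#ifndef platform_pte_read
static inline int platform_pte_read(unsigned long pte)
{
	return 1;
}
#endif

int main(void)
{
	printf("%d %d\n", platform_pte_read(0x0), platform_pte_read(EX_NO_ACCESS));
	return 0;	/* prints "1 0": the override is in effect */
}
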
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 9692acb0361f..7eda33a24bb4 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -137,8 +137,9 @@ ret_from_syscall:
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
- bne- 2f
+ bne- .L44x_icache_flush
#endif /* CONFIG_PPC_47x */
+.L44x_icache_flush_return:
kuep_unlock
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
@@ -172,10 +173,11 @@ syscall_exit_finish:
b 1b
#ifdef CONFIG_44x
-2: li r7,0
+.L44x_icache_flush:
+ li r7,0
iccci r0,r0
stw r7,icache_44x_need_flush@l(r4)
- b 1b
+ b .L44x_icache_flush_return
#endif /* CONFIG_44x */
.globl ret_from_fork
diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
index 97e9ea0c7297..0f1641a31250 100644
--- a/arch/powerpc/kernel/head_85xx.S
+++ b/arch/powerpc/kernel/head_85xx.S
@@ -395,7 +395,7 @@ interrupt_base:
#ifdef CONFIG_PPC_FPU
FP_UNAVAILABLE_EXCEPTION
#else
- EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
+ EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, emulation_assist_interrupt)
#endif
/* System Call Interrupt */
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 2f1026fba00d..20f72cd1d813 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -948,6 +948,8 @@ void __init setup_arch(char **cmdline_p)
/* Parse memory topology */
mem_topology_setup();
+ /* Set max_mapnr before paging_init() */
+ set_max_mapnr(max_pfn);
/*
* Release secondary cpus out of their spinloops at 0x60 now that
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 253620979d0c..6dd2f46bd3ef 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -406,6 +406,9 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
if ((yield_count & 1) == 0)
goto yield_prev; /* owner vcpu is running */
+ if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
+ goto yield_prev; /* re-sample lock owner */
+
spin_end();
preempted = true;
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 39acc2cbab4c..9e1f6558d026 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1212,14 +1212,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
smp_mb(); /* see radix__flush_tlb_mm */
exit_flush_lazy_tlbs(mm);
- _tlbiel_pid(mm->context.id, RIC_FLUSH_ALL);
-
- /*
- * It should not be possible to have coprocessors still
- * attached here.
- */
- if (WARN_ON_ONCE(atomic_read(&mm->context.copros) > 0))
- __flush_all_mm(mm, true);
+ __flush_all_mm(mm, true);
preempt_enable();
} else {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8b121df7b08f..07e8f4f1e07f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -288,7 +288,6 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- set_max_mapnr(max_pfn);
kasan_late_init();
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 3ba9fe411604..4d69bfb9bc11 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -104,6 +104,8 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
/* Embedded type MMU with HW exec support. This is a bit more complicated
* as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
* instead we "filter out" the exec permission for non clean pages.
+ *
+ * This is also called once for the folio. So only work with folio->flags here.
*/
static inline pte_t set_pte_filter(pte_t pte)
{
@@ -190,29 +192,39 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte, unsigned int nr)
{
- /*
- * Make sure hardware valid bit is not set. We don't do
- * tlb flush for this update.
- */
- VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
/* Note: mm->context.id might not yet have been assigned as
* this context might not have been activated yet when this
- * is called.
+ * is called. Filter the pte value and use the filtered value
+ * to set up all the ptes in the range.
*/
pte = set_pte_filter(pte);
- /* Perform the setting of the PTE */
- arch_enter_lazy_mmu_mode();
+ /*
+ * We don't need to call arch_enter/leave_lazy_mmu_mode()
+ * because we expect set_ptes to only be used on not present
+ * and not hw_valid ptes. Hence there is no translation cache flush
+ * involved that needs to be batched.
+ */
for (;;) {
+
+ /*
+ * Make sure hardware valid bit is not set. We don't do
+ * tlb flush for this update.
+ */
+ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+
+ /* Perform the setting of the PTE */
__set_pte_at(mm, addr, ptep, pte, 0);
if (--nr == 0)
break;
ptep++;
- pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT));
addr += PAGE_SIZE;
+ /*
+ * increment the pfn.
+ */
+ pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot((pte)));
}
- arch_leave_lazy_mmu_mode();
}
void unmap_kernel_page(unsigned long va)
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 1a587618015c..18daafbe2e65 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -66,7 +66,7 @@ static int match_context(const void *v, struct file *file, unsigned fd)
*/
static struct spu_context *coredump_next_context(int *fd)
{
- struct spu_context *ctx;
+ struct spu_context *ctx = NULL;
struct file *file;
int n = iterate_fd(current->files, *fd, match_context, NULL);
if (!n)
@@ -74,10 +74,13 @@ static struct spu_context *coredump_next_context(int *fd)
*fd = n - 1;
rcu_read_lock();
- file = lookup_fd_rcu(*fd);
- ctx = SPUFS_I(file_inode(file))->i_ctx;
- get_spu_context(ctx);
+ file = lookup_fdget_rcu(*fd);
rcu_read_unlock();
+ if (file) {
+ ctx = SPUFS_I(file_inode(file))->i_ctx;
+ get_spu_context(ctx);
+ fput(file);
+ }
return ctx;
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 38c5be34c895..10c1320adfd0 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -86,7 +86,7 @@ spufs_new_inode(struct super_block *sb, umode_t mode)
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
out:
return inode;
}
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index bae45b358a09..2b0cac6fb61f 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -184,9 +184,6 @@ _GLOBAL_TOC(plpar_hcall)
plpar_hcall_trace:
HCALL_INST_PRECALL(R5)
- std r4,STK_PARAM(R4)(r1)
- mr r0,r4
-
mr r4,r5
mr r5,r6
mr r6,r7
@@ -196,7 +193,7 @@ plpar_hcall_trace:
HVSC
- ld r12,STK_PARAM(R4)(r1)
+ ld r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1)
std r4,0(r12)
std r5,8(r12)
std r6,16(r12)
@@ -296,9 +293,6 @@ _GLOBAL_TOC(plpar_hcall9)
plpar_hcall9_trace:
HCALL_INST_PRECALL(R5)
- std r4,STK_PARAM(R4)(r1)
- mr r0,r4
-
mr r4,r5
mr r5,r6
mr r6,r7
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d607ab0f7c6d..9c48fecc6719 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -273,11 +273,9 @@ config RISCV_DMA_NONCOHERENT
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB
- select DMA_DIRECT_REMAP if MMU
config RISCV_NONSTANDARD_CACHE_OPS
bool
- depends on RISCV_DMA_NONCOHERENT
help
This enables function pointer support for non-standard noncoherent
systems to handle cache management.
@@ -550,6 +548,7 @@ config RISCV_ISA_ZICBOM
depends on RISCV_ALTERNATIVE
default y
select RISCV_DMA_NONCOHERENT
+ select DMA_DIRECT_REMAP
help
Adds support to dynamically detect the presence of the ZICBOM
extension (Cache Block Management Operations) and enable its
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index 566bcefeab50..e2c731cfed8c 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -77,6 +77,7 @@ config ERRATA_THEAD_PBMT
config ERRATA_THEAD_CMO
bool "Apply T-Head cache management errata"
depends on ERRATA_THEAD && MMU
+ select DMA_DIRECT_REMAP
select RISCV_DMA_NONCOHERENT
default y
help
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 1329e060c548..b43a6bb7e4dc 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -6,7 +6,6 @@
# for more details.
#
-OBJCOPYFLAGS := -O binary
LDFLAGS_vmlinux := -z norelro
ifeq ($(CONFIG_RELOCATABLE),y)
LDFLAGS_vmlinux += -shared -Bsymbolic -z notext --emit-relocs
diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
index 12ebe9792356..2c02358abd71 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
@@ -431,7 +431,7 @@
};
ss-pins {
- pinmux = <GPIOMUX(48, GPOUT_SYS_SPI0_FSS,
+ pinmux = <GPIOMUX(49, GPOUT_SYS_SPI0_FSS,
GPOEN_ENABLE,
GPI_SYS_SPI0_FSS)>;
bias-disable;
diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
index ce708183b6f6..ff364709a6df 100644
--- a/arch/riscv/boot/dts/thead/th1520.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520.dtsi
@@ -139,6 +139,7 @@
interrupt-parent = <&plic>;
#address-cells = <2>;
#size-cells = <2>;
+ dma-noncoherent;
ranges;
plic: interrupt-controller@ffd8000000 {
diff --git a/arch/riscv/errata/andes/Makefile b/arch/riscv/errata/andes/Makefile
index 2d644e19caef..6278c389b801 100644
--- a/arch/riscv/errata/andes/Makefile
+++ b/arch/riscv/errata/andes/Makefile
@@ -1 +1,5 @@
+ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+CFLAGS_errata.o := -mcmodel=medany
+endif
+
obj-y += errata.o
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 740a979171e5..2b2f5df7ef2c 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -31,6 +31,27 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
return addr;
}
+/*
+ * Let's do like x86/arm64 and ignore the compat syscalls.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ return is_compat_task();
+}
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ /*
+ * Since all syscall functions have __riscv_ prefix, we must skip it.
+ * However, as we described above, we decided to ignore compat
+ * syscalls, so we don't care about __riscv_compat_ prefix here.
+ */
+ return !strcmp(sym + 8, name);
+}
+
struct dyn_arch_ftrace {
};
#endif
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
index e7882ccb0fd4..78ea44f76718 100644
--- a/arch/riscv/include/asm/kprobes.h
+++ b/arch/riscv/include/asm/kprobes.h
@@ -40,6 +40,15 @@ void arch_remove_kprobe(struct kprobe *p);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
bool kprobe_breakpoint_handler(struct pt_regs *regs);
bool kprobe_single_step_handler(struct pt_regs *regs);
-
+#else
+static inline bool kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline bool kprobe_single_step_handler(struct pt_regs *regs)
+{
+ return false;
+}
#endif /* CONFIG_KPROBES */
#endif /* _ASM_RISCV_KPROBES_H */
diff --git a/arch/riscv/include/asm/uprobes.h b/arch/riscv/include/asm/uprobes.h
index f2183e00fdd2..3fc7deda9190 100644
--- a/arch/riscv/include/asm/uprobes.h
+++ b/arch/riscv/include/asm/uprobes.h
@@ -34,7 +34,18 @@ struct arch_uprobe {
bool simulate;
};
+#ifdef CONFIG_UPROBES
bool uprobe_breakpoint_handler(struct pt_regs *regs);
bool uprobe_single_step_handler(struct pt_regs *regs);
-
+#else
+static inline bool uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline bool uprobe_single_step_handler(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_UPROBES */
#endif /* _ASM_RISCV_UPROBES_H */
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index a8efa053c4a5..9cc0a7669271 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -60,7 +60,7 @@ static void init_irq_stacks(void)
}
#endif /* CONFIG_VMAP_STACK */
-#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
#ifdef CONFIG_IRQ_STACKS
@@ -92,7 +92,7 @@ void do_softirq_own_stack(void)
#endif
__do_softirq();
}
-#endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */
+#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */
#else
static void init_irq_stacks(void) {}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index e600aab116a4..aac853ae4eb7 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -173,19 +173,6 @@ static void __init init_resources(void)
if (ret < 0)
goto error;
-#ifdef CONFIG_KEXEC_CORE
- if (crashk_res.start != crashk_res.end) {
- ret = add_resource(&iomem_resource, &crashk_res);
- if (ret < 0)
- goto error;
- }
- if (crashk_low_res.start != crashk_low_res.end) {
- ret = add_resource(&iomem_resource, &crashk_low_res);
- if (ret < 0)
- goto error;
- }
-#endif
-
#ifdef CONFIG_CRASH_DUMP
if (elfcorehdr_size > 0) {
elfcorehdr_res.start = elfcorehdr_addr;
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 180d951d3624..21a4d0e111bc 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -311,13 +311,6 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
/* Align the stack frame. */
sp &= ~0xfUL;
- /*
- * Fail if the size of the altstack is not large enough for the
- * sigframe construction.
- */
- if (current->sas_ss_size && sp < current->sas_ss_sp)
- return (void __user __force *)-1UL;
-
return (void __user *)sp;
}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 19807c4d3805..fae8f610d867 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -13,6 +13,8 @@
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
+#include <linux/uprobes.h>
+#include <asm/uprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
@@ -247,22 +249,28 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
return GET_INSN_LENGTH(insn);
}
+static bool probe_single_step_handler(struct pt_regs *regs)
+{
+ bool user = user_mode(regs);
+
+ return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
+}
+
+static bool probe_breakpoint_handler(struct pt_regs *regs)
+{
+ bool user = user_mode(regs);
+
+ return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
+}
+
void handle_break(struct pt_regs *regs)
{
-#ifdef CONFIG_KPROBES
- if (kprobe_single_step_handler(regs))
+ if (probe_single_step_handler(regs))
return;
- if (kprobe_breakpoint_handler(regs))
- return;
-#endif
-#ifdef CONFIG_UPROBES
- if (uprobe_single_step_handler(regs))
+ if (probe_breakpoint_handler(regs))
return;
- if (uprobe_breakpoint_handler(regs))
- return;
-#endif
current->thread.bad_cause = regs->cause;
if (user_mode(regs))
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 6115d7514972..90d4ba36d1d0 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -72,7 +72,7 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
}
pagefault_out_of_memory();
return;
- } else if (fault & VM_FAULT_SIGBUS) {
+ } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
no_context(regs, addr);
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index e4a2ace92dbe..b52f0210481f 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -183,15 +183,22 @@ void set_huge_pte_at(struct mm_struct *mm,
pte_t pte,
unsigned long sz)
{
+ unsigned long hugepage_shift;
int i, pte_num;
- if (!pte_napot(pte)) {
- set_pte_at(mm, addr, ptep, pte);
- return;
- }
+ if (sz >= PGDIR_SIZE)
+ hugepage_shift = PGDIR_SHIFT;
+ else if (sz >= P4D_SIZE)
+ hugepage_shift = P4D_SHIFT;
+ else if (sz >= PUD_SIZE)
+ hugepage_shift = PUD_SHIFT;
+ else if (sz >= PMD_SIZE)
+ hugepage_shift = PMD_SHIFT;
+ else
+ hugepage_shift = PAGE_SHIFT;
- pte_num = napot_pte_num(napot_cont_order(pte));
- for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
+ pte_num = sz >> hugepage_shift;
+ for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
set_pte_at(mm, addr, ptep, pte);
}
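
For reference, the size-to-shift selection added to set_huge_pte_at() above can be exercised on its own. A hedged userspace sketch with illustrative Sv39-style shift values standing in for the kernel's PMD_SHIFT/PUD_SHIFT/PGDIR_SHIFT macros (the P4D level is omitted for brevity):

#include <stdio.h>

#define EX_PAGE_SHIFT	12	/* assumed 4 KiB base pages */
#define EX_PMD_SHIFT	21
#define EX_PUD_SHIFT	30
#define EX_PGDIR_SHIFT	39

static unsigned int hugepage_shift(unsigned long sz)
{
	if (sz >= (1UL << EX_PGDIR_SHIFT))
		return EX_PGDIR_SHIFT;
	if (sz >= (1UL << EX_PUD_SHIFT))
		return EX_PUD_SHIFT;
	if (sz >= (1UL << EX_PMD_SHIFT))
		return EX_PMD_SHIFT;
	return EX_PAGE_SHIFT;
}

int main(void)
{
	unsigned long sz = 64UL << 10;		/* 64 KiB napot mapping */
	unsigned int shift = hugepage_shift(sz);
	unsigned long pte_num = sz >> shift;	/* PTE slots to fill */

	/* 64 KiB < 2 MiB, so shift=12 and 16 PTEs are written 4 KiB apart */
	printf("shift=%u pte_num=%lu step=%lu\n", shift, pte_num, 1UL << shift);
	return 0;
}
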
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index ecd3ae6f4116..8581693e62d3 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -245,7 +245,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
/* Set return value. */
if (!is_tail_call)
- emit_mv(RV_REG_A0, RV_REG_A5, ctx);
+ emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */
ctx);
@@ -759,8 +759,10 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
if (ret)
return ret;
- if (save_ret)
- emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
+ if (save_ret) {
+ emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+ emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
+ }
/* update branch with beqz */
if (ctx->insns) {
@@ -853,7 +855,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
if (save_ret) {
- stack_size += 8;
+ stack_size += 16; /* Save both A5 (BPF R0) and A0 */
retval_off = stack_size;
}
@@ -957,6 +959,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
if (ret)
goto out;
emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+ emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
im->ip_after_call = ctx->insns + ctx->ninsns;
/* 2 nops reserved for auipc+jalr pair */
emit(rv_nop(), ctx);
@@ -988,8 +991,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
if (flags & BPF_TRAMP_F_RESTORE_REGS)
restore_args(nregs, args_off, ctx);
- if (save_ret)
+ if (save_ret) {
emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
+ emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx);
+ }
emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
@@ -1515,7 +1520,8 @@ out_be:
if (ret)
return ret;
- emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
break;
}
/* tail call */
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 01257ce3b89c..442a74f113cb 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -57,6 +57,7 @@ static void kasan_populate_shadow(void)
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
+ unsigned long memgap_start = 0;
unsigned long untracked_end;
unsigned long start, end;
int i;
@@ -101,8 +102,12 @@ static void kasan_populate_shadow(void)
* +- shadow end ----+---------+- shadow end ---+
*/
- for_each_physmem_usable_range(i, &start, &end)
+ for_each_physmem_usable_range(i, &start, &end) {
kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
+ if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260)
+ kasan_populate(memgap_start, start, POPULATE_KASAN_ZERO_SHADOW);
+ memgap_start = end;
+ }
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_end = VMALLOC_START;
/* shallowly populate kasan shadow for vmalloc and modules */
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index ada83149932f..858beaf4a8cb 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -53,7 +53,7 @@ static void hypfs_update_update(struct super_block *sb)
struct inode *inode = d_inode(sb_info->update_file);
sb_info->last_update = ktime_get_seconds();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
/* directory tree removal functions */
@@ -101,7 +101,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
ret->i_mode = mode;
ret->i_uid = hypfs_info->uid;
ret->i_gid = hypfs_info->gid;
- ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
+ simple_inode_init_ts(ret);
if (S_ISDIR(mode))
set_nlink(ret, 2);
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c1b47d608a2b..efaebba5ee19 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -303,11 +303,6 @@ static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
return 0;
}
-static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
-{
- return READ_ONCE(gisa->next_alert) != (u32)virt_to_phys(gisa);
-}
-
static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
@@ -3216,11 +3211,12 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
if (!gi->origin)
return;
- if (gi->alert.mask)
- KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
- kvm, gi->alert.mask);
- while (gisa_in_alert_list(gi->origin))
- cpu_relax();
+ WARN(gi->alert.mask != 0x00,
+ "unexpected non zero alert.mask 0x%02x",
+ gi->alert.mask);
+ gi->alert.mask = 0x00;
+ if (gisa_set_iam(gi->origin, gi->alert.mask))
+ process_gib_alert_list();
hrtimer_cancel(&gi->timer);
gi->origin = NULL;
VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 2861e3360aff..e507692e51e7 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2066,6 +2066,7 @@ struct bpf_tramp_jit {
* func_addr's original caller
*/
int stack_size; /* Trampoline stack size */
+ int backchain_off; /* Offset of backchain */
int stack_args_off; /* Offset of stack arguments for calling
* func_addr, has to be at the top
*/
@@ -2086,9 +2087,10 @@ struct bpf_tramp_jit {
* for __bpf_prog_enter() return value and
* func_addr respectively
*/
- int r14_off; /* Offset of saved %r14 */
int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */
int tccnt_off; /* Offset of saved tailcall counter */
+ int r14_off; /* Offset of saved %r14, has to be at the
+ * bottom */
int do_fexit; /* do_fexit: label */
};
@@ -2247,8 +2249,12 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
* Calculate the stack layout.
*/
- /* Reserve STACK_FRAME_OVERHEAD bytes for the callees. */
+ /*
+ * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
+ * ABI requires, put our backchain at the end of the allocated memory.
+ */
tjit->stack_size = STACK_FRAME_OVERHEAD;
+ tjit->backchain_off = tjit->stack_size - sizeof(u64);
tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
tjit->ip_off = alloc_stack(tjit, sizeof(u64));
@@ -2256,16 +2262,25 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
tjit->retval_off = alloc_stack(tjit, sizeof(u64));
tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
- tjit->r14_off = alloc_stack(tjit, sizeof(u64));
tjit->run_ctx_off = alloc_stack(tjit,
sizeof(struct bpf_tramp_run_ctx));
tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
- /* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
- tjit->stack_size -= STACK_FRAME_OVERHEAD;
+ tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
+ /*
+ * In accordance with the s390x ABI, the caller has allocated
+ * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
+ * backchain, and the rest we can use.
+ */
+ tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
+ /* lgr %r1,%r15 */
+ EMIT4(0xb9040000, REG_1, REG_15);
/* aghi %r15,-stack_size */
EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
+ /* stg %r1,backchain_off(%r15) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
+ tjit->backchain_off);
/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
_EMIT6(0xd203f000 | tjit->tccnt_off,
0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 2d9b01d7ca4c..99209085c75b 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -564,6 +564,17 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
s->dma_length = 0;
}
}
+
+static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
+{
+ size_t n = BITS_TO_LONGS(bits);
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
+ return NULL;
+
+ return vzalloc(bytes);
+}
int zpci_dma_init_device(struct zpci_dev *zdev)
{
@@ -604,13 +615,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->end_dma - zdev->start_dma + 1);
zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
- zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
+ zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
goto free_dma_table;
}
if (!s390_iommu_strict) {
- zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
+ zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
if (!zdev->lazy_bitmap) {
rc = -ENOMEM;
goto free_bitmap;
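
The bitmap_vzalloc() helper added above replaces the old iommu_pages / 8 arithmetic with an explicit multiplication-overflow check before allocating. A userspace sketch of the same sizing logic, using the compiler builtin and calloc() in place of check_mul_overflow() and vzalloc():

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG		(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(b)	(((b) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Zeroed bitmap of 'bits' bits; NULL on overflow or allocation failure. */
static unsigned long *bitmap_zalloc_checked(size_t bits)
{
	size_t n = BITS_TO_LONGS(bits);
	size_t bytes;

	if (__builtin_mul_overflow(n, sizeof(unsigned long), &bytes))
		return NULL;

	return calloc(1, bytes);
}

int main(void)
{
	unsigned long *bm = bitmap_zalloc_checked(1UL << 20);

	printf("%s\n", bm ? "allocated 1 Mi-bit bitmap" : "failed");
	free(bm);
	return 0;
}
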
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 84ad709cbecb..66eda40fce36 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -453,5 +453,5 @@ ccslow: cmp %g1, 0
* we only bother with faults on loads... */
cc_fault:
- ret
+ retl
clr %o0
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index dc8c876fbd8f..80d76aea1f7b 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -103,6 +103,16 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
return ES_OK;
}
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ return ES_OK;
+}
+
+static bool fault_in_kernel_space(unsigned long address)
+{
+ return false;
+}
+
#undef __init
#define __init
diff --git a/arch/x86/events/utils.c b/arch/x86/events/utils.c
index 76b1f8bb0fd5..dab4ed199227 100644
--- a/arch/x86/events/utils.c
+++ b/arch/x86/events/utils.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/insn.h>
+#include <linux/mm.h>
#include "perf_event.h"
@@ -132,9 +133,9 @@ static int get_branch_type(unsigned long from, unsigned long to, int abort,
* The LBR logs any address in the IP, even if the IP just
* faulted. This means userspace can control the from address.
* Ensure we don't blindly read any address by validating it is
- * a known text address.
+ * a known text address and not a vsyscall address.
*/
- if (kernel_text_address(from)) {
+ if (kernel_text_address(from) && !in_gate_area_no_mm(from)) {
addr = (void *)from;
/*
* Assume we can get the maximum possible size
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 3a233ebff712..25050d953eee 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -28,8 +28,6 @@ struct x86_cpu {
};
#ifdef CONFIG_HOTPLUG_CPU
-extern int arch_register_cpu(int num);
-extern void arch_unregister_cpu(int);
extern void soft_restart_cpu(void);
#endif
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index 31089b851c4f..a2be3aefff9f 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -157,7 +157,8 @@ static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) {
static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
#endif
-extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
+extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
+ unsigned int size, u64 xfeatures, u32 pkru);
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);
static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 637fa1df3512..c715097e92fd 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -69,6 +69,8 @@ struct legacy_pic {
void (*make_irq)(unsigned int irq);
};
+void legacy_pic_pcat_compat(void);
+
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 5fcd85fd64fd..197316121f04 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -27,6 +27,7 @@
* _X - regular server parts
* _D - micro server parts
* _N,_P - other mobile parts
+ * _H - premium mobile parts
* _S - other client parts
*
* Historical OPTDIFFs:
@@ -124,6 +125,7 @@
#define INTEL_FAM6_METEORLAKE 0xAC
#define INTEL_FAM6_METEORLAKE_L 0xAA
+#define INTEL_FAM6_ARROWLAKE_H 0xC5
#define INTEL_FAM6_ARROWLAKE 0xC6
#define INTEL_FAM6_LUNARLAKE_M 0xBD
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 17715cb8731d..70d139406bc8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -528,7 +528,6 @@ struct kvm_pmu {
u64 raw_event_mask;
struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
- struct irq_work irq_work;
/*
* Overlay the bitmap with a 64-bit atomic so that all bits can be
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1d111350197f..b37abb55e948 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -637,12 +637,17 @@
/* AMD Last Branch Record MSRs */
#define MSR_AMD64_LBR_SELECT 0xc000010e
-/* Fam 17h MSRs */
-#define MSR_F17H_IRPERF 0xc00000e9
+/* Zen4 */
+#define MSR_ZEN4_BP_CFG 0xc001102e
+#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
+/* Zen 2 */
#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
+/* Fam 17h MSRs */
+#define MSR_F17H_IRPERF 0xc00000e9
+
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index ad98dd1d9cfb..c31c633419fe 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -129,7 +129,6 @@ void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
-bool smp_park_other_cpus_in_init(void);
void smp_store_cpu_info(int id);
asmlinkage __visible void smp_reboot_interrupt(void);
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 19bf955b67e0..3ac0ffc4f3e2 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -268,6 +268,7 @@ enum avic_ipi_failure_cause {
AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
AVIC_IPI_FAILURE_INVALID_TARGET,
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+ AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
};
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8bae40a66282..5c367c1290c3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -496,7 +496,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel
unsigned long __must_check
-copy_mc_to_user(void *to, const void *from, unsigned len);
+copy_mc_to_user(void __user *to, const void *from, unsigned len);
#endif
/*
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 2a0ea38955df..c55c0ef47a18 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -148,6 +148,9 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
pr_debug("Local APIC address 0x%08x\n", madt->address);
}
+ if (madt->flags & ACPI_MADT_PCAT_COMPAT)
+ legacy_pic_pcat_compat();
+
/* ACPI 6.3 and newer support the online capable bit. */
if (acpi_gbl_FADT.header.revision > 6 ||
(acpi_gbl_FADT.header.revision == 6 &&
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 517ee01503be..73be3931e4f0 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -403,6 +403,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
u8 insn_buff[MAX_PATCH_LEN];
DPRINTK(ALT, "alt table %px, -> %px", start, end);
+
+ /*
+ * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
+ * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
+ * During the process, KASAN becomes confused seeing partial LA57
+ * conversion and triggers a false-positive out-of-bound report.
+ *
+ * Disable KASAN until the patching is complete.
+ */
+ kasan_disable_current();
+
/*
* The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code.
@@ -452,6 +463,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
text_poke_early(instr, insn_buff, insn_buff_sz);
}
+
+ kasan_enable_current();
}
static inline bool is_jcc32(struct insn *insn)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 03ef962a6992..ece2b5b7b0fe 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -80,6 +80,10 @@ static const int amd_div0[] =
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
+static const int amd_erratum_1485[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
+ AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
+
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
int osvw_id = *erratum++;
@@ -1149,6 +1153,10 @@ static void init_amd(struct cpuinfo_x86 *c)
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
setup_force_cpu_bug(X86_BUG_DIV0);
}
+
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
+ cpu_has_amd_erratum(c, amd_erratum_1485))
+ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index ded1fc7cb7cb..f136ac046851 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -30,15 +30,15 @@ struct rmid_entry {
struct list_head list;
};
-/**
- * @rmid_free_lru A least recently used list of free RMIDs
+/*
+ * @rmid_free_lru - A least recently used list of free RMIDs
* These RMIDs are guaranteed to have an occupancy less than the
* threshold occupancy
*/
static LIST_HEAD(rmid_free_lru);
-/**
- * @rmid_limbo_count count of currently unused but (potentially)
+/*
+ * @rmid_limbo_count - count of currently unused but (potentially)
* dirty RMIDs.
* This counts RMIDs that no one is currently using but that
* may have a occupancy value > resctrl_rmid_realloc_threshold. User can
@@ -46,7 +46,7 @@ static LIST_HEAD(rmid_free_lru);
*/
static unsigned int rmid_limbo_count;
-/**
+/*
* @rmid_entry - The entry in the limbo and free lists.
*/
static struct rmid_entry *rmid_ptrs;
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index a86d37052a64..a21a4d0ecc34 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -369,14 +369,15 @@ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
- unsigned int size, u32 pkru)
+ unsigned int size, u64 xfeatures, u32 pkru)
{
struct fpstate *kstate = gfpu->fpstate;
union fpregs_state *ustate = buf;
struct membuf mb = { .p = buf, .left = size };
if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
- __copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
+ __copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
+ XSTATE_COPY_XSAVE);
} else {
memcpy(&ustate->fxsave, &kstate->regs.fxsave,
sizeof(ustate->fxsave));
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index cadf68737e6b..ef6906107c54 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1049,6 +1049,7 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
* __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
* @to: membuf descriptor
* @fpstate: The fpstate buffer from which to copy
+ * @xfeatures: The mask of xfeatures to save (XSAVE mode only)
* @pkru_val: The PKRU value to store in the PKRU component
* @copy_mode: The requested copy mode
*
@@ -1059,7 +1060,8 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
* It supports partial copy but @to.pos always starts from zero.
*/
void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
- u32 pkru_val, enum xstate_copy_mode copy_mode)
+ u64 xfeatures, u32 pkru_val,
+ enum xstate_copy_mode copy_mode)
{
const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
struct xregs_state *xinit = &init_fpstate.regs.xsave;
@@ -1083,7 +1085,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
break;
case XSTATE_COPY_XSAVE:
- header.xfeatures &= fpstate->user_xfeatures;
+ header.xfeatures &= fpstate->user_xfeatures & xfeatures;
break;
}
@@ -1185,6 +1187,7 @@ void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
enum xstate_copy_mode copy_mode)
{
__copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
+ tsk->thread.fpu.fpstate->user_xfeatures,
tsk->thread.pkru, copy_mode);
}
@@ -1536,10 +1539,7 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
fpregs_restore_userregs();
newfps->xfeatures = curfps->xfeatures | xfeatures;
-
- if (!guest_fpu)
- newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
-
+ newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
newfps->xfd = curfps->xfd & ~xfeatures;
/* Do the final updates within the locked region */
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index a4ecb04d8d64..3518fb26d06b 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -43,7 +43,8 @@ enum xstate_copy_mode {
struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
- u32 pkru_val, enum xstate_copy_mode copy_mode);
+ u64 xfeatures, u32 pkru_val,
+ enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 30a55207c000..c20d1832c481 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,6 +32,7 @@
*/
static void init_8259A(int auto_eoi);
+static bool pcat_compat __ro_after_init;
static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);
@@ -299,15 +300,32 @@ static void unmask_8259A(void)
static int probe_8259A(void)
{
+ unsigned char new_val, probe_val = ~(1 << PIC_CASCADE_IR);
unsigned long flags;
- unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
- unsigned char new_val;
+
+ /*
+ * If MADT has the PCAT_COMPAT flag set, then do not bother probing
+ * for the PIC. Some BIOSes leave the PIC uninitialized and probing
+ * fails.
+ *
+ * Right now this causes problems as quite some code depends on
+ * nr_legacy_irqs() > 0 or has_legacy_pic() == true. This is silly
+ * when the system has an IO/APIC because then PIC is not required
+ * at all, except for really old machines where the timer interrupt
+ * must be routed through the PIC. So just pretend that the PIC is
+ * there and let legacy_pic->init() initialize it for nothing.
+ *
+ * Alternatively this could just try to initialize the PIC and
+ * repeat the probe, but for cases where there is no PIC that's
+ * just pointless.
+ */
+ if (pcat_compat)
+ return nr_legacy_irqs();
+
/*
- * Check to see if we have a PIC.
- * Mask all except the cascade and read
- * back the value we just wrote. If we don't
- * have a PIC, we will read 0xff as opposed to the
- * value we wrote.
+ * Check to see if we have a PIC. Mask all except the cascade and
+ * read back the value we just wrote. If we don't have a PIC, we
+ * will read 0xff as opposed to the value we wrote.
*/
raw_spin_lock_irqsave(&i8259A_lock, flags);
@@ -429,5 +447,9 @@ static int __init i8259A_init_ops(void)
return 0;
}
-
device_initcall(i8259A_init_ops);
+
+void __init legacy_pic_pcat_compat(void)
+{
+ pcat_compat = true;
+}
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index dcf325b7b022..ccb0915e84e1 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -632,6 +632,23 @@ fail:
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
+static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
+ unsigned long address,
+ bool write)
+{
+ if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_USER;
+ ctxt->fi.cr2 = address;
+ if (write)
+ ctxt->fi.error_code |= X86_PF_WRITE;
+
+ return ES_EXCEPTION;
+ }
+
+ return ES_OK;
+}
+
static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
void *src, char *buf,
unsigned int data_size,
@@ -639,7 +656,12 @@ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
bool backwards)
{
int i, b = backwards ? -1 : 1;
- enum es_result ret = ES_OK;
+ unsigned long address = (unsigned long)src;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, false);
+ if (ret != ES_OK)
+ return ret;
for (i = 0; i < count; i++) {
void *s = src + (i * data_size * b);
@@ -660,7 +682,12 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
bool backwards)
{
int i, s = backwards ? -1 : 1;
- enum es_result ret = ES_OK;
+ unsigned long address = (unsigned long)dst;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, true);
+ if (ret != ES_OK)
+ return ret;
for (i = 0; i < count; i++) {
void *d = dst + (i * data_size * s);
@@ -696,6 +723,9 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
struct insn *insn = &ctxt->insn;
+ size_t size;
+ u64 port;
+
*exitinfo = 0;
switch (insn->opcode.bytes[0]) {
@@ -704,7 +734,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0x6d:
*exitinfo |= IOIO_TYPE_INS;
*exitinfo |= IOIO_SEG_ES;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
/* OUTS opcodes */
@@ -712,41 +742,43 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0x6f:
*exitinfo |= IOIO_TYPE_OUTS;
*exitinfo |= IOIO_SEG_DS;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
/* IN immediate opcodes */
case 0xe4:
case 0xe5:
*exitinfo |= IOIO_TYPE_IN;
- *exitinfo |= (u8)insn->immediate.value << 16;
+ port = (u8)insn->immediate.value & 0xffff;
break;
/* OUT immediate opcodes */
case 0xe6:
case 0xe7:
*exitinfo |= IOIO_TYPE_OUT;
- *exitinfo |= (u8)insn->immediate.value << 16;
+ port = (u8)insn->immediate.value & 0xffff;
break;
/* IN register opcodes */
case 0xec:
case 0xed:
*exitinfo |= IOIO_TYPE_IN;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
/* OUT register opcodes */
case 0xee:
case 0xef:
*exitinfo |= IOIO_TYPE_OUT;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
default:
return ES_DECODE_FAILED;
}
+ *exitinfo |= port << 16;
+
switch (insn->opcode.bytes[0]) {
case 0x6c:
case 0x6e:
@@ -756,12 +788,15 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0xee:
/* Single byte opcodes */
*exitinfo |= IOIO_DATA_8;
+ size = 1;
break;
default:
/* Length determined by instruction parsing */
*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
: IOIO_DATA_32;
+ size = (insn->opnd_bytes == 2) ? 2 : 4;
}
+
switch (insn->addr_bytes) {
case 2:
*exitinfo |= IOIO_ADDR_16;
@@ -777,7 +812,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
if (insn_has_rep_prefix(insn))
*exitinfo |= IOIO_REP;
- return ES_OK;
+ return vc_ioio_check(ctxt, (u16)port, size);
}
static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index d8c1e3be74c0..6395bfd87b68 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -524,6 +524,33 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
return ES_OK;
}
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ BUG_ON(size > 4);
+
+ if (user_mode(ctxt->regs)) {
+ struct thread_struct *t = &current->thread;
+ struct io_bitmap *iobm = t->io_bitmap;
+ size_t idx;
+
+ if (!iobm)
+ goto fault;
+
+ for (idx = port; idx < port + size; ++idx) {
+ if (test_bit(idx, iobm->bitmap))
+ goto fault;
+ }
+ }
+
+ return ES_OK;
+
+fault:
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+
+ return ES_EXCEPTION;
+}
+
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
@@ -1508,6 +1535,9 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
return ES_DECODE_FAILED;
}
+ if (user_mode(ctxt->regs))
+ return ES_UNSUPPORTED;
+
switch (mmio) {
case INSN_MMIO_WRITE:
memcpy(ghcb->shared_buffer, reg_data, bytes);
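
The user-mode path of vc_ioio_check() added earlier in this file walks the task's I/O permission bitmap, where a set bit means the port is not accessible. A small sketch of that range check over a plain bitmap, with hypothetical names (the kernel itself uses test_bit() on t->io_bitmap->bitmap):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

static bool bit_set(const unsigned long *bm, size_t idx)
{
	return bm[idx / BITS_PER_LONG] & (1UL << (idx % BITS_PER_LONG));
}

/* True if every byte of [port, port + size) is permitted, i.e. its bit
 * is clear, matching the x86 I/O permission bitmap convention. */
static bool ioio_permitted(const unsigned long *iobm, unsigned int port, size_t size)
{
	for (size_t idx = port; idx < port + size; idx++)
		if (bit_set(iobm, idx))
			return false;
	return true;
}

int main(void)
{
	unsigned long iobm[2] = { 0 };

	iobm[0] |= 1UL << 0x21;		/* deny port 0x21 for the example */
	printf("%d %d\n", ioio_permitted(iobm, 0x21, 1), ioio_permitted(iobm, 0x20, 1));
	return 0;	/* prints "0 1" */
}
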
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 6eb06d001bcc..96a771f9f930 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -131,7 +131,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
}
/*
- * Disable virtualization, APIC etc. and park the CPU in a HLT loop
+ * this function calls the 'stop' function on all other CPUs in the system.
*/
DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
{
@@ -172,17 +172,13 @@ static void native_stop_other_cpus(int wait)
* 2) Wait for all other CPUs to report that they reached the
* HLT loop in stop_this_cpu()
*
- * 3) If the system uses INIT/STARTUP for CPU bringup, then
- * send all present CPUs an INIT vector, which brings them
- * completely out of the way.
+ * 3) If #2 timed out send an NMI to the CPUs which did not
+ * yet report
*
- * 4) If #3 is not possible and #2 timed out send an NMI to the
- * CPUs which did not yet report
- *
- * 5) Wait for all other CPUs to report that they reached the
+ * 4) Wait for all other CPUs to report that they reached the
* HLT loop in stop_this_cpu()
*
- * #4 can obviously race against a CPU reaching the HLT loop late.
+ * #3 can obviously race against a CPU reaching the HLT loop late.
* That CPU will have reported already and the "have all CPUs
* reached HLT" condition will be true despite the fact that the
* other CPU is still handling the NMI. Again, there is no
@@ -198,7 +194,7 @@ static void native_stop_other_cpus(int wait)
/*
* Don't wait longer than a second for IPI completion. The
* wait request is not checked here because that would
- * prevent an NMI/INIT shutdown in case that not all
+ * prevent an NMI shutdown attempt in case that not all
* CPUs reach shutdown state.
*/
timeout = USEC_PER_SEC;
@@ -206,27 +202,7 @@ static void native_stop_other_cpus(int wait)
udelay(1);
}
- /*
- * Park all other CPUs in INIT including "offline" CPUs, if
- * possible. That's a safe place where they can't resume execution
- * of HLT and then execute the HLT loop from overwritten text or
- * page tables.
- *
- * The only downside is a broadcast MCE, but up to the point where
- * the kexec() kernel brought all APs online again an MCE will just
- * make HLT resume and handle the MCE. The machine crashes and burns
- * due to overwritten text, page tables and data. So there is a
- * choice between fire and frying pan. The result is pretty much
- * the same. Chose frying pan until x86 provides a sane mechanism
- * to park a CPU.
- */
- if (smp_park_other_cpus_in_init())
- goto done;
-
- /*
- * If park with INIT was not possible and the REBOOT_VECTOR didn't
- * take all secondary CPUs offline, try with the NMI.
- */
+ /* if the REBOOT_VECTOR didn't work, try with the NMI */
if (!cpumask_empty(&cpus_stop_mask)) {
/*
* If NMI IPI is enabled, try to register the stop handler
@@ -249,7 +225,6 @@ static void native_stop_other_cpus(int wait)
udelay(1);
}
-done:
local_irq_save(flags);
disable_local_APIC();
mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 48e040618731..2a187c0cbd5b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1240,33 +1240,6 @@ void arch_thaw_secondary_cpus_end(void)
cache_aps_init();
}
-bool smp_park_other_cpus_in_init(void)
-{
- unsigned int cpu, this_cpu = smp_processor_id();
- unsigned int apicid;
-
- if (apic->wakeup_secondary_cpu_64 || apic->wakeup_secondary_cpu)
- return false;
-
- /*
- * If this is a crash stop which does not execute on the boot CPU,
- * then this cannot use the INIT mechanism because INIT to the boot
- * CPU will reset the machine.
- */
- if (this_cpu)
- return false;
-
- for_each_cpu_and(cpu, &cpus_booted_once_mask, cpu_present_mask) {
- if (cpu == this_cpu)
- continue;
- apicid = apic->cpu_present_to_apicid(cpu);
- if (apicid == BAD_APICID)
- continue;
- send_init_sequence(apicid);
- }
- return true;
-}
-
/*
* Early setup to make printk work.
*/
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index ca004e2e4469..0bab03130033 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -54,7 +54,7 @@ void arch_unregister_cpu(int num)
EXPORT_SYMBOL(arch_unregister_cpu);
#else /* CONFIG_HOTPLUG_CPU */
-static int __init arch_register_cpu(int num)
+int __init arch_register_cpu(int num)
{
return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
}
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index bbc440c93e08..1123ef3ccf90 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -15,6 +15,7 @@
* ( The serial nature of the boot logic and the CPU hotplug lock
* protects against more than 2 CPUs entering this code. )
*/
+#include <linux/workqueue.h>
#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
@@ -342,6 +343,13 @@ static inline unsigned int loop_timeout(int cpu)
return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}
+static void tsc_sync_mark_tsc_unstable(struct work_struct *work)
+{
+ mark_tsc_unstable("check_tsc_sync_source failed");
+}
+
+static DECLARE_WORK(tsc_sync_work, tsc_sync_mark_tsc_unstable);
+
/*
* The freshly booted CPU initiates this via an async SMP function call.
*/
@@ -395,7 +403,7 @@ retry:
"turning off TSC clock.\n", max_warp);
if (random_warps)
pr_warn("TSC warped randomly between CPUs\n");
- mark_tsc_unstable("check_tsc_sync_source failed");
+ schedule_work(&tsc_sync_work);
}
/*
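The tsc_sync change defers mark_tsc_unstable() to a statically declared work item instead of calling it from the SMP-function-call context. DECLARE_WORK() plus schedule_work() is the standard way to push work from a context that must not sleep into process context; a minimal sketch of the same pattern, with a made-up payload, is:

#include <linux/printk.h>
#include <linux/workqueue.h>

static void deferred_report(struct work_struct *work)
{
	/* Runs later from a kworker, where sleeping is allowed. */
	pr_warn("deferred: condition detected earlier in atomic context\n");
}

static DECLARE_WORK(report_work, deferred_report);

/* Called from a context that must not sleep (IRQ, SMP function call, ...). */
static void note_condition(void)
{
	schedule_work(&report_work);
}

Repeated schedule_work() calls while the item is still pending simply coalesce, which is fine here since marking the TSC unstable once is enough.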
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0544e30b4946..773132c3bf5a 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -360,14 +360,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.guest_supported_xcr0 =
cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
- /*
- * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
- * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
- * supported by the host.
- */
- vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
- XFEATURE_MASK_FPSSE;
-
kvm_update_pv_runtime(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index dcd60b39e794..3e977dbbf993 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2759,13 +2759,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
u32 reg = kvm_lapic_get_reg(apic, lvt_type);
int vector, mode, trig_mode;
+ int r;
if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
vector = reg & APIC_VECTOR_MASK;
mode = reg & APIC_MODE_MASK;
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
- return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
- NULL);
+
+ r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
+ if (r && lvt_type == APIC_LVTPC)
+ kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
+ return r;
}
return 0;
}
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index edb89b51b383..9ae07db6f0f6 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -93,14 +93,6 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
#undef __KVM_X86_PMU_OP
}
-static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
-{
- struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
- struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
- kvm_pmu_deliver_pmi(vcpu);
-}
-
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -124,20 +116,7 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
}
- if (!pmc->intr || skip_pmi)
- return;
-
- /*
- * Inject PMI. If vcpu was in a guest mode during NMI PMI
- * can be ejected on a guest mode re-entry. Otherwise we can't
- * be sure that vcpu wasn't executing hlt instruction at the
- * time of vmexit and is not going to re-enter guest mode until
- * woken up. So we should wake it, but this is impossible from
- * NMI context. Do it from irq work instead.
- */
- if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
- irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
- else
+ if (pmc->intr && !skip_pmi)
kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}
@@ -675,9 +654,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-
- irq_work_sync(&pmu->irq_work);
static_call(kvm_x86_pmu_reset)(vcpu);
}
@@ -687,7 +663,6 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
memset(pmu, 0, sizeof(*pmu));
static_call(kvm_x86_pmu_init)(vcpu);
- init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
pmu->event_count = 0;
pmu->need_cleanup = false;
kvm_pmu_refresh(vcpu);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 7d9ba301c090..1d64113de488 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -74,6 +74,12 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
return counter & pmc_bitmask(pmc);
}
+static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
+{
+ pmc->counter += val - pmc_read_counter(pmc);
+ pmc->counter &= pmc_bitmask(pmc);
+}
+
static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
if (pmc->perf_event) {
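The new pmc_write_counter() helper centralises the guest-counter update that the AMD and Intel set_msr paths previously open-coded, and additionally masks the result to the counter's architectural width so a guest write cannot leave stale high bits behind. A tiny standalone model of the arithmetic (field names and the 48-bit width are illustrative; the real pmc_read_counter() also folds in the running perf event count):

#include <stdint.h>

struct fake_pmc {
	uint64_t counter;	/* emulated value tracked by the hypervisor */
	uint64_t mask;		/* e.g. (1ull << 48) - 1 for a 48-bit counter */
};

static uint64_t fake_pmc_read(const struct fake_pmc *pmc)
{
	return pmc->counter & pmc->mask;
}

static void fake_pmc_write(struct fake_pmc *pmc, uint64_t val)
{
	/* Preserve the accumulated delta, then clamp to the counter width. */
	pmc->counter += val - fake_pmc_read(pmc);
	pmc->counter &= pmc->mask;
}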
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 2092db892d7d..4b74ea91f4e6 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -529,8 +529,11 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n");
break;
+ case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
+ /* Invalid IPI with vector < 16 */
+ break;
default:
- pr_err("Unknown IPI interception\n");
+ vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
}
return 1;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index dd496c9e5f91..3fea8c47679e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1253,6 +1253,9 @@ void svm_leave_nested(struct kvm_vcpu *vcpu)
nested_svm_uninit_mmu_context(vcpu);
vmcb_mark_all_dirty(svm->vmcb);
+
+ if (kvm_apicv_activated(vcpu->kvm))
+ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
}
kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index cef5a3d0abd0..373ff6a6687b 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -160,7 +160,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* MSR_PERFCTRn */
pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
if (pmc) {
- pmc->counter += data - pmc_read_counter(pmc);
+ pmc_write_counter(pmc, data);
pmc_update_sample_period(pmc);
return 0;
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9507df93f410..beea99c8e8e0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -691,7 +691,7 @@ static int svm_hardware_enable(void)
*/
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
struct sev_es_save_area *hostsa;
- u32 msr_hi;
+ u32 __maybe_unused msr_hi;
hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
@@ -913,8 +913,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
if (intercept == svm->x2avic_msrs_intercepted)
return;
- if (!x2avic_enabled ||
- !apic_x2apic_mode(svm->vcpu.arch.apic))
+ if (!x2avic_enabled)
return;
for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index f2efa0bf7ae8..820d3e1f6b4f 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -436,11 +436,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated &&
!(msr & MSR_PMC_FULL_WIDTH_BIT))
data = (s64)(s32)data;
- pmc->counter += data - pmc_read_counter(pmc);
+ pmc_write_counter(pmc, data);
pmc_update_sample_period(pmc);
break;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
- pmc->counter += data - pmc_read_counter(pmc);
+ pmc_write_counter(pmc, data);
pmc_update_sample_period(pmc);
break;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f18b06bbda6..41cce5031126 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5382,26 +5382,37 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return 0;
}
-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
- struct kvm_xsave *guest_xsave)
-{
- if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
- return;
-
- fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
- guest_xsave->region,
- sizeof(guest_xsave->region),
- vcpu->arch.pkru);
-}
static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
u8 *state, unsigned int size)
{
+ /*
+ * Only copy state for features that are enabled for the guest. The
+ * state itself isn't problematic, but setting bits in the header for
+ * features that are supported in *this* host but not exposed to the
+ * guest can result in KVM_SET_XSAVE failing when live migrating to a
+ * compatible host without the features that are NOT exposed to the
+ * guest.
+ *
+ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+	 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
+ * supported by the host.
+ */
+ u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
+ XFEATURE_MASK_FPSSE;
+
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
return;
- fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
- state, size, vcpu->arch.pkru);
+ fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
+ supported_xcr0, vcpu->arch.pkru);
+}
+
+static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *guest_xsave)
+{
+ return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+ sizeof(guest_xsave->region));
}
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
@@ -12843,6 +12854,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return true;
#endif
+ if (kvm_test_request(KVM_REQ_PMI, vcpu))
+ return true;
+
if (kvm_arch_interrupt_allowed(vcpu) &&
(kvm_cpu_has_interrupt(vcpu) ||
kvm_guest_apic_has_interrupt(vcpu)))
diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
index 80efd45a7761..6e8b7e600def 100644
--- a/arch/x86/lib/copy_mc.c
+++ b/arch/x86/lib/copy_mc.c
@@ -70,23 +70,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
}
EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
-unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
+unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
{
unsigned long ret;
if (copy_mc_fragile_enabled) {
__uaccess_begin();
- ret = copy_mc_fragile(dst, src, len);
+ ret = copy_mc_fragile((__force void *)dst, src, len);
__uaccess_end();
return ret;
}
if (static_cpu_has(X86_FEATURE_ERMS)) {
__uaccess_begin();
- ret = copy_mc_enhanced_fast_string(dst, src, len);
+ ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
__uaccess_end();
return ret;
}
- return copy_user_generic(dst, src, len);
+ return copy_user_generic((__force void *)dst, src, len);
}
diff --git a/block/bdev.c b/block/bdev.c
index f3b13aa1b7d4..2018d250e131 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -829,6 +829,28 @@ put_blkdev:
}
EXPORT_SYMBOL(blkdev_get_by_dev);
+struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops)
+{
+ struct bdev_handle *handle = kmalloc(sizeof(*handle), GFP_KERNEL);
+ struct block_device *bdev;
+
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+ bdev = blkdev_get_by_dev(dev, mode, holder, hops);
+ if (IS_ERR(bdev)) {
+ kfree(handle);
+ return ERR_CAST(bdev);
+ }
+ handle->bdev = bdev;
+ handle->holder = holder;
+ if (holder)
+ mode |= BLK_OPEN_EXCL;
+ handle->mode = mode;
+ return handle;
+}
+EXPORT_SYMBOL(bdev_open_by_dev);
+
/**
* blkdev_get_by_path - open a block device by name
* @path: path to the block device to open
@@ -867,6 +889,28 @@ struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
}
EXPORT_SYMBOL(blkdev_get_by_path);
+struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops)
+{
+ struct bdev_handle *handle;
+ dev_t dev;
+ int error;
+
+ error = lookup_bdev(path, &dev);
+ if (error)
+ return ERR_PTR(error);
+
+ handle = bdev_open_by_dev(dev, mode, holder, hops);
+ if (!IS_ERR(handle) && (mode & BLK_OPEN_WRITE) &&
+ bdev_read_only(handle->bdev)) {
+ bdev_release(handle);
+ return ERR_PTR(-EACCES);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL(bdev_open_by_path);
+
void blkdev_put(struct block_device *bdev, void *holder)
{
struct gendisk *disk = bdev->bd_disk;
@@ -903,6 +947,13 @@ void blkdev_put(struct block_device *bdev, void *holder)
}
EXPORT_SYMBOL(blkdev_put);
+void bdev_release(struct bdev_handle *handle)
+{
+ blkdev_put(handle->bdev, handle->holder);
+ kfree(handle);
+}
+EXPORT_SYMBOL(bdev_release);
+
/**
* lookup_bdev() - Look up a struct block_device by name.
* @pathname: Name of the block device in the filesystem.
@@ -961,20 +1012,20 @@ void bdev_mark_dead(struct block_device *bdev, bool surprise)
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
bdev->bd_holder_ops->mark_dead(bdev, surprise);
- else
+ else {
+ mutex_unlock(&bdev->bd_holder_lock);
sync_blockdev(bdev);
- mutex_unlock(&bdev->bd_holder_lock);
+ }
invalidate_bdev(bdev);
}
-#ifdef CONFIG_DASD_MODULE
/*
- * Drivers should not use this directly, but the DASD driver has historically
- * had a shutdown to offline mode that doesn't actually remove the gendisk
- * that otherwise looks a lot like a safe device removal.
+ * New drivers should not use this directly. There are, however, some drivers
+ * that need this for historical reasons. For example, the DASD driver has
+ * historically had a shutdown to offline mode that doesn't actually remove the
+ * gendisk that otherwise looks a lot like a safe device removal.
*/
EXPORT_SYMBOL_GPL(bdev_mark_dead);
-#endif
void sync_bdevs(bool wait)
{
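The bdev_open_by_dev()/bdev_open_by_path() helpers introduced above return a heap-allocated struct bdev_handle that records the block device, the holder and the effective open mode, with bdev_release() as the matching teardown. A caller converted to the new API would look roughly like the sketch below (function name and error handling are arbitrary; the holder-with-NULL-hops combination mirrors how several drivers in this series use it):

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/printk.h>

static int open_and_probe(const char *path, void *holder)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				   holder, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Use handle->bdev wherever a struct block_device was used before. */
	pr_info("opened %pg\n", handle->bdev);

	bdev_release(handle);	/* replaces blkdev_put(bdev, holder) */
	return 0;
}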
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 38a881cf97d0..13e4377a8b28 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -723,6 +723,12 @@ static unsigned int calculate_io_allowed(u32 iops_limit,
static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
+ /*
+ * Can result be wider than 64 bits?
+ * We check against 62, not 64, due to ilog2 truncation.
+ */
+ if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
+ return U64_MAX;
return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}
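The guard added to calculate_bytes_allowed() leans on ilog2() truncating: for non-zero x, x < 2^(ilog2(x)+1), while HZ >= 2^ilog2(HZ), so bps_limit * jiffy_elapsed / HZ < 2^(ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) + 2). Keeping that sum of logs at or below 62 therefore bounds the result below 2^64, and anything larger is clamped to U64_MAX instead of silently wrapping. The same pre-check, written as a generic helper (assumes a non-zero divisor, as HZ always is):

#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/types.h>

/* Return a * b / c, or U64_MAX if the 64-bit result could overflow. */
static u64 guarded_scale(u64 a, u64 b, u64 c)
{
	if (!a || !b)
		return 0;

	/* a*b/c < 2^(ilog2(a) + ilog2(b) - ilog2(c) + 2); keep the exponent <= 64. */
	if (ilog2(a) + ilog2(b) - ilog2(c) > 62)
		return U64_MAX;

	return mul_u64_u64_div_u64(a, b, c);
}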
diff --git a/block/disk-events.c b/block/disk-events.c
index 13c3372c465a..2f697224386a 100644
--- a/block/disk-events.c
+++ b/block/disk-events.c
@@ -266,11 +266,8 @@ static unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
* disk_check_media_change - check if a removable media has been changed
* @disk: gendisk to check
*
- * Check whether a removable media has been changed, and attempt to free all
- * dentries and inodes and invalidates all block device page cache entries in
- * that case.
- *
- * Returns %true if the media has changed, or %false if not.
+ * Returns %true and marks the disk for a partition rescan if a removable
+ * media has been changed, and %false if the media did not change.
*/
bool disk_check_media_change(struct gendisk *disk)
{
@@ -278,12 +275,11 @@ bool disk_check_media_change(struct gendisk *disk)
events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
DISK_EVENT_EJECT_REQUEST);
- if (!(events & DISK_EVENT_MEDIA_CHANGE))
- return false;
-
- bdev_mark_dead(disk->part0, true);
- set_bit(GD_NEED_PART_SCAN, &disk->state);
- return true;
+ if (events & DISK_EVENT_MEDIA_CHANGE) {
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
+ return true;
+ }
+ return false;
}
EXPORT_SYMBOL(disk_check_media_change);
diff --git a/block/fops.c b/block/fops.c
index acff3d5d22d4..0abaac705daf 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -542,15 +542,31 @@ static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
return error;
}
+/**
+ * file_to_blk_mode - get block open flags from file flags
+ * @file: file whose open flags should be converted
+ *
+ * Look at file open flags and generate corresponding block open flags from
+ * them. The function works both for a file that is just being opened (e.g.
+ * during the ->open callback) and for a file that is already open. This is
+ * actually non-trivial (see the comment in the function).
+ */
blk_mode_t file_to_blk_mode(struct file *file)
{
blk_mode_t mode = 0;
+ struct bdev_handle *handle = file->private_data;
if (file->f_mode & FMODE_READ)
mode |= BLK_OPEN_READ;
if (file->f_mode & FMODE_WRITE)
mode |= BLK_OPEN_WRITE;
- if (file->private_data)
+ /*
+ * do_dentry_open() clears O_EXCL from f_flags, use handle->mode to
+ * determine whether the open was exclusive for already open files.
+ */
+ if (handle)
+ mode |= handle->mode & BLK_OPEN_EXCL;
+ else if (file->f_flags & O_EXCL)
mode |= BLK_OPEN_EXCL;
if (file->f_flags & O_NDELAY)
mode |= BLK_OPEN_NDELAY;
@@ -568,7 +584,8 @@ blk_mode_t file_to_blk_mode(struct file *file)
static int blkdev_open(struct inode *inode, struct file *filp)
{
- struct block_device *bdev;
+ struct bdev_handle *handle;
+ blk_mode_t mode;
/*
* Preserve backwards compatibility and allow large file access
@@ -579,29 +596,24 @@ static int blkdev_open(struct inode *inode, struct file *filp)
filp->f_flags |= O_LARGEFILE;
filp->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;
- /*
- * Use the file private data to store the holder for exclusive openes.
- * file_to_blk_mode relies on it being present to set BLK_OPEN_EXCL.
- */
- if (filp->f_flags & O_EXCL)
- filp->private_data = filp;
-
- bdev = blkdev_get_by_dev(inode->i_rdev, file_to_blk_mode(filp),
- filp->private_data, NULL);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
+ mode = file_to_blk_mode(filp);
+ handle = bdev_open_by_dev(inode->i_rdev, mode,
+ mode & BLK_OPEN_EXCL ? filp : NULL, NULL);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- if (bdev_nowait(bdev))
+ if (bdev_nowait(handle->bdev))
filp->f_mode |= FMODE_NOWAIT;
- filp->f_mapping = bdev->bd_inode->i_mapping;
+ filp->f_mapping = handle->bdev->bd_inode->i_mapping;
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
+ filp->private_data = handle;
return 0;
}
static int blkdev_release(struct inode *inode, struct file *filp)
{
- blkdev_put(I_BDEV(filp->f_mapping->host), filp->private_data);
+ bdev_release(filp->private_data);
return 0;
}
@@ -772,24 +784,35 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
filemap_invalidate_lock(inode->i_mapping);
- /* Invalidate the page cache, including dirty pages. */
- error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
- if (error)
- goto fail;
-
+ /*
+ * Invalidate the page cache, including dirty pages, for valid
+ * de-allocate mode calls to fallocate().
+ */
switch (mode) {
case FALLOC_FL_ZERO_RANGE:
case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
+ error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
+ if (error)
+ goto fail;
+
error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
len >> SECTOR_SHIFT, GFP_KERNEL,
BLKDEV_ZERO_NOUNMAP);
break;
case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
+ error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
+ if (error)
+ goto fail;
+
error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
len >> SECTOR_SHIFT, GFP_KERNEL,
BLKDEV_ZERO_NOFALLBACK);
break;
case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
+ error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
+ if (error)
+ goto fail;
+
error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
len >> SECTOR_SHIFT, GFP_KERNEL);
break;
diff --git a/block/genhd.c b/block/genhd.c
index cc32a0c704eb..c9d06f72c587 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(disk_uevent);
int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
{
- struct block_device *bdev;
+ struct bdev_handle *handle;
int ret = 0;
if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
@@ -366,12 +366,12 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
}
set_bit(GD_NEED_PART_SCAN, &disk->state);
- bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~BLK_OPEN_EXCL, NULL,
- NULL);
- if (IS_ERR(bdev))
- ret = PTR_ERR(bdev);
+ handle = bdev_open_by_dev(disk_devt(disk), mode & ~BLK_OPEN_EXCL, NULL,
+ NULL);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
else
- blkdev_put(bdev, NULL);
+ bdev_release(handle);
/*
* If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
@@ -559,6 +559,13 @@ static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
struct block_device *bdev;
unsigned long idx;
+ /*
+ * On surprise disk removal, bdev_mark_dead() may call into file
+ * systems below. Make it clear that we're expecting to not hold
+ * disk->open_mutex.
+ */
+ lockdep_assert_not_held(&disk->open_mutex);
+
rcu_read_lock();
xa_for_each(&disk->part_tbl, idx, bdev) {
if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
diff --git a/block/ioctl.c b/block/ioctl.c
index d5f5cd61efd7..4160f4e6bd5b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -370,9 +370,10 @@ static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
bdev->bd_holder_ops->sync(bdev);
- else
+ else {
+ mutex_unlock(&bdev->bd_holder_lock);
sync_blockdev(bdev);
- mutex_unlock(&bdev->bd_holder_lock);
+ }
invalidate_bdev(bdev);
return 0;
@@ -467,6 +468,7 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
int __user *argp)
{
int ret, n;
+ struct bdev_handle *handle;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -478,10 +480,11 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
if (mode & BLK_OPEN_EXCL)
return set_blocksize(bdev, n);
- if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode, &bdev, NULL)))
+ handle = bdev_open_by_dev(bdev->bd_dev, mode, &bdev, NULL);
+ if (IS_ERR(handle))
return -EBUSY;
ret = set_blocksize(bdev, n);
- blkdev_put(bdev, &bdev);
+ bdev_release(handle);
return ret;
}
diff --git a/block/partitions/core.c b/block/partitions/core.c
index e137a87f4db0..f47ffcfdfcec 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -274,17 +274,6 @@ void drop_partition(struct block_device *part)
put_device(&part->bd_device);
}
-static void delete_partition(struct block_device *part)
-{
- /*
- * Remove the block device from the inode hash, so that it cannot be
- * looked up any more even when openers still hold references.
- */
- remove_inode_hash(part->bd_inode);
- bdev_mark_dead(part, false);
- drop_partition(part);
-}
-
static ssize_t whole_disk_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -485,7 +474,18 @@ int bdev_del_partition(struct gendisk *disk, int partno)
if (atomic_read(&part->bd_openers))
goto out_unlock;
- delete_partition(part);
+ /*
+ * We verified that @part->bd_openers is zero above and so
+ * @part->bd_holder{_ops} can't be set. And since we hold
+ * @disk->open_mutex the device can't be claimed by anyone.
+ *
+ * So no need to call @part->bd_holder_ops->mark_dead() here.
+ * Just delete the partition and invalidate it.
+ */
+
+ remove_inode_hash(part->bd_inode);
+ invalidate_bdev(part);
+ drop_partition(part);
ret = 0;
out_unlock:
mutex_unlock(&disk->open_mutex);
@@ -663,8 +663,23 @@ rescan:
sync_blockdev(disk->part0);
invalidate_bdev(disk->part0);
- xa_for_each_start(&disk->part_tbl, idx, part, 1)
- delete_partition(part);
+ xa_for_each_start(&disk->part_tbl, idx, part, 1) {
+ /*
+ * Remove the block device from the inode hash, so that
+ * it cannot be looked up any more even when openers
+ * still hold references.
+ */
+ remove_inode_hash(part->bd_inode);
+
+ /*
+ * If @disk->open_partitions isn't elevated but there's
+ * still an active holder of that block device things
+ * are broken.
+ */
+ WARN_ON_ONCE(atomic_read(&part->bd_openers));
+ invalidate_bdev(part);
+ drop_partition(part);
+ }
clear_bit(GD_NEED_PART_SCAN, &disk->state);
/*
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 6d7f25d1711b..04f38a3f5d95 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -2888,12 +2888,11 @@ static int opal_lock_unlock(struct opal_dev *dev,
if (lk_unlk->session.who > OPAL_USER9)
return -EINVAL;
- ret = opal_get_key(dev, &lk_unlk->session.opal_key);
- if (ret)
- return ret;
mutex_lock(&dev->dev_lock);
opal_lock_check_for_saved_key(dev, lk_unlk);
- ret = __opal_lock_unlock(dev, lk_unlk);
+ ret = opal_get_key(dev, &lk_unlk->session.opal_key);
+ if (!ret)
+ ret = __opal_lock_unlock(dev, lk_unlk);
mutex_unlock(&dev->dev_lock);
return ret;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index abeecb8329b3..1dcab27986a6 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -81,14 +81,13 @@ software_key_determine_akcipher(const struct public_key *pkey,
* RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2].
*/
if (strcmp(encoding, "pkcs1") == 0) {
+ *sig = op == kernel_pkey_sign ||
+ op == kernel_pkey_verify;
if (!hash_algo) {
- *sig = false;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s)",
pkey->pkey_algo);
} else {
- *sig = op == kernel_pkey_sign ||
- op == kernel_pkey_verify;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s,%s)",
pkey->pkey_algo, hash_algo);
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 467a60235370..7e9359611d69 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -367,14 +367,19 @@ int ivpu_boot(struct ivpu_device *vdev)
return 0;
}
-int ivpu_shutdown(struct ivpu_device *vdev)
+void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
- int ret;
-
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
+}
+
+int ivpu_shutdown(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_prepare_for_reset(vdev);
ret = ivpu_hw_power_down(vdev);
if (ret)
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 03b3d6532fb6..2adc349126bb 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -151,6 +151,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
+void ivpu_prepare_for_reset(struct ivpu_device *vdev);
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 0191cf8e5964..a277bbae78fc 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -220,8 +220,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
if (ret)
return ret;
- fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
- DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
+ fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
if (!fw->mem) {
ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
return -ENOMEM;
@@ -331,7 +330,7 @@ int ivpu_fw_load(struct ivpu_device *vdev)
memset(start, 0, size);
}
- clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
+ wmb(); /* Flush WC buffers after writing fw->mem */
return 0;
}
@@ -433,7 +432,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
if (!ivpu_fw_is_cold_boot(vdev)) {
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
- clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+ wmb(); /* Flush WC buffers after writing save_restore_ret_address */
return;
}
@@ -495,7 +494,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
- clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+ wmb(); /* Flush WC buffers after writing bootparams */
ivpu_fw_boot_params_print(vdev, boot_params);
}
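With the firmware buffers now allocated DRM_IVPU_BO_WC, the clflush_cache_range() calls become plain write barriers: wmb() drains the CPU's write-combining buffers so the device cannot observe the notification before the data. The general shape of that ordering is sketched below; the helper name and the doorbell write are illustrative, not the ivpu code:

#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

/* Fill a write-combined buffer, then publish it to the device. */
static void publish_to_device(void *wc_buf, const void *data, size_t len,
			      void __iomem *doorbell)
{
	memcpy(wc_buf, data, len);

	wmb();			/* flush WC buffers before notifying the device */

	writel(1, doorbell);	/* device may start consuming wc_buf now */
}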
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index f4130586ff1b..6b0ceda5f253 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -8,8 +8,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_mm.h>
-#define DRM_IVPU_BO_NOSNOOP 0x10000000
-
struct dma_buf;
struct ivpu_bo_ops;
struct ivpu_file_priv;
@@ -85,9 +83,6 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
- if (bo->flags & DRM_IVPU_BO_NOSNOOP)
- return false;
-
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index ab341237bcf9..1079e06255ba 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -13,6 +13,7 @@ struct ivpu_hw_ops {
int (*power_up)(struct ivpu_device *vdev);
int (*boot_fw)(struct ivpu_device *vdev);
int (*power_down)(struct ivpu_device *vdev);
+ int (*reset)(struct ivpu_device *vdev);
bool (*is_idle)(struct ivpu_device *vdev);
void (*wdt_disable)(struct ivpu_device *vdev);
void (*diagnose_failure)(struct ivpu_device *vdev);
@@ -91,6 +92,13 @@ static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
return vdev->hw->ops->power_down(vdev);
};
+static inline int ivpu_hw_reset(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, PM, "HW reset\n");
+
+ return vdev->hw->ops->reset(vdev);
+};
+
static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
{
vdev->hw->ops->wdt_disable(vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 9eae1c241bc0..18be8b98e9a8 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -940,9 +940,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
if (status == 0)
return 0;
- /* Disable global interrupt before handling local buttress interrupts */
- REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
-
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
@@ -974,9 +971,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
else
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
- /* Re-enable global interrupt */
- REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
@@ -988,9 +982,14 @@ static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
struct ivpu_device *vdev = ptr;
u32 ret_irqv, ret_irqb;
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
+ /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
return IRQ_RETVAL(ret_irqb | ret_irqv);
}
@@ -1029,6 +1028,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.power_up = ivpu_hw_37xx_power_up,
.is_idle = ivpu_hw_37xx_is_idle,
.power_down = ivpu_hw_37xx_power_down,
+ .reset = ivpu_hw_37xx_reset,
.boot_fw = ivpu_hw_37xx_boot_fw,
.wdt_disable = ivpu_hw_37xx_wdt_disable,
.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index 8bdb59a45da6..85171a408363 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -1179,6 +1179,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.power_up = ivpu_hw_40xx_power_up,
.is_idle = ivpu_hw_40xx_is_idle,
.power_down = ivpu_hw_40xx_power_down,
+ .reset = ivpu_hw_40xx_reset,
.boot_fw = ivpu_hw_40xx_boot_fw,
.wdt_disable = ivpu_hw_40xx_wdt_disable,
.diagnose_failure = ivpu_hw_40xx_diagnose_failure,
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 1d2e554e2c4a..ce94f4029127 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -11,6 +11,7 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
+#define IVPU_MMU_VPU_ADDRESS_MASK GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
@@ -328,12 +329,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
return -EINVAL;
- /*
- * VPU is only 32 bit, but DMA engine is 38 bit
- * Ranges < 2 GB are reserved for VPU internal registers
- * Limit range to 8 GB
- */
- if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
+
+ if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
return -EINVAL;
prot = IVPU_MMU_ENTRY_MAPPED;
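The hard-coded 2 GB..8 GB window check is replaced by a mask derived from the MMU's actual input range: an address is rejected if it sets any bit outside GENMASK(47, 12), i.e. anything at or above bit 48 or below page granularity. The same validation pattern in isolation (mask value copied from the hunk, everything else is a made-up example):

#include <linux/align.h>
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/sizes.h>
#include <linux/types.h>

#define EXAMPLE_ADDRESS_MASK	GENMASK_ULL(47, 12)
#define EXAMPLE_PAGE_SIZE	SZ_4K

static int validate_dev_address(u64 addr)
{
	if (!IS_ALIGNED(addr, EXAMPLE_PAGE_SIZE))
		return -EINVAL;

	/* Reject anything the 48-bit, page-granular address space cannot hold. */
	if (addr & ~EXAMPLE_ADDRESS_MASK)
		return -EINVAL;

	return 0;
}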
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index e6f27daf5560..ffff2496e8e8 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -261,7 +261,8 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
ivpu_dbg(vdev, PM, "Pre-reset..\n");
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->in_reset, 1);
- ivpu_shutdown(vdev);
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index c711db8a9c33..0f5218e361df 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
+#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index f41dda2d3493..a4aa53b7e2bb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1410,10 +1410,10 @@ static int __init acpi_init(void)
acpi_init_ffh();
pci_mmcfg_late_init();
- acpi_arm_init();
acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
+ acpi_arm_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 660834a49c1f..c95d0edb0be9 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1915,6 +1915,17 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
},
{
/*
+ * HP Pavilion Gaming Laptop 15-dk1xxx
+ * https://github.com/systemd/systemd/issues/28942
+ */
+ .callback = ec_honor_dsdt_gpe,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
+ },
+ },
+ {
+ /*
* Samsung hardware
* https://bugzilla.kernel.org/show_bug.cgi?id=44161
*/
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index c2c786eb95ab..1687483ff319 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -57,6 +57,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
int polarity)
{
struct irq_fwspec fwspec;
+ unsigned int irq;
fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
if (WARN_ON(!fwspec.fwnode)) {
@@ -68,7 +69,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
fwspec.param_count = 2;
- return irq_create_fwspec_mapping(&fwspec);
+ irq = irq_create_fwspec_mapping(&fwspec);
+ if (!irq)
+ return -EINVAL;
+
+ return irq;
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
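irq_create_fwspec_mapping() reports failure by returning 0 rather than a negative errno, so acpi_register_gsi() now translates that into -EINVAL instead of handing the bogus value to its callers. The same adapter shape, for any "0 means no mapping" helper (the callback here is a hypothetical stand-in):

#include <linux/errno.h>

/* Adapt a "returns 0 on failure" lookup to the usual negative-errno convention. */
static int register_mapping(unsigned int (*lookup)(void *ctx), void *ctx)
{
	unsigned int nr = lookup(ctx);

	if (!nr)
		return -EINVAL;	/* no mapping could be created */

	return nr;
}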
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f96bf32cd368..7d88db451cfb 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3339,6 +3339,16 @@ static int acpi_nfit_add(struct acpi_device *adev)
acpi_size sz;
int rc = 0;
+ rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
+ acpi_nfit_notify);
+ if (rc)
+ return rc;
+
+ rc = devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
+ adev);
+ if (rc)
+ return rc;
+
status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
if (ACPI_FAILURE(status)) {
/* The NVDIMM root device allows OS to trigger enumeration of
@@ -3386,17 +3396,7 @@ static int acpi_nfit_add(struct acpi_device *adev)
if (rc)
return rc;
- rc = devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
- if (rc)
- return rc;
-
- rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
- acpi_nfit_notify);
- if (rc)
- return rc;
-
- return devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
- adev);
+ return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}
static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 32cfa3f4efd3..297a88587031 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -440,6 +440,13 @@ static const struct dmi_system_id asus_laptop[] = {
},
},
{
+ .ident = "Asus ExpertBook B1402CBA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
+ },
+ },
+ {
.ident = "Asus ExpertBook B1502CBA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -500,16 +507,23 @@ static const struct dmi_system_id maingear_laptop[] = {
static const struct dmi_system_id pcspecialist_laptop[] = {
{
- .ident = "PCSpecialist Elimina Pro 16 M",
- /*
- * Some models have product-name "Elimina Pro 16 M",
- * others "GM6BGEQ". Match on board-name to match both.
- */
+ /* TongFang GM6BGEQ / PCSpecialist Elimina Pro 16 M, RTX 3050 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
DMI_MATCH(DMI_BOARD_NAME, "GM6BGEQ"),
},
},
+ {
+ /* TongFang GM6BG5Q, RTX 4050 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM6BG5Q"),
+ },
+ },
+ {
+ /* TongFang GM6BG0Q / PCSpecialist Elimina Pro 16 M, RTX 4060 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"),
+ },
+ },
{ }
};
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 367afac5f1bf..92128aae2d06 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4812,6 +4812,8 @@ static void binder_release_work(struct binder_proc *proc,
"undelivered TRANSACTION_ERROR: %u\n",
e->cmd);
} break;
+ case BINDER_WORK_TRANSACTION_PENDING:
+ case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 81effec17b3d..420dc9cbf774 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -152,7 +152,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
goto err;
inode->i_ino = minor + INODE_OFFSET;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
init_special_inode(inode, S_IFCHR | 0600,
MKDEV(MAJOR(binderfs_dev), minor));
inode->i_fop = &binder_fops;
@@ -431,7 +431,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
}
inode->i_ino = SECOND_INODE;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
init_special_inode(inode, S_IFCHR | 0600,
MKDEV(MAJOR(binderfs_dev), minor));
inode->i_fop = &binder_ctl_fops;
@@ -473,7 +473,7 @@ static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
if (ret) {
ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
+ simple_inode_init_ts(ret);
}
return ret;
}
@@ -702,7 +702,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
inode->i_ino = FIRST_INODE;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | 0755;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &binderfs_dir_inode_operations;
set_nlink(inode, 2);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a371b497035e..3a957c4da409 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1053,10 +1053,11 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
/*
* Ask the sd driver to issue START STOP UNIT on runtime suspend
- * and resume only. For system level suspend/resume, devices
- * power state is handled directly by libata EH.
+ * and resume and shutdown only. For system level suspend/resume,
+ * devices power state is handled directly by libata EH.
*/
sdev->manage_runtime_start_stop = true;
+ sdev->manage_shutdown = true;
}
/*
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 884cb51c8f67..234a84ecde8b 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1478,7 +1478,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
/* If the user didn't specify a name match any */
if (data)
- return !strcmp((*r)->name, data);
+ return (*r)->name && !strcmp((*r)->name, data);
else
return 1;
}
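dev_get_regmap_match() can be handed regmaps that were registered without a name, so the strcmp() has to be preceded by a NULL check or a named lookup dereferences a NULL pointer. The guard is the usual short-circuit pattern, shown here standalone:

#include <stdbool.h>
#include <string.h>

/* Match only when a name exists and equals the requested one. */
static bool name_matches(const char *name, const char *wanted)
{
	return name && strcmp(name, wanted) == 0;
}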
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index cd738cab725f..50949207798d 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1760,8 +1760,10 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
/* invalidate the buffer track to force a reread */
BufferDrive = -1;
set_bit(drive, &fake_change);
- if (disk_check_media_change(disk))
+ if (disk_check_media_change(disk)) {
+ bdev_mark_dead(disk->part0, true);
floppy_revalidate(disk);
+ }
return 0;
default:
return -EINVAL;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index a30a5ed811be..f017e917612b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -524,7 +524,9 @@ struct drbd_md {
struct drbd_backing_dev {
struct block_device *backing_bdev;
+ struct bdev_handle *backing_bdev_handle;
struct block_device *md_bdev;
+ struct bdev_handle *md_bdev_handle;
struct drbd_md md;
struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
sector_t known_size; /* last known size of that backing device */
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index d3538bd83fb3..43747a1aae43 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -82,7 +82,7 @@ static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */
DEFINE_MUTEX(notification_mutex);
-/* used blkdev_get_by_path, to claim our meta data device(s) */
+/* used bdev_open_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
@@ -1635,43 +1635,45 @@ success:
return 0;
}
-static struct block_device *open_backing_dev(struct drbd_device *device,
+static struct bdev_handle *open_backing_dev(struct drbd_device *device,
const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
- struct block_device *bdev;
+ struct bdev_handle *handle;
int err = 0;
- bdev = blkdev_get_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
- claim_ptr, NULL);
- if (IS_ERR(bdev)) {
+ handle = bdev_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ claim_ptr, NULL);
+ if (IS_ERR(handle)) {
drbd_err(device, "open(\"%s\") failed with %ld\n",
- bdev_path, PTR_ERR(bdev));
- return bdev;
+ bdev_path, PTR_ERR(handle));
+ return handle;
}
if (!do_bd_link)
- return bdev;
+ return handle;
- err = bd_link_disk_holder(bdev, device->vdisk);
+ err = bd_link_disk_holder(handle->bdev, device->vdisk);
if (err) {
- blkdev_put(bdev, claim_ptr);
+ bdev_release(handle);
drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
bdev_path, err);
- bdev = ERR_PTR(err);
+ handle = ERR_PTR(err);
}
- return bdev;
+ return handle;
}
static int open_backing_devices(struct drbd_device *device,
struct disk_conf *new_disk_conf,
struct drbd_backing_dev *nbc)
{
- struct block_device *bdev;
+ struct bdev_handle *handle;
- bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
- if (IS_ERR(bdev))
+ handle = open_backing_dev(device, new_disk_conf->backing_dev, device,
+ true);
+ if (IS_ERR(handle))
return ERR_OPEN_DISK;
- nbc->backing_bdev = bdev;
+ nbc->backing_bdev = handle->bdev;
+ nbc->backing_bdev_handle = handle;
/*
* meta_dev_idx >= 0: external fixed size, possibly multiple
@@ -1681,7 +1683,7 @@ static int open_backing_devices(struct drbd_device *device,
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
- bdev = open_backing_dev(device, new_disk_conf->meta_dev,
+ handle = open_backing_dev(device, new_disk_conf->meta_dev,
/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
* if potentially shared with other drbd minors */
(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
@@ -1689,20 +1691,21 @@ static int open_backing_devices(struct drbd_device *device,
* as would happen with internal metadata. */
(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
- if (IS_ERR(bdev))
+ if (IS_ERR(handle))
return ERR_OPEN_MD_DISK;
- nbc->md_bdev = bdev;
+ nbc->md_bdev = handle->bdev;
+ nbc->md_bdev_handle = handle;
return NO_ERROR;
}
-static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
- void *claim_ptr, bool do_bd_unlink)
+static void close_backing_dev(struct drbd_device *device,
+ struct bdev_handle *handle, bool do_bd_unlink)
{
- if (!bdev)
+ if (!handle)
return;
if (do_bd_unlink)
- bd_unlink_disk_holder(bdev, device->vdisk);
- blkdev_put(bdev, claim_ptr);
+ bd_unlink_disk_holder(handle->bdev, device->vdisk);
+ bdev_release(handle);
}
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
@@ -1710,11 +1713,9 @@ void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *
if (ldev == NULL)
return;
- close_backing_dev(device, ldev->md_bdev,
- ldev->md.meta_dev_idx < 0 ?
- (void *)device : (void *)drbd_m_holder,
+ close_backing_dev(device, ldev->md_bdev_handle,
ldev->md_bdev != ldev->backing_bdev);
- close_backing_dev(device, ldev->backing_bdev, device, true);
+ close_backing_dev(device, ldev->backing_bdev_handle, true);
kfree(ldev->disk_conf);
kfree(ldev);
@@ -2130,11 +2131,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
fail:
conn_reconfig_done(connection);
if (nbc) {
- close_backing_dev(device, nbc->md_bdev,
- nbc->disk_conf->meta_dev_idx < 0 ?
- (void *)device : (void *)drbd_m_holder,
+ close_backing_dev(device, nbc->md_bdev_handle,
nbc->md_bdev != nbc->backing_bdev);
- close_backing_dev(device, nbc->backing_bdev, device, true);
+ close_backing_dev(device, nbc->backing_bdev_handle, true);
kfree(nbc);
}
kfree(new_disk_conf);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index ea4eb88a2e45..11114a5d9e5c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3215,8 +3215,10 @@ static int invalidate_drive(struct gendisk *disk)
/* invalidate the buffer track to force a reread */
set_bit((long)disk->private_data, &fake_change);
process_fd_request();
- if (disk_check_media_change(disk))
+ if (disk_check_media_change(disk)) {
+ bdev_mark_dead(disk->part0, true);
floppy_revalidate(disk);
+ }
return 0;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a1428538bda5..d56d972aadb3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -340,8 +340,8 @@ static ssize_t device_map_show(const struct class *c, const struct class_attribu
n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
pd->disk->disk_name,
MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
- MAJOR(pd->bdev->bd_dev),
- MINOR(pd->bdev->bd_dev));
+ MAJOR(pd->bdev_handle->bdev->bd_dev),
+ MINOR(pd->bdev_handle->bdev->bd_dev));
}
mutex_unlock(&ctl_mutex);
return n;
@@ -437,7 +437,8 @@ static int pkt_seq_show(struct seq_file *m, void *p)
char *msg;
int states[PACKET_NUM_STATES];
- seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, pd->bdev);
+ seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
+ pd->bdev_handle->bdev);
seq_printf(m, "\nSettings:\n");
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
@@ -714,7 +715,7 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
*/
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
- struct request_queue *q = bdev_get_queue(pd->bdev);
+ struct request_queue *q = bdev_get_queue(pd->bdev_handle->bdev);
struct scsi_cmnd *scmd;
struct request *rq;
int ret = 0;
@@ -1047,7 +1048,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
- bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
+ bio_init(bio, pd->bdev_handle->bdev, bio->bi_inline_vecs, 1,
+ REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1262,8 +1264,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
struct device *ddev = disk_to_dev(pd->disk);
int f;
- bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
- REQ_OP_WRITE);
+ bio_init(pkt->w_bio, pd->bdev_handle->bdev, pkt->w_bio->bi_inline_vecs,
+ pkt->frames, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2160,18 +2162,20 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
int ret;
long lba;
struct request_queue *q;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
* to read/write from/to it. It is already opened in O_NONBLOCK mode
* so open should not fail.
*/
- bdev = blkdev_get_by_dev(pd->bdev->bd_dev, BLK_OPEN_READ, pd, NULL);
- if (IS_ERR(bdev)) {
- ret = PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_dev(pd->bdev_handle->bdev->bd_dev,
+ BLK_OPEN_READ, pd, NULL);
+ if (IS_ERR(bdev_handle)) {
+ ret = PTR_ERR(bdev_handle);
goto out;
}
+ pd->open_bdev_handle = bdev_handle;
ret = pkt_get_last_written(pd, &lba);
if (ret) {
@@ -2180,9 +2184,9 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
}
set_capacity(pd->disk, lba << 2);
- set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
+ set_capacity_and_notify(pd->bdev_handle->bdev->bd_disk, lba << 2);
- q = bdev_get_queue(pd->bdev);
+ q = bdev_get_queue(pd->bdev_handle->bdev);
if (write) {
ret = pkt_open_write(pd);
if (ret)
@@ -2214,7 +2218,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
return 0;
out_putdev:
- blkdev_put(bdev, pd);
+ bdev_release(bdev_handle);
out:
return ret;
}
@@ -2233,7 +2237,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- blkdev_put(pd->bdev, pd);
+ bdev_release(pd->open_bdev_handle);
+ pd->open_bdev_handle = NULL;
pkt_shrink_pktlist(pd);
}
@@ -2321,8 +2326,8 @@ static void pkt_end_io_read_cloned(struct bio *bio)
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct bio *cloned_bio =
- bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
+ struct bio *cloned_bio = bio_alloc_clone(pd->bdev_handle->bdev, bio,
+ GFP_NOIO, &pkt_bio_set);
struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
psd->pd = pd;
@@ -2492,7 +2497,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
struct device *ddev = disk_to_dev(pd->disk);
int i;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
@@ -2503,8 +2508,9 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
struct pktcdvd_device *pd2 = pkt_devs[i];
if (!pd2)
continue;
- if (pd2->bdev->bd_dev == dev) {
- dev_err(ddev, "%pg already setup\n", pd2->bdev);
+ if (pd2->bdev_handle->bdev->bd_dev == dev) {
+ dev_err(ddev, "%pg already setup\n",
+ pd2->bdev_handle->bdev);
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
@@ -2513,13 +2519,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
}
- bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY, NULL,
- NULL);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
- sdev = scsi_device_from_queue(bdev->bd_disk->queue);
+ bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
+ NULL, NULL);
+ if (IS_ERR(bdev_handle))
+ return PTR_ERR(bdev_handle);
+ sdev = scsi_device_from_queue(bdev_handle->bdev->bd_disk->queue);
if (!sdev) {
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
return -EINVAL;
}
put_device(&sdev->sdev_gendev);
@@ -2527,8 +2533,8 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- pd->bdev = bdev;
- set_blocksize(bdev, CD_FRAMESIZE);
+ pd->bdev_handle = bdev_handle;
+ set_blocksize(bdev_handle->bdev, CD_FRAMESIZE);
pkt_init_queue(pd);
@@ -2540,11 +2546,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
- dev_notice(ddev, "writer mapped to %pg\n", bdev);
+ dev_notice(ddev, "writer mapped to %pg\n", bdev_handle->bdev);
return 0;
out_mem:
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return -ENOMEM;
@@ -2599,9 +2605,9 @@ static unsigned int pkt_check_events(struct gendisk *disk,
if (!pd)
return 0;
- if (!pd->bdev)
+ if (!pd->bdev_handle)
return 0;
- attached_disk = pd->bdev->bd_disk;
+ attached_disk = pd->bdev_handle->bdev->bd_disk;
if (!attached_disk || !attached_disk->fops->check_events)
return 0;
return attached_disk->fops->check_events(attached_disk, clearing);
@@ -2686,7 +2692,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
goto out_mem2;
/* inherit events of the host device */
- disk->events = pd->bdev->bd_disk->events;
+ disk->events = pd->bdev_handle->bdev->bd_disk->events;
ret = add_disk(disk);
if (ret)
@@ -2751,7 +2757,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
- blkdev_put(pd->bdev, NULL);
+ bdev_release(pd->bdev_handle);
remove_proc_entry(pd->disk->disk_name, pkt_proc);
dev_notice(ddev, "writer unmapped\n");
@@ -2778,7 +2784,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
if (pd) {
- ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
+ ctrl_cmd->dev = new_encode_dev(pd->bdev_handle->bdev->bd_dev);
ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
} else {
ctrl_cmd->dev = 0;
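The pktcdvd hunks above, like the rnbd, xen-blkback and zram hunks that follow, all perform the same conversion: the struct block_device pointer returned by blkdev_get_by_dev()/blkdev_get_by_path() becomes a struct bdev_handle returned by bdev_open_by_dev()/bdev_open_by_path(), the device itself is reached through handle->bdev, and blkdev_put() becomes bdev_release(). Below is a minimal sketch of that pattern, assuming the bdev_handle API exactly as used in these hunks; struct my_dev, my_attach() and my_detach() are hypothetical names, not code from any of the drivers above.

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/printk.h>

struct my_dev {
	struct bdev_handle *bdev_handle;	/* was: struct block_device *bdev */
};

static int my_attach(struct my_dev *md, dev_t devt)
{
	struct bdev_handle *handle;

	/* was: blkdev_get_by_dev(devt, mode, holder, hops) */
	handle = bdev_open_by_dev(devt, BLK_OPEN_READ | BLK_OPEN_WRITE, md, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	md->bdev_handle = handle;
	/* The block_device itself is now reached through handle->bdev. */
	pr_info("attached, %llu sectors\n",
		(unsigned long long)bdev_nr_sectors(handle->bdev));
	return 0;
}

static void my_detach(struct my_dev *md)
{
	/* was: blkdev_put(bdev, holder) */
	bdev_release(md->bdev_handle);
	md->bdev_handle = NULL;
}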
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index c186df0ec641..65de51f3dfd9 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -145,7 +145,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- bio = bio_alloc(sess_dev->bdev, 1,
+ bio = bio_alloc(sess_dev->bdev_handle->bdev, 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
@@ -219,7 +219,7 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
- blkdev_put(sess_dev->bdev, NULL);
+ bdev_release(sess_dev->bdev_handle);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (!sess_dev->readonly)
@@ -534,7 +534,7 @@ rnbd_srv_get_or_create_srv_dev(struct block_device *bdev,
static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
- struct block_device *bdev = sess_dev->bdev;
+ struct block_device *bdev = sess_dev->bdev_handle->bdev;
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id = cpu_to_le32(sess_dev->device_id);
@@ -559,7 +559,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
const struct rnbd_msg_open *open_msg,
- struct block_device *bdev, bool readonly,
+ struct bdev_handle *handle, bool readonly,
struct rnbd_srv_dev *srv_dev)
{
struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
@@ -571,7 +571,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
- sdev->bdev = bdev;
+ sdev->bdev_handle = handle;
sdev->sess = srv_sess;
sdev->dev = srv_dev;
sdev->readonly = readonly;
@@ -676,7 +676,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_dev *srv_dev;
struct rnbd_srv_sess_dev *srv_sess_dev;
const struct rnbd_msg_open *open_msg = msg;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
blk_mode_t open_flags = BLK_OPEN_READ;
char *full_path;
struct rnbd_msg_open_rsp *rsp = data;
@@ -714,15 +714,15 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- bdev = blkdev_get_by_path(full_path, open_flags, NULL, NULL);
- if (IS_ERR(bdev)) {
- ret = PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_path(full_path, open_flags, NULL, NULL);
+ if (IS_ERR(bdev_handle)) {
+ ret = PTR_ERR(bdev_handle);
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n",
full_path, srv_sess->sessname, ret);
goto free_path;
}
- srv_dev = rnbd_srv_get_or_create_srv_dev(bdev, srv_sess,
+ srv_dev = rnbd_srv_get_or_create_srv_dev(bdev_handle->bdev, srv_sess,
open_msg->access_mode);
if (IS_ERR(srv_dev)) {
pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
@@ -731,7 +731,8 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto blkdev_put;
}
- srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg, bdev,
+ srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
+ bdev_handle,
open_msg->access_mode == RNBD_ACCESS_RO,
srv_dev);
if (IS_ERR(srv_sess_dev)) {
@@ -747,7 +748,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
- ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev);
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev_handle->bdev);
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
@@ -790,7 +791,7 @@ srv_dev_put:
}
rnbd_put_srv_dev(srv_dev);
blkdev_put:
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
free_path:
kfree(full_path);
reject:
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 1027656dedb0..343cc682b617 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -46,7 +46,7 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
struct kobject kobj;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c362f4ad80ab..4defd7f387c7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -465,7 +465,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
}
req->dev = vbd->pdevice;
- req->bdev = vbd->bdev;
+ req->bdev = vbd->bdev_handle->bdev;
rc = 0;
out:
@@ -969,7 +969,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
int err = 0;
int status = BLKIF_RSP_OKAY;
struct xen_blkif *blkif = ring->blkif;
- struct block_device *bdev = blkif->vbd.bdev;
+ struct block_device *bdev = blkif->vbd.bdev_handle->bdev;
struct phys_req preq;
xen_blkif_get(blkif);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 40f67bfc052d..5ff50e76cee5 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -221,7 +221,7 @@ struct xen_vbd {
unsigned char type;
/* phys device that this vbd maps to. */
u32 pdevice;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
/* Cached size parameter. */
sector_t size;
unsigned int flush_support:1;
@@ -360,7 +360,7 @@ struct pending_req {
};
-#define vbd_sz(_v) bdev_nr_sectors((_v)->bdev)
+#define vbd_sz(_v) bdev_nr_sectors((_v)->bdev_handle->bdev)
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b) \
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index bb66178c432b..e34219ea2b05 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -81,7 +81,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
int i;
/* Not ready to connect? */
- if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
+ if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_handle)
return;
/* Already connected? */
@@ -99,12 +99,13 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
return;
}
- err = sync_blockdev(blkif->vbd.bdev);
+ err = sync_blockdev(blkif->vbd.bdev_handle->bdev);
if (err) {
xenbus_dev_error(blkif->be->dev, err, "block flush");
return;
}
- invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
+ invalidate_inode_pages2(
+ blkif->vbd.bdev_handle->bdev->bd_inode->i_mapping);
for (i = 0; i < blkif->nr_rings; i++) {
ring = &blkif->rings[i];
@@ -472,9 +473,9 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
static void xen_vbd_free(struct xen_vbd *vbd)
{
- if (vbd->bdev)
- blkdev_put(vbd->bdev, NULL);
- vbd->bdev = NULL;
+ if (vbd->bdev_handle)
+ bdev_release(vbd->bdev_handle);
+ vbd->bdev_handle = NULL;
}
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
@@ -482,7 +483,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
int cdrom)
{
struct xen_vbd *vbd;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
vbd = &blkif->vbd;
vbd->handle = handle;
@@ -491,17 +492,17 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
vbd->pdevice = MKDEV(major, minor);
- bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
+ bdev_handle = bdev_open_by_dev(vbd->pdevice, vbd->readonly ?
BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(bdev)) {
+ if (IS_ERR(bdev_handle)) {
pr_warn("xen_vbd_create: device %08x could not be opened\n",
vbd->pdevice);
return -ENOENT;
}
- vbd->bdev = bdev;
- if (vbd->bdev->bd_disk == NULL) {
+ vbd->bdev_handle = bdev_handle;
+ if (vbd->bdev_handle->bdev->bd_disk == NULL) {
pr_warn("xen_vbd_create: device %08x doesn't exist\n",
vbd->pdevice);
xen_vbd_free(vbd);
@@ -509,14 +510,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->size = vbd_sz(vbd);
- if (cdrom || disk_to_cdi(vbd->bdev->bd_disk))
+ if (cdrom || disk_to_cdi(vbd->bdev_handle->bdev->bd_disk))
vbd->type |= VDISK_CDROM;
- if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
+ if (vbd->bdev_handle->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
- if (bdev_write_cache(bdev))
+ if (bdev_write_cache(bdev_handle->bdev))
vbd->flush_support = true;
- if (bdev_max_secure_erase_sectors(bdev))
+ if (bdev_max_secure_erase_sectors(bdev_handle->bdev))
vbd->discard_secure = true;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
@@ -569,7 +570,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
struct xen_blkif *blkif = be->blkif;
int err;
int state = 0;
- struct block_device *bdev = be->blkif->vbd.bdev;
+ struct block_device *bdev = be->blkif->vbd.bdev_handle->bdev;
if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
@@ -930,15 +931,16 @@ again:
goto abort;
}
err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
- (unsigned long)
- bdev_logical_block_size(be->blkif->vbd.bdev));
+ (unsigned long)bdev_logical_block_size(
+ be->blkif->vbd.bdev_handle->bdev));
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/sector-size",
dev->nodename);
goto abort;
}
err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
- bdev_physical_block_size(be->blkif->vbd.bdev));
+ bdev_physical_block_size(
+ be->blkif->vbd.bdev_handle->bdev));
if (err)
xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
dev->nodename);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 06673c6ca255..d77d3664ca08 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -414,17 +414,14 @@ static ssize_t writeback_limit_show(struct device *dev,
static void reset_bdev(struct zram *zram)
{
- struct block_device *bdev;
-
if (!zram->backing_dev)
return;
- bdev = zram->bdev;
- blkdev_put(bdev, zram);
+ bdev_release(zram->bdev_handle);
/* hope filp_close flush all of IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
- zram->bdev = NULL;
+ zram->bdev_handle = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
@@ -470,7 +467,7 @@ static ssize_t backing_dev_store(struct device *dev,
struct address_space *mapping;
unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
- struct block_device *bdev = NULL;
+ struct bdev_handle *bdev_handle = NULL;
int err;
struct zram *zram = dev_to_zram(dev);
@@ -507,11 +504,11 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- zram, NULL);
- if (IS_ERR(bdev)) {
- err = PTR_ERR(bdev);
- bdev = NULL;
+ bdev_handle = bdev_open_by_dev(inode->i_rdev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
+ if (IS_ERR(bdev_handle)) {
+ err = PTR_ERR(bdev_handle);
+ bdev_handle = NULL;
goto out;
}
@@ -525,7 +522,7 @@ static ssize_t backing_dev_store(struct device *dev,
reset_bdev(zram);
- zram->bdev = bdev;
+ zram->bdev_handle = bdev_handle;
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
@@ -538,8 +535,8 @@ static ssize_t backing_dev_store(struct device *dev,
out:
kvfree(bitmap);
- if (bdev)
- blkdev_put(bdev, zram);
+ if (bdev_handle)
+ bdev_release(bdev_handle);
if (backing_dev)
filp_close(backing_dev, NULL);
@@ -581,7 +578,7 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
{
struct bio *bio;
- bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
+ bio = bio_alloc(zram->bdev_handle->bdev, 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
__bio_add_page(bio, page, PAGE_SIZE, 0);
bio_chain(bio, parent);
@@ -697,7 +694,7 @@ static ssize_t writeback_store(struct device *dev,
continue;
}
- bio_init(&bio, zram->bdev, &bio_vec, 1,
+ bio_init(&bio, zram->bdev_handle->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
__bio_add_page(&bio, page, PAGE_SIZE, 0);
@@ -779,7 +776,7 @@ static void zram_sync_read(struct work_struct *work)
struct bio_vec bv;
struct bio bio;
- bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
+ bio_init(&bio, zw->zram->bdev_handle->bdev, &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
zw->error = submit_bio_wait(&bio);
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index ca7a15bd4845..d090753f97be 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -132,7 +132,7 @@ struct zram {
spinlock_t wb_limit_lock;
bool wb_limit_enable;
u64 bd_wb_limit;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
unsigned long *bitmap;
unsigned long nr_pages;
#endif
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 84c2c2e1122f..277d039ecbb4 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -962,13 +962,10 @@ static void btrtl_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
skb_put_data(skb, buf, strlen(buf));
}
-static int btrtl_register_devcoredump_support(struct hci_dev *hdev)
+static void btrtl_register_devcoredump_support(struct hci_dev *hdev)
{
- int err;
+ hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL);
- err = hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL);
-
- return err;
}
void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name)
@@ -1255,8 +1252,7 @@ int btrtl_download_firmware(struct hci_dev *hdev,
}
done:
- if (!err)
- err = btrtl_register_devcoredump_support(hdev);
+ btrtl_register_devcoredump_support(hdev);
return err;
}
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 40e2b9fa11a2..f3892e9ce800 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -74,7 +74,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
struct vhci_data *data = hci_get_drvdata(hdev);
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+ mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
+ mutex_unlock(&data->open_mutex);
wake_up_interruptible(&data->read_wait);
return 0;
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index a57677f908f3..d6e5e3abaad8 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -3,7 +3,7 @@ menu "Cache Drivers"
config AX45MP_L2_CACHE
bool "Andes Technology AX45MP L2 Cache controller"
- depends on RISCV_DMA_NONCOHERENT
+ depends on RISCV
select RISCV_NONSTANDARD_CACHE_OPS
help
Support for the L2 cache controller on Andes Technology AX45MP platforms.
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 9211531689b2..22d249333f53 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -920,7 +920,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
if (ret > 0) {
struct inode *inode = file_inode(file);
- inode->i_atime = current_time(inode);
+ inode_set_atime_to_ts(inode, current_time(inode));
}
return ret;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c249f9791ae8..473563bc7496 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3416,6 +3416,7 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
unsigned int i, char terminator)
{
struct clk_core *parent;
+ const char *name = NULL;
/*
* Go through the following options to fetch a parent's name.
@@ -3430,18 +3431,20 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
* registered (yet).
*/
parent = clk_core_get_parent_by_index(core, i);
- if (parent)
+ if (parent) {
seq_puts(s, parent->name);
- else if (core->parents[i].name)
+ } else if (core->parents[i].name) {
seq_puts(s, core->parents[i].name);
- else if (core->parents[i].fw_name)
+ } else if (core->parents[i].fw_name) {
seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
- else if (core->parents[i].index >= 0)
- seq_puts(s,
- of_clk_get_parent_name(core->of_node,
- core->parents[i].index));
- else
- seq_puts(s, "(missing)");
+ } else {
+ if (core->parents[i].index >= 0)
+ name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
+ if (!name)
+ name = "(missing)";
+
+ seq_puts(s, name);
+ }
seq_putc(s, terminator);
}
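The possible_parent_show() hunk above exists because of_clk_get_parent_name() can return NULL and seq_puts() must never be handed a NULL pointer; the rewrite funnels every failed lookup into the "(missing)" placeholder. A minimal sketch of that fall-back idiom follows, under the same assumption about of_clk_get_parent_name(); show_parent_name() and its caller are hypothetical.

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/seq_file.h>

static void show_parent_name(struct seq_file *s, struct device_node *np,
			     int index)
{
	const char *name = NULL;

	if (index >= 0)
		name = of_clk_get_parent_name(np, index);	/* may be NULL */
	if (!name)
		name = "(missing)";

	seq_puts(s, name);	/* never reached with a NULL pointer */
}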
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index 8dd601bd8538..0a5a95e0267f 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -87,10 +87,8 @@ static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent)
return 0;
}
-static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
- unsigned long parent_rate)
+static u32 socfpga_clk_get_div(struct socfpga_gate_clk *socfpgaclk)
{
- struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
u32 div = 1, val;
if (socfpgaclk->fixed_div)
@@ -105,12 +103,33 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
div = (1 << val);
}
+ return div;
+}
+
+static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 div = socfpga_clk_get_div(socfpgaclk);
+
return parent_rate / div;
}
+
+static int socfpga_clk_determine_rate(struct clk_hw *hwclk,
+ struct clk_rate_request *req)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 div = socfpga_clk_get_div(socfpgaclk);
+
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
+}
+
static struct clk_ops gateclk_ops = {
.recalc_rate = socfpga_clk_recalc_rate,
- .determine_rate = clk_hw_determine_rate_no_reparent,
+ .determine_rate = socfpga_clk_determine_rate,
.get_parent = socfpga_clk_get_parent,
.set_parent = socfpga_clk_set_parent,
};
diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
index d5aa09e9fce4..067b918a8894 100644
--- a/drivers/clk/stm32/clk-stm32-core.c
+++ b/drivers/clk/stm32/clk-stm32-core.c
@@ -431,7 +431,7 @@ static int clk_stm32_composite_determine_rate(struct clk_hw *hw,
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
const struct stm32_div_cfg *divider;
- unsigned long rate;
+ long rate;
if (composite->div_id == NO_STM32_DIV)
return 0;
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 868bc7af21b0..9b2824ed785b 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -749,9 +749,14 @@ static struct ti_dt_clk omap44xx_clks[] = {
DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
+ DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
+ DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
+ DT_CLK("48096000.mcbsp", "prcm_fck", "l4-per-clkctrl:00c0:26"),
DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
+ DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index b4aff76eb373..74dfd5823f83 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -565,15 +565,19 @@ static struct ti_dt_clk omap54xx_clks[] = {
DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
+ DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 05d562e9c8b1..44b19e696176 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -54,7 +54,7 @@ static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data)
enum proc_cn_mcast_op mc_op;
uintptr_t val;
- if (!dsk || !data)
+ if (!dsk || !dsk->sk_user_data || !data)
return 0;
ptr = (__u32 *)data;
diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
index 80acdf62794a..afc94d0062b1 100644
--- a/drivers/counter/counter-chrdev.c
+++ b/drivers/counter/counter-chrdev.c
@@ -247,8 +247,8 @@ static int counter_get_ext(const struct counter_comp *const ext,
if (*id == component_id)
return 0;
- if (ext->type == COUNTER_COMP_ARRAY) {
- element = ext->priv;
+ if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
+ element = ext[*ext_idx].priv;
if (component_id - *id < element->length)
return 0;
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 975e431d1590..b3e615cbd2ca 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -97,7 +97,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter,
priv->qdec_mode = 0;
/* Set highest rate based on whether soc has gclk or not */
bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN);
- if (priv->tc_cfg->has_gclk)
+ if (!priv->tc_cfg->has_gclk)
cmr |= ATMEL_TC_TIMER_CLOCK2;
else
cmr |= ATMEL_TC_TIMER_CLOCK1;
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 59a4c0259456..154590e1f764 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -35,6 +35,9 @@ struct virtio_crypto {
struct virtqueue *ctrl_vq;
struct data_queue *data_vq;
+ /* Work struct for config space updates */
+ struct work_struct config_work;
+
/* To protect the vq operations for the controlq */
spinlock_t ctrl_lock;
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 94849fa3bd74..43a0838d31ff 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -335,6 +335,14 @@ static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
virtcrypto_free_queues(vcrypto);
}
+static void vcrypto_config_changed_work(struct work_struct *work)
+{
+ struct virtio_crypto *vcrypto =
+ container_of(work, struct virtio_crypto, config_work);
+
+ virtcrypto_update_status(vcrypto);
+}
+
static int virtcrypto_probe(struct virtio_device *vdev)
{
int err = -EFAULT;
@@ -454,6 +462,8 @@ static int virtcrypto_probe(struct virtio_device *vdev)
if (err)
goto free_engines;
+ INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);
+
return 0;
free_engines:
@@ -490,6 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+ flush_work(&vcrypto->config_work);
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
virtio_reset_device(vdev);
@@ -504,7 +515,7 @@ static void virtcrypto_config_changed(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
- virtcrypto_update_status(vcrypto);
+ schedule_work(&vcrypto->config_work);
}
#ifdef CONFIG_PM_SLEEP
@@ -512,6 +523,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
+ flush_work(&vcrypto->config_work);
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
if (virtcrypto_dev_started(vcrypto))
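The virtio-crypto hunks defer config-space handling from the atomic config_changed callback to a work item and flush that work on remove and freeze, so it can never run against a device that is being torn down. A minimal sketch of the deferral pattern, using only the workqueue calls visible in the hunks; struct my_vdev and the my_*() functions are hypothetical.

#include <linux/virtio.h>
#include <linux/workqueue.h>

struct my_vdev {
	struct virtio_device *vdev;
	struct work_struct config_work;
};

static void my_handle_config_change(struct my_vdev *mv)
{
	/* e.g. re-read config space and update driver state; may sleep */
}

static void my_config_work(struct work_struct *work)
{
	struct my_vdev *mv = container_of(work, struct my_vdev, config_work);

	my_handle_config_change(mv);
}

/* ->config_changed runs in atomic context, so only kick the work item. */
static void my_config_changed(struct virtio_device *vdev)
{
	struct my_vdev *mv = vdev->priv;

	schedule_work(&mv->config_work);
}

/* At probe: INIT_WORK(&mv->config_work, my_config_work);
 * on remove/freeze: flush_work(&mv->config_work) before tearing down. */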
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index c625bb2b5d56..628af51c81af 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
if (!dma_fence_is_signaled(tmp)) {
++count;
- } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
- &tmp->flags)) {
- if (ktime_after(tmp->timestamp, timestamp))
- timestamp = tmp->timestamp;
} else {
- /*
- * Use the current time if the fence is
- * currently signaling.
- */
- timestamp = ktime_get();
+ ktime_t t = dma_fence_timestamp(tmp);
+
+ if (ktime_after(t, timestamp))
+ timestamp = t;
}
}
}
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index af57799c86ce..2e9a316c596a 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence,
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
- while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
- !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
- cpu_relax();
info->timestamp_ns =
- test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
- ktime_to_ns(fence->timestamp) :
- ktime_set(0, 0);
+ dma_fence_is_signaled(fence) ?
+ ktime_to_ns(dma_fence_timestamp(fence)) :
+ ktime_set(0, 0);
return info->status;
}
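Both dma-buf hunks drop open-coded checks of DMA_FENCE_FLAG_TIMESTAMP_BIT, including the cpu_relax() busy-wait in sync_file, in favour of dma_fence_timestamp(), which is expected to fall back to the current time while a fence is still mid-signaling. A minimal sketch of how a caller would report a timestamp with it; report_fence_ns() is hypothetical.

#include <linux/dma-fence.h>
#include <linux/ktime.h>

static u64 report_fence_ns(struct dma_fence *fence)
{
	/* Unsignaled fences have no timestamp yet; report 0, as sync_file does. */
	if (!dma_fence_is_signaled(fence))
		return 0;

	/*
	 * dma_fence_timestamp() is assumed to return the current time if the
	 * fence is still in the middle of signaling, so no busy-waiting on
	 * DMA_FENCE_FLAG_TIMESTAMP_BIT is needed.
	 */
	return ktime_to_ns(dma_fence_timestamp(fence));
}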
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index a0f5741abcc4..6a3abe5b1790 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -92,8 +92,14 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
edma_writel_chreg(fsl_chan, val, ch_sbr);
- if (flags & FSL_EDMA_DRV_HAS_CHMUX)
- edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
+ /*
+ * ch_mux: With the exception of 0, attempts to write a value
+ * already in use will be forced to 0.
+ */
+ if (!edma_readl_chreg(fsl_chan, ch_mux))
+ edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+ }
val = edma_readl_chreg(fsl_chan, ch_csr);
val |= EDMA_V3_CH_CSR_ERQ;
@@ -448,12 +454,25 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
+ csr = le16_to_cpu(tcd->csr);
+
if (fsl_chan->is_sw) {
- csr = le16_to_cpu(tcd->csr);
csr |= EDMA_TCD_CSR_START;
tcd->csr = cpu_to_le16(csr);
}
+ /*
+ * On eDMAv3 the CHn_CSR[DONE] bit must be cleared before enabling
+ * TCDn_CSR[ESG]; eDMAv4 has no such requirement.
+ * Changing MLINK requires clearing CHn_CSR[DONE] on both eDMAv3 and eDMAv4.
+ */
+ if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
+ (csr & EDMA_TCD_CSR_E_SG)) ||
+ ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
+ (csr & EDMA_TCD_CSR_E_LINK)))
+ edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
+
edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 3cc0cc8fc2d0..40d50cc3d75a 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -183,11 +183,23 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
#define FSL_EDMA_DRV_DEV_TO_DEV BIT(11)
#define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12)
+/* Need to clear CHn_CSR[DONE] before enabling the TCD's ESG */
+#define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13)
+/* Need to clear CHn_CSR[DONE] before enabling the TCD's MAJORELINK */
+#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
FSL_EDMA_DRV_DEV_TO_DEV | \
- FSL_EDMA_DRV_ALIGN_64BYTE)
+ FSL_EDMA_DRV_ALIGN_64BYTE | \
+ FSL_EDMA_DRV_CLEAR_DONE_E_SG | \
+ FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
+
+#define FSL_EDMA_DRV_EDMA4 (FSL_EDMA_DRV_SPLIT_REG | \
+ FSL_EDMA_DRV_BUS_8BYTE | \
+ FSL_EDMA_DRV_DEV_TO_DEV | \
+ FSL_EDMA_DRV_ALIGN_64BYTE | \
+ FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
struct fsl_edma_drvdata {
u32 dmamuxs; /* only used before v3 */
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 63d48d046f04..8c4ed7012e23 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -154,18 +154,20 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
fsl_chan = to_fsl_edma_chan(chan);
i = fsl_chan - fsl_edma->chans;
- chan = dma_get_slave_channel(chan);
- chan->device->privatecnt++;
fsl_chan->priority = dma_spec->args[1];
fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
if (!b_chmux && i == dma_spec->args[0]) {
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
} else if (b_chmux && !fsl_chan->srcid) {
/* if controller support channel mux, choose a free channel */
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
fsl_chan->srcid = dma_spec->args[0];
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
@@ -355,7 +357,7 @@ static struct fsl_edma_drvdata imx93_data3 = {
};
static struct fsl_edma_drvdata imx93_data4 = {
- .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 22d6f4e455b7..8f754f922217 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -477,6 +477,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
union idxd_command_reg cmd;
DECLARE_COMPLETION_ONSTACK(done);
u32 stat;
+ unsigned long flags;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -490,7 +491,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
cmd.operand = operand;
cmd.int_req = 1;
- spin_lock(&idxd->cmd_lock);
+ spin_lock_irqsave(&idxd->cmd_lock, flags);
wait_event_lock_irq(idxd->cmd_waitq,
!test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
idxd->cmd_lock);
@@ -507,7 +508,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
* After command submitted, release lock and go to sleep until
* the command completes via interrupt.
*/
- spin_unlock(&idxd->cmd_lock);
+ spin_unlock_irqrestore(&idxd->cmd_lock, flags);
wait_for_completion(&done);
stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
spin_lock(&idxd->cmd_lock);
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index c51dc017b48a..06d12ac39144 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -450,9 +450,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
- synchronize_irq(c->irq);
-
spin_unlock_irqrestore(&c->vc.lock, flags);
+ synchronize_irq(c->irq);
return 0;
}
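The mtk-uart-apdma hunk moves synchronize_irq() outside the vc.lock critical section: waiting for the interrupt handler to finish while holding a lock that the handler itself takes can deadlock. A minimal sketch of the hazard and the safe ordering, with hypothetical names (struct my_chan, my_irq_handler(), my_pause()).

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_chan {
	spinlock_t lock;
	int irq;
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_chan *c = data;

	spin_lock(&c->lock);		/* spins while another CPU holds c->lock */
	/* acknowledge and process the interrupt */
	spin_unlock(&c->lock);
	return IRQ_HANDLED;
}

static void my_pause(struct my_chan *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	/* disable the channel under the lock */
	spin_unlock_irqrestore(&c->lock, flags);

	/*
	 * Only wait for a possibly running handler after dropping the lock;
	 * doing it under the lock could deadlock against my_irq_handler()
	 * spinning on the same lock.
	 */
	synchronize_irq(c->irq);
}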
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 89e82508c133..002833fb1fa0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3668,6 +3668,7 @@ static int __init d40_probe(struct platform_device *pdev)
regulator_disable(base->lcpa_regulator);
regulator_put(base->lcpa_regulator);
}
+ pm_runtime_disable(base->dev);
report_failure:
d40_err(dev, "probe failed\n");
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 5c36811aa134..0b30151fb45c 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1113,8 +1113,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
/* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
- if (chan->trig_mdma && sg_len > 1)
+ if (chan->trig_mdma && sg_len > 1) {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
+ }
for_each_sg(sgl, sg, sg_len, i) {
ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
@@ -1387,11 +1389,12 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
residue = stm32_dma_get_remaining_bytes(chan);
- if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
+ if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
n_sg++;
if (n_sg == chan->desc->num_sgs)
n_sg = 0;
- residue = sg_req->len;
+ if (!chan->trig_mdma)
+ residue = sg_req->len;
}
/*
@@ -1401,7 +1404,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
* residue = remaining bytes from NDTR + remaining
* periods/sg to be transferred
*/
- if (!chan->desc->cyclic || n_sg != 0)
+ if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
for (i = n_sg; i < desc->num_sgs; i++)
residue += desc->sg_req[i].len;
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 0de234022c6d..bae08b3f55c7 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -777,8 +777,6 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
/* Enable interrupts */
ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
- if (sg_len > 1)
- ccr |= STM32_MDMA_CCR_BTIE;
desc->ccr = ccr;
return 0;
@@ -1236,6 +1234,10 @@ static int stm32_mdma_resume(struct dma_chan *c)
unsigned long flags;
u32 status, reg;
+ /* Transfer can be terminated */
+ if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
+ return -EPERM;
+
hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
spin_lock_irqsave(&chan->vchan.lock, flags);
@@ -1316,21 +1318,35 @@ static int stm32_mdma_slave_config(struct dma_chan *c,
static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
struct stm32_mdma_desc *desc,
- u32 curr_hwdesc)
+ u32 curr_hwdesc,
+ struct dma_tx_state *state)
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct stm32_mdma_hwdesc *hwdesc;
- u32 cbndtr, residue, modulo, burst_size;
+ u32 cisr, clar, cbndtr, residue, modulo, burst_size;
int i;
+ cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+
residue = 0;
- for (i = curr_hwdesc + 1; i < desc->count; i++) {
+ /* Get the next hw descriptor to process from current transfer */
+ clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
+ for (i = desc->count - 1; i >= 0; i--) {
hwdesc = desc->node[i].hwdesc;
+
+ if (hwdesc->clar == clar)
+ break; /* Current transfer found, stop accumulating */
+
+ /* Accumulate residue of unprocessed hw descriptors */
residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
}
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+ state->in_flight_bytes = 0;
+ if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
+ state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+
if (!chan->mem_burst)
return residue;
@@ -1360,11 +1376,10 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
vdesc = vchan_find_desc(&chan->vchan, cookie);
if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
- residue = stm32_mdma_desc_residue(chan, chan->desc,
- chan->curr_hwdesc);
+ residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
else if (vdesc)
- residue = stm32_mdma_desc_residue(chan,
- to_stm32_mdma_desc(vdesc), 0);
+ residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);
+
dma_set_residue(state, residue);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 749868b9e80d..7edf2c95282f 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1521,6 +1521,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
if (sbp2_param_exclusive_login) {
sdev->manage_system_start_stop = true;
sdev->manage_runtime_start_stop = true;
+ sdev->manage_shutdown = true;
}
if (sdev->type == TYPE_ROM)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index ce20a60676f0..1974f0ad32ba 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -273,9 +273,13 @@ static __init int efivar_ssdt_load(void)
if (status == EFI_NOT_FOUND) {
break;
} else if (status == EFI_BUFFER_TOO_SMALL) {
- name = krealloc(name, name_size, GFP_KERNEL);
- if (!name)
+ efi_char16_t *name_tmp =
+ krealloc(name, name_size, GFP_KERNEL);
+ if (!name_tmp) {
+ kfree(name);
return -ENOMEM;
+ }
+ name = name_tmp;
continue;
}
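The efivar_ssdt_load() hunk switches to the standard krealloc() idiom: assign the result to a temporary so that, when the reallocation fails, the original buffer is still reachable and can be freed rather than leaked. A minimal sketch of that idiom; grow_buffer() is hypothetical.

#include <linux/errno.h>
#include <linux/slab.h>

static int grow_buffer(void **buf, size_t new_size)
{
	void *tmp = krealloc(*buf, new_size, GFP_KERNEL);

	if (!tmp) {
		kfree(*buf);	/* the old buffer is still valid here */
		*buf = NULL;
		return -ENOMEM;
	}

	*buf = tmp;
	return 0;
}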
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 2fee52ed335d..9d5df683f882 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -605,11 +605,8 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s
break;
case EFI_UNACCEPTED_MEMORY:
- if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY)) {
- efi_warn_once(
-"The system has unaccepted memory, but kernel does not support it\nConsider enabling CONFIG_UNACCEPTED_MEMORY\n");
+ if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
continue;
- }
e820_type = E820_TYPE_RAM;
process_unaccepted_memory(d->phys_addr,
d->phys_addr + PAGE_SIZE * d->num_pages);
@@ -852,6 +849,8 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
unsigned long kernel_entry;
efi_status_t status;
+ boot_params_pointer = boot_params;
+
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
index 37c5a36b9d8c..2748bca192df 100644
--- a/drivers/firmware/efi/libstub/x86-stub.h
+++ b/drivers/firmware/efi/libstub/x86-stub.h
@@ -2,6 +2,8 @@
#include <linux/efi.h>
+extern struct boot_params *boot_params_pointer asm("boot_params");
+
extern void trampoline_32bit_src(void *, bool);
extern const u16 trampoline_ljmp_imm_offset;
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 853f7dc3c21d..135278ddaf62 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -5,9 +5,17 @@
#include <linux/spinlock.h>
#include <asm/unaccepted_memory.h>
-/* Protects unaccepted memory bitmap */
+/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);
+struct accept_range {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+static LIST_HEAD(accepting_list);
+
/*
* accept_memory() -- Consult bitmap and accept the memory if needed.
*
@@ -24,6 +32,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
{
struct efi_unaccepted_memory *unaccepted;
unsigned long range_start, range_end;
+ struct accept_range range, *entry;
unsigned long flags;
u64 unit_size;
@@ -78,20 +87,67 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
end = unaccepted->size * unit_size * BITS_PER_BYTE;
- range_start = start / unit_size;
-
+ range.start = start / unit_size;
+ range.end = DIV_ROUND_UP(end, unit_size);
+retry:
spin_lock_irqsave(&unaccepted_memory_lock, flags);
+
+ /*
+ * Check whether anybody else is already accepting the same range of memory.
+ *
+ * The check is done with unit_size granularity. It is crucial to catch
+ * all accept requests for the same unit_size block, even if they don't
+ * overlap at the physical address level.
+ */
+ list_for_each_entry(entry, &accepting_list, list) {
+ if (entry->end < range.start)
+ continue;
+ if (entry->start >= range.end)
+ continue;
+
+ /*
+ * Somebody else is accepting the range, or at least part of it.
+ *
+ * Drop the lock and retry until it is complete.
+ */
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+ goto retry;
+ }
+
+ /*
+ * Register that the range is about to be accepted.
+ * Make sure nobody else will accept it.
+ */
+ list_add(&range.list, &accepting_list);
+
+ range_start = range.start;
for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
- DIV_ROUND_UP(end, unit_size)) {
+ range.end) {
unsigned long phys_start, phys_end;
unsigned long len = range_end - range_start;
phys_start = range_start * unit_size + unaccepted->phys_base;
phys_end = range_end * unit_size + unaccepted->phys_base;
+ /*
+ * Keep interrupts disabled until the accept operation is
+ * complete in order to prevent deadlocks.
+ *
+ * Enabling interrupts before calling arch_accept_memory()
+ * creates an opportunity for an interrupt handler to request
+ * acceptance for the same memory. The handler will continuously
+ * spin with interrupts disabled, preventing the other task from
+ * making progress with the acceptance process.
+ */
+ spin_unlock(&unaccepted_memory_lock);
+
arch_accept_memory(phys_start, phys_end);
+
+ spin_lock(&unaccepted_memory_lock);
bitmap_clear(unaccepted->bitmap, range_start, len);
}
+
+ list_del(&range.list);
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
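The accept_memory() change serializes concurrent callers by publishing the range being accepted on accepting_list: a caller that finds an overlapping entry drops the lock and retries until the first caller removes its entry. A heavily reduced sketch of that claim/retry scheme follows; every name in it (claimed_range, pending, pending_lock, claim_range(), do_accept()) is hypothetical, and the unit_size bookkeeping and keep-interrupts-disabled detail of the real code are simplified away.

#include <linux/list.h>
#include <linux/spinlock.h>

struct claimed_range {
	struct list_head list;
	unsigned long start, end;
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);

static void claim_range(unsigned long start, unsigned long end,
			void (*do_accept)(unsigned long start, unsigned long end))
{
	struct claimed_range range = { .start = start, .end = end }, *entry;
	unsigned long flags;

retry:
	spin_lock_irqsave(&pending_lock, flags);
	list_for_each_entry(entry, &pending, list) {
		if (entry->end < start || entry->start >= end)
			continue;
		/* Somebody else is working on (part of) this range: retry. */
		spin_unlock_irqrestore(&pending_lock, flags);
		goto retry;
	}
	list_add(&range.list, &pending);	/* on-stack entry, as above */
	spin_unlock_irqrestore(&pending_lock, flags);

	do_accept(start, end);			/* the slow part, lock dropped */

	spin_lock_irqsave(&pending_lock, flags);
	list_del(&range.list);
	spin_unlock_irqrestore(&pending_lock, flags);
}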
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 508eab346fc6..a48a58e0c61f 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -114,11 +114,11 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
dsp_chan->idx = i % 2;
dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(dsp_chan->ch)) {
- kfree(dsp_chan->name);
ret = PTR_ERR(dsp_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
chan_name, ret);
+ kfree(dsp_chan->name);
goto out;
}
diff --git a/drivers/fpga/tests/Kconfig b/drivers/fpga/tests/Kconfig
index e4a64815f16d..d4e55204c092 100644
--- a/drivers/fpga/tests/Kconfig
+++ b/drivers/fpga/tests/Kconfig
@@ -1,6 +1,6 @@
config FPGA_KUNIT_TESTS
- tristate "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS
- depends on FPGA && FPGA_REGION && FPGA_BRIDGE && KUNIT=y
+ bool "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS
+ depends on FPGA=y && FPGA_REGION=y && FPGA_BRIDGE=y && KUNIT=y && MODULES=n
default KUNIT_ALL_TESTS
help
This builds unit tests for the FPGA subsystem
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
index 9f9d50ee7871..baab07e3fc59 100644
--- a/drivers/fpga/tests/fpga-region-test.c
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -93,6 +93,8 @@ static void fpga_region_test_class_find(struct kunit *test)
region = fpga_region_class_find(NULL, &ctx->region_pdev->dev, fake_region_match);
KUNIT_EXPECT_PTR_EQ(test, region, ctx->region);
+
+ put_device(&region->dev);
}
/*
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index dbc7ba0ee72c..656d6b1dddb5 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -126,14 +126,14 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
unsigned long mask = BIT(gpio);
u32 val;
+ vf610_gpio_set(chip, gpio, value);
+
if (port->sdata && port->sdata->have_paddr) {
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
}
- vf610_gpio_set(chip, gpio, value);
-
return pinctrl_gpio_direction_output(chip->base + gpio);
}
@@ -246,7 +246,8 @@ static const struct irq_chip vf610_irqchip = {
.irq_unmask = vf610_gpio_irq_unmask,
.irq_set_type = vf610_gpio_irq_set_type,
.irq_set_wake = vf610_gpio_irq_set_wake,
- .flags = IRQCHIP_IMMUTABLE,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND
+ | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index fbda452fb4d6..51e41676de0b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -951,6 +951,7 @@ static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
if (!propname)
return ERR_PTR(-EINVAL);
+ memset(&lookup, 0, sizeof(lookup));
lookup.index = index;
ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 7d6daf8d2bfa..e036011137aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1103,7 +1103,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
if (unlikely(ret))
goto error;
- ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base);
+ ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
drm_exec_retry_on_contention(&ctx->exec);
if (unlikely(ret))
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index efdb1c48f431..d93a8961274c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -65,7 +65,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
}
amdgpu_sync_create(&p->sync);
- drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0dc9c655c4fb..76549c2cffeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -47,7 +47,6 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
switch (ctx_prio) {
- case AMDGPU_CTX_PRIORITY_UNSET:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_NORMAL:
@@ -55,6 +54,11 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return true;
default:
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ /* UNSET is not a valid priority and is not carried around;
+ * the only caller, amdgpu_ctx_ioctl(), maps it to NORMAL
+ * instead.
+ */
return false;
}
}
@@ -64,7 +68,8 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
switch (ctx_prio) {
case AMDGPU_CTX_PRIORITY_UNSET:
- return DRM_SCHED_PRIORITY_UNSET;
+ pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
+ return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_VERY_LOW:
return DRM_SCHED_PRIORITY_MIN;
@@ -94,9 +99,6 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
int32_t priority)
{
- if (!amdgpu_ctx_priority_is_valid(priority))
- return -EINVAL;
-
/* NORMAL and below are accessible by everyone */
if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
return 0;
@@ -631,8 +633,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
return 0;
}
-
-
static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv, uint32_t id,
bool set, u32 *stable_pstate)
@@ -675,8 +675,10 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
id = args->in.ctx_id;
priority = args->in.priority;
- /* For backwards compatibility reasons, we need to accept
- * ioctls with garbage in the priority field */
+ /* For backwards compatibility, we need to accept ioctls with garbage
+ * in the priority field. Garbage values in the priority field result
+ * in the priority being set to NORMAL.
+ */
if (!amdgpu_ctx_priority_is_valid(priority))
priority = AMDGPU_CTX_PRIORITY_NORMAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 12210598e5b8..ba3a87cb88cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -403,7 +403,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
continue;
}
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ /* Reserve fences for two SDMA page table updates */
+ r = dma_resv_reserve_fences(resv, 2);
+ if (!r)
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (!r)
r = amdgpu_vm_handle_moved(adev, vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
index da4be0bbb446..8eee5d783a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
@@ -142,6 +142,10 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
int r;
int size;
+ /* SI HW does not have doorbells, skip allocation */
+ if (adev->doorbell.num_kernel_doorbells == 0)
+ return 0;
+
/* Reserve first num_kernel_doorbells (page-aligned) for kernel ops */
size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index f3ee83cdf97e..d28e21baef16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -252,7 +252,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;
- if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
+ if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;
amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f5daadcec865..82f25996ff5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1090,7 +1090,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
+ if (abo->tbo.resource &&
+ abo->tbo.resource->mem_type == TTM_PL_VRAM)
bo = gem_to_amdgpu_bo(gobj);
}
mem = bo->tbo.resource;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 6a8494f98d3e..fe8ba9e9837b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1124,7 +1124,7 @@ static void vi_program_aspm(struct amdgpu_device *adev)
bool bL1SS = false;
bool bClkReqSupport = true;
- if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported())
return;
if (adev->flags & AMD_IS_APU ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 3a9077b60029..d08e60dff46d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1262,6 +1262,9 @@ static void disable_vbios_mode_if_required(
if (stream == NULL)
continue;
+ if (stream->apply_seamless_boot_optimization)
+ continue;
+
// only looking for first odm pipe
if (pipe->prev_odm_pipe)
continue;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index f448b903e190..84148a79414b 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -692,7 +692,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
return container_of(bridge, struct ti_sn65dsi86, bridge);
}
-static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
+static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 *pdata)
{
int val;
struct mipi_dsi_host *host;
@@ -707,7 +707,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
if (!host)
return -EPROBE_DEFER;
- dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ dsi = devm_mipi_dsi_device_register_full(&adev->dev, host, &info);
if (IS_ERR(dsi))
return PTR_ERR(dsi);
@@ -725,7 +725,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
pdata->dsi = dsi;
- return devm_mipi_dsi_attach(dev, dsi);
+ return devm_mipi_dsi_attach(&adev->dev, dsi);
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
@@ -1298,9 +1298,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
struct device_node *np = pdata->dev->of_node;
int ret;
- pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
+ pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0);
if (IS_ERR(pdata->next_bridge))
- return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge),
+ return dev_err_probe(&adev->dev, PTR_ERR(pdata->next_bridge),
"failed to create panel bridge\n");
ti_sn_bridge_parse_lanes(pdata, np);
@@ -1319,9 +1319,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
drm_bridge_add(&pdata->bridge);
- ret = ti_sn_attach_host(pdata);
+ ret = ti_sn_attach_host(adev, pdata);
if (ret) {
- dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
+ dev_err_probe(&adev->dev, ret, "failed to attach dsi host\n");
goto err_remove_bridge;
}
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index ed96cfcfa304..8c929ef72c72 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -2574,14 +2574,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
+ if (!mstb)
+ return NULL;
+
if (memcmp(mstb->guid, guid, 16) == 0)
return mstb;
list_for_each_entry(port, &mstb->ports, next) {
- if (!port->mstb)
- continue;
-
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
if (found_mstb)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 292e38eb6218..60794fcde1d5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -290,7 +290,8 @@ static int
update_connector_routing(struct drm_atomic_state *state,
struct drm_connector *connector,
struct drm_connector_state *old_connector_state,
- struct drm_connector_state *new_connector_state)
+ struct drm_connector_state *new_connector_state,
+ bool added_by_user)
{
const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
@@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
* there's a chance the connector may have been destroyed during the
* process, but it's better to ignore that then cause
* drm_atomic_helper_resume() to fail.
+ *
+ * Last, we want to ignore connector registration when the connector
+ * was not pulled into the atomic state by user-space (i.e., it was
+ * pulled in by the driver, e.g. when updating a DP-MST stream).
*/
if (!state->duplicated && drm_connector_is_unregistered(connector) &&
- crtc_state->active) {
+ added_by_user && crtc_state->active) {
drm_dbg_atomic(connector->dev,
"[CONNECTOR:%d:%s] is not registered\n",
connector->base.id, connector->name);
@@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_connector *connector;
struct drm_connector_state *old_connector_state, *new_connector_state;
int i, ret;
- unsigned int connectors_mask = 0;
+ unsigned int connectors_mask = 0, user_connectors_mask = 0;
+
+ for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
+ user_connectors_mask |= BIT(i);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
bool has_connectors =
@@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
*/
ret = update_connector_routing(state, connector,
old_connector_state,
- new_connector_state);
+ new_connector_state,
+ BIT(i) & user_connectors_mask);
if (ret)
return ret;
if (old_connector_state->crtc) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 340da8257b51..4b71040ae5be 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -123,6 +123,9 @@ static const struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
+ /* BenQ GW2765 */
+ EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
+
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6129b89bb366..44a948b80ee1 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -540,7 +540,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
struct page **pages;
struct folio *folio;
struct folio_batch fbatch;
- int i, j, npages;
+ long i, j, npages;
if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL);
@@ -564,11 +564,13 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
i = 0;
while (i < npages) {
+ long nr;
folio = shmem_read_folio_gfp(mapping, i,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
goto fail;
- for (j = 0; j < folio_nr_pages(folio); j++, i++)
+ nr = min(npages - i, folio_nr_pages(folio));
+ for (j = 0; j < nr; j++, i++)
pages[i] = folio_file_page(folio, i);
/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 1b00ef2c6185..80e4ec6ee403 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -2553,8 +2553,7 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
- XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1),
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
lane_pipe_reset);
if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index aa4d842d4c5a..a2195e28b625 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -235,6 +235,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case 0:
case -EAGAIN:
case -ENOSPC: /* transient failure to evict? */
+ case -ENOBUFS: /* temporarily out of fences? */
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
@@ -915,11 +916,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
{
struct file *file;
- rcu_read_lock();
- file = READ_ONCE(i915->gem.mmap_singleton);
- if (file && !get_file_rcu(file))
- file = NULL;
- rcu_read_unlock();
+ file = get_file_active(&i915->gem.mmap_singleton);
if (file)
return file;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 0b414eae1683..2c0f1f3e28ff 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -376,9 +376,26 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* driver threads, but also with hardware/firmware agents. A dedicated
* locking register is used.
*/
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
+ /*
+ * The steering control and semaphore registers are inside an
+ * "always on" power domain with respect to RC6. However there
+ * are some issues if higher-level platform sleep states are
+ * entering/exiting at the same time these registers are
+ * accessed. Grabbing GT forcewake and holding it over the
+ * entire lock/steer/unlock cycle ensures that those sleep
+ * states have been fully exited before we access these
+ * registers. This wakeref will be released in the unlock
+ * routine.
+ *
+ * This is expected to become a formally documented/numbered
+ * workaround soon.
+ */
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);
+
err = wait_for(intel_uncore_read_fw(gt->uncore,
MTL_STEER_SEMAPHORE) == 0x1, 100);
+ }
/*
* Even on platforms with a hardware lock, we'll continue to grab
@@ -415,8 +432,11 @@ void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
{
spin_unlock_irqrestore(&gt->mcr_lock, flags);
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_GT);
+ }
}
/**
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 04bc1f4a1115..59e1e21df271 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -482,8 +482,7 @@ static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
{
return !(oa_report_id(stream, report) &
- stream->perf->gen8_valid_ctx_bit) &&
- GRAPHICS_VER(stream->perf->i915) <= 11;
+ stream->perf->gen8_valid_ctx_bit);
}
static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
@@ -5106,6 +5105,7 @@ static void i915_perf_init_info(struct drm_i915_private *i915)
perf->gen8_valid_ctx_bit = BIT(16);
break;
case 12:
+ perf->gen8_valid_ctx_bit = BIT(16);
/*
* Calculate offset at runtime in oa_pin_context for gen12 and
* cache the value in perf->ctx_oactxctrl_offset.
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d35973b41186..7b1076b5e748 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -832,9 +832,18 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (pmu->closed)
+ goto out;
+
if (flags & PERF_EF_UPDATE)
i915_pmu_event_read(event);
i915_pmu_disable(event);
+
+out:
event->hw.state = PERF_HES_STOPPED;
}
diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig
index fa7a88368809..1df22a852a23 100644
--- a/drivers/gpu/drm/logicvc/Kconfig
+++ b/drivers/gpu/drm/logicvc/Kconfig
@@ -5,5 +5,7 @@ config DRM_LOGICVC
select DRM_KMS_HELPER
select DRM_KMS_DMA_HELPER
select DRM_GEM_DMA_HELPER
+ select REGMAP
+ select REGMAP_MMIO
help
DRM display driver for the logiCVC programmable logic block from Xylon
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 9f364df52478..0e0a41b2f57f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -239,6 +239,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
npages = obj->size >> PAGE_SHIFT;
mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
if (!mtk_gem->pages) {
+ sg_free_table(sgt);
kfree(sgt);
return -ENOMEM;
}
@@ -248,12 +249,15 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!mtk_gem->kvaddr) {
+ sg_free_table(sgt);
kfree(sgt);
kfree(mtk_gem->pages);
return -ENOMEM;
}
-out:
+ sg_free_table(sgt);
kfree(sgt);
+
+out:
iosys_map_set_vaddr(map, mtk_gem->kvaddr);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index c2aaaded07ed..0be195f9149c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -119,6 +119,7 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
struct dpu_sw_pipe_cfg *pipe_cfg)
{
int src_width, src_height, dst_height, fps;
+ u64 plane_pixel_rate, plane_bit_rate;
u64 plane_prefill_bw;
u64 plane_bw;
u32 hw_latency_lines;
@@ -136,13 +137,12 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
scale_factor = src_height > dst_height ?
mult_frac(src_height, 1, dst_height) : 1;
- plane_bw =
- src_width * mode->vtotal * fps * fmt->bpp *
- scale_factor;
+ plane_pixel_rate = src_width * mode->vtotal * fps;
+ plane_bit_rate = plane_pixel_rate * fmt->bpp;
- plane_prefill_bw =
- src_width * hw_latency_lines * fps * fmt->bpp *
- scale_factor * mode->vtotal;
+ plane_bw = plane_bit_rate * scale_factor;
+
+ plane_prefill_bw = plane_bw * hw_latency_lines;
if ((vbp+vpw) > hw_latency_lines)
do_div(plane_prefill_bw, (vbp+vpw));
@@ -733,9 +733,11 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct dpu_format *fmt)
+ const struct dpu_format *fmt,
+ const struct drm_display_mode *mode)
{
uint32_t min_src_size;
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -774,6 +776,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
return -EINVAL;
}
+ /* max clk check */
+ if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) {
+ DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n");
+ return -E2BIG;
+ }
+
return 0;
}
@@ -899,12 +907,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
}
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt);
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
if (ret)
return ret;
if (r_pipe->sspp) {
- ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt);
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
+ &crtc_state->adjusted_mode);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index a7a5c7e0ab92..77a8d9366ed7 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1774,13 +1774,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
return rc;
while (--link_train_max_retries) {
- rc = dp_ctrl_reinitialize_mainlink(ctrl);
- if (rc) {
- DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
- rc);
- break;
- }
-
training_step = DP_TRAINING_NONE;
rc = dp_ctrl_setup_main_link(ctrl, &training_step);
if (rc == 0) {
@@ -1832,6 +1825,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
/* stop link training before start re training */
dp_ctrl_clear_training_pattern(ctrl);
}
+
+ rc = dp_ctrl_reinitialize_mainlink(ctrl);
+ if (rc) {
+ DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
+ break;
+ }
}
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 42427129acea..6375daaeb98e 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -1090,7 +1090,7 @@ int dp_link_process_request(struct dp_link *dp_link)
} else if (dp_link_read_psr_error_status(link)) {
DRM_ERROR("PSR IRQ_HPD received\n");
} else if (dp_link_psr_capability_changed(link)) {
- drm_dbg_dp(link->drm_dev, "PSR Capability changed");
+ drm_dbg_dp(link->drm_dev, "PSR Capability changed\n");
} else {
ret = dp_link_process_link_status_update(link);
if (!ret) {
@@ -1107,7 +1107,7 @@ int dp_link_process_request(struct dp_link *dp_link)
}
}
- drm_dbg_dp(link->drm_dev, "sink request=%#x",
+ drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
dp_link->sink_request);
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 5d9ec27c89d3..3d6fb708dc22 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1082,9 +1082,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
{
+ u32 data;
+
if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
return;
+ data = dsi_read(msm_host, REG_DSI_STATUS0);
+
+ /* if video mode engine is not busy, it's because
+ * either timing engine was not turned on or the
+ * DSI controller has finished transmitting the video
+ * data already, so no need to wait in those cases
+ */
+ if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
+ return;
+
if (msm_host->power_on && msm_host->enabled) {
dsi_wait4video_done(msm_host);
/* delay 4 ms to skip BLLP */
@@ -1894,10 +1906,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (msm_host->irq < 0) {
- ret = msm_host->irq;
- dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
- return ret;
+ if (!msm_host->irq) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -EINVAL;
}
/* do not autoenable, will be enabled later */
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 2e87dd6cb17b..348c66b14683 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -511,7 +511,7 @@ static int mdss_remove(struct platform_device *pdev)
static const struct msm_mdss_data msm8998_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_1_0,
- .highest_bank_bit = 1,
+ .highest_bank_bit = 2,
};
static const struct msm_mdss_data qcm2290_data = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
index 46b057fe1412..3249e5c1c893 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
@@ -62,6 +62,18 @@ nvkm_uconn_uevent_gpio(struct nvkm_object *object, u64 token, u32 bits)
return object->client->event(token, &args, sizeof(args.v0));
}
+static bool
+nvkm_connector_is_dp_dms(u8 type)
+{
+ switch (type) {
+ case DCB_CONNECTOR_DMS59_DP0:
+ case DCB_CONNECTOR_DMS59_DP1:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int
nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
@@ -101,7 +113,7 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_GPIO_LO;
if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ) {
/* TODO: support DP IRQ on ANX9805 and remove this hack. */
- if (!outp->info.location)
+ if (!outp->info.location && !nvkm_connector_is_dp_dms(conn->info.type))
return -EINVAL;
}
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 5ac926281d2c..c9087f474cbc 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1342,9 +1342,7 @@ static const struct panel_init_cmd starry_himax83102_j02_init_cmd[] = {
_INIT_DCS_CMD(0xB1, 0x01, 0xBF, 0x11),
_INIT_DCS_CMD(0xCB, 0x86),
_INIT_DCS_CMD(0xD2, 0x3C, 0xFA),
- _INIT_DCS_CMD(0xE9, 0xC5),
- _INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
- _INIT_DCS_CMD(0xE9, 0x3F),
+ _INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
_INIT_DCS_CMD(0xE7, 0x02, 0x00, 0x28, 0x01, 0x7E, 0x0F, 0x7E, 0x10, 0xA0, 0x00, 0x00, 0x20, 0x40, 0x50, 0x40),
_INIT_DCS_CMD(0xBD, 0x02),
_INIT_DCS_CMD(0xD8, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0),
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index feb665df35a1..95c8472d878a 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -976,32 +976,6 @@ static const struct panel_desc auo_b116xak01 = {
},
};
-static const struct drm_display_mode auo_b116xw03_mode = {
- .clock = 70589,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 40,
- .htotal = 1366 + 40 + 40 + 32,
- .vdisplay = 768,
- .vsync_start = 768 + 10,
- .vsync_end = 768 + 10 + 12,
- .vtotal = 768 + 10 + 12 + 6,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xw03 = {
- .modes = &auo_b116xw03_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .enable = 400,
- },
-};
-
static const struct drm_display_mode auo_b133han05_mode = {
.clock = 142600,
.hdisplay = 1920,
@@ -1726,9 +1700,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b116xa01",
.data = &auo_b116xak01,
}, {
- .compatible = "auo,b116xw03",
- .data = &auo_b116xw03,
- }, {
.compatible = "auo,b133han05",
.data = &auo_b133han05,
}, {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 95959dcc6e0e..dd7928d9570f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -919,6 +919,38 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
+static const struct drm_display_mode auo_b116xw03_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+ .modes = &auo_b116xw03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .prepare = 1,
+ .enable = 200,
+ .disable = 200,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_g070vvn01_timings = {
.pixelclock = { 33300000, 34209000, 45000000 },
.hactive = { 800, 800, 800 },
@@ -4103,6 +4135,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
+ .compatible = "auo,b116xw03",
+ .data = &auo_b116xw03,
+ }, {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 506371c42745..5a3a622fc672 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -929,7 +929,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
if (next) {
next->s_fence->scheduled.timestamp =
- job->s_fence->finished.timestamp;
+ dma_fence_timestamp(&job->s_fence->finished);
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index ff86ba1ae1b8..8ea120eb8674 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -745,7 +745,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
if (ret) {
- drm_err(dev, "could not acquire memory range %pr: %d\n", &res, ret);
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 7726a72befc5..d48b39132b32 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -232,10 +232,6 @@ void ttm_device_fini(struct ttm_device *bdev)
struct ttm_resource_manager *man;
unsigned i;
- man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
- ttm_resource_manager_set_used(man, false);
- ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
-
mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
mutex_unlock(&ttm_global_mutex);
@@ -243,6 +239,10 @@ void ttm_device_fini(struct ttm_device *bdev)
drain_workqueue(bdev->wq);
destroy_workqueue(bdev->wq);
+ man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&man->lru[0]))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index c43853597776..2bfac3aad7b7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -34,6 +34,8 @@
static void vmw_bo_release(struct vmw_bo *vbo)
{
+ WARN_ON(vbo->tbo.base.funcs &&
+ kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
drm_gem_object_release(&vbo->tbo.base);
}
@@ -497,7 +499,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
if (!(flags & drm_vmw_synccpu_allow_cs)) {
atomic_dec(&vmw_bo->cpu_writers);
}
- vmw_user_bo_unref(vmw_bo);
+ vmw_user_bo_unref(&vmw_bo);
}
return ret;
@@ -539,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
return ret;
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
- vmw_user_bo_unref(vbo);
+ vmw_user_bo_unref(&vbo);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
@@ -612,7 +614,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
}
*out = to_vmw_bo(gobj);
- ttm_bo_get(&(*out)->tbo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 1d433fceed3d..0d496dc9c6af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -195,12 +195,19 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
return buf;
}
-static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
+static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
{
- if (vbo) {
- ttm_bo_put(&vbo->tbo);
- drm_gem_object_put(&vbo->tbo.base);
- }
+ drm_gem_object_get(&vbo->tbo.base);
+ return vbo;
+}
+
+static inline void vmw_user_bo_unref(struct vmw_bo **buf)
+{
+ struct vmw_bo *tmp_buf = *buf;
+
+ *buf = NULL;
+ if (tmp_buf)
+ drm_gem_object_put(&tmp_buf->tbo.base);
}
static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index c0b24d1cacbf..a7c07692262b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_bo_create(dev_priv, &bo_params, &buf);
+ ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
@@ -502,7 +502,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
vmw_resource_mob_attach(res);
/* Let go of the old mob. */
- vmw_bo_unreference(&old_buf);
+ vmw_user_bo_unref(&old_buf);
res->id = vcotbl->type;
ret = dma_resv_reserve_fences(bo->base.resv, 1);
@@ -521,7 +521,7 @@ out_map_new:
out_wait:
ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
- vmw_bo_unreference(&buf);
+ vmw_user_bo_unref(&buf);
out_done:
MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 58bfdf203eca..3cd5090dedfc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -853,6 +853,10 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
/**
* GEM related functionality - vmwgfx_gem.c
*/
+struct vmw_bo_params;
+int vmw_gem_object_create(struct vmw_private *vmw,
+ struct vmw_bo_params *params,
+ struct vmw_bo **p_vbo);
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 98e0723ca6f5..36987ef3fc30 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1151,7 +1151,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
SVGAMobId *id,
struct vmw_bo **vmw_bo_p)
{
- struct vmw_bo *vmw_bo;
+ struct vmw_bo *vmw_bo, *tmp_bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
@@ -1164,7 +1164,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
- vmw_user_bo_unref(vmw_bo);
+ tmp_bo = vmw_bo;
+ vmw_user_bo_unref(&tmp_bo);
if (unlikely(ret != 0))
return ret;
@@ -1206,7 +1207,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
SVGAGuestPtr *ptr,
struct vmw_bo **vmw_bo_p)
{
- struct vmw_bo *vmw_bo;
+ struct vmw_bo *vmw_bo, *tmp_bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
@@ -1220,7 +1221,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
- vmw_user_bo_unref(vmw_bo);
+ tmp_bo = vmw_bo;
+ vmw_user_bo_unref(&tmp_bo);
if (unlikely(ret != 0))
return ret;
@@ -1619,7 +1621,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
- ((unsigned long) header + header->size + sizeof(header));
+ ((unsigned long) header + header->size + sizeof(*header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(*cmd));
struct vmw_resource *ctx;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index c0da89e16e6f..8b1eb0061610 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -111,6 +111,20 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vm_ops = &vmw_vm_ops,
};
+int vmw_gem_object_create(struct vmw_private *vmw,
+ struct vmw_bo_params *params,
+ struct vmw_bo **p_vbo)
+{
+ int ret = vmw_bo_create(vmw, params, p_vbo);
+
+ if (ret != 0)
+ goto out_no_bo;
+
+ (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
+out_no_bo:
+ return ret;
+}
+
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -126,12 +140,10 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
.pin = false
};
- ret = vmw_bo_create(dev_priv, &params, p_vbo);
+ ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
- (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-
ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
out_no_bo:
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1489ad73c103..818b7f109f53 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1471,8 +1471,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
- vmw_bo_unreference(&res->guest_memory_bo);
- res->guest_memory_bo = vmw_bo_reference(bo_mob);
+ vmw_user_bo_unref(&res->guest_memory_bo);
+ res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
res->guest_memory_offset = 0;
vmw_resource_unreserve(res, false, false, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1666,7 +1666,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo)
- vmw_user_bo_unref(bo);
+ vmw_user_bo_unref(&bo);
if (surface)
vmw_surface_unreference(&surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index fb85f244c3d0..c45b4724e414 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
- vmw_user_bo_unref(buf);
+ vmw_user_bo_unref(&buf);
out_unlock:
mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 71eeabf001c8..ca300c7427d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -141,7 +141,7 @@ static void vmw_resource_release(struct kref *kref)
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
ttm_bo_unreserve(bo);
- vmw_bo_unreference(&res->guest_memory_bo);
+ vmw_user_bo_unref(&res->guest_memory_bo);
}
if (likely(res->hw_destroy != NULL)) {
@@ -338,7 +338,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
+ ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
@@ -457,11 +457,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
vmw_resource_mob_detach(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
- vmw_bo_unreference(&res->guest_memory_bo);
+ vmw_user_bo_unref(&res->guest_memory_bo);
}
if (new_guest_memory_bo) {
- res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
+ res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
/*
* The validation code should already have added a
@@ -551,7 +551,7 @@ out_no_reserve:
ttm_bo_put(val_buf->bo);
val_buf->bo = NULL;
if (guest_memory_dirty)
- vmw_bo_unreference(&res->guest_memory_bo);
+ vmw_user_bo_unref(&res->guest_memory_bo);
return ret;
}
@@ -727,7 +727,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
goto out_no_validate;
else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
WARN_ON_ONCE(vmw_resource_mob_attached(res));
- vmw_bo_unreference(&res->guest_memory_bo);
+ vmw_user_bo_unref(&res->guest_memory_bo);
}
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1e81ff2422cf..a01ca3226d0a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -180,7 +180,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
res->guest_memory_size = size;
if (byte_code) {
- res->guest_memory_bo = vmw_bo_reference(byte_code);
+ res->guest_memory_bo = vmw_user_bo_ref(byte_code);
res->guest_memory_offset = offset;
}
shader->size = size;
@@ -809,7 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
shader_type, num_input_sig,
num_output_sig, tfile, shader_handle);
out_bad_arg:
- vmw_user_bo_unref(buffer);
+ vmw_user_bo_unref(&buffer);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5db403ee8261..3829be282ff0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -686,9 +686,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
- if (res->guest_memory_bo)
- drm_gem_object_put(&res->guest_memory_bo->tbo.base);
-
*p_base = NULL;
vmw_resource_unreference(&res);
}
@@ -855,23 +852,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
* expect a backup buffer to be present.
*/
if (dev_priv->has_mob && req->shareable) {
- uint32_t backup_handle;
-
- ret = vmw_gem_object_create_with_handle(dev_priv,
- file_priv,
- res->guest_memory_size,
- &backup_handle,
- &res->guest_memory_bo);
+ struct vmw_bo_params params = {
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_device,
+ .size = res->guest_memory_size,
+ .pin = false
+ };
+
+ ret = vmw_gem_object_create(dev_priv,
+ &params,
+ &res->guest_memory_bo);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_reference(res->guest_memory_bo);
- /*
- * We don't expose the handle to the userspace and surface
- * already holds a gem reference
- */
- drm_gem_handle_delete(file_priv, backup_handle);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1512,7 +1507,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (ret == 0) {
if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
- vmw_bo_unreference(&res->guest_memory_bo);
+ vmw_user_bo_unref(&res->guest_memory_bo);
ret = -EINVAL;
goto out_unlock;
} else {
@@ -1526,8 +1521,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
res->guest_memory_size,
&backup_handle,
&res->guest_memory_bo);
- if (ret == 0)
- vmw_bo_reference(res->guest_memory_bo);
}
if (unlikely(ret != 0)) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 66dc5f97a009..8311e1028ddb 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -610,7 +610,8 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
&flat_buf->daddr,
- DMA_FROM_DEVICE, GFP_KERNEL);
+ DMA_FROM_DEVICE,
+ GFP_KERNEL | __GFP_NOWARN);
if (!flat_buf->vaddr) {
kfree(flat_buf);
return -ENOMEM;
@@ -1174,16 +1175,6 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
}
/*
- * In sysFS mode we can have multiple writers per sink. Since this
- * sink is already enabled no memory is needed and the HW need not be
- * touched, even if the buffer size has changed.
- */
- if (drvdata->mode == CS_MODE_SYSFS) {
- atomic_inc(&csdev->refcnt);
- goto out;
- }
-
- /*
* If we don't have a buffer or it doesn't match the requested size,
* use the buffer allocated above. Otherwise reuse the existing buffer.
*/
@@ -1204,7 +1195,7 @@ out:
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
- int ret;
+ int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etr_buf *sysfs_buf = tmc_etr_get_sysfs_buffer(csdev);
@@ -1213,12 +1204,24 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
return PTR_ERR(sysfs_buf);
spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /*
+ * In sysFS mode we can have multiple writers per sink. Since this
+ * sink is already enabled no memory is needed and the HW need not be
+ * touched, even if the buffer size has changed.
+ */
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ atomic_inc(&csdev->refcnt);
+ goto out;
+ }
+
ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
atomic_inc(&csdev->refcnt);
}
+out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (!ret)
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 5a416b39b818..28e2a5fc4528 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -749,6 +749,8 @@ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
+
+ bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}
static int aspeed_i2c_reg_slave(struct i2c_client *client)
@@ -765,7 +767,6 @@ static int aspeed_i2c_reg_slave(struct i2c_client *client)
__aspeed_i2c_reg_slave(bus, client->addr);
bus->slave = client;
- bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
spin_unlock_irqrestore(&bus->lock, flags);
return 0;
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 579b30581725..0d3c9a041b56 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1059,9 +1059,10 @@ static int stm32f7_i2c_smbus_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
/* Configure PEC */
if ((flags & I2C_CLIENT_PEC) && f7_msg->size != I2C_SMBUS_QUICK) {
cr1 |= STM32F7_I2C_CR1_PECEN;
- cr2 |= STM32F7_I2C_CR2_PECBYTE;
- if (!f7_msg->read_write)
+ if (!f7_msg->read_write) {
+ cr2 |= STM32F7_I2C_CR2_PECBYTE;
f7_msg->count++;
+ }
} else {
cr1 &= ~STM32F7_I2C_CR1_PECEN;
cr2 &= ~STM32F7_I2C_CR2_PECBYTE;
@@ -1149,8 +1150,10 @@ static void stm32f7_i2c_smbus_rep_start(struct stm32f7_i2c_dev *i2c_dev)
f7_msg->stop = true;
/* Add one byte for PEC if needed */
- if (cr1 & STM32F7_I2C_CR1_PECEN)
+ if (cr1 & STM32F7_I2C_CR1_PECEN) {
+ cr2 |= STM32F7_I2C_CR2_PECBYTE;
f7_msg->count++;
+ }
/* Set number of bytes to be transferred */
cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK);
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 22f2280eab7f..9f2e4aa28159 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -61,7 +61,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
if (ret)
goto err;
- adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
+ adap = of_get_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
if (!adap) {
ret = -ENODEV;
goto err_with_revert;
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index baccf4bfaf02..8305661e1253 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -52,7 +52,7 @@ static struct i2c_adapter *mux_parent_adapter(struct device *dev)
dev_err(dev, "Cannot parse i2c-parent\n");
return ERR_PTR(-ENODEV);
}
- parent = of_find_i2c_adapter_by_node(parent_np);
+ parent = of_get_i2c_adapter_by_node(parent_np);
of_node_put(parent_np);
if (!parent)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 18236b9fa14a..6ebca7bfd8a2 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -62,7 +62,7 @@ static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev)
dev_err(dev, "Cannot parse i2c-parent\n");
return ERR_PTR(-ENODEV);
}
- parent = of_find_i2c_adapter_by_node(parent_np);
+ parent = of_get_i2c_adapter_by_node(parent_np);
of_node_put(parent_np);
if (!parent)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 69d1103b9508..b64fd365f83f 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -177,6 +177,7 @@ struct ad7192_chip_info {
struct ad7192_state {
const struct ad7192_chip_info *chip_info;
struct regulator *avdd;
+ struct regulator *vref;
struct clk *mclk;
u16 int_vref_mv;
u32 fclk;
@@ -1008,10 +1009,30 @@ static int ad7192_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVdd supply\n");
- ret = regulator_get_voltage(st->avdd);
- if (ret < 0) {
- dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
- return ret;
+ st->vref = devm_regulator_get_optional(&spi->dev, "vref");
+ if (IS_ERR(st->vref)) {
+ if (PTR_ERR(st->vref) != -ENODEV)
+ return PTR_ERR(st->vref);
+
+ ret = regulator_get_voltage(st->avdd);
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "Device tree error, AVdd voltage undefined\n");
+ } else {
+ ret = regulator_enable(st->vref);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified Vref supply\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->vref);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(st->vref);
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "Device tree error, Vref voltage undefined\n");
}
st->int_vref_mv = ret / 1000;
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index cff1ba57fb16..43c8af41b4a9 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -826,16 +826,26 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
}
+ /* leave out any TS related code if unreachable */
+ if (IS_REACHABLE(CONFIG_INPUT)) {
+ has_ts = of_property_read_bool(pdev->dev.of_node,
+ "has-touchscreen") || pdata;
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
info->irq = irq;
- irq = platform_get_irq(pdev, 1);
- if (irq == -EPROBE_DEFER)
- return irq;
+ if (has_ts) {
+ irq = platform_get_irq(pdev, 1);
+ if (irq == -EPROBE_DEFER)
+ return irq;
- info->tsirq = irq;
+ info->tsirq = irq;
+ } else {
+ info->tsirq = -1;
+ }
info->dev = &pdev->dev;
@@ -900,12 +910,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (info->data->init_hw)
info->data->init_hw(info);
- /* leave out any TS related code if unreachable */
- if (IS_REACHABLE(CONFIG_INPUT)) {
- has_ts = of_property_read_bool(pdev->dev.of_node,
- "has-touchscreen") || pdata;
- }
-
if (pdata)
info->delay = pdata->delay;
else
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index f5a0fc9e64c5..fff6e5a2d956 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -38,8 +38,8 @@
#define IMX8QXP_ADR_ADC_FCTRL 0x30
#define IMX8QXP_ADR_ADC_SWTRIG 0x34
#define IMX8QXP_ADR_ADC_TCTRL(tid) (0xc0 + (tid) * 4)
-#define IMX8QXP_ADR_ADC_CMDH(cid) (0x100 + (cid) * 8)
-#define IMX8QXP_ADR_ADC_CMDL(cid) (0x104 + (cid) * 8)
+#define IMX8QXP_ADR_ADC_CMDL(cid) (0x100 + (cid) * 8)
+#define IMX8QXP_ADR_ADC_CMDH(cid) (0x104 + (cid) * 8)
#define IMX8QXP_ADR_ADC_RESFIFO 0x300
#define IMX8QXP_ADR_ADC_TST 0xffc
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index dba73300f894..564c0cad0fc7 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -456,6 +456,9 @@ static const struct xadc_ops xadc_zynq_ops = {
.interrupt_handler = xadc_zynq_interrupt_handler,
.update_alarm = xadc_zynq_update_alarm,
.type = XADC_TYPE_S7,
+ /* Temp in C = (val * 503.975) / 2**bits - 273.15 */
+ .temp_scale = 503975,
+ .temp_offset = 273150,
};
static const unsigned int xadc_axi_reg_offsets[] = {
@@ -566,6 +569,9 @@ static const struct xadc_ops xadc_7s_axi_ops = {
.interrupt_handler = xadc_axi_interrupt_handler,
.flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
.type = XADC_TYPE_S7,
+ /* Temp in C = (val * 503.975) / 2**bits - 273.15 */
+ .temp_scale = 503975,
+ .temp_offset = 273150,
};
static const struct xadc_ops xadc_us_axi_ops = {
@@ -577,6 +583,12 @@ static const struct xadc_ops xadc_us_axi_ops = {
.interrupt_handler = xadc_axi_interrupt_handler,
.flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
.type = XADC_TYPE_US,
+ /**
+ * Values below are for UltraScale+ (SYSMONE4) using internal reference.
+ * See https://docs.xilinx.com/v/u/en-US/ug580-ultrascale-sysmon
+ */
+ .temp_scale = 509314,
+ .temp_offset = 280231,
};
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
@@ -945,8 +957,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val2 = bits;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
- /* Temp in C = (val * 503.975) / 2**bits - 273.15 */
- *val = 503975;
+ *val = xadc->ops->temp_scale;
*val2 = bits;
return IIO_VAL_FRACTIONAL_LOG2;
default:
@@ -954,7 +965,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
}
case IIO_CHAN_INFO_OFFSET:
/* Only the temperature channel has an offset */
- *val = -((273150 << bits) / 503975);
+ *val = -((xadc->ops->temp_offset << bits) / xadc->ops->temp_scale);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = xadc_read_samplerate(xadc);
@@ -1423,28 +1434,6 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Disable all alarms */
- ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
- XADC_CONF1_ALARM_MASK);
- if (ret)
- return ret;
-
- /* Set thresholds to min/max */
- for (i = 0; i < 16; i++) {
- /*
- * Set max voltage threshold and both temperature thresholds to
- * 0xffff, min voltage threshold to 0.
- */
- if (i % 8 < 4 || i == 7)
- xadc->threshold[i] = 0xffff;
- else
- xadc->threshold[i] = 0;
- ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
- xadc->threshold[i]);
- if (ret)
- return ret;
- }
-
/* Go to non-buffered mode */
xadc_postdisable(indio_dev);
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 7d78ce698967..3036f4d613ff 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -85,6 +85,8 @@ struct xadc_ops {
unsigned int flags;
enum xadc_type type;
+ int temp_scale;
+ int temp_offset;
};
static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
index 877f9124803c..397544f23b85 100644
--- a/drivers/iio/addac/Kconfig
+++ b/drivers/iio/addac/Kconfig
@@ -24,6 +24,8 @@ config AD74413R
depends on GPIOLIB && SPI
select REGMAP_SPI
select CRC8
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Analog Devices AD74412R/AD74413R
quad-channel software configurable input/output solution.
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index 1f280c360701..56e5913ab82d 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -214,8 +214,18 @@ static int rescale_read_raw(struct iio_dev *indio_dev,
return ret < 0 ? ret : -EOPNOTSUPP;
}
- ret = iio_read_channel_scale(rescale->source, &scale, &scale2);
- return rescale_process_offset(rescale, ret, scale, scale2,
+ if (iio_channel_has_info(rescale->source->channel,
+ IIO_CHAN_INFO_SCALE)) {
+ ret = iio_read_channel_scale(rescale->source, &scale, &scale2);
+ return rescale_process_offset(rescale, ret, scale, scale2,
+ schan_off, val, val2);
+ }
+
+ /*
+ * If we get here we have no scale so scale 1:1 but apply
+ * rescaler and offset, if any.
+ */
+ return rescale_process_offset(rescale, IIO_VAL_FRACTIONAL, 1, 1,
schan_off, val, val2);
default:
return -EINVAL;
@@ -280,8 +290,9 @@ static int rescale_configure_channel(struct device *dev,
chan->type = rescale->cfg->type;
if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) &&
- iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
- dev_info(dev, "using raw+scale source channel\n");
+ (iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE) ||
+ iio_channel_has_info(schan, IIO_CHAN_INFO_OFFSET))) {
+ dev_info(dev, "using raw+scale/offset source channel\n");
} else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) {
dev_info(dev, "using processed channel\n");
rescale->chan_processed = true;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index b72d39fc2434..6bfe5d6847e7 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -190,8 +190,11 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
/*
* Ignore samples if the buffer is not set: it is needed if the ODR is
* set but the buffer is not enabled yet.
+ *
+ * Note: iio_device_claim_buffer_mode() returns -EBUSY if the buffer
+ * is not enabled.
*/
- if (!iio_buffer_enabled(indio_dev))
+ if (iio_device_claim_buffer_mode(indio_dev) < 0)
return 0;
out = (s16 *)st->samples;
@@ -210,6 +213,7 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
timestamp + delta);
+ iio_device_release_buffer_mode(indio_dev);
return 0;
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data);
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
index d5ea1a1be122..a492e8f2fc0f 100644
--- a/drivers/iio/dac/ad3552r.c
+++ b/drivers/iio/dac/ad3552r.c
@@ -140,8 +140,8 @@ enum ad3552r_ch_vref_select {
};
enum ad3542r_id {
- AD3542R_ID = 0x4008,
- AD3552R_ID = 0x4009,
+ AD3542R_ID = 0x4009,
+ AD3552R_ID = 0x4008,
};
enum ad3552r_ch_output_range {
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index 6355c1f28423..92923074f930 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -351,9 +351,9 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
if (vcm < 0)
return vcm;
- if (vcm < 1800000)
+ if (vcm <= 1800000)
mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
- else if (vcm > 1800000 && vcm < 2600000)
+ else if (vcm > 1800000 && vcm <= 2600000)
mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
else
return -EINVAL;
diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
index fa79b1ac4f85..83e53acfbe88 100644
--- a/drivers/iio/imu/bno055/Kconfig
+++ b/drivers/iio/imu/bno055/Kconfig
@@ -2,6 +2,8 @@
config BOSCH_BNO055
tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
config BOSCH_BNO055_SERIAL
tristate "Bosch BNO055 attached via UART"
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 3a52b09c2823..fdf763a04b0b 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1513,7 +1513,6 @@ static int vcnl4040_write_event_config(struct iio_dev *indio_dev,
out:
mutex_unlock(&data->vcnl4000_lock);
- data->chip_spec->set_power_state(data, data->ps_int || data->als_int);
return ret;
}
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 6089f3f9d8f4..a2ef1373a274 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -2179,7 +2179,7 @@ int bmp280_common_probe(struct device *dev,
* however as it happens, the BMP085 shares the chip ID of BMP180
* so we look for an IRQ if we have that.
*/
- if (irq > 0 || (chip_id == BMP180_CHIP_ID)) {
+ if (irq > 0 && (chip_id == BMP180_CHIP_ID)) {
ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
if (ret)
return ret;
diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
index b10dbf5cf494..1ff091b2f764 100644
--- a/drivers/iio/pressure/dps310.c
+++ b/drivers/iio/pressure/dps310.c
@@ -57,8 +57,8 @@
#define DPS310_RESET_MAGIC 0x09
#define DPS310_COEF_BASE 0x10
-/* Make sure sleep time is <= 20ms for usleep_range */
-#define DPS310_POLL_SLEEP_US(t) min(20000, (t) / 8)
+/* Make sure sleep time is <= 30ms for usleep_range */
+#define DPS310_POLL_SLEEP_US(t) min(30000, (t) / 8)
/* Silently handle error in rate value here */
#define DPS310_POLL_TIMEOUT_US(rc) ((rc) <= 0 ? 1000000 : 1000000 / (rc))
@@ -402,8 +402,8 @@ static int dps310_reset_wait(struct dps310_data *data)
if (rc)
return rc;
- /* Wait for device chip access: 2.5ms in specification */
- usleep_range(2500, 12000);
+ /* Wait for device chip access: 15ms in specification */
+ usleep_range(15000, 55000);
return 0;
}
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 627497e61a63..2fc706f9d8ae 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -76,7 +76,7 @@ static bool ms5611_prom_is_valid(u16 *prom, size_t len)
crc = (crc >> 12) & 0x000F;
- return crc_orig != 0x0000 && crc == crc_orig;
+ return crc == crc_orig;
}
static int ms5611_read_prom(struct iio_dev *indio_dev)
diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c
index 5bd791b46d98..bdff91f6b1a3 100644
--- a/drivers/iio/proximity/irsd200.c
+++ b/drivers/iio/proximity/irsd200.c
@@ -759,14 +759,14 @@ static irqreturn_t irsd200_trigger_handler(int irq, void *pollf)
{
struct iio_dev *indio_dev = ((struct iio_poll_func *)pollf)->indio_dev;
struct irsd200_data *data = iio_priv(indio_dev);
- s16 buf = 0;
+ s64 buf[2] = {};
int ret;
- ret = irsd200_read_data(data, &buf);
+ ret = irsd200_read_data(data, (s16 *)buf);
if (ret)
goto end;
- iio_push_to_buffers_with_timestamp(indio_dev, &buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
iio_get_time_ns(indio_dev));
end:
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index a5ab22cedd41..788fc249234f 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -267,7 +267,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
if (!HFI1_CAP_IS_KSET(SDMA))
return -EINVAL;
- if (!from->user_backed)
+ if (!user_backed_iter(from))
return -EINVAL;
idx = srcu_read_lock(&fd->pq_srcu);
pq = srcu_dereference(fd->pq, &fd->pq_srcu);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 152952127f13..29e4c59aa23b 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -2244,7 +2244,7 @@ static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
struct qib_user_sdma_queue *pq = fp->pq;
- if (!from->user_backed || !from->nr_segs || !pq)
+ if (!user_backed_iter(from) || !from->nr_segs || !pq)
return -EINVAL;
return qib_user_sdma_writev(rcd, pq, iter_iov(from), from->nr_segs);
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index ed7d4b02f45a..455e966eeff3 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -64,8 +64,8 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_blocks = 0;
- inode->i_atime = inode_set_ctime_current(inode);
- inode->i_mtime = inode->i_atime;
+ simple_inode_init_ts(inode);
+
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ede380551e55..f5c21565bb3c 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -130,6 +130,7 @@ static const struct xpad_device {
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
+ { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -272,6 +273,7 @@ static const struct xpad_device {
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+ { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -459,6 +461,7 @@ static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */
XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
+ XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */
@@ -477,6 +480,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries controllers */
XPAD_XBOXONE_VENDOR(0x10f5), /* Turtle Beach Controllers */
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
+ XPAD_XBOX360_VENDOR(0x11ff), /* PXN V900 */
XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* Xbox 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane Xbox 360 controllers */
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index c1c733a9cb89..db2ba89adaef 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -425,6 +425,7 @@ static void powermate_disconnect(struct usb_interface *intf)
pm->requires_update = 0;
usb_kill_urb(pm->irq);
input_unregister_device(pm->input);
+ usb_kill_urb(pm->config);
usb_free_urb(pm->irq);
usb_free_urb(pm->config);
powermate_free_buffers(interface_to_usbdev(intf), pm);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 2118b2075f43..4e38229404b4 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -2114,6 +2114,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse,
psmouse->protocol_handler = elantech_process_byte;
psmouse->disconnect = elantech_disconnect;
psmouse->reconnect = elantech_reconnect;
+ psmouse->fast_reconnect = NULL;
psmouse->pktsize = info->hw_version > 1 ? 6 : 4;
return 0;
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index 7b13de979908..2a2459b1b4f2 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -5,7 +5,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libps2.h>
@@ -119,18 +118,13 @@ static psmouse_ret_t psmouse_smbus_process_byte(struct psmouse *psmouse)
return PSMOUSE_FULL_PACKET;
}
-static void psmouse_activate_smbus_mode(struct psmouse_smbus_dev *smbdev)
-{
- if (smbdev->need_deactivate) {
- psmouse_deactivate(smbdev->psmouse);
- /* Give the device time to switch into SMBus mode */
- msleep(30);
- }
-}
-
static int psmouse_smbus_reconnect(struct psmouse *psmouse)
{
- psmouse_activate_smbus_mode(psmouse->private);
+ struct psmouse_smbus_dev *smbdev = psmouse->private;
+
+ if (smbdev->need_deactivate)
+ psmouse_deactivate(psmouse);
+
return 0;
}
@@ -263,7 +257,8 @@ int psmouse_smbus_init(struct psmouse *psmouse,
}
}
- psmouse_activate_smbus_mode(smbdev);
+ if (need_deactivate)
+ psmouse_deactivate(psmouse);
psmouse->private = smbdev;
psmouse->protocol_handler = psmouse_smbus_process_byte;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ada299ec5bba..22d16d80efb9 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1623,6 +1623,7 @@ static int synaptics_init_ps2(struct psmouse *psmouse,
psmouse->set_rate = synaptics_set_rate;
psmouse->disconnect = synaptics_disconnect;
psmouse->reconnect = synaptics_reconnect;
+ psmouse->fast_reconnect = NULL;
psmouse->cleanup = synaptics_reset;
/* Synaptics can usually stay in sync without extra help */
psmouse->resync_time = 0;
@@ -1752,6 +1753,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
!SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10);
const struct rmi_device_platform_data pdata = {
+ .reset_delay_ms = 30,
.sensor_pdata = {
.sensor_type = rmi_sensor_touchpad,
.axis_align.flip_y = true,
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index 7059a2762aeb..b0b099b5528a 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -235,12 +235,29 @@ static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
{
- int retval;
+ struct i2c_client *client = rmi_smb->client;
+ int smbus_version;
+
+ /*
+ * The psmouse driver resets the controller; we only need to wait
+ * to give the firmware a chance to fully reinitialize.
+ */
+ if (rmi_smb->xport.pdata.reset_delay_ms)
+ msleep(rmi_smb->xport.pdata.reset_delay_ms);
/* we need to get the smbus version to activate the touchpad */
- retval = rmi_smb_get_version(rmi_smb);
- if (retval < 0)
- return retval;
+ smbus_version = rmi_smb_get_version(rmi_smb);
+ if (smbus_version < 0)
+ return smbus_version;
+
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
+ smbus_version);
+
+ if (smbus_version != 2 && smbus_version != 3) {
+ dev_err(&client->dev, "Unrecognized SMB version %d\n",
+ smbus_version);
+ return -ENODEV;
+ }
return 0;
}
@@ -253,11 +270,10 @@ static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
rmi_smb_clear_state(rmi_smb);
/*
- * we do not call the actual reset command, it has to be handled in
- * PS/2 or there will be races between PS/2 and SMBus.
- * PS/2 should ensure that a psmouse_reset is called before
- * intializing the device and after it has been removed to be in a known
- * state.
+ * We do not call the actual reset command, it has to be handled in
+ * PS/2 or there will be races between PS/2 and SMBus. PS/2 should
+ * ensure that a psmouse_reset is called before initializing the
+ * device and after it has been removed to be in a known state.
*/
return rmi_smb_enable_smbus_mode(rmi_smb);
}
@@ -272,7 +288,6 @@ static int rmi_smb_probe(struct i2c_client *client)
{
struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
struct rmi_smb_xport *rmi_smb;
- int smbus_version;
int error;
if (!pdata) {
@@ -311,18 +326,9 @@ static int rmi_smb_probe(struct i2c_client *client)
rmi_smb->xport.proto_name = "smb";
rmi_smb->xport.ops = &rmi_smb_ops;
- smbus_version = rmi_smb_get_version(rmi_smb);
- if (smbus_version < 0)
- return smbus_version;
-
- rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
- smbus_version);
-
- if (smbus_version != 2 && smbus_version != 3) {
- dev_err(&client->dev, "Unrecognized SMB version %d\n",
- smbus_version);
- return -ENODEV;
- }
+ error = rmi_smb_enable_smbus_mode(rmi_smb);
+ if (error)
+ return error;
i2c_set_clientdata(client, rmi_smb);
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 1724d6cb8649..9c39553d30fa 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -619,6 +619,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
+ /* Fujitsu Lifebook E5411 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E5411"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
+ {
/* Gigabyte M912 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index da9954d6df44..af32fbe57b63 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -900,6 +900,25 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_last_gpios;
+ } else if (ts->gpio_count == 1 && ts->gpio_int_idx == 0) {
+ /*
+ * On newer devices there is only 1 GpioInt resource and _PS0
+ * does the whole reset sequence for us.
+ */
+ acpi_device_fix_up_power(ACPI_COMPANION(dev));
+
+ /*
+ * Before the _PS0 call the int GPIO may have been in output
+ * mode and the call should have put the int GPIO in input mode,
+ * but the GPIO subsys cached state may still think it is
+ * in output mode, causing gpiochip_lock_as_irq() failure.
+ *
+ * Add a mapping for the int GPIO to make the
+ * gpiod_int = gpiod_get(..., GPIOD_IN) call succeed,
+ * which will explicitly set the direction to input.
+ */
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE;
+ gpio_mapping = acpi_goodix_int_first_gpios;
} else {
dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n",
ts->gpio_count, ts->gpio_int_idx);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3bfc56df4f78..c146378c7d03 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1108,7 +1108,8 @@ map_end:
}
- iommu_flush_iotlb_all(domain);
+ if (!list_empty(&mappings) && iommu_is_dma_domain(domain))
+ iommu_flush_iotlb_all(domain);
out:
iommu_put_resv_regions(dev, &mappings);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 75a2dd550625..a8c89df1a997 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -5112,8 +5112,6 @@ static int __init its_probe_one(struct its_node *its)
}
its->cmd_base = (void *)page_address(page);
its->cmd_write = its->cmd_base;
- its->get_msi_base = its_irq_get_msi_base;
- its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
err = its_alloc_tables(its);
if (err)
@@ -5362,6 +5360,8 @@ static struct its_node __init *its_node_init(struct resource *res,
its->typer = gic_read_typer(its_base + GITS_TYPER);
its->base = its_base;
its->phys_base = res->start;
+ its->get_msi_base = its_irq_get_msi_base;
+ its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
its->numa_node = numa_node;
its->fwnode_handle = handle;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 1efd17979f24..b82b89888a5e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -678,7 +678,7 @@ ph_state(struct dchannel *dch)
}
/*
- * disable/enable BChannel for desired protocoll
+ * disable/enable BChannel for desired protocol
*/
static int
hfcsusb_setup_bch(struct bchannel *bch, int protocol)
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index 978fdfc19a06..0cac5bead84f 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -387,17 +387,13 @@ EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
static int __mcb_bus_add_devices(struct device *dev, void *data)
{
- struct mcb_device *mdev = to_mcb_device(dev);
int retval;
- if (mdev->is_added)
- return 0;
-
retval = device_attach(dev);
- if (retval < 0)
+ if (retval < 0) {
dev_err(dev, "Error adding device (%d)\n", retval);
-
- mdev->is_added = true;
+ return retval;
+ }
return 0;
}
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 2aef990f379f..656b6b71c768 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -99,8 +99,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
mdev->mem.end = mdev->mem.start + size - 1;
mdev->mem.flags = IORESOURCE_MEM;
- mdev->is_added = false;
-
ret = mcb_device_register(bus, mdev);
if (ret < 0)
goto err;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 5a79bb3c272f..2aa3f2c1f719 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -299,6 +299,7 @@ struct cached_dev {
struct list_head list;
struct bcache_device disk;
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct cache_sb sb;
struct cache_sb_disk *sb_disk;
@@ -421,6 +422,7 @@ struct cache {
struct kobject kobj;
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct task_struct *alloc_thread;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 0ae2b3676293..a30c8d4f2ac8 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1368,8 +1368,8 @@ static void cached_dev_free(struct closure *cl)
if (dc->sb_disk)
put_page(virt_to_page(dc->sb_disk));
- if (!IS_ERR_OR_NULL(dc->bdev))
- blkdev_put(dc->bdev, dc);
+ if (dc->bdev_handle)
+ bdev_release(dc->bdev_handle);
wake_up(&unregister_wait);
@@ -1444,7 +1444,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
- struct block_device *bdev,
+ struct bdev_handle *bdev_handle,
struct cached_dev *dc)
{
const char *err = "cannot allocate memory";
@@ -1452,14 +1452,15 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
int ret = -ENOMEM;
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
- dc->bdev = bdev;
+ dc->bdev_handle = bdev_handle;
+ dc->bdev = bdev_handle->bdev;
dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
goto err;
err = "error creating kobject";
- if (kobject_add(&dc->disk.kobj, bdev_kobj(bdev), "bcache"))
+ if (kobject_add(&dc->disk.kobj, bdev_kobj(dc->bdev), "bcache"))
goto err;
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
@@ -2216,8 +2217,8 @@ void bch_cache_release(struct kobject *kobj)
if (ca->sb_disk)
put_page(virt_to_page(ca->sb_disk));
- if (!IS_ERR_OR_NULL(ca->bdev))
- blkdev_put(ca->bdev, ca);
+ if (ca->bdev_handle)
+ bdev_release(ca->bdev_handle);
kfree(ca);
module_put(THIS_MODULE);
@@ -2337,38 +2338,42 @@ err_free:
}
static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
- struct block_device *bdev, struct cache *ca)
+ struct bdev_handle *bdev_handle,
+ struct cache *ca)
{
const char *err = NULL; /* must be set for any error case */
int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
- ca->bdev = bdev;
+ ca->bdev_handle = bdev_handle;
+ ca->bdev = bdev_handle->bdev;
ca->sb_disk = sb_disk;
- if (bdev_max_discard_sectors((bdev)))
+ if (bdev_max_discard_sectors((bdev_handle->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
if (ret != 0) {
- /*
- * If we failed here, it means ca->kobj is not initialized yet,
- * kobject_put() won't be called and there is no chance to
- * call blkdev_put() to bdev in bch_cache_release(). So we
- * explicitly call blkdev_put() here.
- */
- blkdev_put(bdev, ca);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
else if (ret == -EPERM)
err = "cache_alloc(): cache device is too small";
else
err = "cache_alloc(): unknown error";
- goto err;
+ pr_notice("error %pg: %s\n", bdev_handle->bdev, err);
+ /*
+ * If we failed here, ca->kobj is not initialized yet, so
+ * kobject_put() won't be called and there is no chance to
+ * call bdev_release() on the bdev in bch_cache_release().
+ * So we explicitly call bdev_release() here.
+ */
+ bdev_release(bdev_handle);
+ return ret;
}
- if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) {
- err = "error calling kobject_add";
+ if (kobject_add(&ca->kobj, bdev_kobj(bdev_handle->bdev), "bcache")) {
+ pr_notice("error %pg: error calling kobject_add\n",
+ bdev_handle->bdev);
ret = -ENOMEM;
goto out;
}
@@ -2382,15 +2387,10 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
- pr_info("registered cache device %pg\n", ca->bdev);
+ pr_info("registered cache device %pg\n", ca->bdev_handle->bdev);
out:
kobject_put(&ca->kobj);
-
-err:
- if (err)
- pr_notice("error %pg: %s\n", ca->bdev, err);
-
return ret;
}
@@ -2445,7 +2445,7 @@ struct async_reg_args {
char *path;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
void *holder;
};
@@ -2456,8 +2456,8 @@ static void register_bdev_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
mutex_lock(&bch_register_lock);
- if (register_bdev(args->sb, args->sb_disk, args->bdev, args->holder)
- < 0)
+ if (register_bdev(args->sb, args->sb_disk, args->bdev_handle,
+ args->holder) < 0)
fail = true;
mutex_unlock(&bch_register_lock);
@@ -2477,7 +2477,8 @@ static void register_cache_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(args->sb, args->sb_disk, args->bdev, args->holder))
+ if (register_cache(args->sb, args->sb_disk, args->bdev_handle,
+ args->holder))
fail = true;
if (fail)
@@ -2514,7 +2515,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
char *path = NULL;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
- struct block_device *bdev, *bdev2;
+ struct bdev_handle *bdev_handle, *bdev_handle2;
void *holder = NULL;
ssize_t ret;
bool async_registration = false;
@@ -2547,15 +2548,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
ret = -EINVAL;
err = "failed to open device";
- bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
- if (IS_ERR(bdev))
+ bdev_handle = bdev_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
+ if (IS_ERR(bdev_handle))
goto out_free_sb;
err = "failed to set blocksize";
- if (set_blocksize(bdev, 4096))
+ if (set_blocksize(bdev_handle->bdev, 4096))
goto out_blkdev_put;
- err = read_super(sb, bdev, &sb_disk);
+ err = read_super(sb, bdev_handle->bdev, &sb_disk);
if (err)
goto out_blkdev_put;
@@ -2567,13 +2568,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
}
/* Now reopen in exclusive mode with proper holder */
- bdev2 = blkdev_get_by_dev(bdev->bd_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- holder, NULL);
- blkdev_put(bdev, NULL);
- bdev = bdev2;
- if (IS_ERR(bdev)) {
- ret = PTR_ERR(bdev);
- bdev = NULL;
+ bdev_handle2 = bdev_open_by_dev(bdev_handle->bdev->bd_dev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
+ bdev_release(bdev_handle);
+ bdev_handle = bdev_handle2;
+ if (IS_ERR(bdev_handle)) {
+ ret = PTR_ERR(bdev_handle);
+ bdev_handle = NULL;
if (ret == -EBUSY) {
dev_t dev;
@@ -2608,7 +2609,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
args->path = path;
args->sb = sb;
args->sb_disk = sb_disk;
- args->bdev = bdev;
+ args->bdev_handle = bdev_handle;
args->holder = holder;
register_device_async(args);
/* No wait and returns to user space */
@@ -2617,14 +2618,14 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
mutex_lock(&bch_register_lock);
- ret = register_bdev(sb, sb_disk, bdev, holder);
+ ret = register_bdev(sb, sb_disk, bdev_handle, holder);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
if (ret < 0)
goto out_free_sb;
} else {
/* blkdev_put() will be called in bch_cache_release() */
- ret = register_cache(sb, sb_disk, bdev, holder);
+ ret = register_cache(sb, sb_disk, bdev_handle, holder);
if (ret)
goto out_free_sb;
}
@@ -2640,8 +2641,8 @@ out_free_holder:
out_put_sb_page:
put_page(virt_to_page(sb_disk));
out_blkdev_put:
- if (bdev)
- blkdev_put(bdev, holder);
+ if (bdev_handle)
+ bdev_release(bdev_handle);
out_free_sb:
kfree(sb);
out_free_path:
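The bcache conversion above, and the dm and md hunks that follow, all replace blkdev_get_by_path()/blkdev_get_by_dev() and blkdev_put() with the struct bdev_handle API. A minimal open/release sketch, assuming a kernel that provides bdev_open_by_path() and bdev_release(); the function name and the chosen open mode are illustrative:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/printk.h>

/*
 * Illustrative sketch only: open a block device through the bdev_handle API,
 * use the embedded block_device, then release the handle.
 */
static int example_bdev_open_release(const char *path, void *holder)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				   holder, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* The raw block_device stays reachable as handle->bdev. */
	pr_info("opened %pg\n", handle->bdev);

	bdev_release(handle);
	return 0;
}

Callers keep the handle for the lifetime of the claim and pass handle->bdev wherever a struct block_device is still expected, which is what register_bdev() and register_cache() do above.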
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 64a1f306c96c..f7212e8fc27f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -724,7 +724,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
dev_t dev, blk_mode_t mode)
{
struct table_device *td;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
u64 part_off;
int r;
@@ -733,9 +733,9 @@ static struct table_device *open_table_device(struct mapped_device *md,
return ERR_PTR(-ENOMEM);
refcount_set(&td->count, 1);
- bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL);
- if (IS_ERR(bdev)) {
- r = PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
+ if (IS_ERR(bdev_handle)) {
+ r = PTR_ERR(bdev_handle);
goto out_free_td;
}
@@ -745,20 +745,22 @@ static struct table_device *open_table_device(struct mapped_device *md,
* called.
*/
if (md->disk->slave_dir) {
- r = bd_link_disk_holder(bdev, md->disk);
+ r = bd_link_disk_holder(bdev_handle->bdev, md->disk);
if (r)
goto out_blkdev_put;
}
td->dm_dev.mode = mode;
- td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
+ td->dm_dev.bdev = bdev_handle->bdev;
+ td->dm_dev.bdev_handle = bdev_handle;
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, &part_off,
+ NULL, NULL);
format_dev_t(td->dm_dev.name, dev);
list_add(&td->list, &md->table_devices);
return td;
out_blkdev_put:
- blkdev_put(bdev, _dm_claim_ptr);
+ bdev_release(bdev_handle);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -771,7 +773,7 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- blkdev_put(td->dm_dev.bdev, _dm_claim_ptr);
+ bdev_release(td->dm_dev.bdev_handle);
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a104a025084d..839e79e567ee 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2452,8 +2452,7 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
- blkdev_put(rdev->bdev,
- test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
+ bdev_release(rdev->bdev_handle);
rdev->bdev = NULL;
kobject_put(&rdev->kobj);
}
@@ -3633,7 +3632,6 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
struct md_rdev *rdev;
- struct md_rdev *holder;
sector_t size;
int err;
@@ -3648,21 +3646,16 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
if (err)
goto out_clear_rdev;
- if (super_format == -2) {
- holder = &claim_rdev;
- } else {
- holder = rdev;
- set_bit(Holder, &rdev->flags);
- }
-
- rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- holder, NULL);
- if (IS_ERR(rdev->bdev)) {
+ rdev->bdev_handle = bdev_open_by_dev(newdev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE,
+ super_format == -2 ? &claim_rdev : rdev, NULL);
+ if (IS_ERR(rdev->bdev_handle)) {
pr_warn("md: could not open device unknown-block(%u,%u).\n",
MAJOR(newdev), MINOR(newdev));
- err = PTR_ERR(rdev->bdev);
+ err = PTR_ERR(rdev->bdev_handle);
goto out_clear_rdev;
}
+ rdev->bdev = rdev->bdev_handle->bdev;
kobject_init(&rdev->kobj, &rdev_ktype);
@@ -3693,7 +3686,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
return rdev;
out_blkdev_put:
- blkdev_put(rdev->bdev, holder);
+ bdev_release(rdev->bdev_handle);
out_clear_rdev:
md_rdev_clear(rdev);
out_free_rdev:
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7c9c13abd7ca..274e7d61d19f 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -59,6 +59,7 @@ struct md_rdev {
*/
struct block_device *meta_bdev;
struct block_device *bdev; /* block device handle */
+ struct bdev_handle *bdev_handle; /* Handle from open for bdev */
struct page *sb_page, *bb_page;
int sb_loaded;
@@ -211,9 +212,6 @@ enum flag_bits {
* check if there is collision between raid1
* serial bios.
*/
- Holder, /* rdev is used as holder while opening
- * underlying disk exclusively.
- */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c
index 3af6125a2eee..4d9fd76e2f60 100644
--- a/drivers/media/i2c/ov8858.c
+++ b/drivers/media/i2c/ov8858.c
@@ -1850,9 +1850,9 @@ static int ov8858_parse_of(struct ov8858 *ov8858)
}
ret = v4l2_fwnode_endpoint_parse(endpoint, &vep);
+ fwnode_handle_put(endpoint);
if (ret) {
dev_err(dev, "Failed to parse endpoint: %d\n", ret);
- fwnode_handle_put(endpoint);
return ret;
}
@@ -1864,12 +1864,9 @@ static int ov8858_parse_of(struct ov8858 *ov8858)
default:
dev_err(dev, "Unsupported number of data lanes %u\n",
ov8858->num_lanes);
- fwnode_handle_put(endpoint);
return -EINVAL;
}
- ov8858->subdev.fwnode = endpoint;
-
return 0;
}
@@ -1913,7 +1910,7 @@ static int ov8858_probe(struct i2c_client *client)
ret = ov8858_init_ctrls(ov8858);
if (ret)
- goto err_put_fwnode;
+ return ret;
sd = &ov8858->subdev;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
@@ -1964,8 +1961,6 @@ err_clean_entity:
media_entity_cleanup(&sd->entity);
err_free_handler:
v4l2_ctrl_handler_free(&ov8858->ctrl_handler);
-err_put_fwnode:
- fwnode_handle_put(ov8858->subdev.fwnode);
return ret;
}
@@ -1978,7 +1973,6 @@ static void ov8858_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(&ov8858->ctrl_handler);
- fwnode_handle_put(ov8858->subdev.fwnode);
pm_runtime_disable(&client->dev);
if (!pm_runtime_status_suspended(&client->dev))
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index 1bde8b6e0b11..e38198e259c0 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -107,8 +107,10 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
/* camera sensor depends on IVSC in DSDT if exist */
for_each_acpi_consumer_dev(ivsc_adev, consumer)
- if (consumer->handle == handle)
+ if (consumer->handle == handle) {
+ acpi_dev_put(consumer);
return ivsc_adev;
+ }
}
return NULL;
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index 4285770fde18..996684a73038 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -55,11 +55,18 @@ xvip_graph_find_entity(struct xvip_composite_device *xdev,
{
struct xvip_graph_entity *entity;
struct v4l2_async_connection *asd;
-
- list_for_each_entry(asd, &xdev->notifier.done_list, asc_entry) {
- entity = to_xvip_entity(asd);
- if (entity->asd.match.fwnode == fwnode)
- return entity;
+ struct list_head *lists[] = {
+ &xdev->notifier.done_list,
+ &xdev->notifier.waiting_list
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(lists); i++) {
+ list_for_each_entry(asd, lists[i], asc_entry) {
+ entity = to_xvip_entity(asd);
+ if (entity->asd.match.fwnode == fwnode)
+ return entity;
+ }
}
return NULL;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index b92348ad61f6..31752c06d1f0 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -502,6 +502,13 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
V4L2_SUBDEV_CLIENT_CAP_STREAMS;
int rval;
+ /*
+ * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
+ * Remove this when the API is no longer experimental.
+ */
+ if (!v4l2_subdev_enable_streams_api)
+ streams_subdev = false;
+
switch (cmd) {
case VIDIOC_SUBDEV_QUERYCAP: {
struct v4l2_subdev_capability *cap = arg;
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index a66b7c111cd5..1c6c62a7f7f5 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -958,6 +958,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
if (err)
return err;
+ memset(ctx->buf->virt, 0, pkt_size);
rpra = ctx->buf->virt;
list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
pages = fastrpc_phy_page_start(list, ctx->nscalars);
@@ -1090,6 +1091,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
}
}
+ /* Clean up fdlist which is updated by DSP */
for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
if (!fdlist[i])
break;
@@ -1156,11 +1158,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- if (ctx->nscalars) {
- err = fastrpc_get_args(kernel, ctx);
- if (err)
- goto bail;
- }
+ err = fastrpc_get_args(kernel, ctx);
+ if (err)
+ goto bail;
/* make sure that all CPU memory writes are seen by DSP */
dma_wmb();
@@ -1179,20 +1179,18 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (err)
goto bail;
+ /* make sure that all memory writes by DSP are seen by CPU */
+ dma_rmb();
+ /* populate all the output buffers with results */
+ err = fastrpc_put_args(ctx, kernel);
+ if (err)
+ goto bail;
+
/* Check the response from remote dsp */
err = ctx->retval;
if (err)
goto bail;
- if (ctx->nscalars) {
- /* make sure that all memory writes by DSP are seen by CPU */
- dma_rmb();
- /* populate all the output buffers with results */
- err = fastrpc_put_args(ctx, kernel);
- if (err)
- goto bail;
- }
-
bail:
if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
/* We are done with this compute context */
@@ -1983,11 +1981,13 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
&args[0]);
- fastrpc_map_put(map);
- if (err)
+ if (err) {
dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
+ return err;
+ }
+ fastrpc_map_put(map);
- return err;
+ return 0;
}
static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 5867af9f592c..c44de892a61e 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -139,7 +139,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
if (ret) {
ret->i_ino = get_next_ino();
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
+ simple_inode_init_ts(ret);
}
return ret;
}
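The ibmasmfs hunk above and the ibmvmc hunk below are part of the inode timestamp accessor conversion: simple_inode_init_ts() initializes atime/mtime/ctime to the current time, and explicit i_mtime assignments become inode_set_mtime_to_ts(). A hedged sketch of the write-path idiom, assuming those helpers exist in the target tree (the function name is made up for the example):

#include <linux/fs.h>

/*
 * Illustrative sketch only: bump ctime to "now" and mirror it into mtime via
 * the setter helpers instead of assigning to i_mtime directly.
 */
static void example_touch_mtime(struct inode *inode)
{
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
}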
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 2101eb12bcba..7739b783c2db 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -1124,7 +1124,7 @@ static ssize_t ibmvmc_write(struct file *file, const char *buffer,
goto out;
inode = file_inode(file);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index b5b414a71e0b..3a8f27c3e310 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -179,6 +179,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
+static int mmc_blk_busy_cb(void *cb_data, bool *busy);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
@@ -470,7 +471,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_data data = {};
struct mmc_request mrq = {};
struct scatterlist sg;
- bool r1b_resp, use_r1b_resp = false;
+ bool r1b_resp;
unsigned int busy_timeout_ms;
int err;
unsigned int target_part;
@@ -551,8 +552,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
if (r1b_resp)
- use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd,
- busy_timeout_ms);
+ mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);
mmc_wait_for_req(card->host, &mrq);
memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
@@ -605,19 +605,28 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
- /* No need to poll when using HW busy detection. */
- if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
- return 0;
-
if (mmc_host_is_spi(card->host)) {
if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
return mmc_spi_err_check(card);
return err;
}
- /* Ensure RPMB/R1B command has completed by polling with CMD13. */
- if (idata->rpmb || r1b_resp)
- err = mmc_poll_for_busy(card, busy_timeout_ms, false,
- MMC_BUSY_IO);
+
+ /*
+ * Ensure RPMB, writes and R1B responses are completed by polling with
+ * CMD13. Note that we usually don't need to poll when using HW busy
+ * detection, but here it is needed since some commands may indicate
+ * errors through the R1 status bits.
+ */
+ if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
+ struct mmc_blk_busy_data cb_data = {
+ .card = card,
+ };
+
+ err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
+ &mmc_blk_busy_cb, &cb_data);
+
+ idata->ic.response[0] = cb_data.status;
+ }
return err;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 89cd48fcec79..4a4bab9aa726 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
case 3: /* MMC v3.1 - v3.3 */
case 4: /* MMC v4 */
card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
- card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.oemid = UNSTUFF_BITS(resp, 104, 8);
card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index f64b9ac76a5c..5914516df2f7 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1089,8 +1089,14 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
err = mmc_sdio_reinit_card(host);
} else if (mmc_card_wake_sdio_irq(host)) {
- /* We may have switched to 1-bit mode during suspend */
+ /*
+ * We may have switched to 1-bit mode during suspend;
+ * we need to hold retuning, because tuning only supports
+ * 4-bit mode or 8-bit mode.
+ */
+ mmc_retune_hold_now(host);
err = sdio_enable_4bit_bus(host->card);
+ mmc_retune_release(host);
}
if (err)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 5392200cfdf7..97f7c3d4be6e 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -669,11 +669,11 @@ static void msdc_reset_hw(struct msdc_host *host)
u32 val;
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
- readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
+ readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
- readl_poll_timeout(host->base + MSDC_FIFOCS, val,
- !(val & MSDC_FIFOCS_CLR), 0, 0);
+ readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
+ !(val & MSDC_FIFOCS_CLR), 0, 0);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
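The mtk-sd hunk switches the reset polls to readl_poll_timeout_atomic(), presumably because msdc_reset_hw() can be reached from atomic context where the sleeping variant is not allowed. A small sketch of the atomic poll pattern from linux/iopoll.h; the register offset, bit and timeout are hypothetical:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/*
 * Illustrative sketch only: busy-wait (no sleeping) until a self-clearing
 * reset bit drops, or give up after 1 ms. Returns 0 or -ETIMEDOUT.
 */
static int example_wait_reset_done(void __iomem *base)
{
	u32 val;

	return readl_poll_timeout_atomic(base + 0x0, val, !(val & BIT(0)),
					 0, 1000);
}

Note that the driver above passes a timeout of 0, which means it polls without a deadline.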
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index ae8c307b7aa7..109d4b010f97 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -1144,42 +1144,6 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
return value;
}
-#ifdef CONFIG_PM_SLEEP
-static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
-
- pci_free_irq_vectors(slot->chip->pdev);
- gli_pcie_enable_msi(slot);
-
- return sdhci_pci_resume_host(chip);
-}
-
-static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
- int ret;
-
- ret = sdhci_pci_gli_resume(chip);
- if (ret)
- return ret;
-
- return cqhci_resume(slot->host->mmc);
-}
-
-static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
- int ret;
-
- ret = cqhci_suspend(slot->host->mmc);
- if (ret)
- return ret;
-
- return sdhci_suspend_host(slot->host);
-}
-#endif
-
static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -1420,6 +1384,70 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+
+ pci_free_irq_vectors(slot->chip->pdev);
+ gli_pcie_enable_msi(slot);
+
+ return sdhci_pci_resume_host(chip);
+}
+
+static int gl9763e_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ ret = sdhci_pci_gli_resume(chip);
+ if (ret)
+ return ret;
+
+ ret = cqhci_resume(slot->host->mmc);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable LPM negotiation to bring the device back in sync
+ * with its runtime_pm state.
+ */
+ gl9763e_set_low_power_negotiation(slot, false);
+
+ return 0;
+}
+
+static int gl9763e_suspend(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ /*
+ * Certain SoCs can suspend only with the bus in low-
+ * power state, notably x86 SoCs when using S0ix.
+ * Re-enable LPM negotiation to allow entering L1 state
+ * and entering system suspend.
+ */
+ gl9763e_set_low_power_negotiation(slot, true);
+
+ ret = cqhci_suspend(slot->host->mmc);
+ if (ret)
+ goto err_suspend;
+
+ ret = sdhci_suspend_host(slot->host);
+ if (ret)
+ goto err_suspend_host;
+
+ return 0;
+
+err_suspend_host:
+ cqhci_resume(slot->host->mmc);
+err_suspend:
+ gl9763e_set_low_power_negotiation(slot, false);
+ return ret;
+}
+#endif
+
static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
@@ -1527,8 +1555,8 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
.probe_slot = gli_probe_slot_gl9763e,
.ops = &sdhci_gl9763e_ops,
#ifdef CONFIG_PM_SLEEP
- .resume = sdhci_cqhci_gli_resume,
- .suspend = sdhci_cqhci_gli_suspend,
+ .resume = gl9763e_resume,
+ .suspend = gl9763e_suspend,
#endif
#ifdef CONFIG_PM
.runtime_suspend = gl9763e_runtime_suspend,
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 649ae075e229..6b84ba27e6ab 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -644,6 +644,7 @@ static int sdhci_sprd_tuning(struct mmc_host *mmc, struct mmc_card *card,
best_clk_sample = sdhci_sprd_get_best_clk_sample(mmc, value);
if (best_clk_sample < 0) {
dev_err(mmc_dev(host->mmc), "all tuning phase fail!\n");
+ err = best_clk_sample;
goto out;
}
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index be106dc20ff3..aa44a23ec045 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -37,7 +37,7 @@
/* Info for the block device */
struct block2mtd_dev {
struct list_head list;
- struct block_device *blkdev;
+ struct bdev_handle *bdev_handle;
struct mtd_info mtd;
struct mutex write_mutex;
};
@@ -55,7 +55,8 @@ static struct page *page_read(struct address_space *mapping, pgoff_t index)
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
- struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ struct address_space *mapping =
+ dev->bdev_handle->bdev->bd_inode->i_mapping;
struct page *page;
pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
@@ -105,6 +106,8 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
+ struct address_space *mapping =
+ dev->bdev_handle->bdev->bd_inode->i_mapping;
struct page *page;
pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
@@ -117,7 +120,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
cpylen = len; // this page
len = len - cpylen;
- page = page_read(dev->blkdev->bd_inode->i_mapping, index);
+ page = page_read(mapping, index);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -139,7 +142,8 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
loff_t to, size_t len, size_t *retlen)
{
struct page *page;
- struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ struct address_space *mapping =
+ dev->bdev_handle->bdev->bd_inode->i_mapping;
pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
@@ -194,7 +198,7 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
- sync_blockdev(dev->blkdev);
+ sync_blockdev(dev->bdev_handle->bdev);
return;
}
@@ -206,10 +210,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
kfree(dev->mtd.name);
- if (dev->blkdev) {
- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
- 0, -1);
- blkdev_put(dev->blkdev, NULL);
+ if (dev->bdev_handle) {
+ invalidate_mapping_pages(
+ dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1);
+ bdev_release(dev->bdev_handle);
}
kfree(dev);
@@ -219,10 +223,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
* This function is marked __ref because it calls the __init marked
* early_lookup_bdev when called from the early boot code.
*/
-static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
+static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
- struct block_device *bdev = ERR_PTR(-ENODEV);
+ struct bdev_handle *bdev_handle = ERR_PTR(-ENODEV);
#ifndef MODULE
int i;
@@ -230,7 +234,7 @@ static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
* We can't use early_lookup_bdev from a running system.
*/
if (system_state >= SYSTEM_RUNNING)
- return bdev;
+ return bdev_handle;
/*
* We might not have the root device mounted at this point.
@@ -249,19 +253,20 @@ static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
wait_for_device_probe();
if (!early_lookup_bdev(devname, &devt)) {
- bdev = blkdev_get_by_dev(devt, mode, dev, NULL);
- if (!IS_ERR(bdev))
+ bdev_handle = bdev_open_by_dev(devt, mode, dev, NULL);
+ if (!IS_ERR(bdev_handle))
break;
}
}
#endif
- return bdev;
+ return bdev_handle;
}
static struct block2mtd_dev *add_device(char *devname, int erase_size,
char *label, int timeout)
{
const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;
@@ -274,21 +279,23 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
return NULL;
/* Get a handle on the device */
- bdev = blkdev_get_by_path(devname, mode, dev, NULL);
- if (IS_ERR(bdev))
- bdev = mdtblock_early_get_bdev(devname, mode, timeout, dev);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_path(devname, mode, dev, NULL);
+ if (IS_ERR(bdev_handle))
+ bdev_handle = mdtblock_early_get_bdev(devname, mode, timeout,
+ dev);
+ if (IS_ERR(bdev_handle)) {
pr_err("error: cannot open device %s\n", devname);
goto err_free_block2mtd;
}
- dev->blkdev = bdev;
+ dev->bdev_handle = bdev_handle;
+ bdev = bdev_handle->bdev;
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
pr_err("attempting to use an MTD device as a block device\n");
goto err_free_block2mtd;
}
- if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
+ if ((long)bdev->bd_inode->i_size % erase_size) {
pr_err("erasesize must be a divisor of device size\n");
goto err_free_block2mtd;
}
@@ -306,7 +313,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
dev->mtd.name = name;
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = bdev->bd_inode->i_size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.writebufsize = PAGE_SIZE;
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 78710fbc8e7f..fc8721339282 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -551,6 +551,17 @@ static int physmap_flash_probe(struct platform_device *dev)
if (info->probe_type) {
info->mtds[i] = do_map_probe(info->probe_type,
&info->maps[i]);
+
+ /* Fall back to mapping region as ROM */
+ if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) &&
+ strcmp(info->probe_type, "map_rom")) {
+ dev_warn(&dev->dev,
+ "map_probe() failed for type %s\n",
+ info->probe_type);
+
+ info->mtds[i] = do_map_probe("map_rom",
+ &info->maps[i]);
+ }
} else {
int j;
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 4621ec549cc7..a492051c46f5 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -515,6 +515,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
dma_addr_t dma_addr;
+ u8 status;
int ret;
struct anfc_op nfc_op = {
.pkt_reg =
@@ -561,10 +562,21 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
}
/* Spare data is not protected */
- if (oob_required)
+ if (oob_required) {
ret = nand_write_oob_std(chip, page);
+ if (ret)
+ return ret;
+ }
- return ret;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
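The Arasan hunk above, and the Marvell and PL35x hunks further down, all add the same post-program check: once the page write has completed, read the chip-side status and fail the write with -EIO if the chip reports NAND_STATUS_FAIL. A minimal sketch of that idiom (the helper name is hypothetical; nand_status_op() and NAND_STATUS_FAIL come from the raw NAND core):

#include <linux/mtd/rawnand.h>

/*
 * Illustrative sketch only: query the program status on the chip side and
 * map a reported failure to -EIO, as the controller drivers in this series
 * now do after each page program.
 */
static int example_check_program_status(struct nand_chip *chip)
{
	u8 status;
	int ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return (status & NAND_STATUS_FAIL) ? -EIO : 0;
}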
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 2c94da7a3b3a..b841a81cb128 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1165,6 +1165,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
.ndcb[2] = NDCB2_ADDR5_PAGE(page),
};
unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ u8 status;
int ret;
/* NFCv2 needs more information about the operation being executed */
@@ -1198,7 +1199,18 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
ret = marvell_nfc_wait_op(chip,
PSEC_TO_MSEC(sdr->tPROG_max));
- return ret;
+ if (ret)
+ return ret;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
@@ -1627,6 +1639,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
int data_len = lt->data_bytes;
int spare_len = lt->spare_bytes;
int chunk, ret;
+ u8 status;
marvell_nfc_select_target(chip, chip->cur_cs);
@@ -1663,6 +1676,14 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
if (ret)
return ret;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index d4b55155aeae..1fcac403cee6 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -5110,6 +5110,9 @@ static void rawnand_check_cont_read_support(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ if (!chip->parameters.supports_read_cache)
+ return;
+
if (chip->read_retries)
return;
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 836757717660..b3cc8f360529 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -94,6 +94,9 @@ int nand_jedec_detect(struct nand_chip *chip)
goto free_jedec_param_page;
}
+ if (p->opt_cmd[0] & JEDEC_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
memorg->pagesize = le32_to_cpu(p->byte_per_page);
mtd->writesize = memorg->pagesize;
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index f15ef90aec8c..861975e44b55 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -303,6 +303,9 @@ int nand_onfi_detect(struct nand_chip *chip)
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
+ if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
if (!onfi) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 8da5fee321b5..c506e92a3e45 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -511,6 +511,7 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
u32 addr1 = 0, addr2 = 0, row;
u32 cmd_addr;
int i, ret;
+ u8 status;
ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
if (ret)
@@ -563,6 +564,14 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
if (ret)
goto disable_ecc_engine;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto disable_ecc_engine;
+
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
+
disable_ecc_engine:
pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 64499c1b3603..b079605c84d3 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -3444,7 +3444,7 @@ err_nandc_alloc:
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- dma_unmap_resource(dev, res->start, resource_size(res),
+ dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
DMA_BIDIRECTIONAL, 0);
return ret;
}
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 50b7295bc922..12601bc4227a 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -12,7 +12,7 @@
#define SPINAND_MFR_MICRON 0x2c
-#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
+#define MICRON_STATUS_ECC_MASK GENMASK(6, 4)
#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
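The one-line Micron change narrows the ECC status field from bits 7:4 to bits 6:4, so a status byte with bit 7 set for some unrelated purpose no longer leaks into the bitflip decode. A self-contained check of the two masks; the GENMASK() definition is copied here only for illustration (in-tree code gets it from linux/bits.h) and assumes a 32-bit unsigned int:

#include <stdio.h>

/* Userspace copy of the kernel GENMASK() macro, 32-bit unsigned int only. */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int status = 0x90;	/* hypothetical status byte with bit 7 set */

	/* Old mask 0xf0 keeps bit 7: field reads back as 0x90. */
	printf("GENMASK(7, 4) = 0x%02x -> field 0x%02x\n",
	       GENMASK(7, 4), status & GENMASK(7, 4));

	/* New mask 0x70 drops bit 7: field reads back as 0x10. */
	printf("GENMASK(6, 4) = 0x%02x -> field 0x%02x\n",
	       GENMASK(6, 4), status & GENMASK(6, 4));

	return 0;
}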
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ed7212e61c54..51d47eda1c87 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4023,7 +4023,7 @@ static inline const void *bond_pull_data(struct sk_buff *skb,
if (likely(n <= hlen))
return data;
else if (skb && likely(pskb_may_pull(skb, n)))
- return skb->head;
+ return skb->data;
return NULL;
}
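The bonding fix above matters because pskb_may_pull() only guarantees that the first n bytes of the packet are linear; those bytes then start at skb->data, while skb->head points at the start of the allocated buffer, which can sit before the packet because of headroom. A minimal sketch of the corrected pattern (the function name is illustrative):

#include <linux/skbuff.h>

/*
 * Illustrative sketch only: return a pointer to n linear bytes of packet
 * data, or NULL if they cannot be made linear. Note skb->data, not skb->head.
 */
static const void *example_pull_data(struct sk_buff *skb, unsigned int n)
{
	if (!pskb_may_pull(skb, n))
		return NULL;

	return skb->data;
}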
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 649453a3c858..f8cde9f9f554 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -190,7 +190,7 @@ config CAN_SLCAN
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
- depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST
+ depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST
help
Say Y here if you want to use CAN controller found on Allwinner
A10/A20/D1 SoCs.
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index add39e922b89..d15f85a40c1e 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -348,7 +348,7 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
static struct flexcan_devtype_data fsl_imx93_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_AUTO_STOP_MODE |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
@@ -544,11 +544,6 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
} else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
- } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) {
- /* For the auto stop mode, software do nothing, hardware will cover
- * all the operation automatically after system go into low power mode.
- */
- return 0;
}
return flexcan_low_power_enter_ack(priv);
@@ -574,12 +569,6 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
- /* For the auto stop mode, hardware will exist stop mode
- * automatically after system go out of low power mode.
- */
- if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
- return 0;
-
return flexcan_low_power_exit_ack(priv);
}
@@ -1994,13 +1983,18 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
ret = flexcan_setup_stop_mode_scfw(pdev);
else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
ret = flexcan_setup_stop_mode_gpr(pdev);
- else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
- ret = 0;
else
/* return 0 directly if doesn't support stop mode feature */
return 0;
- if (ret)
+ /* If ret is -EINVAL, the SoC claims to support stop mode, but the
+ * dts file lacks the stop mode property definition. In this case,
+ * return 0 directly; this skips the wakeup capable setting and
+ * does not block the driver probe.
+ */
+ if (ret == -EINVAL)
+ return 0;
+ else if (ret)
return ret;
device_set_wakeup_capable(&pdev->dev, true);
@@ -2320,16 +2314,8 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
if (netif_running(dev)) {
int err;
- if (device_may_wakeup(device)) {
+ if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, true);
- /* For auto stop mode, need to keep the clock on before
- * system go into low power mode. After system go into
- * low power mode, hardware will config the flexcan into
- * stop mode, and gate off the clock automatically.
- */
- if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
- return 0;
- }
err = pm_runtime_force_suspend(device);
if (err)
@@ -2347,15 +2333,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
if (netif_running(dev)) {
int err;
- /* For the wakeup in auto stop mode, no need to gate on the
- * clock here, hardware will do this automatically.
- */
- if (!(device_may_wakeup(device) &&
- priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)) {
- err = pm_runtime_force_resume(device);
- if (err)
- return err;
- }
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, false);
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 91402977780b..025c3417031f 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -68,8 +68,6 @@
#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
/* Device supports RX via FIFO */
#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
-/* auto enter stop mode to support wakeup */
-#define FLEXCAN_QUIRK_AUTO_STOP_MODE BIT(17)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 8a4143809d33..ae8c42f5debd 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -125,7 +125,7 @@ static const struct tcan4x5x_version_info tcan4x5x_versions[] = {
},
[TCAN4553] = {
.name = "4553",
- .id2_register = 0x32353534,
+ .id2_register = 0x33353534,
},
/* generic version with no id2_register at the end */
[TCAN4X5X] = {
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 0ada0e160e93..743c2eb62b87 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -392,7 +392,13 @@ static irqreturn_t sja1000_reset_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
netdev_dbg(dev, "performing a soft reset upon overrun\n");
- sja1000_start(dev);
+
+ netif_tx_lock(dev);
+
+ can_free_echo_skb(dev, 0, NULL);
+ sja1000_set_mode(dev, CAN_MODE_START);
+
+ netif_tx_unlock(dev);
return IRQ_HANDLED;
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 72374b066f64..cd1f240c90f3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -617,17 +617,16 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
if (!priv->master_mii_bus) {
- of_node_put(dn);
- return -EPROBE_DEFER;
+ err = -EPROBE_DEFER;
+ goto err_of_node_put;
}
- get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
- of_node_put(dn);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_put_master_mii_bus_dev;
}
priv->slave_mii_bus->priv = priv;
@@ -684,11 +683,17 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
}
err = mdiobus_register(priv->slave_mii_bus);
- if (err && dn) {
- mdiobus_free(priv->slave_mii_bus);
- of_node_put(dn);
- }
+ if (err && dn)
+ goto err_free_slave_mii_bus;
+ return 0;
+
+err_free_slave_mii_bus:
+ mdiobus_free(priv->slave_mii_bus);
+err_put_master_mii_bus_dev:
+ put_device(&priv->master_mii_bus->dev);
+err_of_node_put:
+ of_node_put(dn);
return err;
}
@@ -696,6 +701,7 @@ static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
mdiobus_free(priv->slave_mii_bus);
+ put_device(&priv->master_mii_bus->dev);
of_node_put(priv->master_mii_dn);
}
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index de1dc22cf683..4ce68e655a63 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -505,8 +505,8 @@ qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
void *val_buf, size_t val_len)
{
int i, count = val_len / sizeof(u32), ret;
- u32 reg = *(u32 *)reg_buf & U16_MAX;
struct qca8k_priv *priv = ctx;
+ u32 reg = *(u16 *)reg_buf;
if (priv->mgmt_master &&
!qca8k_read_eth(priv, reg, val_buf, val_len))
@@ -527,8 +527,8 @@ qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
const void *val_buf, size_t val_len)
{
int i, count = val_len / sizeof(u32), ret;
- u32 reg = *(u32 *)reg_buf & U16_MAX;
struct qca8k_priv *priv = ctx;
+ u32 reg = *(u16 *)reg_buf;
u32 *val = (u32 *)val_buf;
if (priv->mgmt_master &&
@@ -666,6 +666,15 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
goto err_read_skb;
}
+ /* It seems that accessing the switch's internal PHYs via management
+ * packets still uses the MDIO bus within the switch internally, and
+ * these accesses can conflict with external MDIO accesses to other
+ * devices on the MDIO bus.
+ * We therefore need to lock the MDIO bus onto which the switch is
+ * connected.
+ */
+ mutex_lock(&priv->bus->mdio_lock);
+
/* Actually start the request:
* 1. Send mdio master packet
* 2. Busy Wait for mdio master command
@@ -678,6 +687,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
mgmt_master = priv->mgmt_master;
if (!mgmt_master) {
mutex_unlock(&mgmt_eth_data->mutex);
+ mutex_unlock(&priv->bus->mdio_lock);
ret = -EINVAL;
goto err_mgmt_master;
}
@@ -765,6 +775,7 @@ exit:
QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
+ mutex_unlock(&priv->bus->mdio_lock);
return ret;
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index ca66b747b7c5..d7c274af6d4d 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -294,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
{
struct adin1110_priv *priv = port_priv->priv;
u32 header_len = ADIN1110_RD_HEADER_LEN;
- struct spi_transfer t;
+ struct spi_transfer t = {0};
u32 frame_size_no_fcs;
struct sk_buff *rxb;
u32 frame_size;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4d4140b7c450..f3305c434c95 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2170,7 +2170,7 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
- .of_match_table = of_match_ptr(xgene_enet_of_match),
+ .of_match_table = xgene_enet_of_match,
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8d719f82854a..76de55306c4d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
+ if (ret)
+ return ret;
/* If we have version number support, then check to see that the new
* firmware got loaded properly.
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 5fc64e47568a..d567e42e1760 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -911,7 +911,7 @@ static int csk_wait_memory(struct chtls_dev *cdev,
struct sock *sk, long *timeo_p)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int err = 0;
+ int ret, err = 0;
long current_timeo;
long vm_wait = 0;
bool noblock;
@@ -942,10 +942,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_pending++;
- sk_wait_event(sk, &current_timeo, sk->sk_err ||
- (sk->sk_shutdown & SEND_SHUTDOWN) ||
- (csk_mem_free(cdev, sk) && !vm_wait), &wait);
+ ret = sk_wait_event(sk, &current_timeo, sk->sk_err ||
+ (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (csk_mem_free(cdev, sk) && !vm_wait),
+ &wait);
sk->sk_write_pending--;
+ if (ret < 0)
+ goto do_error;
if (vm_wait) {
vm_wait -= current_timeo;
@@ -1348,6 +1351,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int copied = 0;
int target;
long timeo;
+ int ret;
buffers_freed = 0;
@@ -1423,7 +1427,11 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (copied >= target)
break;
chtls_cleanup_rbuf(sk, copied);
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0) {
+ copied = copied ? : ret;
+ goto unlock;
+ }
continue;
found_ok_skb:
if (!skb->len) {
@@ -1518,6 +1526,8 @@ skip_copy:
if (buffers_freed)
chtls_cleanup_rbuf(sk, copied);
+
+unlock:
release_sock(sk);
return copied;
}
@@ -1534,6 +1544,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
int copied = 0;
size_t avail; /* amount of available data in current skb */
long timeo;
+ int ret;
lock_sock(sk);
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
@@ -1585,7 +1596,12 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
release_sock(sk);
lock_sock(sk);
} else {
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0) {
+ /* here 'copied' is 0 due to previous checks */
+ copied = ret;
+ break;
+ }
}
if (unlikely(peek_seq != tp->copied_seq)) {
@@ -1656,6 +1672,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int copied = 0;
long timeo;
int target; /* Read at least this many bytes */
+ int ret;
buffers_freed = 0;
@@ -1747,7 +1764,11 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (copied >= target)
break;
chtls_cleanup_rbuf(sk, copied);
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0) {
+ copied = copied ? : ret;
+ goto unlock;
+ }
continue;
found_ok_skb:
@@ -1816,6 +1837,7 @@ skip_copy:
if (buffers_freed)
chtls_cleanup_rbuf(sk, copied);
+unlock:
release_sock(sk);
return copied;
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index d1da7413dc4d..e84a066aa1a4 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -146,7 +146,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
&rx->data.data_ring[i]);
if (err)
- goto alloc_err;
+ goto alloc_err_rda;
}
if (!rx->data.raw_addressing) {
@@ -171,12 +171,26 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
return slots;
alloc_err_qpl:
+ /* Fully free the copy pool pages. */
while (j--) {
page_ref_sub(rx->qpl_copy_pool[j].page,
rx->qpl_copy_pool[j].pagecnt_bias - 1);
put_page(rx->qpl_copy_pool[j].page);
}
-alloc_err:
+
+ /* Do not fully free QPL pages - only remove the bias added in this
+ * function with gve_setup_rx_buffer.
+ */
+ while (i--)
+ page_ref_sub(rx->data.page_info[i].page,
+ rx->data.page_info[i].pagecnt_bias - 1);
+
+ gve_unassign_qpl(priv, rx->data.qpl->id);
+ rx->data.qpl = NULL;
+
+ return err;
+
+alloc_err_rda:
while (i--)
gve_rx_free_buffer(&priv->pdev->dev,
&rx->data.page_info[i],
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 6e310a539467..55bb0b5310d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -580,7 +580,6 @@ struct i40e_pf {
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
-#define I40E_FLAG_VF_VLAN_PRUNING BIT(27)
/* TOTAL_PORT_SHUTDOWN
* Allows to physically disable the link on the NIC's port.
* If enabled, (after link down request from the OS)
@@ -603,6 +602,7 @@ struct i40e_pf {
* in abilities field of i40e_aq_set_phy_config structure
*/
#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27)
+#define I40E_FLAG_VF_VLAN_PRUNING BIT(28)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index eeef20f77106..1b493854f522 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1082,7 +1082,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
I40E_PFLAN_QALLOC_LASTQ_SHIFT;
- if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+ if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
num_queues = (j - base_queue) + 1;
else
num_queues = 0;
@@ -1092,7 +1092,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
- if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+ if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
num_vfs = (j - i) + 1;
else
num_vfs = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0b3a27f118fb..b047c587629b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2544,7 +2544,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
rx_buffer = i40e_rx_bi(rx_ring, ntp);
i40e_inc_ntp(rx_ring);
i40e_reuse_rx_page(rx_ring, rx_buffer);
- cleaned_count++;
+ /* Update ntc and bump the cleaned count if we are not in the
+ * middle of a multi-buffer (mb) packet.
+ */
+ if (rx_ring->next_to_clean == ntp) {
+ rx_ring->next_to_clean =
+ rx_ring->next_to_process;
+ cleaned_count++;
+ }
continue;
}
@@ -2847,7 +2854,7 @@ tx_only:
return budget;
}
- if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
/* Exit the polling mode, but don't re-enable interrupts if stack might
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 37f41c8a682f..7d991e4d9b89 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -437,12 +437,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 next_to_process = rx_ring->next_to_process;
u16 next_to_clean = rx_ring->next_to_clean;
- u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
struct xdp_buff *first = NULL;
+ u32 count = rx_ring->count;
struct bpf_prog *xdp_prog;
+ u32 entries_to_alloc;
bool failure = false;
- u16 cleaned_count;
if (next_to_process != next_to_clean)
first = *i40e_rx_bi(rx_ring, next_to_clean);
@@ -475,7 +475,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
qword);
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_free(bi);
- next_to_process = (next_to_process + 1) & count_mask;
+ if (++next_to_process == count)
+ next_to_process = 0;
continue;
}
@@ -493,7 +494,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
break;
- next_to_process = (next_to_process + 1) & count_mask;
+ if (++next_to_process == count)
+ next_to_process = 0;
if (i40e_is_non_eop(rx_ring, rx_desc))
continue;
@@ -513,10 +515,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
rx_ring->next_to_clean = next_to_clean;
rx_ring->next_to_process = next_to_process;
- cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
- if (cleaned_count >= I40E_RX_BUFFER_WRITE)
- failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+ entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
+ if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
+ failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc);
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@@ -752,14 +754,16 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
- u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
- for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ while (ntc != ntu) {
struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
xsk_buff_free(rx_bi);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
}
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 6a2e6d64bc3a..b3434dbc90d6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1437,9 +1437,9 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
if (!list_empty(&adapter->adv_rss_list_head))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
- adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
+ adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
@@ -4982,8 +4982,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->finish_config, iavf_finish_config);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
@@ -4994,6 +4992,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+ /* Initialization goes on in the work. Do not add more of it below. */
return 0;
err_ioremap:
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 4f39863b5537..7b1256992dcf 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -2093,3 +2093,35 @@ lag_rebuild_out:
}
mutex_unlock(&pf->lag_mutex);
}
+
+/**
+ * ice_lag_is_switchdev_running
+ * @pf: pointer to PF structure
+ *
+ * Check if switchdev is running on any of the interfaces connected to lag.
+ */
+bool ice_lag_is_switchdev_running(struct ice_pf *pf)
+{
+ struct ice_lag *lag = pf->lag;
+ struct net_device *tmp_nd;
+
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+ return false;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+ struct ice_netdev_priv *priv = netdev_priv(tmp_nd);
+
+ if (!netif_is_ice(tmp_nd) || !priv || !priv->vsi ||
+ !priv->vsi->back)
+ continue;
+
+ if (ice_is_switchdev_running(priv->vsi->back)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 18075b82485a..facb6c894b6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -62,4 +62,5 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
+bool ice_lag_is_switchdev_running(struct ice_pf *pf);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 201570cd2e0b..73bbf06a76db 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1201,8 +1201,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
static void
@@ -3575,6 +3574,12 @@ int ice_set_dflt_vsi(struct ice_vsi *vsi)
dev = ice_pf_to_dev(vsi->back);
+ if (ice_lag_is_switchdev_running(vsi->back)) {
+ dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
+ vsi->vsi_num);
+ return 0;
+ }
+
/* the VSI passed in is already the default VSI */
if (ice_is_vsi_dflt_vsi(vsi)) {
dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c8286adae946..7784135160fd 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <generated/utsrelease.h>
+#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
@@ -4683,6 +4684,9 @@ static void ice_init_features(struct ice_pf *pf)
static void ice_deinit_features(struct ice_pf *pf)
{
+ if (ice_is_safe_mode(pf))
+ return;
+
ice_deinit_lag(pf);
if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
ice_cfg_lldp_mib_change(&pf->hw, false);
@@ -5014,6 +5018,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return -EINVAL;
}
+ /* When running under a kdump kernel, initiate a reset before enabling
+ * the device in order to clear out any pending DMA transactions. These
+ * transactions can cause some systems to machine check when doing
+ * the pcim_enable_device() below.
+ */
+ if (is_kdump_kernel()) {
+ pci_save_state(pdev);
+ pci_clear_master(pdev);
+ err = pcie_flr(pdev);
+ if (err)
+ return err;
+ pci_restore_state(pdev);
+ }
+
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
*/
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 319ed601eaa1..4ee849985e2b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2978,11 +2978,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
if (err)
goto err_out_w_lock;
- igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ if (err)
+ goto err_out_input_filter;
spin_unlock(&adapter->nfc_lock);
return 0;
+err_out_input_filter:
+ igb_erase_filter(adapter, input);
err_out_w_lock:
spin_unlock(&adapter->nfc_lock);
err_out:
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 7ab6dd58e400..dd8a9d27a167 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1817,7 +1817,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
- u32 advertising;
+ u16 advertised = 0;
/* When adapter in resetting mode, autoneg/speed/duplex
* cannot be changed
@@ -1842,18 +1842,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
- /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
- * We have to check this and convert it to ADVERTISE_2500_FULL
- * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
- */
- if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
- advertising |= ADVERTISE_2500_FULL;
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 2500baseT_Full))
+ advertised |= ADVERTISE_2500_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 1000baseT_Full))
+ advertised |= ADVERTISE_1000_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100baseT_Full))
+ advertised |= ADVERTISE_100_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100baseT_Half))
+ advertised |= ADVERTISE_100_HALF;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10baseT_Full))
+ advertised |= ADVERTISE_10_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10baseT_Half))
+ advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
- hw->phy.autoneg_advertised = advertising;
+ hw->phy.autoneg_advertised = advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
} else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 29cc60988071..ea88ac04ab9a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
struct vf_macvlans *mv_list;
int num_vf_macvlans, i;
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&adapter->vf_mvs.l);
+
num_vf_macvlans = hw->mac.num_rar_entries -
(IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
if (!num_vf_macvlans)
@@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
GFP_KERNEL);
if (mv_list) {
- /* Initialize list of VF macvlans */
- INIT_LIST_HEAD(&adapter->vf_mvs.l);
for (i = 0; i < num_vf_macvlans; i++) {
mv_list[i].vf = -1;
mv_list[i].free = true;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index dbc518ff8276..5b46ca47c8e5 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -715,20 +715,19 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
hw_desc->dptr = tx_buffer->sglist_dma;
}
- /* Flush the hw descriptor before writing to doorbell */
- wmb();
-
- /* Ring Doorbell to notify the NIC there is a new packet */
- writel(1, iq->doorbell_reg);
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ skb_tx_timestamp(skb);
atomic_inc(&iq->instr_pending);
wi++;
if (wi == iq->max_count)
wi = 0;
iq->host_write_index = wi;
+ /* Flush the hw descriptor before writing to doorbell */
+ wmb();
- netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ /* Ring Doorbell to notify the NIC there is a new packet */
+ writel(1, iq->doorbell_reg);
iq->stats.instr_posted++;
- skb_tx_timestamp(skb);
return NETDEV_TX_OK;
dma_map_sg_err:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 59b138214af2..6cc7a78968fc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -1357,10 +1357,12 @@ static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
if (netif_running(secy->netdev)) {
/* Keys cannot be changed after creation */
- err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
- sw_tx_sa->next_pn);
- if (err)
- return err;
+ if (ctx->sa.update_pn) {
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn);
+ if (err)
+ return err;
+ }
err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
sa_num, sw_tx_sa->active);
@@ -1529,6 +1531,9 @@ static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
if (err)
return err;
+ if (!ctx->sa.update_pn)
+ return 0;
+
err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
rx_sa->next_pn);
if (err)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 997fedac3a98..818ce76185b2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1403,6 +1403,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
return 0;
}
+ pp_params.order = get_order(buf_size);
pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
pp_params.nid = NUMA_NO_NODE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index afb348579577..c22b0ad0c870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -2186,52 +2186,23 @@ static u16 cmdif_rev(struct mlx5_core_dev *dev)
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
- int size = sizeof(struct mlx5_cmd_prot_block);
- int align = roundup_pow_of_two(size);
struct mlx5_cmd *cmd = &dev->cmd;
- u32 cmd_l;
- int err;
-
- cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool)
- return -ENOMEM;
- err = alloc_cmd_page(dev, cmd);
- if (err)
- goto err_free_pool;
-
- cmd_l = (u32)(cmd->dma);
- if (cmd_l & 0xfff) {
- mlx5_core_err(dev, "invalid command queue address\n");
- err = -ENOMEM;
- goto err_cmd_page;
- }
cmd->checksum_disabled = 1;
spin_lock_init(&cmd->alloc_lock);
spin_lock_init(&cmd->token_lock);
- create_msg_cache(dev);
-
set_wqname(dev);
cmd->wq = create_singlethread_workqueue(cmd->wq_name);
if (!cmd->wq) {
mlx5_core_err(dev, "failed to create command workqueue\n");
- err = -ENOMEM;
- goto err_cache;
+ return -ENOMEM;
}
mlx5_cmdif_debugfs_init(dev);
return 0;
-
-err_cache:
- destroy_msg_cache(dev);
-err_cmd_page:
- free_cmd_page(dev, cmd);
-err_free_pool:
- dma_pool_destroy(cmd->pool);
- return err;
}
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
@@ -2240,15 +2211,15 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
mlx5_cmdif_debugfs_cleanup(dev);
destroy_workqueue(cmd->wq);
- destroy_msg_cache(dev);
- free_cmd_page(dev, cmd);
- dma_pool_destroy(cmd->pool);
}
int mlx5_cmd_enable(struct mlx5_core_dev *dev)
{
+ int size = sizeof(struct mlx5_cmd_prot_block);
+ int align = roundup_pow_of_two(size);
struct mlx5_cmd *cmd = &dev->cmd;
u32 cmd_h, cmd_l;
+ int err;
memset(&cmd->vars, 0, sizeof(cmd->vars));
cmd->vars.cmdif_rev = cmdif_rev(dev);
@@ -2281,10 +2252,21 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
sema_init(&cmd->vars.pages_sem, 1);
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+ cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
+ if (!cmd->pool)
+ return -ENOMEM;
+
+ err = alloc_cmd_page(dev, cmd);
+ if (err)
+ goto err_free_pool;
+
cmd_h = (u32)((u64)(cmd->dma) >> 32);
cmd_l = (u32)(cmd->dma);
- if (WARN_ON(cmd_l & 0xfff))
- return -EINVAL;
+ if (cmd_l & 0xfff) {
+ mlx5_core_err(dev, "invalid command queue address\n");
+ err = -ENOMEM;
+ goto err_cmd_page;
+ }
iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
@@ -2297,17 +2279,27 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
cmd->mode = CMD_MODE_POLLING;
cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
+ create_msg_cache(dev);
create_debugfs_files(dev);
return 0;
+
+err_cmd_page:
+ free_cmd_page(dev, cmd);
+err_free_pool:
+ dma_pool_destroy(cmd->pool);
+ return err;
}
void mlx5_cmd_disable(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
- clean_debug_files(dev);
flush_workqueue(cmd->wq);
+ clean_debug_files(dev);
+ destroy_msg_cache(dev);
+ free_cmd_page(dev, cmd);
+ dma_pool_destroy(cmd->pool);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 7c0f2adbea00..ad789349c06e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -848,7 +848,7 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
if (tracer->owner) {
- tracer->owner = false;
+ mlx5_fw_tracer_ownership_acquire(tracer);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 0fef853eab62..5d128c5b4529 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -467,6 +467,17 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
/* only handle the event on peers */
if (mlx5_esw_bridge_is_local(dev, rep, esw))
break;
+
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ /* Mark the entry for deletion to prevent the update wq task from
+ * spuriously refreshing it, which would mark it as offloaded again
+ * in the SW bridge. After this, fall through to the regular
+ * async delete code.
+ */
+ mlx5_esw_bridge_fdb_mark_deleted(dev, vport_num, esw_owner_vhca_id, br_offloads,
+ fdb_info);
fallthrough;
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 1730f6a716ee..b10e40e1a9c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -24,7 +24,8 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
- if (!route_dev || !netif_is_ovs_master(route_dev))
+ if (!route_dev || !netif_is_ovs_master(route_dev) ||
+ attr->parse_attr->filter_dev == e->out_dev)
goto out;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 12f56d0db0af..8bed17d8fe56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -874,11 +874,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
out:
- if (flags & XDP_XMIT_FLUSH) {
- if (sq->mpwqe.wqe)
- mlx5e_xdp_mpwqe_complete(sq);
+ if (sq->mpwqe.wqe)
+ mlx5e_xdp_mpwqe_complete(sq);
+
+ if (flags & XDP_XMIT_FLUSH)
mlx5e_xmit_xdp_doorbell(sq);
- }
return nxmit;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index c9c1db971652..d4ebd8743114 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -580,7 +580,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
- if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
+ if (ctx->sa.update_pn) {
netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
assoc_num);
err = -EINVAL;
@@ -973,7 +973,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
goto out;
}
- if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
+ if (ctx->sa.update_pn) {
netdev_err(ctx->netdev,
"MACsec offload update RX sa %d PN isn't supported\n",
assoc_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a2ae791538ed..acb40770cf0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3952,13 +3952,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
struct mlx5e_channels *chs = &priv->channels;
struct mlx5e_params new_params;
int err;
+ bool rx_ts_over_crc = !enable;
mutex_lock(&priv->state_lock);
new_params = chs->params;
new_params.scatter_fcs_en = enable;
err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
- &new_params.scatter_fcs_en, true);
+ &rx_ts_over_crc, true);
mutex_unlock(&priv->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2fdb8895aecd..fd1cce542b68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -701,7 +701,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
/* update HW stats in background for next time */
mlx5e_queue_update_stats(priv);
- memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
+ mlx5e_stats_copy_rep_stats(stats, &priv->stats.rep_stats);
}
static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
@@ -769,6 +769,7 @@ static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
static void mlx5e_build_rep_params(struct net_device *netdev)
{
+ const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
@@ -794,8 +795,15 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ /* If the netdev is already registered (e.g. move from nic profile to
+ * uplink), the RTNL lock must be held before triggering netdev notifiers.
+ */
+ if (take_rtnl)
+ rtnl_lock();
/* update XDP supported features */
mlx5e_set_xdp_feature(netdev);
+ if (take_rtnl)
+ rtnl_unlock();
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3fd11b0761e0..8d9743a5e42c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -457,26 +457,41 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
int remaining = wqe_bulk;
- int i = 0;
+ int total_alloc = 0;
+ int refill_alloc;
+ int refill;
/* The WQE bulk is split into smaller bulks that are sized
* according to the page pool cache refill size to avoid overflowing
* the page pool cache due to too many page releases at once.
*/
do {
- int refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
- int alloc_count;
+ refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
- mlx5e_free_rx_wqes(rq, ix + i, refill);
- alloc_count = mlx5e_alloc_rx_wqes(rq, ix + i, refill);
- i += alloc_count;
- if (unlikely(alloc_count != refill))
- break;
+ mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
+ refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
+ if (unlikely(refill_alloc != refill))
+ goto err_free;
+ total_alloc += refill_alloc;
remaining -= refill;
} while (remaining);
- return i;
+ return total_alloc;
+
+err_free:
+ mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
+
+ for (int i = 0; i < total_alloc + refill; i++) {
+ int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
+ struct mlx5e_wqe_frag_info *frag;
+
+ frag = get_frag(rq, j);
+ for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
+ frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+ }
+
+ return 0;
}
static void
@@ -816,6 +831,8 @@ err_unmap:
mlx5e_page_release_fragmented(rq, frag_page);
}
+ bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
+
err:
rq->stats->buff_alloc_err++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 176fa5976259..477c547dcc04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -484,11 +484,20 @@ struct mlx5e_stats {
struct mlx5e_vnic_env_stats vnic;
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
- struct rtnl_link_stats64 vf_vport;
struct mlx5e_pcie_stats pcie;
struct mlx5e_rep_stats rep_stats;
};
+static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport,
+ struct mlx5e_rep_stats *rep_stats)
+{
+ memset(vf_vport, 0, sizeof(*vf_vport));
+ vf_vport->rx_packets = rep_stats->vport_rx_packets;
+ vf_vport->tx_packets = rep_stats->vport_tx_packets;
+ vf_vport->rx_bytes = rep_stats->vport_rx_bytes;
+ vf_vport->tx_bytes = rep_stats->vport_tx_bytes;
+}
+
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index c24828b688ac..c8590483ddc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -4972,7 +4972,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
- rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
+ mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats,
+ &priv->stats.rep_stats);
break;
default:
NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
@@ -5012,7 +5013,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
u64 dbytes;
u64 dpkts;
- cur_stats = priv->stats.vf_vport;
+ mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
rpriv->prev_vf_vport_stats = cur_stats;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index e36294b7ade2..1b9bc32efd6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1748,6 +1748,28 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
entry->lastuse = jiffies;
}
+void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ struct mlx5_esw_bridge *bridge;
+
+ bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!bridge)
+ return;
+
+ entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
+ if (!entry) {
+ esw_debug(br_offloads->esw->dev,
+ "FDB mark deleted entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+ fdb_info->addr, fdb_info->vid, vport_num);
+ return;
+ }
+
+ entry->flags |= MLX5_ESW_BRIDGE_FLAG_DELETED;
+}
+
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct switchdev_notifier_fdb_info *fdb_info)
@@ -1810,7 +1832,8 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
unsigned long lastuse =
(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);
- if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
+ if (entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER |
+ MLX5_ESW_BRIDGE_FLAG_DELETED))
continue;
if (time_after(lastuse, entry->lastuse))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
index c2c7c70d99eb..d6f539161993 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -62,6 +62,9 @@ int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_nu
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct switchdev_notifier_fdb_info *fdb_info);
+void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct switchdev_notifier_fdb_info *fdb_info);
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct switchdev_notifier_fdb_info *fdb_info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
index 4911cc32161b..7c251af566c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -133,6 +133,7 @@ struct mlx5_esw_bridge_mdb_key {
enum {
MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
MLX5_ESW_BRIDGE_FLAG_PEER = BIT(1),
+ MLX5_ESW_BRIDGE_FLAG_DELETED = BIT(2),
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d4cde6555063..8d0b915a3121 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1038,11 +1038,8 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(err);
}
-static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
+static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
{
- MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
- mlx5_eq_notifier_register(esw->dev, &esw->nb);
-
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
ESW_FUNCTIONS_CHANGED);
@@ -1050,13 +1047,11 @@ static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
}
}
-static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
+static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw)
{
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
- mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
-
flush_workqueue(esw->work_queue);
}
@@ -1483,6 +1478,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
+ MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+ mlx5_eq_notifier_register(esw->dev, &esw->nb);
+
if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
@@ -1495,7 +1493,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
- mlx5_eswitch_event_handlers_register(esw);
+ mlx5_eswitch_event_handler_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
@@ -1622,7 +1620,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
*/
mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
- mlx5_eswitch_event_handlers_unregister(esw);
+ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+ mlx5_eswitch_event_handler_unregister(esw);
esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index bb8eeb86edf7..52c2fe3644d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -310,8 +310,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
-static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
- bool learning_en)
+static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
+ bool learning_en)
{
char tnpc_pl[MLXSW_REG_TNPC_LEN];
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index f21cf1f40f98..153533cd8f08 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
struct nfp_flower_cmsg_merge_hint *msg;
struct nfp_fl_payload *sub_flows[2];
+ struct nfp_flower_priv *priv;
int err, i, flow_cnt;
msg = nfp_flower_cmsg_get_data(skb);
@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
return;
}
- rtnl_lock();
+ priv = app->priv;
+ mutex_lock(&priv->nfp_fl_lock);
for (i = 0; i < flow_cnt; i++) {
u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
if (!sub_flows[i]) {
nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
- goto err_rtnl_unlock;
+ goto err_mutex_unlock;
}
}
@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
if (err == -ENOMEM)
nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
-err_rtnl_unlock:
- rtnl_unlock();
+err_mutex_unlock:
+ mutex_unlock(&priv->nfp_fl_lock);
}
static void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 2643c4b3ff1f..2967bab72505 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -2131,8 +2131,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
struct nfp_fl_ct_flow_entry *ct_entry;
struct netlink_ext_ack *extack = NULL;
- ASSERT_RTNL();
-
extack = flow->common.extack;
switch (flow->command) {
case FLOW_CLS_REPLACE:
@@ -2178,9 +2176,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb
switch (type) {
case TC_SETUP_CLSFLOWER:
- rtnl_lock();
+ while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
+ if (!zt->nft) /* avoid deadlock */
+ return err;
+ msleep(20);
+ }
err = nfp_fl_ct_offload_nft_flow(zt, flow);
- rtnl_unlock();
+ mutex_unlock(&zt->priv->nfp_fl_lock);
break;
default:
return -EOPNOTSUPP;
@@ -2208,6 +2210,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
struct nfp_fl_ct_flow_entry *ct_entry;
struct nfp_fl_ct_zone_entry *zt;
struct rhashtable *m_table;
+ struct nf_flowtable *nft;
if (!ct_map_ent)
return -ENOENT;
@@ -2226,8 +2229,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
if (ct_map_ent->cookie > 0)
kfree(ct_map_ent);
- if (!zt->pre_ct_count) {
- zt->nft = NULL;
+ if (!zt->pre_ct_count && zt->nft) {
+ nft = zt->nft;
+ zt->nft = NULL; /* avoid deadlock */
+ nf_flow_table_offload_del_cb(nft,
+ nfp_fl_ct_handle_nft_flow,
+ zt);
nfp_fl_ct_clean_nft_entries(zt);
}
break;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 40372545148e..2b7c947ff4f2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -297,6 +297,7 @@ struct nfp_fl_internal_ports {
* @predt_list: List to keep track of decap pretun flows
* @neigh_table: Table to keep track of neighbor entries
* @predt_lock: Lock to serialise predt/neigh table updates
+ * @nfp_fl_lock: Lock to protect the flow offload operation
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -339,6 +340,7 @@ struct nfp_flower_priv {
struct list_head predt_list;
struct rhashtable neigh_table;
spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */
+ struct mutex nfp_fl_lock; /* Protect the flow operation */
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 0f06ef6e24bf..80e4675582bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
goto err_free_stats_ctx_table;
+ mutex_init(&priv->nfp_fl_lock);
+
err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
if (err)
goto err_free_merge_table;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index c153f0575b92..0aceef9fe582 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
u64 parent_ctx = 0;
int err;
- ASSERT_RTNL();
-
if (sub_flow1 == sub_flow2 ||
nfp_flower_is_merge_flow(sub_flow1) ||
nfp_flower_is_merge_flow(sub_flow2))
@@ -1727,19 +1725,30 @@ static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flower)
{
+ struct nfp_flower_priv *priv = app->priv;
+ int ret;
+
if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
+ mutex_lock(&priv->nfp_fl_lock);
switch (flower->command) {
case FLOW_CLS_REPLACE:
- return nfp_flower_add_offload(app, netdev, flower);
+ ret = nfp_flower_add_offload(app, netdev, flower);
+ break;
case FLOW_CLS_DESTROY:
- return nfp_flower_del_offload(app, netdev, flower);
+ ret = nfp_flower_del_offload(app, netdev, flower);
+ break;
case FLOW_CLS_STATS:
- return nfp_flower_get_stats(app, netdev, flower);
+ ret = nfp_flower_get_stats(app, netdev, flower);
+ break;
default:
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
+ mutex_unlock(&priv->nfp_fl_lock);
+
+ return ret;
}
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
repr_priv = repr->app_priv;
repr_priv->block_shared = f->block_shared;
f->driver_block_list = &nfp_block_cb_list;
+ f->unlocked_driver_cb = true;
switch (f->command) {
case FLOW_BLOCK_BIND:
@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;
+ f->unlocked_driver_cb = true;
+
switch (f->command) {
case FLOW_BLOCK_BIND:
cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 99052a925d9e..e7180b4793c7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
{
struct netlink_ext_ack *extack = flow->common.extack;
struct nfp_flower_priv *fl_priv = app->priv;
+ int ret;
if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
return -EOPNOTSUPP;
}
+ mutex_lock(&fl_priv->nfp_fl_lock);
switch (flow->command) {
case TC_CLSMATCHALL_REPLACE:
- return nfp_flower_install_rate_limiter(app, netdev, flow,
- extack);
+ ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack);
+ break;
case TC_CLSMATCHALL_DESTROY:
- return nfp_flower_remove_rate_limiter(app, netdev, flow,
- extack);
+ ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack);
+ break;
case TC_CLSMATCHALL_STATS:
- return nfp_flower_stats_rate_limiter(app, netdev, flow,
- extack);
+ ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack);
+ break;
default:
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
+ mutex_unlock(&fl_priv->nfp_fl_lock);
+
+ return ret;
}
/* Offload tc action, currently only for tc police */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 717a0b3f89bd..ab5ef254a748 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -113,7 +113,10 @@ static void qed_ll2b_complete_tx_packet(void *cxt,
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
u8 **data, dma_addr_t *phys_addr)
{
- *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
+ size_t size = cdev->ll2->rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ *data = kmalloc(size, GFP_ATOMIC);
if (!(*data)) {
DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
return -ENOMEM;
@@ -2589,7 +2592,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
INIT_LIST_HEAD(&cdev->ll2->list);
spin_lock_init(&cdev->ll2->lock);
- cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+ cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN +
L1_CACHE_BYTES + params->mtu;
/* Allocate memory for LL2.
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 6351a2dc13bc..361b90007148 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4364,7 +4364,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
unsigned int entry = dirty_tx % NUM_TX_DESC;
u32 status;
- status = le32_to_cpu(tp->TxDescArray[entry].opts1);
+ status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
if (status & DescOwn)
break;
@@ -4394,7 +4394,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
* If skb is NULL then we come here again once a tx irq is
* triggered after the last fragment is marked transmitted.
*/
- if (tp->cur_tx != dirty_tx && skb)
+ if (READ_ONCE(tp->cur_tx) != dirty_tx && skb)
rtl8169_doorbell(tp);
}
}
@@ -4427,7 +4427,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget
dma_addr_t addr;
u32 status;
- status = le32_to_cpu(desc->opts1);
+ status = le32_to_cpu(READ_ONCE(desc->opts1));
if (status & DescOwn)
break;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 7df9f9f8e134..0ef0b88b7145 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2167,6 +2167,8 @@ static int ravb_close(struct net_device *ndev)
of_phy_deregister_fixed_link(np);
}
+ cancel_work_sync(&priv->work);
+
if (info->multi_irqs) {
free_irq(priv->tx_irqs[RAVB_NC], ndev);
free_irq(priv->rx_irqs[RAVB_NC], ndev);
@@ -2891,8 +2893,6 @@ static int ravb_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
- dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
- priv->desc_bat_dma);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
unregister_netdev(ndev);
@@ -2900,6 +2900,8 @@ static int ravb_remove(struct platform_device *pdev)
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
reset_control_assert(priv->rstc);
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index fc01ad3f340d..0fc0b6bea753 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1254,7 +1254,7 @@ static void rswitch_adjust_link(struct net_device *ndev)
phy_print_status(phydev);
if (phydev->link)
phy_power_on(rdev->serdes);
- else
+ else if (rdev->serdes->power_count)
phy_power_off(rdev->serdes);
rdev->etha->link = phydev->link;
@@ -1964,15 +1964,17 @@ static void rswitch_deinit(struct rswitch_private *priv)
rswitch_gwca_hw_deinit(priv);
rcar_gen4_ptp_unregister(priv->ptp_priv);
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
struct rswitch_device *rdev = priv->rdev[i];
- phy_exit(priv->rdev[i]->serdes);
- rswitch_ether_port_deinit_one(rdev);
unregister_netdev(rdev->ndev);
- rswitch_device_free(priv, i);
+ rswitch_ether_port_deinit_one(rdev);
+ phy_exit(priv->rdev[i]->serdes);
}
+ for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+ rswitch_device_free(priv, i);
+
rswitch_gwca_ts_queue_free(priv);
rswitch_gwca_linkfix_free(priv);
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 834f000ba1c4..30ebef88248d 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -629,14 +629,14 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
}
if (child_ip_tos_mask != old->child_ip_tos_mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(MASK) entry for TOS mask %#04x",
+ "Pseudo encap match for TOS mask %#04x conflicts with existing mask %#04x",
child_ip_tos_mask,
old->child_ip_tos_mask);
return -EEXIST;
}
if (child_udp_sport_mask != old->child_udp_sport_mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Pseudo encap match for UDP src port mask %#x conflicts with existing pseudo(MASK) entry for mask %#x",
+ "Pseudo encap match for UDP src port mask %#x conflicts with existing mask %#x",
child_udp_sport_mask,
old->child_udp_sport_mask);
return -EEXIST;
@@ -1081,7 +1081,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that we do not decrement ttl twice */
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported");
return -EOPNOTSUPP;
}
act->do_ttl_dec = 1;
@@ -1106,7 +1106,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that we do not decrement hoplimit twice */
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported");
return -EOPNOTSUPP;
}
act->do_ttl_dec = 1;
@@ -1120,7 +1120,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
}
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: ttl add action type %x %x %x/%x",
+ "ttl add action type %x %x %x/%x is not supported",
fa->mangle.htype, fa->mangle.offset,
fa->mangle.val, fa->mangle.mask);
return -EOPNOTSUPP;
@@ -1164,7 +1164,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
case 0:
if (fa->mangle.mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) of eth.dst32 mangle",
+ "mask (%#x) of eth.dst32 mangle is not supported",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1184,7 +1184,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
mung->dst_mac_16 = 1;
} else {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b",
+ "mask (%#x) of eth+4 mangle is not high or low 16b",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1192,7 +1192,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
case 8:
if (fa->mangle.mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) of eth.src32 mangle",
+ "mask (%#x) of eth.src32 mangle is not supported",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1201,7 +1201,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
mung->src_mac_32 = 1;
return efx_tc_complete_mac_mangle(efx, act, mung, extack);
default:
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported: mangle eth+%u %x/%x",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "mangle eth+%u %x/%x is not supported",
fa->mangle.offset, fa->mangle.val, fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1217,7 +1217,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that pedit applies to ttl only */
if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) out of range, only support mangle action on ipv4.ttl",
+ "mask (%#x) out of range, only support mangle action on ipv4.ttl",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1227,7 +1227,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->mask.ip_ttl != U8_MAX) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle ipv4.ttl when we have an exact match on ttl, mask used for match (%#x)",
+ "only support mangle ttl when we have an exact match, current mask (%#x)",
match->mask.ip_ttl);
return -EOPNOTSUPP;
}
@@ -1237,7 +1237,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->value.ip_ttl == 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: we cannot decrement ttl past 0");
+ "decrement ttl past 0 is not supported");
return -EOPNOTSUPP;
}
@@ -1245,7 +1245,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: multiple dec ttl");
+ "multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
@@ -1259,7 +1259,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
fallthrough;
default:
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle on the ttl field (offset is %u)",
+ "only support mangle on the ttl field (offset is %u)",
fa->mangle.offset);
return -EOPNOTSUPP;
}
@@ -1275,7 +1275,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that pedit applies to ttl only */
if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
+ "mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
fa->mangle.mask);
return -EOPNOTSUPP;
@@ -1286,7 +1286,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->mask.ip_ttl != U8_MAX) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle ipv6.hop_limit when we have an exact match on ttl, mask used for match (%#x)",
+ "only support hop_limit when we have an exact match, current mask (%#x)",
match->mask.ip_ttl);
return -EOPNOTSUPP;
}
@@ -1296,7 +1296,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->value.ip_ttl == 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: we cannot decrement hop_limit past 0");
+ "decrementing hop_limit past 0 is not supported");
return -EOPNOTSUPP;
}
@@ -1304,7 +1304,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: multiple dec ttl");
+ "multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
@@ -1318,7 +1318,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
fallthrough;
default:
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle on the hop_limit field");
+ "only support mangle on the hop_limit field");
return -EOPNOTSUPP;
}
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ed1a5a31a491..5801f4d50f95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1197,6 +1197,17 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
+static void stmmac_set_half_duplex(struct stmmac_priv *priv)
+{
+ /* Half-Duplex can only work with single tx queue */
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->phylink_config.mac_capabilities &=
+ ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ else
+ priv->phylink_config.mac_capabilities |=
+ (MAC_10HD | MAC_100HD | MAC_1000HD);
+}
+
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1228,10 +1239,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
MAC_10FD | MAC_100FD |
MAC_1000FD;
- /* Half-Duplex can only work with single queue */
- if (priv->plat->tx_queues_to_use <= 1)
- priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
- MAC_1000HD;
+ stmmac_set_half_duplex(priv);
/* Get the MAC specific capabilities */
stmmac_mac_phylink_get_caps(priv);
@@ -7172,6 +7180,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
+ stmmac_set_half_duplex(priv);
stmmac_napi_add(dev);
if (netif_running(dev))
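
The stmmac hunks above pull the half-duplex decision into stmmac_set_half_duplex() so it is re-evaluated both at phylink setup and when the queue count changes in stmmac_reinit_queues(). The standalone sketch below illustrates the same mask-toggling idea; the CAP_* names and the helper are invented stand-ins for illustration, not the stmmac code.

#include <stdio.h>

/* Hypothetical capability bits, standing in for MAC_10HD/MAC_100HD/MAC_1000HD */
#define CAP_10HD   (1u << 0)
#define CAP_100HD  (1u << 1)
#define CAP_1000HD (1u << 2)
#define CAP_HALF_DUPLEX_ALL (CAP_10HD | CAP_100HD | CAP_1000HD)

/* Re-evaluate half-duplex support whenever the TX queue count changes:
 * half duplex is only usable with a single TX queue, so the capability
 * bits are cleared for multi-queue configurations and restored otherwise.
 */
static unsigned int set_half_duplex(unsigned int caps, unsigned int tx_queues)
{
	if (tx_queues > 1)
		caps &= ~CAP_HALF_DUPLEX_ALL;
	else
		caps |= CAP_HALF_DUPLEX_ALL;
	return caps;
}

int main(void)
{
	unsigned int caps = 0;

	caps = set_half_duplex(caps, 1);
	printf("1 queue:  caps=%#x\n", caps);  /* half-duplex bits set */
	caps = set_half_duplex(caps, 4);
	printf("4 queues: caps=%#x\n", caps);  /* half-duplex bits cleared */
	return 0;
}
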
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 0a3346650e03..cac61f5d3fd4 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -90,12 +90,16 @@ config TI_CPTS
The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
driver offers a PTP Hardware Clock.
+config TI_K3_CPPI_DESC_POOL
+ tristate
+
config TI_K3_AM65_CPSW_NUSS
tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select NET_DEVLINK
select TI_DAVINCI_MDIO
select PHYLINK
+ select TI_K3_CPPI_DESC_POOL
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
@@ -187,6 +191,7 @@ config TI_ICSSG_PRUETH
tristate "TI Gigabit PRU Ethernet driver"
select PHYLIB
select TI_ICSS_IEP
+ select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
help
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 34fd7a716ba6..67bed861f31d 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -24,14 +24,15 @@ keystone_netcp-y := netcp_core.o cpsw_ale.o
obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
+obj-$(CONFIG_TI_K3_CPPI_DESC_POOL) += k3-cppi-desc-pool.o
+
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
-ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o
-icssg-prueth-y := k3-cppi-desc-pool.o \
- icssg/icssg_prueth.o \
+icssg-prueth-y := icssg/icssg_prueth.o \
icssg/icssg_classifier.o \
icssg/icssg_queues.o \
icssg/icssg_config.o \
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 933b84666574..b272361e378f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -379,9 +379,9 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
/* Bitmask for ICSSG r30 commands */
static const struct icssg_r30_cmd emac_r32_bitmask[] = {
- {{0xffff0004, 0xffff0100, 0xffff0100, EMAC_NONE}}, /* EMAC_PORT_DISABLE */
+ {{0xffff0004, 0xffff0100, 0xffff0004, EMAC_NONE}}, /* EMAC_PORT_DISABLE */
{{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}}, /* EMAC_PORT_BLOCK */
- {{0xffbb0000, 0xfcff0000, 0xdcff0000, EMAC_NONE}}, /* EMAC_PORT_FORWARD */
+ {{0xffbb0000, 0xfcff0000, 0xdcfb0000, EMAC_NONE}}, /* EMAC_PORT_FORWARD */
{{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}}, /* EMAC_PORT_FORWARD_WO_LEARNING */
{{0xffff0001, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT ALL */
{{0xfffe0002, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT TAGGED */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index bb0b33927e3b..3dbadddd7e35 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -9,6 +9,9 @@
#include "icssg_stats.h"
#include <linux/regmap.h>
+#define ICSSG_TX_PACKET_OFFSET 0xA0
+#define ICSSG_TX_BYTE_OFFSET 0xEC
+
static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
0xb18, /* Slice 1 stats start */
};
@@ -18,6 +21,7 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
u32 base = stats_base[slice];
+ u32 tx_pkt_cnt = 0;
u32 val;
int i;
@@ -29,7 +33,12 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
base + icssg_all_stats[i].offset,
val);
+ if (icssg_all_stats[i].offset == ICSSG_TX_PACKET_OFFSET)
+ tx_pkt_cnt = val;
+
emac->stats[i] += val;
+ if (icssg_all_stats[i].offset == ICSSG_TX_BYTE_OFFSET)
+ emac->stats[i] -= tx_pkt_cnt * 8;
}
}
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
index 38cc12f9f133..05cc7aab1ec8 100644
--- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -39,6 +39,7 @@ void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
gen_pool_destroy(pool->gen_pool); /* frees pool->name */
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_destroy);
struct k3_cppi_desc_pool *
k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
@@ -98,29 +99,38 @@ gen_pool_create_fail:
devm_kfree(pool->dev, pool);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_create_name);
dma_addr_t k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool,
void *addr)
{
return addr ? pool->dma_addr + (addr - pool->cpumem) : 0;
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_virt2dma);
void *k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma)
{
return dma ? pool->cpumem + (dma - pool->dma_addr) : NULL;
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_dma2virt);
void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool)
{
return (void *)gen_pool_alloc(pool->gen_pool, pool->desc_size);
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_alloc);
void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr)
{
gen_pool_free(pool->gen_pool, (unsigned long)addr, pool->desc_size);
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_free);
size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool)
{
return gen_pool_avail(pool->gen_pool) / pool->desc_size;
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_avail);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 CPPI5 descriptors pool API");
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index dc14a66583ff..44488c153ea2 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
key_index = wl->current_key;
if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
- /* reques to change default key index */
+ /* request to change default key index */
pr_debug("%s: request to change default key to %d\n",
__func__, key_index);
wl->current_key = key_index;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 144ec626230d..b22596b18ee8 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -872,8 +872,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
skb_dst_update_pmtu_no_confirm(skb, mtu);
- if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
- mtu < ntohs(iph->tot_len)) {
+ if (iph->frag_off & htons(IP_DF) &&
+ ((!skb_is_gso(skb) && skb->len > mtu) ||
+ (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index a03490ba2e5b..cc7ddc40020f 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
static void adf7242_debugfs_init(struct adf7242_local *lp)
{
- char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
+ char debugfs_dir_name[DNAME_INLINE_LEN + 1];
- strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
+ snprintf(debugfs_dir_name, sizeof(debugfs_dir_name),
+ "adf7242-%s", dev_name(&lp->spi->dev));
lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index aebb19f1b3a4..4ec0dab38872 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2740,7 +2740,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
struct device_node *np = spi->dev.of_node;
struct ca8210_priv *priv = spi_get_drvdata(spi);
struct ca8210_platform_data *pdata = spi->dev.platform_data;
- int ret = 0;
if (!np)
return -EFAULT;
@@ -2757,18 +2756,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
dev_crit(&spi->dev, "Failed to register external clk\n");
return PTR_ERR(priv->clk);
}
- ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
- if (ret) {
- clk_unregister(priv->clk);
- dev_crit(
- &spi->dev,
- "Failed to register external clock as clock provider\n"
- );
- } else {
- dev_info(&spi->dev, "External clock set as clock provider\n");
- }
- return ret;
+ return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
}
/**
@@ -2780,8 +2769,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
{
struct ca8210_priv *priv = spi_get_drvdata(spi);
- if (!priv->clk)
- return
+ if (IS_ERR_OR_NULL(priv->clk))
+ return;
of_clk_del_provider(spi->dev.of_node);
clk_unregister(priv->clk);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index b7e151439c48..c5cd4551c67c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2383,6 +2383,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
+ ctx.sa.update_pn = !!prev_pn.full64;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
@@ -2476,6 +2477,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
+ ctx.sa.update_pn = !!prev_pn.full64;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index a881e3523328..bef4cce71287 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -55,6 +55,27 @@ out:
return r;
}
+static int mdio_mux_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum)
+{
+ struct mdio_mux_child_bus *cb = bus->priv;
+ struct mdio_mux_parent_bus *pb = cb->parent;
+ int r;
+
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
+ r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
+ if (r)
+ goto out;
+
+ pb->current_child = cb->bus_number;
+
+ r = pb->mii_bus->read_c45(pb->mii_bus, phy_id, dev_addr, regnum);
+out:
+ mutex_unlock(&pb->mii_bus->mdio_lock);
+
+ return r;
+}
+
/*
* The parent bus' lock is used to order access to the switch_fn.
*/
@@ -80,6 +101,28 @@ out:
return r;
}
+static int mdio_mux_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 val)
+{
+ struct mdio_mux_child_bus *cb = bus->priv;
+ struct mdio_mux_parent_bus *pb = cb->parent;
+
+ int r;
+
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
+ r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
+ if (r)
+ goto out;
+
+ pb->current_child = cb->bus_number;
+
+ r = pb->mii_bus->write_c45(pb->mii_bus, phy_id, dev_addr, regnum, val);
+out:
+ mutex_unlock(&pb->mii_bus->mdio_lock);
+
+ return r;
+}
+
static int parent_count;
static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
@@ -173,6 +216,10 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->parent = dev;
cb->mii_bus->read = mdio_mux_read;
cb->mii_bus->write = mdio_mux_write;
+ if (parent_bus->read_c45)
+ cb->mii_bus->read_c45 = mdio_mux_read_c45;
+ if (parent_bus->write_c45)
+ cb->mii_bus->write_c45 = mdio_mux_write_c45;
r = of_mdiobus_register(cb->mii_bus, child_bus_node);
if (r) {
mdiobus_free(cb->mii_bus);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 8478b081c058..97638ba7ae85 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -894,6 +894,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.name = _name, \
/* PHY_BASIC_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
+ .get_sset_count = bcm_phy_get_sset_count, \
+ .get_strings = bcm_phy_get_strings, \
+ .get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
.config_init = bcm7xxx_16nm_ephy_config_init, \
.config_aneg = genphy_config_aneg, \
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index 018253a573b8..4f39ba63a9a9 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -849,6 +849,9 @@ static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
struct macsec_flow *flow;
int ret;
+ if (ctx->sa.update_pn)
+ return -EINVAL;
+
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
@@ -900,6 +903,9 @@ static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
struct macsec_flow *flow;
int ret;
+ if (ctx->sa.update_pn)
+ return -EINVAL;
+
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 89ab9efe522c..afa5497f7c35 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -3073,10 +3073,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct net *net = sock_net(&tfile->sk);
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
- unsigned int ifindex, carrier;
+ unsigned int carrier;
struct ifreq ifr;
kuid_t owner;
kgid_t group;
+ int ifindex;
int sndbuf;
int vnet_hdr_sz;
int le;
@@ -3132,7 +3133,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EFAULT;
if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
goto unlock;
-
+ ret = -EINVAL;
+ if (ifindex < 0)
+ goto unlock;
ret = 0;
tfile->ifindex = ifindex;
goto unlock;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 48d7d278631e..99ec1d4a972d 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -222,13 +222,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
+ int err;
if (phy_id) {
netdev_dbg(dev->net, "Only internal phy supported\n");
return 0;
}
- dm_read_shared_word(dev, 1, loc, &res);
+ err = dm_read_shared_word(dev, 1, loc, &res);
+ if (err < 0) {
+ netdev_err(dev->net, "MDIO read error: %d\n", err);
+ return err;
+ }
netdev_dbg(dev->net,
"dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0c13d9950cd8..afb20c0ed688 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -764,7 +764,7 @@ enum rtl_register_content {
/* rtl8152 flags */
enum rtl8152_flags {
- RTL8152_UNPLUG = 0,
+ RTL8152_INACCESSIBLE = 0,
RTL8152_SET_RX_MODE,
WORK_ENABLE,
RTL8152_LINK_CHG,
@@ -773,6 +773,9 @@ enum rtl8152_flags {
SCHEDULE_TASKLET,
GREEN_ETHERNET,
RX_EPROTO,
+ IN_PRE_RESET,
+ PROBED_WITH_NO_ERRORS,
+ PROBE_SHOULD_RETRY,
};
#define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e
@@ -953,6 +956,8 @@ struct r8152 {
u8 version;
u8 duplex;
u8 autoneg;
+
+ unsigned int reg_access_reset_count;
};
/**
@@ -1200,6 +1205,96 @@ static unsigned int agg_buf_sz = 16384;
#define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc))
+/* If register access fails then we block access and issue a reset. If this
+ * happens too many times in a row without a successful access then we stop
+ * trying to reset and just leave access blocked.
+ */
+#define REGISTER_ACCESS_MAX_RESETS 3
+
+static void rtl_set_inaccessible(struct r8152 *tp)
+{
+ set_bit(RTL8152_INACCESSIBLE, &tp->flags);
+ smp_mb__after_atomic();
+}
+
+static void rtl_set_accessible(struct r8152 *tp)
+{
+ clear_bit(RTL8152_INACCESSIBLE, &tp->flags);
+ smp_mb__after_atomic();
+}
+
+static
+int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index, void *data,
+ __u16 size, const char *msg_tag)
+{
+ struct usb_device *udev = tp->udev;
+ int ret;
+
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ ret = usb_control_msg(udev, pipe, request, requesttype,
+ value, index, data, size,
+ USB_CTRL_GET_TIMEOUT);
+
+ /* No need to issue a reset to report an error if the USB device got
+ * unplugged; just return immediately.
+ */
+ if (ret == -ENODEV)
+ return ret;
+
+ /* If the write was successful then we're done */
+ if (ret >= 0) {
+ tp->reg_access_reset_count = 0;
+ return ret;
+ }
+
+ dev_err(&udev->dev,
+ "Failed to %s %d bytes at %#06x/%#06x (%d)\n",
+ msg_tag, size, value, index, ret);
+
+ /* Block all future register access until we reset. Much of the code
+ * in the driver doesn't check for errors. Notably, many parts of the
+ * driver do a read/modify/write of a register value without
+ * confirming that the read succeeded. Writing back modified garbage
+ * like this can fully wedge the adapter, requiring a power cycle.
+ */
+ rtl_set_inaccessible(tp);
+
+ /* If probe hasn't yet finished, then we'll request a retry of the
+ * whole probe routine if we get any control transfer errors. We
+ * never have to clear this bit since we free/reallocate the whole "tp"
+ * structure if we retry probe.
+ */
+ if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) {
+ set_bit(PROBE_SHOULD_RETRY, &tp->flags);
+ return ret;
+ }
+
+ /* Failing to access registers in pre-reset is not surprising since we
+ * wouldn't be resetting if things were behaving normally. The register
+ * access we do in pre-reset isn't truly mandatory--we're just reusing
+ * the disable() function and trying to be nice by powering the
+ * adapter down before resetting it. Thus, if we're in pre-reset,
+ * we'll return right away and not try to queue up yet another reset.
+ * We know the post-reset is already coming.
+ */
+ if (test_bit(IN_PRE_RESET, &tp->flags))
+ return ret;
+
+ if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) {
+ usb_queue_reset_device(tp->intf);
+ tp->reg_access_reset_count++;
+ } else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) {
+ dev_err(&udev->dev,
+ "Tried to reset %d times; giving up.\n",
+ REGISTER_ACCESS_MAX_RESETS);
+ }
+
+ return ret;
+}
+
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
@@ -1210,9 +1305,10 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
- ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in,
- RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- value, index, tmp, size, 500);
+ ret = r8152_control_msg(tp, tp->pipe_ctrl_in,
+ RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+ value, index, tmp, size, "read");
+
if (ret < 0)
memset(data, 0xff, size);
else
@@ -1233,9 +1329,9 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
- ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out,
- RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
- value, index, tmp, size, 500);
+ ret = r8152_control_msg(tp, tp->pipe_ctrl_out,
+ RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
+ value, index, tmp, size, "write");
kfree(tmp);
@@ -1244,10 +1340,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
static void rtl_set_unplug(struct r8152 *tp)
{
- if (tp->udev->state == USB_STATE_NOTATTACHED) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
- smp_mb__after_atomic();
- }
+ if (tp->udev->state == USB_STATE_NOTATTACHED)
+ rtl_set_inaccessible(tp);
}
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@@ -1256,7 +1350,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
u16 limit = 64;
int ret = 0;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
/* both size and indix must be 4 bytes align */
@@ -1300,7 +1394,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
u16 byteen_start, byteen_end, byen;
u16 limit = 512;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
/* both size and indix must be 4 bytes align */
@@ -1537,7 +1631,7 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
struct r8152 *tp = netdev_priv(netdev);
int ret;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
if (phy_id != R8152_PHY_ID)
@@ -1553,7 +1647,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
{
struct r8152 *tp = netdev_priv(netdev);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (phy_id != R8152_PHY_ID)
@@ -1758,7 +1852,7 @@ static void read_bulk_callback(struct urb *urb)
if (!tp)
return;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -1850,7 +1944,7 @@ static void write_bulk_callback(struct urb *urb)
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!skb_queue_empty(&tp->tx_queue))
@@ -1871,7 +1965,7 @@ static void intr_callback(struct urb *urb)
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
switch (status) {
@@ -2615,7 +2709,7 @@ static void bottom_half(struct tasklet_struct *t)
{
struct r8152 *tp = from_tasklet(tp, t, tx_tl);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -2658,7 +2752,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
int ret;
/* The rx would be stopped, so skip submitting */
- if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
!test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
return 0;
@@ -3058,7 +3152,7 @@ static int rtl_enable(struct r8152 *tp)
static int rtl8152_enable(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@@ -3145,7 +3239,7 @@ static int rtl8153_enable(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@@ -3177,7 +3271,7 @@ static void rtl_disable(struct r8152 *tp)
u32 ocp_data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -3631,7 +3725,7 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
}
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@@ -3663,6 +3757,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@@ -3703,6 +3799,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@@ -4046,6 +4144,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
for (i = 0; wait && i < 5000; i++) {
u32 ocp_data;
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
usleep_range(1000, 2000);
ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
if ((ocp_data & PATCH_READY) ^ check)
@@ -6002,7 +6103,7 @@ static int rtl8156_enable(struct r8152 *tp)
u32 ocp_data;
u16 speed;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
r8156_fc_parameter(tp);
@@ -6060,7 +6161,7 @@ static int rtl8156b_enable(struct r8152 *tp)
u32 ocp_data;
u16 speed;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@@ -6246,7 +6347,7 @@ out:
static void rtl8152_up(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8152_aldps_en(tp, false);
@@ -6256,7 +6357,7 @@ static void rtl8152_up(struct r8152 *tp)
static void rtl8152_down(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6271,7 +6372,7 @@ static void rtl8153_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_u1u2en(tp, false);
@@ -6311,7 +6412,7 @@ static void rtl8153_down(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6332,7 +6433,7 @@ static void rtl8153b_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -6356,7 +6457,7 @@ static void rtl8153b_down(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6393,7 +6494,7 @@ static void rtl8153c_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -6474,7 +6575,7 @@ static void rtl8156_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -6547,7 +6648,7 @@ static void rtl8156_down(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6685,7 +6786,7 @@ static void rtl_work_func_t(struct work_struct *work)
/* If the device is unplugged or !netif_running(), the workqueue
* doesn't need to wake the device, and could return directly.
*/
- if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev))
return;
if (usb_autopm_get_interface(tp->intf) < 0)
@@ -6724,7 +6825,7 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (usb_autopm_get_interface(tp->intf) < 0)
@@ -6851,7 +6952,7 @@ static int rtl8152_close(struct net_device *netdev)
netif_stop_queue(netdev);
res = usb_autopm_get_interface(tp->intf);
- if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
rtl_stop_rx(tp);
} else {
@@ -6884,7 +6985,7 @@ static void r8152b_init(struct r8152 *tp)
u32 ocp_data;
u16 data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
data = r8152_mdio_read(tp, MII_BMCR);
@@ -6928,7 +7029,7 @@ static void r8153_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_u1u2en(tp, false);
@@ -6939,7 +7040,7 @@ static void r8153_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@@ -7068,7 +7169,7 @@ static void r8153b_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -7079,7 +7180,7 @@ static void r8153b_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@@ -7150,7 +7251,7 @@ static void r8153c_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -7170,7 +7271,7 @@ static void r8153c_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@@ -7999,7 +8100,7 @@ static void r8156_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@@ -8020,7 +8121,7 @@ static void r8156_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@@ -8095,7 +8196,7 @@ static void r8156b_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@@ -8129,7 +8230,7 @@ static void r8156b_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@@ -8255,7 +8356,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
- if (!tp)
+ if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
return 0;
netdev = tp->netdev;
@@ -8270,7 +8371,9 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
napi_disable(&tp->napi);
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
+ set_bit(IN_PRE_RESET, &tp->flags);
tp->rtl_ops.disable(tp);
+ clear_bit(IN_PRE_RESET, &tp->flags);
mutex_unlock(&tp->control);
}
@@ -8283,9 +8386,11 @@ static int rtl8152_post_reset(struct usb_interface *intf)
struct net_device *netdev;
struct sockaddr sa;
- if (!tp)
+ if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
return 0;
+ rtl_set_accessible(tp);
+
/* reset the MAC address in case of policy change */
if (determine_ethernet_addr(tp, &sa) >= 0) {
rtnl_lock();
@@ -9158,7 +9263,7 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *data = if_mii(rq);
int res;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
res = usb_autopm_get_interface(tp->intf);
@@ -9260,7 +9365,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
static void rtl8152_unload(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (tp->version != RTL_VER_01)
@@ -9269,7 +9374,7 @@ static void rtl8152_unload(struct r8152 *tp)
static void rtl8153_unload(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_power_cut_en(tp, false);
@@ -9277,7 +9382,7 @@ static void rtl8153_unload(struct r8152 *tp)
static void rtl8153b_unload(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_power_cut_en(tp, false);
@@ -9487,16 +9592,29 @@ static u8 __rtl_get_hw_ver(struct usb_device *udev)
__le32 *tmp;
u8 version;
int ret;
+ int i;
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return 0;
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret > 0)
- ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+ /* Retry up to 3 times in case there is a transitory error. We do this
+ * since retrying a read of the version is always safe and this
+ * function doesn't take advantage of r8152_control_msg().
+ */
+ for (i = 0; i < 3; i++) {
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+ PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
+ USB_CTRL_GET_TIMEOUT);
+ if (ret > 0) {
+ ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+ break;
+ }
+ }
+
+ if (i != 0 && ret > 0)
+ dev_warn(&udev->dev, "Needed %d retries to read version\n", i);
kfree(tmp);
@@ -9595,25 +9713,14 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
return 0;
}
-static int rtl8152_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int rtl8152_probe_once(struct usb_interface *intf,
+ const struct usb_device_id *id, u8 version)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct r8152 *tp;
struct net_device *netdev;
- u8 version;
int ret;
- if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
- return -ENODEV;
-
- if (!rtl_check_vendor_ok(intf))
- return -ENODEV;
-
- version = rtl8152_get_version(intf);
- if (version == RTL_VER_UNKNOWN)
- return -ENODEV;
-
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
@@ -9776,18 +9883,68 @@ static int rtl8152_probe(struct usb_interface *intf,
else
device_set_wakeup_enable(&udev->dev, false);
+ /* If we saw a control transfer error while probing then we may
+ * want to try probe() again. Consider this an error.
+ */
+ if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
+ goto out2;
+
+ set_bit(PROBED_WITH_NO_ERRORS, &tp->flags);
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
+out2:
+ unregister_netdev(netdev);
+
out1:
tasklet_kill(&tp->tx_tl);
+ cancel_delayed_work_sync(&tp->hw_phy_work);
+ if (tp->rtl_ops.unload)
+ tp->rtl_ops.unload(tp);
+ rtl8152_release_firmware(tp);
usb_set_intfdata(intf, NULL);
out:
+ if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
+ ret = -EAGAIN;
+
free_netdev(netdev);
return ret;
}
+#define RTL8152_PROBE_TRIES 3
+
+static int rtl8152_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ u8 version;
+ int ret;
+ int i;
+
+ if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+ return -ENODEV;
+
+ if (!rtl_check_vendor_ok(intf))
+ return -ENODEV;
+
+ version = rtl8152_get_version(intf);
+ if (version == RTL_VER_UNKNOWN)
+ return -ENODEV;
+
+ for (i = 0; i < RTL8152_PROBE_TRIES; i++) {
+ ret = rtl8152_probe_once(intf, id, version);
+ if (ret != -EAGAIN)
+ break;
+ }
+ if (ret == -EAGAIN) {
+ dev_err(&intf->dev,
+ "r8152 failed probe after %d tries; giving up\n", i);
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
static void rtl8152_disconnect(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
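
The r8152 changes combine two bounded-recovery policies: r8152_control_msg() blocks register access after a failed control transfer and queues at most REGISTER_ACCESS_MAX_RESETS device resets, while rtl8152_probe() retries the whole probe up to RTL8152_PROBE_TRIES times when such an error hits before probe completes. The standalone sketch below models that bounded retry/reset shape against a simulated transfer; the names, the fault model, and the "pretend the reset completed" step are invented for illustration and are not the driver code.

#include <stdio.h>
#include <stdbool.h>

#define MAX_RESETS 3   /* stand-in for REGISTER_ACCESS_MAX_RESETS */
#define PROBE_TRIES 3  /* stand-in for RTL8152_PROBE_TRIES */

struct dev_state {
	bool inaccessible;        /* like the RTL8152_INACCESSIBLE flag */
	unsigned int reset_count; /* consecutive failed-access resets */
};

/* Simulated transfer: fails the first 'faults' calls, then succeeds. */
static int do_transfer(int *faults)
{
	if (*faults > 0) {
		(*faults)--;
		return -1;
	}
	return 0;
}

/* Wrapper: a failure blocks further access and requests a reset,
 * but only up to MAX_RESETS consecutive times; a success clears
 * the reset budget.
 */
static int checked_transfer(struct dev_state *st, int *faults)
{
	int ret;

	if (st->inaccessible)
		return -1;

	ret = do_transfer(faults);
	if (ret == 0) {
		st->reset_count = 0;
		return 0;
	}

	st->inaccessible = true;
	if (st->reset_count < MAX_RESETS) {
		st->reset_count++;
		printf("queueing reset %u\n", st->reset_count);
		st->inaccessible = false;   /* pretend the reset completed */
	} else {
		printf("giving up after %d resets\n", MAX_RESETS);
	}
	return ret;
}

int main(void)
{
	struct dev_state st = { 0 };
	int faults = 2;
	int i;

	/* Probe-style loop: retry the whole operation a bounded number of times. */
	for (i = 0; i < PROBE_TRIES; i++) {
		if (checked_transfer(&st, &faults) == 0) {
			printf("probe succeeded on try %d\n", i + 1);
			return 0;
		}
	}
	printf("probe failed after %d tries\n", PROBE_TRIES);
	return 1;
}
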
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 563ecd27b93e..a530f20ee257 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -95,7 +95,9 @@ static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (ret < 0) {
+ if (ret < 4) {
+ ret = ret < 0 ? ret : -ENODATA;
+
if (ret != -ENODEV)
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
@@ -897,7 +899,7 @@ static int smsc95xx_reset(struct usbnet *dev)
if (timeout >= 100) {
netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n");
- return ret;
+ return -ETIMEDOUT;
}
ret = smsc95xx_set_mac_address(dev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fe7f314d65c9..d67f742fbd4c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -607,16 +607,16 @@ static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
--dma->ref;
- if (dma->ref) {
- if (dma->need_sync && len) {
- offset = buf - (head + sizeof(*dma));
+ if (dma->need_sync && len) {
+ offset = buf - (head + sizeof(*dma));
- virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
- len, DMA_FROM_DEVICE);
- }
+ virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
+ offset, len,
+ DMA_FROM_DEVICE);
+ }
+ if (dma->ref)
return;
- }
virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 635301d677e1..829515a601b3 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -4,7 +4,6 @@
*/
#include <linux/delay.h>
-#include <linux/pm_runtime.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
@@ -632,11 +631,6 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
/* Complete all memory stores after setting bit */
smp_mb__after_atomic();
- if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) {
- pm_runtime_mark_last_busy(ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_imem->dev);
- }
-
return;
err_ipc_mux_deinit:
@@ -1240,7 +1234,6 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
/* forward MDM_NOT_READY to listeners */
ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
- pm_runtime_get_sync(ipc_imem->dev);
hrtimer_cancel(&ipc_imem->td_alloc_timer);
hrtimer_cancel(&ipc_imem->tdupdate_timer);
@@ -1426,16 +1419,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
}
-
- if (!pm_runtime_enabled(ipc_imem->dev))
- pm_runtime_enable(ipc_imem->dev);
-
- pm_runtime_set_autosuspend_delay(ipc_imem->dev,
- IPC_MEM_AUTO_SUSPEND_DELAY_MS);
- pm_runtime_use_autosuspend(ipc_imem->dev);
- pm_runtime_allow(ipc_imem->dev);
- pm_runtime_mark_last_busy(ipc_imem->dev);
-
return ipc_imem;
devlink_channel_fail:
ipc_devlink_deinit(ipc_imem->ipc_devlink);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 0144b45e2afb..5664ac507c90 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -103,8 +103,6 @@ struct ipc_chnl_cfg;
#define FULLY_FUNCTIONAL 0
#define IOSM_DEVLINK_INIT 1
-#define IPC_MEM_AUTO_SUSPEND_DELAY_MS 5000
-
/* List of the supported UL/DL pipes. */
enum ipc_mem_pipes {
IPC_MEM_PIPE_0 = 0,
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 3a259c9abefd..04517bd3325a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -6,7 +6,6 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <net/rtnetlink.h>
#include "iosm_ipc_imem.h"
@@ -438,8 +437,7 @@ static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
return 0;
}
-static DEFINE_RUNTIME_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb,
- ipc_pcie_resume_cb, NULL);
+static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);
static struct pci_driver iosm_ipc_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c
index 2ba1ddca3945..5d5b4183e14a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_port.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
@@ -3,8 +3,6 @@
* Copyright (C) 2020-21 Intel Corporation.
*/
-#include <linux/pm_runtime.h>
-
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
@@ -15,16 +13,12 @@ static int ipc_port_ctrl_start(struct wwan_port *port)
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
int ret = 0;
- pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem,
ipc_port->chl_id,
IPC_HP_CDEV_OPEN);
if (!ipc_port->channel)
ret = -EIO;
- pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
-
return ret;
}
@@ -33,24 +27,15 @@ static void ipc_port_ctrl_stop(struct wwan_port *port)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
- pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel);
- pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
}
/* transfer control data to modem */
static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
- int ret;
- pm_runtime_get_sync(ipc_port->ipc_imem->dev);
- ret = ipc_imem_sys_cdev_write(ipc_port, skb);
- pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
-
- return ret;
+ return ipc_imem_sys_cdev_write(ipc_port, skb);
}
static const struct wwan_port_ops ipc_wwan_ctrl_ops = {
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c
index 4368373797b6..eeecfa3d10c5 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_trace.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c
@@ -3,9 +3,7 @@
* Copyright (C) 2020-2021 Intel Corporation.
*/
-#include <linux/pm_runtime.h>
#include <linux/wwan.h>
-
#include "iosm_ipc_trace.h"
/* sub buffer size and number of sub buffer */
@@ -99,8 +97,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
if (ret)
return ret;
- pm_runtime_get_sync(ipc_trace->ipc_imem->dev);
-
mutex_lock(&ipc_trace->trc_mutex);
if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) {
ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem,
@@ -121,10 +117,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
ret = count;
unlock:
mutex_unlock(&ipc_trace->trc_mutex);
-
- pm_runtime_mark_last_busy(ipc_trace->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_trace->ipc_imem->dev);
-
return ret;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index 93d17de08786..ff747fc79aaf 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -6,7 +6,6 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_link.h>
-#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/wwan.h>
#include <net/pkt_sched.h>
@@ -52,13 +51,11 @@ static int ipc_wwan_link_open(struct net_device *netdev)
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
int if_id = priv->if_id;
- int ret = 0;
if (if_id < IP_MUX_SESSION_START ||
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
- pm_runtime_get_sync(ipc_wwan->ipc_imem->dev);
/* get channel id */
priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
@@ -66,8 +63,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_err(ipc_wwan->dev,
"cannot connect wwan0 & id %d to the IPC mem layer",
if_id);
- ret = -ENODEV;
- goto err_out;
+ return -ENODEV;
}
/* enable tx path, DL data may follow */
@@ -76,11 +72,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
priv->ch_id, priv->if_id);
-err_out:
- pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
-
- return ret;
+ return 0;
}
/* Bring-down the wwan net link */
@@ -90,12 +82,9 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
netif_stop_queue(netdev);
- pm_runtime_get_sync(priv->ipc_wwan->ipc_imem->dev);
ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
priv->ch_id);
priv->ch_id = -1;
- pm_runtime_mark_last_busy(priv->ipc_wwan->ipc_imem->dev);
- pm_runtime_put_autosuspend(priv->ipc_wwan->ipc_imem->dev);
return 0;
}
@@ -117,7 +106,6 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
- pm_runtime_get(ipc_wwan->ipc_imem->dev);
/* Send the SKB to device for transmission */
ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
if_id, priv->ch_id, skb);
@@ -131,14 +119,9 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
ret = NETDEV_TX_BUSY;
dev_err(ipc_wwan->dev, "unable to push packets");
} else {
- pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
goto exit;
}
- pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
-
return ret;
exit:
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f3f2c07423a6..fc3bb63b9ac3 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -41,8 +41,6 @@
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
-#define XENVIF_QUEUE_LENGTH 32
-
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
@@ -530,8 +528,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->features = dev->hw_features | NETIF_F_RXCSUM;
dev->ethtool_ops = &xenvif_ethtool_ops;
- dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
-
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index daf5d144a8ea..064592a5d546 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -341,7 +341,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
struct nvmf_auth_dhchap_success1_data *data = chap->buf;
size_t size = sizeof(*data);
- if (chap->ctrl_key)
+ if (chap->s2)
size += chap->hash_len;
if (size > CHAP_BUF_SIZE) {
@@ -825,7 +825,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
goto fail2;
}
- if (chap->ctrl_key) {
+ if (chap->s2) {
/* DH-HMAC-CHAP Step 5: send success2 */
dev_dbg(ctrl->device, "%s: qid %d send success2\n",
__func__, chap->qid);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index d8ff796fd5f2..747c879e8982 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -108,9 +108,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
if (!buf)
goto out;
- ret = -EFAULT;
- if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
- goto out_free_meta;
+ if (req_op(req) == REQ_OP_DRV_OUT) {
+ ret = -EFAULT;
+ if (copy_from_user(buf, ubuf, len))
+ goto out_free_meta;
+ } else {
+ memset(buf, 0, len);
+ }
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
if (IS_ERR(bip)) {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 347cb5daebc3..3f0c9ee09a12 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3329,7 +3329,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES |
- NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 337a624a537c..a7fea4cbacd7 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -638,6 +638,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
+ if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ return;
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
__nvme_rdma_stop_queue(queue);
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 586458f765f1..1d9854484e2e 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -333,19 +333,21 @@ done:
__func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc);
req->cqe->result.u64 = 0;
- nvmet_req_complete(req, status);
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
mod_delayed_work(system_wq, &req->sq->auth_expired_work,
auth_expire_secs * HZ);
- return;
+ goto complete;
}
/* Final states, clear up variables */
nvmet_auth_sq_free(req->sq);
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
nvmet_ctrl_fatal_error(ctrl);
+
+complete:
+ nvmet_req_complete(req, status);
}
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
@@ -514,11 +516,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
kfree(d);
done:
req->cqe->result.u64 = 0;
- nvmet_req_complete(req, status);
+
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
nvmet_auth_sq_free(req->sq);
nvmet_ctrl_fatal_error(ctrl);
}
+ nvmet_req_complete(req, status);
}
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 468833675cc9..f11400a908f2 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -50,9 +50,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
- if (ns->bdev) {
- blkdev_put(ns->bdev, NULL);
+ if (ns->bdev_handle) {
+ bdev_release(ns->bdev_handle);
ns->bdev = NULL;
+ ns->bdev_handle = NULL;
}
}
@@ -84,17 +85,18 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
if (ns->buffered_io)
return -ENOTBLK;
- ns->bdev = blkdev_get_by_path(ns->device_path,
- BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(ns->bdev)) {
- ret = PTR_ERR(ns->bdev);
+ ns->bdev_handle = bdev_open_by_path(ns->device_path,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
+ if (IS_ERR(ns->bdev_handle)) {
+ ret = PTR_ERR(ns->bdev_handle);
if (ret != -ENOTBLK) {
- pr_err("failed to open block device %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->bdev));
+ pr_err("failed to open block device %s: (%d)\n",
+ ns->device_path, ret);
}
- ns->bdev = NULL;
+ ns->bdev_handle = NULL;
return ret;
}
+ ns->bdev = ns->bdev_handle->bdev;
ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 8cfd60f3b564..360e385be33b 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -58,6 +58,7 @@
struct nvmet_ns {
struct percpu_ref ref;
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
struct file *file;
bool readonly;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index cd92d7ddf5ed..197fc2ecb164 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -372,6 +372,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
if (status == -EPIPE || status == -ECONNRESET)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
else
@@ -910,15 +911,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
iov.iov_len = sizeof(*icresp);
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (ret < 0)
- goto free_crypto;
+ return ret; /* queue removal will cleanup */
queue->state = NVMET_TCP_Q_LIVE;
nvmet_prepare_receive_pdu(queue);
return 0;
-free_crypto:
- if (queue->hdr_digest || queue->data_digest)
- nvmet_tcp_free_crypto(queue);
- return ret;
}
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index a223d9537f22..e8b6f194925d 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -498,7 +498,7 @@ static const struct ocotp_params imx6sl_params = {
};
static const struct ocotp_params imx6sll_params = {
- .nregs = 128,
+ .nregs = 80,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
@@ -512,14 +512,14 @@ static const struct ocotp_params imx6sx_params = {
};
static const struct ocotp_params imx6ul_params = {
- .nregs = 128,
+ .nregs = 144,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6ull_params = {
- .nregs = 64,
+ .nregs = 80,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 1f9a35f724f5..0dda70e1ef90 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -23,7 +23,8 @@ static bool riscv_perf_user_access(struct perf_event *event)
return ((event->attr.type == PERF_TYPE_HARDWARE) ||
(event->attr.type == PERF_TYPE_HW_CACHE) ||
(event->attr.type == PERF_TYPE_RAW)) &&
- !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
+ !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) &&
+ (event->hw.idx != -1);
}
void arch_perf_update_userpage(struct perf_event *event,
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 9a51053b1f99..96c7f670c8f0 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -510,16 +510,18 @@ static void pmu_sbi_set_scounteren(void *arg)
{
struct perf_event *event = (struct perf_event *)arg;
- csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+ if (event->hw.idx != -1)
+ csr_write(CSR_SCOUNTEREN,
+ csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
}
static void pmu_sbi_reset_scounteren(void *arg)
{
struct perf_event *event = (struct perf_event *)arg;
- csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+ if (event->hw.idx != -1)
+ csr_write(CSR_SCOUNTEREN,
+ csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
}
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
@@ -541,7 +543,8 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
- pmu_sbi_set_scounteren((void *)event);
+ on_each_cpu_mask(mm_cpumask(event->owner->mm),
+ pmu_sbi_set_scounteren, (void *)event, 1);
}
static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
@@ -551,7 +554,8 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
- pmu_sbi_reset_scounteren((void *)event);
+ on_each_cpu_mask(mm_cpumask(event->owner->mm),
+ pmu_sbi_reset_scounteren, (void *)event, 1);
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
index 4f036c77284e..e2187767ce00 100644
--- a/drivers/phy/freescale/phy-fsl-lynx-28g.c
+++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c
@@ -127,6 +127,10 @@ struct lynx_28g_lane {
struct lynx_28g_priv {
void __iomem *base;
struct device *dev;
+ /* Serialize concurrent access to registers shared between lanes,
+ * like PCCn
+ */
+ spinlock_t pcc_lock;
struct lynx_28g_pll pll[LYNX_28G_NUM_PLL];
struct lynx_28g_lane lane[LYNX_28G_NUM_LANE];
@@ -397,6 +401,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
if (powered_up)
lynx_28g_power_off(phy);
+ spin_lock(&priv->pcc_lock);
+
switch (submode) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
@@ -413,6 +419,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
lane->interface = submode;
out:
+ spin_unlock(&priv->pcc_lock);
+
/* Power up the lane if necessary */
if (powered_up)
lynx_28g_power_on(phy);
@@ -508,11 +516,12 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
lane = &priv->lane[i];
- if (!lane->init)
- continue;
+ mutex_lock(&lane->phy->mutex);
- if (!lane->powered_up)
+ if (!lane->init || !lane->powered_up) {
+ mutex_unlock(&lane->phy->mutex);
continue;
+ }
rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) {
@@ -521,6 +530,8 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
} while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
}
+
+ mutex_unlock(&lane->phy->mutex);
}
queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
msecs_to_jiffies(1000));
@@ -593,6 +604,7 @@ static int lynx_28g_probe(struct platform_device *pdev)
dev_set_drvdata(dev, priv);
+ spin_lock_init(&priv->pcc_lock);
INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check);
queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
@@ -604,6 +616,14 @@ static int lynx_28g_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(provider);
}
+static void lynx_28g_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct lynx_28g_priv *priv = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&priv->cdr_check);
+}
+
static const struct of_device_id lynx_28g_of_match_table[] = {
{ .compatible = "fsl,lynx-28g" },
{ },
@@ -612,6 +632,7 @@ MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table);
static struct platform_driver lynx_28g_driver = {
.probe = lynx_28g_probe,
+ .remove_new = lynx_28g_remove,
.driver = {
.name = "lynx-28g",
.of_match_table = lynx_28g_of_match_table,
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 1d567604b650..376d023a0aa9 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -122,16 +122,10 @@ static int phy_mdm6600_power_on(struct phy *x)
{
struct phy_mdm6600 *ddata = phy_get_drvdata(x);
struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE];
- int error;
if (!ddata->enabled)
return -ENODEV;
- error = pinctrl_pm_select_default_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with default_state: %i\n",
- __func__, error);
-
gpiod_set_value_cansleep(enable_gpio, 1);
/* Allow aggressive PM for USB, it's only needed for n_gsm port */
@@ -160,11 +154,6 @@ static int phy_mdm6600_power_off(struct phy *x)
gpiod_set_value_cansleep(enable_gpio, 0);
- error = pinctrl_pm_select_sleep_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
- __func__, error);
-
return 0;
}
@@ -456,6 +445,7 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
{
struct gpio_desc *reset_gpio =
ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ int error;
ddata->enabled = false;
phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_BP_SHUTDOWN_REQ);
@@ -471,6 +461,17 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
} else {
dev_err(ddata->dev, "Timed out powering down\n");
}
+
+ /*
+ * Keep reset gpio high with padconf internal pull-up resistor to
+ * prevent modem from waking up during deeper SoC idle states. The
+ * gpio bank lines can have glitches if not in the always-on wkup
+ * domain.
+ */
+ error = pinctrl_pm_select_sleep_state(ddata->dev);
+ if (error)
+ dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
+ __func__, error);
}
static void phy_mdm6600_deferred_power_on(struct work_struct *work)
@@ -571,12 +572,6 @@ static int phy_mdm6600_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
- /* Active state selected in phy_mdm6600_power_on() */
- error = pinctrl_pm_select_sleep_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
- __func__, error);
-
error = phy_mdm6600_init_lines(ddata);
if (error)
return error;
@@ -627,10 +622,12 @@ idle:
pm_runtime_put_autosuspend(ddata->dev);
cleanup:
- if (error < 0)
+ if (error < 0) {
phy_mdm6600_device_power_off(ddata);
- pm_runtime_disable(ddata->dev);
- pm_runtime_dont_use_autosuspend(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ pm_runtime_dont_use_autosuspend(ddata->dev);
+ }
+
return error;
}
@@ -639,6 +636,7 @@ static void phy_mdm6600_remove(struct platform_device *pdev)
struct phy_mdm6600 *ddata = platform_get_drvdata(pdev);
struct gpio_desc *reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ pm_runtime_get_noresume(ddata->dev);
pm_runtime_dont_use_autosuspend(ddata->dev);
pm_runtime_put_sync(ddata->dev);
pm_runtime_disable(ddata->dev);
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index 8814f4322adf..3642a5d4f2f3 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -152,7 +152,7 @@ static int qcom_apq8064_sata_phy_init(struct phy *generic_phy)
return ret;
}
- /* SATA phy calibrated succesfully, power up to functional mode */
+ /* SATA phy calibrated successfully, power up to functional mode */
writel_relaxed(0x3E, base + SATA_PHY_POW_DWN_CTRL1);
writel_relaxed(0x01, base + SATA_PHY_RX_IMCAL0);
writel_relaxed(0x01, base + SATA_PHY_TX_IMCAL0);
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index ed08072ca032..5cb7e79b99b3 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -82,7 +82,7 @@ struct m31_priv_data {
unsigned int nregs;
};
-struct m31_phy_regs m31_ipq5332_regs[] = {
+static struct m31_phy_regs m31_ipq5332_regs[] = {
{
USB_PHY_CFG0,
UTMI_PHY_OVERRIDE_EN,
@@ -172,8 +172,7 @@ static int m31usb_phy_init(struct phy *phy)
ret = clk_prepare_enable(qphy->clk);
if (ret) {
- if (qphy->vreg)
- regulator_disable(qphy->vreg);
+ regulator_disable(qphy->vreg);
dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
return ret;
}
@@ -256,7 +255,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
qphy->vreg = devm_regulator_get(dev, "vdda-phy");
if (IS_ERR(qphy->vreg))
- return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
phy_set_drvdata(qphy->phy, qphy);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index cbb28afce135..5e6fc8103e9d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -859,10 +859,10 @@ static const struct qmp_phy_init_tbl sm8550_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG1, 0x4b),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG5, 0x10),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
};
static const struct qmp_phy_init_tbl sm8550_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
@@ -2555,6 +2555,7 @@ static int qmp_combo_usb_power_on(struct phy *phy)
void __iomem *tx2 = qmp->tx2;
void __iomem *rx2 = qmp->rx2;
void __iomem *pcs = qmp->pcs;
+ void __iomem *pcs_usb = qmp->pcs_usb;
void __iomem *status;
unsigned int val;
int ret;
@@ -2576,6 +2577,9 @@ static int qmp_combo_usb_power_on(struct phy *phy)
qmp_combo_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ if (pcs_usb)
+ qmp_combo_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
index 9510e63ba9d8..c38530d6776b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
@@ -12,7 +12,7 @@
#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG3 0xcc
#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG6 0xd8
#define QPHY_USB_V6_PCS_REFGEN_REQ_CONFIG1 0xdc
-#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1 0x90
+#define QPHY_USB_V6_PCS_POWER_STATE_CONFIG1 0x90
#define QPHY_USB_V6_PCS_RX_SIGDET_LVL 0x188
#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
@@ -23,6 +23,7 @@
#define QPHY_USB_V6_PCS_EQ_CONFIG1 0x1dc
#define QPHY_USB_V6_PCS_EQ_CONFIG5 0x1ec
+#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1 0x00
#define QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x18
#define QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x3c
#define QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x40
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 0130bb8e809a..c69577601ae0 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1112,8 +1112,6 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
@@ -1122,6 +1120,11 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+};
+
static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xc4),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x89),
@@ -1131,9 +1134,6 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
@@ -1142,6 +1142,12 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
+static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
+};
+
struct qmp_usb_offsets {
u16 serdes;
u16 pcs;
@@ -1383,6 +1389,8 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
.rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
.pcs_tbl = sa8775p_usb3_uniphy_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sa8775p_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_usb_tbl),
.clk_list = qmp_v4_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = qcm2290_usb3phy_reset_l,
@@ -1405,6 +1413,8 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
.rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
.pcs_tbl = sc8280xp_usb3_uniphy_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sc8280xp_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_usb_tbl),
.clk_list = qmp_v4_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = qcm2290_usb3phy_reset_l,
@@ -1703,6 +1713,7 @@ static int qmp_usb_power_on(struct phy *phy)
void __iomem *tx = qmp->tx;
void __iomem *rx = qmp->rx;
void __iomem *pcs = qmp->pcs;
+ void __iomem *pcs_usb = qmp->pcs_usb;
void __iomem *status;
unsigned int val;
int ret;
@@ -1726,6 +1737,9 @@ static int qmp_usb_power_on(struct phy *phy)
qmp_usb_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ if (pcs_usb)
+ qmp_usb_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
index 650e20ed69af..75ac7e7c31ae 100644
--- a/drivers/phy/realtek/Kconfig
+++ b/drivers/phy/realtek/Kconfig
@@ -2,6 +2,9 @@
#
# Phy drivers for Realtek platforms
#
+
+if ARCH_REALTEK || COMPILE_TEST
+
config PHY_RTK_RTD_USB2PHY
tristate "Realtek RTD USB2 PHY Transceiver Driver"
depends on USB_SUPPORT
@@ -25,3 +28,5 @@ config PHY_RTK_RTD_USB3PHY
The DHC (digital home center) RTD series SoCs used the Synopsys
DWC3 USB IP. This driver will do the PHY initialization
of the parameters.
+
+endif # ARCH_REALTEK || COMPILE_TEST
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
index 5e7ee060b404..aedc78bd37f7 100644
--- a/drivers/phy/realtek/phy-rtk-usb2.c
+++ b/drivers/phy/realtek/phy-rtk-usb2.c
@@ -853,17 +853,11 @@ static inline void create_debug_files(struct rtk_phy *rtk_phy)
rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
phy_debug_root);
- if (!rtk_phy->debug_dir)
- return;
- if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb2_parameter_fops))
- goto file_error;
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb2_parameter_fops);
return;
-
-file_error:
- debugfs_remove_recursive(rtk_phy->debug_dir);
}
static inline void remove_debug_files(struct rtk_phy *rtk_phy)
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
index 7881f908aade..dfb3122f3f11 100644
--- a/drivers/phy/realtek/phy-rtk-usb3.c
+++ b/drivers/phy/realtek/phy-rtk-usb3.c
@@ -416,17 +416,11 @@ static inline void create_debug_files(struct rtk_phy *rtk_phy)
return;
rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
- if (!rtk_phy->debug_dir)
- return;
- if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb3_parameter_fops))
- goto file_error;
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb3_parameter_fops);
return;
-
-file_error:
- debugfs_remove_recursive(rtk_phy->debug_dir);
}
static inline void remove_debug_files(struct rtk_phy *rtk_phy)
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index 2d1c1652cfd9..8a9961ac8712 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -1062,13 +1062,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
if (ret < 0)
return ret;
- gpio = &pctrl->gpio_bank[reg];
- gpio->pctrl = pctrl;
-
if (reg >= WPCM450_NUM_BANKS)
return dev_err_probe(dev, -EINVAL,
"GPIO index %d out of range!\n", reg);
+ gpio = &pctrl->gpio_bank[reg];
+ gpio->pctrl = pctrl;
+
bank = &wpcm450_banks[reg];
gpio->bank = bank;
diff --git a/drivers/pinctrl/pinctrl-lantiq.h b/drivers/pinctrl/pinctrl-lantiq.h
index efb25fc34f14..1ac49ae638de 100644
--- a/drivers/pinctrl/pinctrl-lantiq.h
+++ b/drivers/pinctrl/pinctrl-lantiq.h
@@ -198,5 +198,4 @@ enum ltq_pin {
extern int ltq_pinctrl_register(struct platform_device *pdev,
struct ltq_pinmux_info *info);
-extern int ltq_pinctrl_unregister(struct platform_device *pdev);
#endif /* __PINCTRL_LANTIQ_H */
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index e5a418026ba3..0b2839d27fd6 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -32,7 +32,8 @@ struct lpi_pinctrl {
char __iomem *tlmm_base;
char __iomem *slew_base;
struct clk_bulk_data clks[MAX_LPI_NUM_CLKS];
- struct mutex slew_access_lock;
+ /* Protects from concurrent register updates */
+ struct mutex lock;
DECLARE_BITMAP(ever_gpio, MAX_NR_GPIO);
const struct lpi_pinctrl_variant_data *data;
};
@@ -103,6 +104,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
+ mutex_lock(&pctrl->lock);
val = lpi_gpio_read(pctrl, pin, LPI_GPIO_CFG_REG);
/*
@@ -128,6 +130,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
u32p_replace_bits(&val, i, LPI_GPIO_FUNCTION_MASK);
lpi_gpio_write(pctrl, pin, LPI_GPIO_CFG_REG, val);
+ mutex_unlock(&pctrl->lock);
return 0;
}
@@ -233,14 +236,14 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
if (slew_offset == LPI_NO_SLEW)
break;
- mutex_lock(&pctrl->slew_access_lock);
+ mutex_lock(&pctrl->lock);
sval = ioread32(pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
sval |= arg << slew_offset;
iowrite32(sval, pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
- mutex_unlock(&pctrl->slew_access_lock);
+ mutex_unlock(&pctrl->lock);
break;
default:
return -EINVAL;
@@ -256,6 +259,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
}
+ mutex_lock(&pctrl->lock);
val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);
u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
@@ -264,6 +268,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
u32p_replace_bits(&val, output_enabled, LPI_GPIO_OE_MASK);
lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);
+ mutex_unlock(&pctrl->lock);
return 0;
}
@@ -461,7 +466,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
pctrl->chip.label = dev_name(dev);
pctrl->chip.can_sleep = false;
- mutex_init(&pctrl->slew_access_lock);
+ mutex_init(&pctrl->lock);
pctrl->ctrl = devm_pinctrl_register(dev, &pctrl->desc, pctrl);
if (IS_ERR(pctrl->ctrl)) {
@@ -483,7 +488,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
return 0;
err_pinctrl:
- mutex_destroy(&pctrl->slew_access_lock);
+ mutex_destroy(&pctrl->lock);
clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
return ret;
@@ -495,7 +500,7 @@ int lpi_pinctrl_remove(struct platform_device *pdev)
struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev);
int i;
- mutex_destroy(&pctrl->slew_access_lock);
+ mutex_destroy(&pctrl->lock);
clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
for (i = 0; i < pctrl->data->npins; i++)
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 77730dc548ed..c8d519ca53eb 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -235,6 +235,7 @@ config PINCTRL_RZN1
depends on OF
depends on ARCH_RZN1 || COMPILE_TEST
select GENERIC_PINCONF
+ select PINMUX
help
This selects pinctrl driver for Renesas RZ/N1 devices.
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c
index 4bfe3aa57f8a..cf42e204cbf0 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c
@@ -31,6 +31,8 @@
#define JH7110_AON_NGPIO 4
#define JH7110_AON_GC_BASE 64
+#define JH7110_AON_REGS_NUM 37
+
/* registers */
#define JH7110_AON_DOEN 0x0
#define JH7110_AON_DOUT 0x4
@@ -145,6 +147,7 @@ static const struct jh7110_pinctrl_soc_info jh7110_aon_pinctrl_info = {
.gpi_mask = GENMASK(3, 0),
.gpioin_reg_base = JH7110_AON_GPIOIN,
.irq_reg = &jh7110_aon_irq_reg,
+ .nsaved_regs = JH7110_AON_REGS_NUM,
.jh7110_set_one_pin_mux = jh7110_aon_set_one_pin_mux,
.jh7110_get_padcfg_base = jh7110_aon_get_padcfg_base,
.jh7110_gpio_irq_handler = jh7110_aon_irq_handler,
@@ -165,6 +168,7 @@ static struct platform_driver jh7110_aon_pinctrl_driver = {
.driver = {
.name = "starfive-jh7110-aon-pinctrl",
.of_match_table = jh7110_aon_pinctrl_of_match,
+ .pm = pm_sleep_ptr(&jh7110_pinctrl_pm_ops),
},
};
module_platform_driver(jh7110_aon_pinctrl_driver);
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c
index 20c85db1cd3a..03c2ad808d61 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c
@@ -31,6 +31,8 @@
#define JH7110_SYS_NGPIO 64
#define JH7110_SYS_GC_BASE 0
+#define JH7110_SYS_REGS_NUM 174
+
/* registers */
#define JH7110_SYS_DOEN 0x000
#define JH7110_SYS_DOUT 0x040
@@ -417,6 +419,7 @@ static const struct jh7110_pinctrl_soc_info jh7110_sys_pinctrl_info = {
.gpi_mask = GENMASK(6, 0),
.gpioin_reg_base = JH7110_SYS_GPIOIN,
.irq_reg = &jh7110_sys_irq_reg,
+ .nsaved_regs = JH7110_SYS_REGS_NUM,
.jh7110_set_one_pin_mux = jh7110_sys_set_one_pin_mux,
.jh7110_get_padcfg_base = jh7110_sys_get_padcfg_base,
.jh7110_gpio_irq_handler = jh7110_sys_irq_handler,
@@ -437,6 +440,7 @@ static struct platform_driver jh7110_sys_pinctrl_driver = {
.driver = {
.name = "starfive-jh7110-sys-pinctrl",
.of_match_table = jh7110_sys_pinctrl_of_match,
+ .pm = pm_sleep_ptr(&jh7110_pinctrl_pm_ops),
},
};
module_platform_driver(jh7110_sys_pinctrl_driver);
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
index b9081805c8f6..640f827a9b2c 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
@@ -872,6 +872,13 @@ int jh7110_pinctrl_probe(struct platform_device *pdev)
if (!sfp)
return -ENOMEM;
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+ sfp->saved_regs = devm_kcalloc(dev, info->nsaved_regs,
+ sizeof(*sfp->saved_regs), GFP_KERNEL);
+ if (!sfp->saved_regs)
+ return -ENOMEM;
+#endif
+
sfp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sfp->base))
return PTR_ERR(sfp->base);
@@ -967,14 +974,45 @@ int jh7110_pinctrl_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "could not register gpiochip\n");
- irq_domain_set_pm_device(sfp->gc.irq.domain, dev);
-
dev_info(dev, "StarFive GPIO chip registered %d GPIOs\n", sfp->gc.ngpio);
return pinctrl_enable(sfp->pctl);
}
EXPORT_SYMBOL_GPL(jh7110_pinctrl_probe);
+static int jh7110_pinctrl_suspend(struct device *dev)
+{
+ struct jh7110_pinctrl *sfp = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ for (i = 0 ; i < sfp->info->nsaved_regs ; i++)
+ sfp->saved_regs[i] = readl_relaxed(sfp->base + 4 * i);
+
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+ return 0;
+}
+
+static int jh7110_pinctrl_resume(struct device *dev)
+{
+ struct jh7110_pinctrl *sfp = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ for (i = 0 ; i < sfp->info->nsaved_regs ; i++)
+ writel_relaxed(sfp->saved_regs[i], sfp->base + 4 * i);
+
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+ return 0;
+}
+
+const struct dev_pm_ops jh7110_pinctrl_pm_ops = {
+ LATE_SYSTEM_SLEEP_PM_OPS(jh7110_pinctrl_suspend, jh7110_pinctrl_resume)
+};
+EXPORT_SYMBOL_GPL(jh7110_pinctrl_pm_ops);
+
MODULE_DESCRIPTION("Pinctrl driver for the StarFive JH7110 SoC");
MODULE_AUTHOR("Emil Renner Berthing <[email protected]>");
MODULE_AUTHOR("Jianlong Huang <[email protected]>");
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h
index 3f20b7ff96dd..a33d0d4e1382 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h
@@ -21,6 +21,7 @@ struct jh7110_pinctrl {
/* register read/write mutex */
struct mutex mutex;
const struct jh7110_pinctrl_soc_info *info;
+ u32 *saved_regs;
};
struct jh7110_gpio_irq_reg {
@@ -50,6 +51,8 @@ struct jh7110_pinctrl_soc_info {
const struct jh7110_gpio_irq_reg *irq_reg;
+ unsigned int nsaved_regs;
+
/* generic pinmux */
int (*jh7110_set_one_pin_mux)(struct jh7110_pinctrl *sfp,
unsigned int pin,
@@ -66,5 +69,6 @@ void jh7110_set_gpiomux(struct jh7110_pinctrl *sfp, unsigned int pin,
unsigned int din, u32 dout, u32 doen);
int jh7110_pinctrl_probe(struct platform_device *pdev);
struct jh7110_pinctrl *jh7110_from_irq_desc(struct irq_desc *desc);
+extern const struct dev_pm_ops jh7110_pinctrl_pm_ops;
#endif /* __PINCTRL_STARFIVE_JH7110_H__ */
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index cfeda5b3e048..734c71ef005b 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -96,7 +96,6 @@ static const struct cfg_param {
{"nvidia,slew-rate-falling", TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING},
{"nvidia,slew-rate-rising", TEGRA_PINCONF_PARAM_SLEW_RATE_RISING},
{"nvidia,drive-type", TEGRA_PINCONF_PARAM_DRIVE_TYPE},
- {"nvidia,function", TEGRA_PINCONF_PARAM_FUNCTION},
};
static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
@@ -471,12 +470,6 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
*bit = g->drvtype_bit;
*width = 2;
break;
- case TEGRA_PINCONF_PARAM_FUNCTION:
- *bank = g->mux_bank;
- *reg = g->mux_reg;
- *bit = g->mux_bit;
- *width = 2;
- break;
default:
dev_err(pmx->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
@@ -640,16 +633,8 @@ static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
val >>= bit;
val &= (1 << width) - 1;
- if (cfg_params[i].param == TEGRA_PINCONF_PARAM_FUNCTION) {
- u8 idx = pmx->soc->groups[group].funcs[val];
-
- seq_printf(s, "\n\t%s=%s",
- strip_prefix(cfg_params[i].property),
- pmx->functions[idx].name);
- } else {
- seq_printf(s, "\n\t%s=%u",
- strip_prefix(cfg_params[i].property), val);
- }
+ seq_printf(s, "\n\t%s=%u",
+ strip_prefix(cfg_params[i].property), val);
}
}
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index e728efeaa4de..b3289bdf727d 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -54,8 +54,6 @@ enum tegra_pinconf_param {
TEGRA_PINCONF_PARAM_SLEW_RATE_RISING,
/* argument: Integer, range is HW-dependant */
TEGRA_PINCONF_PARAM_DRIVE_TYPE,
- /* argument: pinmux settings */
- TEGRA_PINCONF_PARAM_FUNCTION,
};
enum tegra_pinconf_pull {
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index fd38d8c8371e..ab7d7a1235b8 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -609,24 +609,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
- if (!IS_VRING_DROP(vring)) {
- if (is_rx)
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
sizeof(u64));
- else
- memcpy(&data, addr + vring->cur_len,
- sizeof(u64));
+ } else {
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
}
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
- if (!IS_VRING_DROP(vring)) {
- if (is_rx)
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
len - vring->cur_len);
- else
- memcpy(&data, addr + vring->cur_len,
- len - vring->cur_len);
+ } else {
+ data = 0;
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
}
vring->cur_len = len;
}
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index f433a13c3689..a5a3941b3f43 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -159,8 +159,7 @@ static int surface_platform_profile_probe(struct ssam_device *sdev)
set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
- platform_profile_register(&tpd->handler);
- return 0;
+ return platform_profile_register(&tpd->handler);
}
static void surface_platform_profile_remove(struct ssam_device *sdev)
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index ad702463a65d..6bbffb081053 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -111,6 +111,79 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
}
},
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */
+ {
+ .ident = "V14 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82YT"),
+ }
+ },
+ {
+ .ident = "V14 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83GE"),
+ }
+ },
+ {
+ .ident = "V15 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82YU"),
+ }
+ },
+ {
+ .ident = "V15 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83CQ"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 14AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
+ }
+ },
+ {
+ .ident = "IdeaPad Slim 3 14AMN8",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
+ }
+ },
+ {
+ .ident = "IdeaPad Slim 3 15AMN8",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
+ }
+ },
/* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
{
.ident = "HP Laptop 15s-eq2xxx",
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index cadbb557a108..1417e230edbd 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -105,6 +105,8 @@ struct apple_gmux_config {
#define GMUX_BRIGHTNESS_MASK 0x00ffffff
#define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK
+# define MMIO_GMUX_MAX_BRIGHTNESS 0xffff
+
static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)
{
return inb(gmux_data->iostart + port);
@@ -857,7 +859,17 @@ get_version:
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
+
+ /*
+ * All MMIO gmuxes have 0xffff as max brightness, but some iMacs incorrectly
+ * report 0x03ff, despite the firmware being happy to set 0xffff as the brightness
+ * at boot. Force 0xffff for all MMIO gmuxes so they all have the correct brightness
+ * range.
+ */
+ if (type == APPLE_GMUX_TYPE_MMIO)
+ props.max_brightness = MMIO_GMUX_MAX_BRIGHTNESS;
+ else
+ props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
#if IS_REACHABLE(CONFIG_ACPI_VIDEO)
register_bdev = acpi_video_get_backlight_type() == acpi_backlight_apple_gmux;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index d85d895fee89..df1db54d4e18 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -531,6 +531,9 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x2a, { KEY_SELECTIVE_SCREENSHOT } },
+ { KE_IGNORE, 0x2b, }, /* PrintScreen (also sent via PS/2) on newer models */
+ { KE_IGNORE, 0x2c, }, /* CapsLock (also sent via PS/2) on newer models */
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 9f8cea5f9615..19bfd30861aa 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -3826,7 +3826,6 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
{
unsigned int key_value = 1;
bool autorelease = 1;
- int orig_code = code;
if (asus->driver->key_filter) {
asus->driver->key_filter(asus->driver, &code, &key_value,
@@ -3835,16 +3834,10 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
return;
}
- if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
- code = ASUS_WMI_BRN_UP;
- else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
- code = ASUS_WMI_BRN_DOWN;
-
- if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
- if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
- asus_wmi_backlight_notify(asus, orig_code);
- return;
- }
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor &&
+ code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNDOWN_MAX) {
+ asus_wmi_backlight_notify(asus, code);
+ return;
}
if (code == NOTIFY_KBD_BRTUP) {
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index a478ebfd34df..fc41d1b1bb7f 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -18,7 +18,7 @@
#include <linux/i8042.h>
#define ASUS_WMI_KEY_IGNORE (-1)
-#define ASUS_WMI_BRN_DOWN 0x20
+#define ASUS_WMI_BRN_DOWN 0x2e
#define ASUS_WMI_BRN_UP 0x2f
struct module;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 1152deaa0078..33ab207493e3 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -176,7 +176,7 @@ show_uncore_data(initial_max_freq_khz);
static int create_attr_group(struct uncore_data *data, char *name)
{
- int ret, index = 0;
+ int ret, freq, index = 0;
init_attribute_rw(max_freq_khz);
init_attribute_rw(min_freq_khz);
@@ -197,7 +197,11 @@ static int create_attr_group(struct uncore_data *data, char *name)
data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
- data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+
+ ret = uncore_read_freq(data, &freq);
+ if (!ret)
+ data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+
data->uncore_attrs[index] = NULL;
data->uncore_attr_group.name = name;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 3d96dbf79a72..a2ffe4157df1 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -6514,6 +6514,7 @@ static int mlxplat_i2c_main_init(struct mlxplat_priv *priv)
return 0;
fail_mlxplat_i2c_mux_topology_init:
+ platform_device_unregister(priv->pdev_i2c);
fail_platform_i2c_register:
fail_mlxplat_mlxcpld_verify_bus_topology:
return err;
@@ -6521,6 +6522,7 @@ fail_mlxplat_mlxcpld_verify_bus_topology:
static void mlxplat_i2c_main_exit(struct mlxplat_priv *priv)
{
+ mlxplat_pre_exit(priv);
mlxplat_i2c_mux_topology_exit(priv);
if (priv->pdev_i2c)
platform_device_unregister(priv->pdev_i2c);
@@ -6597,7 +6599,7 @@ static int mlxplat_probe(struct platform_device *pdev)
fail_register_reboot_notifier:
fail_regcache_sync:
- mlxplat_pre_exit(priv);
+ mlxplat_i2c_main_exit(priv);
fail_mlxplat_i2c_main_init:
fail_regmap_write:
fail_alloc:
@@ -6614,7 +6616,6 @@ static int mlxplat_remove(struct platform_device *pdev)
pm_power_off = NULL;
if (mlxplat_reboot_nb)
unregister_reboot_notifier(mlxplat_reboot_nb);
- mlxplat_pre_exit(priv);
mlxplat_i2c_main_exit(priv);
mlxplat_post_exit();
return 0;
diff --git a/drivers/platform/x86/msi-ec.c b/drivers/platform/x86/msi-ec.c
index f26a3121092f..492eb383ee7a 100644
--- a/drivers/platform/x86/msi-ec.c
+++ b/drivers/platform/x86/msi-ec.c
@@ -276,14 +276,13 @@ static struct msi_ec_conf CONF2 __initdata = {
static const char * const ALLOWED_FW_3[] __initconst = {
"1592EMS1.111",
- "E1592IMS.10C",
NULL
};
static struct msi_ec_conf CONF3 __initdata = {
.allowed_fw = ALLOWED_FW_3,
.charge_control = {
- .address = 0xef,
+ .address = 0xd7,
.offset_start = 0x8a,
.offset_end = 0x80,
.range_min = 0x8a,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 9569f11dec8c..40878e327afd 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4092,7 +4092,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
if (ret > 0) {
struct inode *inode = file_inode(file);
- inode->i_atime = current_time(inode);
+ inode_set_atime_to_ts(inode, current_time(inode));
}
return ret;
diff --git a/drivers/pmdomain/imx/scu-pd.c b/drivers/pmdomain/imx/scu-pd.c
index 2f693b67ddb4..891c1d925a9d 100644
--- a/drivers/pmdomain/imx/scu-pd.c
+++ b/drivers/pmdomain/imx/scu-pd.c
@@ -150,7 +150,8 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "mclk-out-1", IMX_SC_R_MCLK_OUT_1, 1, false, 0 },
{ "dma0-ch", IMX_SC_R_DMA_0_CH0, 32, true, 0 },
{ "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
- { "dma2-ch", IMX_SC_R_DMA_2_CH0, 32, true, 0 },
+ { "dma2-ch-0", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
+ { "dma2-ch-1", IMX_SC_R_DMA_2_CH5, 27, true, 0 },
{ "dma3-ch", IMX_SC_R_DMA_3_CH0, 32, true, 0 },
{ "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 },
{ "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index de77df97b3a4..ec163d1bcd18 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -105,7 +105,7 @@ struct qcom_battmgr_property_request {
struct qcom_battmgr_update_request {
struct pmic_glink_hdr hdr;
- u32 battery_id;
+ __le32 battery_id;
};
struct qcom_battmgr_charge_time_request {
@@ -1282,9 +1282,9 @@ static void qcom_battmgr_enable_worker(struct work_struct *work)
{
struct qcom_battmgr *battmgr = container_of(work, struct qcom_battmgr, enable_work);
struct qcom_battmgr_enable_request req = {
- .hdr.owner = PMIC_GLINK_OWNER_BATTMGR,
- .hdr.type = PMIC_GLINK_NOTIFY,
- .hdr.opcode = BATTMGR_REQUEST_NOTIFICATION,
+ .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+ .hdr.type = cpu_to_le32(PMIC_GLINK_NOTIFY),
+ .hdr.opcode = cpu_to_le32(BATTMGR_REQUEST_NOTIFICATION),
};
int ret;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 215597f73be4..d440319a7945 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -412,7 +412,8 @@ dasd_state_ready_to_online(struct dasd_device * device)
KOBJ_CHANGE);
return 0;
}
- disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
+ disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ KOBJ_CHANGE);
}
return 0;
}
@@ -432,7 +433,8 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
device->state = DASD_STATE_READY;
if (device->block && !(device->features & DASD_FEATURE_USERAW))
- disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
+ disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ KOBJ_CHANGE);
return 0;
}
@@ -3590,7 +3592,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* in the other openers.
*/
if (device->block) {
- max_count = device->block->bdev ? 0 : -1;
+ max_count = device->block->bdev_handle ? 0 : -1;
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
@@ -3636,8 +3638,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* so sync bdev first and then wait for our queues to become
* empty
*/
- if (device->block)
- bdev_mark_dead(device->block->bdev, false);
+ if (device->block && device->block->bdev_handle)
+ bdev_mark_dead(device->block->bdev_handle->bdev, false);
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index fe5108a1b332..55e3abe94cde 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -127,15 +127,15 @@ void dasd_gendisk_free(struct dasd_block *block)
*/
int dasd_scan_partitions(struct dasd_block *block)
{
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
int rc;
- bdev = blkdev_get_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, NULL,
- NULL);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
+ NULL, NULL);
+ if (IS_ERR(bdev_handle)) {
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, blkdev_get returned %ld",
- PTR_ERR(bdev));
+ PTR_ERR(bdev_handle));
return -ENODEV;
}
@@ -147,16 +147,15 @@ int dasd_scan_partitions(struct dasd_block *block)
"scan partitions error, rc %d", rc);
/*
- * Since the matching blkdev_put call to the blkdev_get in
- * this function is not called before dasd_destroy_partitions
- * the offline open_count limit needs to be increased from
- * 0 to 1. This is done by setting device->bdev (see
- * dasd_generic_set_offline). As long as the partition
- * detection is running no offline should be allowed. That
- * is why the assignment to device->bdev is done AFTER
- * the BLKRRPART ioctl.
+ * Since the matching bdev_release() call to the
+ * bdev_open_by_dev() in this function is not called before
+ * dasd_destroy_partitions, the offline open_count limit needs to be
+ * increased from 0 to 1. This is done by setting block->bdev_handle
+ * (see dasd_generic_set_offline). As long as the partition detection
+ * is running no offline should be allowed. That is why the assignment
+ * to block->bdev_handle is done AFTER the BLKRRPART ioctl.
*/
- block->bdev = bdev;
+ block->bdev_handle = bdev_handle;
return 0;
}
@@ -166,21 +165,21 @@ int dasd_scan_partitions(struct dasd_block *block)
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
/*
- * Get the bdev pointer from the device structure and clear
- * device->bdev to lower the offline open_count limit again.
+ * Get the bdev_handle pointer from the device structure and clear
+ * device->bdev_handle to lower the offline open_count limit again.
*/
- bdev = block->bdev;
- block->bdev = NULL;
+ bdev_handle = block->bdev_handle;
+ block->bdev_handle = NULL;
- mutex_lock(&bdev->bd_disk->open_mutex);
- bdev_disk_changed(bdev->bd_disk, true);
- mutex_unlock(&bdev->bd_disk->open_mutex);
+ mutex_lock(&bdev_handle->bdev->bd_disk->open_mutex);
+ bdev_disk_changed(bdev_handle->bdev->bd_disk, true);
+ mutex_unlock(&bdev_handle->bdev->bd_disk->open_mutex);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
}
int dasd_gendisk_init(void)
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8a4dbe9d7741..2e663131adaf 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -650,7 +650,7 @@ struct dasd_block {
struct gendisk *gdp;
spinlock_t request_queue_lock;
struct blk_mq_tag_set tag_set;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
atomic_t open_count;
unsigned long blocks; /* size of volume in blocks */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index d55862605b82..61b9675e2a67 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -537,7 +537,7 @@ static int __dasd_ioctl_information(struct dasd_block *block,
* This must be hidden from user-space.
*/
dasd_info->open_count = atomic_read(&block->open_count);
- if (!block->bdev)
+ if (!block->bdev_handle)
dasd_info->open_count++;
/*
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3ef636935a54..3ff46fc694f8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -233,17 +233,19 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
*/
ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
if (ret)
- goto err;
+ goto err_lock;
/*
* But we don't have such restrictions imposed on the stuff that
* is handled by the streaming API.
*/
ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
if (ret)
- goto err;
+ goto err_lock;
return sch;
+err_lock:
+ kfree(sch->lock);
err:
kfree(sch);
return ERR_PTR(ret);
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 74760c1a163b..4902d45e929c 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -102,7 +102,7 @@ config CCWGROUP
config ISM
tristate "Support for ISM vPCI Adapter"
- depends on PCI && SMC
+ depends on PCI
default n
help
Select this option if you want to use the Internal Shared Memory
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c3c1f466fe01..605013d3ee83 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -12913,8 +12913,10 @@ _mpt3sas_init(void)
mpt3sas_ctl_init(hbas_to_enumerate);
error = pci_register_driver(&mpt3sas_driver);
- if (error)
+ if (error) {
+ mpt3sas_ctl_exit(hbas_to_enumerate);
scsih_exit();
+ }
return error;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 50db08265c51..dcae09a37d49 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4953,7 +4953,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
- if (!list_empty(&ha->base_qpair->dsd_list)) {
+ if (ha->base_qpair && !list_empty(&ha->base_qpair->dsd_list)) {
struct dsd_dma *dsd_ptr, *tdsd_ptr;
/* clean up allocated prev pool */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 83b6a3f3863b..6effa13039f3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -209,7 +209,8 @@ manage_start_stop_show(struct device *dev,
return sysfs_emit(buf, "%u\n",
sdp->manage_system_start_stop &&
- sdp->manage_runtime_start_stop);
+ sdp->manage_runtime_start_stop &&
+ sdp->manage_shutdown);
}
static DEVICE_ATTR_RO(manage_start_stop);
@@ -275,6 +276,35 @@ manage_runtime_start_stop_store(struct device *dev,
}
static DEVICE_ATTR_RW(manage_runtime_start_stop);
+static ssize_t manage_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_shutdown);
+}
+
+static ssize_t manage_shutdown_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ bool v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_shutdown = v;
+
+ return count;
+}
+static DEVICE_ATTR_RW(manage_shutdown);
+
static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -607,6 +637,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_manage_start_stop.attr,
&dev_attr_manage_system_start_stop.attr,
&dev_attr_manage_runtime_start_stop.attr,
+ &dev_attr_manage_shutdown.attr,
&dev_attr_protection_type.attr,
&dev_attr_protection_mode.attr,
&dev_attr_app_tag_own.attr,
@@ -3819,8 +3850,10 @@ static void sd_shutdown(struct device *dev)
sd_sync_cache(sdkp, NULL);
}
- if (system_state != SYSTEM_RESTART &&
- sdkp->device->manage_system_start_stop) {
+ if ((system_state != SYSTEM_RESTART &&
+ sdkp->device->manage_system_start_stop) ||
+ (system_state == SYSTEM_POWER_OFF &&
+ sdkp->device->manage_shutdown)) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 5a75ab64d1ed..acc812e490d0 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -333,12 +333,15 @@ if RISCV
config ARCH_R9A07G043
bool "RISC-V Platform support for RZ/Five"
+ depends on NONPORTABLE
+ depends on RISCV_ALTERNATIVE
+ depends on !RISCV_ISA_ZICBOM
+ depends on RISCV_SBI
select ARCH_RZG2L
- select AX45MP_L2_CACHE if RISCV_DMA_NONCOHERENT
+ select AX45MP_L2_CACHE
select DMA_GLOBAL_POOL
- select ERRATA_ANDES if RISCV_SBI
- select ERRATA_ANDES_CMO if ERRATA_ANDES
-
+ select ERRATA_ANDES
+ select ERRATA_ANDES_CMO
help
This enables support for the Renesas RZ/Five SoC.
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index c3d3ab3262d3..657f5888a77b 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -15,6 +15,10 @@ ifdef CONFIG_DEBUG_FS
soundwire-bus-y += debugfs.o
endif
+ifdef CONFIG_IRQ_DOMAIN
+soundwire-bus-y += irq.o
+endif
+
#AMD driver
soundwire-amd-y := amd_manager.o
obj-$(CONFIG_SOUNDWIRE_AMD) += soundwire-amd.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 1720031f35a3..0e7bc3c40f9d 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -3,13 +3,13 @@
#include <linux/acpi.h>
#include <linux/delay.h>
-#include <linux/irq.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
+#include "irq.h"
#include "sysfs_local.h"
static DEFINE_IDA(sdw_bus_ida);
@@ -25,23 +25,6 @@ static int sdw_get_id(struct sdw_bus *bus)
return 0;
}
-static int sdw_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct sdw_bus *bus = h->host_data;
-
- irq_set_chip_data(virq, bus);
- irq_set_chip(virq, &bus->irq_chip);
- irq_set_nested_thread(virq, 1);
- irq_set_noprobe(virq);
-
- return 0;
-}
-
-static const struct irq_domain_ops sdw_domain_ops = {
- .map = sdw_irq_map,
-};
-
/**
* sdw_bus_master_add() - add a bus Master instance
* @bus: bus instance
@@ -168,13 +151,9 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
bus->params.curr_bank = SDW_BANK0;
bus->params.next_bank = SDW_BANK1;
- bus->irq_chip.name = dev_name(bus->dev);
- bus->domain = irq_domain_create_linear(fwnode, SDW_MAX_DEVICES,
- &sdw_domain_ops, bus);
- if (!bus->domain) {
- dev_err(bus->dev, "Failed to add IRQ domain\n");
- return -EINVAL;
- }
+ ret = sdw_irq_create(bus, fwnode);
+ if (ret)
+ return ret;
return 0;
}
@@ -213,7 +192,7 @@ void sdw_bus_master_delete(struct sdw_bus *bus)
{
device_for_each_child(bus->dev, NULL, sdw_delete_slave);
- irq_domain_remove(bus->domain);
+ sdw_irq_delete(bus);
sdw_master_device_del(bus);
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index fafbc284e82d..9fa93bb923d7 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -7,6 +7,7 @@
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
+#include "irq.h"
#include "sysfs_local.h"
/**
@@ -122,11 +123,8 @@ static int sdw_drv_probe(struct device *dev)
if (drv->ops && drv->ops->read_prop)
drv->ops->read_prop(slave);
- if (slave->prop.use_domain_irq) {
- slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num);
- if (!slave->irq)
- dev_warn(dev, "Failed to map IRQ\n");
- }
+ if (slave->prop.use_domain_irq)
+ sdw_irq_create_mapping(slave);
/* init the sysfs as we have properties now */
ret = sdw_slave_sysfs_init(slave);
@@ -176,8 +174,7 @@ static int sdw_drv_remove(struct device *dev)
slave->probed = false;
if (slave->prop.use_domain_irq)
- irq_dispose_mapping(irq_find_mapping(slave->bus->domain,
- slave->dev_num));
+ sdw_irq_dispose_mapping(slave);
mutex_unlock(&slave->sdw_dev_lock);
diff --git a/drivers/soundwire/irq.c b/drivers/soundwire/irq.c
new file mode 100644
index 000000000000..0c08cebb1235
--- /dev/null
+++ b/drivers/soundwire/irq.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2023 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/soundwire/sdw.h>
+#include "irq.h"
+
+static int sdw_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct sdw_bus *bus = h->host_data;
+
+ irq_set_chip_data(virq, bus);
+ irq_set_chip(virq, &bus->irq_chip);
+ irq_set_nested_thread(virq, 1);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops sdw_domain_ops = {
+ .map = sdw_irq_map,
+};
+
+int sdw_irq_create(struct sdw_bus *bus,
+ struct fwnode_handle *fwnode)
+{
+ bus->irq_chip.name = dev_name(bus->dev);
+
+ bus->domain = irq_domain_create_linear(fwnode, SDW_MAX_DEVICES,
+ &sdw_domain_ops, bus);
+ if (!bus->domain) {
+ dev_err(bus->dev, "Failed to add IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void sdw_irq_delete(struct sdw_bus *bus)
+{
+ irq_domain_remove(bus->domain);
+}
+
+void sdw_irq_create_mapping(struct sdw_slave *slave)
+{
+ slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num);
+ if (!slave->irq)
+ dev_warn(&slave->dev, "Failed to map IRQ\n");
+}
+
+void sdw_irq_dispose_mapping(struct sdw_slave *slave)
+{
+ irq_dispose_mapping(irq_find_mapping(slave->bus->domain, slave->dev_num));
+}
diff --git a/drivers/soundwire/irq.h b/drivers/soundwire/irq.h
new file mode 100644
index 000000000000..58a58046d92b
--- /dev/null
+++ b/drivers/soundwire/irq.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __SDW_IRQ_H
+#define __SDW_IRQ_H
+
+#include <linux/soundwire/sdw.h>
+#include <linux/fwnode.h>
+
+#if IS_ENABLED(CONFIG_IRQ_DOMAIN)
+
+int sdw_irq_create(struct sdw_bus *bus,
+ struct fwnode_handle *fwnode);
+void sdw_irq_delete(struct sdw_bus *bus);
+void sdw_irq_create_mapping(struct sdw_slave *slave);
+void sdw_irq_dispose_mapping(struct sdw_slave *slave);
+
+#else /* CONFIG_IRQ_DOMAIN */
+
+static inline int sdw_irq_create(struct sdw_bus *bus,
+ struct fwnode_handle *fwnode)
+{
+ return 0;
+}
+
+static inline void sdw_irq_delete(struct sdw_bus *bus)
+{
+}
+
+static inline void sdw_irq_create_mapping(struct sdw_slave *slave)
+{
+}
+
+static inline void sdw_irq_dispose_mapping(struct sdw_slave *slave)
+{
+}
+
+#endif /* CONFIG_IRQ_DOMAIN */
+
+#endif /* __SDW_IRQ_H */
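The new header above uses the usual kernel stub pattern: real declarations when CONFIG_IRQ_DOMAIN is enabled, static inline no-ops otherwise, so bus.c and bus_type.c can call the helpers unconditionally instead of wrapping every call site in #ifdef. A minimal sketch of the same pattern with hypothetical names (not part of the patch):

    /* foo_feature.h -- illustrative only */
    #if IS_ENABLED(CONFIG_FOO)
    int foo_feature_init(struct device *dev);
    void foo_feature_exit(struct device *dev);
    #else
    static inline int foo_feature_init(struct device *dev) { return 0; }
    static inline void foo_feature_exit(struct device *dev) { }
    #endif

    /* caller.c -- no preprocessor conditionals at the call site */
    ret = foo_feature_init(dev);
    if (ret)
            return ret;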
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 0ca21ff0e9cc..e42248519688 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -353,8 +353,9 @@ static int npcm_fiu_uma_read(struct spi_mem *mem,
uma_cfg |= ilog2(op->cmd.buswidth);
uma_cfg |= ilog2(op->addr.buswidth)
<< NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
- uma_cfg |= ilog2(op->dummy.buswidth)
- << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
+ if (op->dummy.nbytes)
+ uma_cfg |= ilog2(op->dummy.buswidth)
+ << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
uma_cfg |= ilog2(op->data.buswidth)
<< NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index a6a06a5f7483..8eb9eb7ce5df 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -91,7 +91,8 @@ static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
- struct block_device *bd = NULL;
+ struct bdev_handle *bdev_handle;
+ struct block_device *bd;
struct blk_integrity *bi;
blk_mode_t mode = BLK_OPEN_READ;
unsigned int max_write_zeroes_sectors;
@@ -116,12 +117,14 @@ static int iblock_configure_device(struct se_device *dev)
else
dev->dev_flags |= DF_READ_ONLY;
- bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev, NULL);
- if (IS_ERR(bd)) {
- ret = PTR_ERR(bd);
+ bdev_handle = bdev_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
+ NULL);
+ if (IS_ERR(bdev_handle)) {
+ ret = PTR_ERR(bdev_handle);
goto out_free_bioset;
}
- ib_dev->ibd_bd = bd;
+ ib_dev->ibd_bdev_handle = bdev_handle;
+ ib_dev->ibd_bd = bd = bdev_handle->bdev;
q = bdev_get_queue(bd);
@@ -177,7 +180,7 @@ static int iblock_configure_device(struct se_device *dev)
return 0;
out_blkdev_put:
- blkdev_put(ib_dev->ibd_bd, ib_dev);
+ bdev_release(ib_dev->ibd_bdev_handle);
out_free_bioset:
bioset_exit(&ib_dev->ibd_bio_set);
out:
@@ -202,8 +205,8 @@ static void iblock_destroy_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- if (ib_dev->ibd_bd != NULL)
- blkdev_put(ib_dev->ibd_bd, ib_dev);
+ if (ib_dev->ibd_bdev_handle)
+ bdev_release(ib_dev->ibd_bdev_handle);
bioset_exit(&ib_dev->ibd_bio_set);
}
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 8c55375d2f75..683f9a55945b 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -32,6 +32,7 @@ struct iblock_dev {
u32 ibd_flags;
struct bio_set ibd_bio_set;
struct block_device *ibd_bd;
+ struct bdev_handle *ibd_bdev_handle;
bool ibd_readonly;
struct iblock_dev_plug *ibd_plug;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0d4f09693ef4..41b7489d37ce 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -352,7 +352,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
- struct block_device *bd;
+ struct bdev_handle *bdev_handle;
int ret;
if (scsi_device_get(sd)) {
@@ -366,18 +366,18 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK and TYPE_ZBC using supplied udev_path
*/
- bd = blkdev_get_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ,
- pdv, NULL);
- if (IS_ERR(bd)) {
- pr_err("pSCSI: blkdev_get_by_path() failed\n");
+ bdev_handle = bdev_open_by_path(dev->udev_path,
+ BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL);
+ if (IS_ERR(bdev_handle)) {
+ pr_err("pSCSI: bdev_open_by_path() failed\n");
scsi_device_put(sd);
- return PTR_ERR(bd);
+ return PTR_ERR(bdev_handle);
}
- pdv->pdv_bd = bd;
+ pdv->pdv_bdev_handle = bdev_handle;
ret = pscsi_add_device_to_list(dev, sd);
if (ret) {
- blkdev_put(pdv->pdv_bd, pdv);
+ bdev_release(bdev_handle);
scsi_device_put(sd);
return ret;
}
@@ -564,9 +564,9 @@ static void pscsi_destroy_device(struct se_device *dev)
* from pscsi_create_type_disk()
*/
if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
- pdv->pdv_bd) {
- blkdev_put(pdv->pdv_bd, pdv);
- pdv->pdv_bd = NULL;
+ pdv->pdv_bdev_handle) {
+ bdev_release(pdv->pdv_bdev_handle);
+ pdv->pdv_bdev_handle = NULL;
}
/*
* For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
@@ -994,8 +994,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
- if (pdv->pdv_bd)
- return bdev_nr_sectors(pdv->pdv_bd);
+ if (pdv->pdv_bdev_handle)
+ return bdev_nr_sectors(pdv->pdv_bdev_handle->bdev);
return 0;
}
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 23d9a6e340d4..b0a3ef136592 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -37,7 +37,7 @@ struct pscsi_dev_virt {
int pdv_channel_id;
int pdv_target_id;
int pdv_lun_id;
- struct block_device *pdv_bd;
+ struct bdev_handle *pdv_bdev_handle;
struct scsi_device *pdv_sd;
struct Scsi_Host *pdv_lld_host;
} ____cacheline_aligned;
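The two target_core conversions above move from blkdev_get_by_path()/blkdev_put() to the bdev_handle API: bdev_open_by_path() returns a handle that bundles the block_device with its holder, and teardown releases the handle rather than the raw device. A rough sketch of the life cycle these hunks assume (not a complete driver):

    struct bdev_handle *handle;
    struct request_queue *q;

    handle = bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
                               holder, NULL);
    if (IS_ERR(handle))
            return PTR_ERR(handle);

    /* block-layer calls keep operating on the embedded block_device */
    q = bdev_get_queue(handle->bdev);

    /* release the handle on teardown instead of blkdev_put() */
    bdev_release(handle);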
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 372d64756ed6..3c15f6a9e91c 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -217,12 +217,12 @@ unlock:
return rc;
}
+/* mutex must be held by caller */
static void destroy_session(struct kref *ref)
{
struct amdtee_session *sess = container_of(ref, struct amdtee_session,
refcount);
- mutex_lock(&session_list_mutex);
list_del(&sess->list_node);
mutex_unlock(&session_list_mutex);
kfree(sess);
@@ -272,7 +272,8 @@ int amdtee_open_session(struct tee_context *ctx,
if (arg->ret != TEEC_SUCCESS) {
pr_err("open_session failed %d\n", arg->ret);
handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
+ kref_put_mutex(&sess->refcount, destroy_session,
+ &session_list_mutex);
goto out;
}
@@ -290,7 +291,8 @@ int amdtee_open_session(struct tee_context *ctx,
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
+ kref_put_mutex(&sess->refcount, destroy_session,
+ &session_list_mutex);
rc = -ENOMEM;
goto out;
}
@@ -331,7 +333,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
+ kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex);
return 0;
}
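The amdtee hunks above close a race by switching to kref_put_mutex(): the mutex is taken only when the refcount actually drops to zero, and destroy_session() then runs with session_list_mutex already held, unlinks the session and unlocks. A condensed sketch of the pattern (generic names, not the amdtee structures):

    static void release_obj(struct kref *ref)
    {
            struct obj *o = container_of(ref, struct obj, refcount);

            /* entered with obj_list_mutex held by kref_put_mutex() */
            list_del(&o->list_node);
            mutex_unlock(&obj_list_mutex);
            kfree(o);
    }

    /* drops a reference; locks obj_list_mutex only on the final put */
    kref_put_mutex(&o->refcount, release_obj, &obj_list_mutex);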
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index dbdcad8d73bf..d8b9c734abd3 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -41,6 +41,7 @@
#define PHY_PORT_CS1_LINK_STATE_SHIFT 26
#define ICM_TIMEOUT 5000 /* ms */
+#define ICM_RETRIES 3
#define ICM_APPROVE_TIMEOUT 10000 /* ms */
#define ICM_MAX_LINK 4
@@ -296,10 +297,9 @@ static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
static int icm_request(struct tb *tb, const void *request, size_t request_size,
void *response, size_t response_size, size_t npackets,
- unsigned int timeout_msec)
+ int retries, unsigned int timeout_msec)
{
struct icm *icm = tb_priv(tb);
- int retries = 3;
do {
struct tb_cfg_request *req;
@@ -410,7 +410,7 @@ static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
return -ENOMEM;
ret = icm_request(tb, &request, sizeof(request), switches,
- sizeof(*switches), npackets, ICM_TIMEOUT);
+ sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
goto err_free;
@@ -463,7 +463,7 @@ icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -488,7 +488,7 @@ static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
memset(&reply, 0, sizeof(reply));
/* Use larger timeout as establishing tunnels can take some time */
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_APPROVE_TIMEOUT);
+ 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
if (ret)
return ret;
@@ -515,7 +515,7 @@ static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -543,7 +543,7 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -577,7 +577,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, 20000);
+ 1, 10, 2000);
if (ret)
return ret;
@@ -1053,7 +1053,7 @@ static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_APPROVE_TIMEOUT);
+ 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
if (ret)
return ret;
@@ -1081,7 +1081,7 @@ static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1110,7 +1110,7 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1144,7 +1144,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1170,7 +1170,7 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1496,7 +1496,7 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1522,7 +1522,7 @@ static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1543,7 +1543,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1604,7 +1604,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
@@ -1626,7 +1626,7 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, 20000);
+ 1, ICM_RETRIES, 20000);
if (ret)
return ret;
@@ -2298,7 +2298,7 @@ static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, ICM_TIMEOUT);
+ 1, ICM_RETRIES, ICM_TIMEOUT);
if (ret)
return ret;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 43171cc1cc2d..bd5815f8f23b 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2725,6 +2725,13 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
!tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
return 0;
+ /*
+ * Both lanes need to be in CL0. Here we assume lane 0 is already in
+ * CL0 and check just for lane 1.
+ */
+ if (tb_wait_for_port(down->dual_link_port, false) <= 0)
+ return -ENOTCONN;
+
ret = tb_port_lane_bonding_enable(up);
if (ret) {
tb_port_warn(up, "failed to enable lane bonding\n");
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index dd0a1ef8cf12..27bd6ca6f99e 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1907,14 +1907,14 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
in = &sw->ports[ev->port];
if (!tb_port_is_dpin(in)) {
tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
- goto unlock;
+ goto put_sw;
}
tb_port_dbg(in, "handling bandwidth allocation request\n");
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
tb_port_warn(in, "bandwidth allocation mode not enabled\n");
- goto unlock;
+ goto put_sw;
}
ret = usb4_dp_port_requested_bandwidth(in);
@@ -1923,7 +1923,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "no bandwidth request active\n");
else
tb_port_warn(in, "failed to read requested bandwidth\n");
- goto unlock;
+ goto put_sw;
}
requested_bw = ret;
@@ -1932,7 +1932,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (!tunnel) {
tb_port_warn(in, "failed to find tunnel\n");
- goto unlock;
+ goto put_sw;
}
out = tunnel->dst_port;
@@ -1959,6 +1959,8 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_recalc_estimated_bandwidth(tb);
}
+put_sw:
+ tb_switch_put(sw);
unlock:
mutex_unlock(&tb->lock);
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 747f88703d5c..11f2aec2a5d3 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -382,7 +382,7 @@ static int tmu_mode_init(struct tb_switch *sw)
} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
- else if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
+ else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
} else if (rate) {
sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 5b5566862318..9803f0bbf20d 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -703,6 +703,27 @@ out_unlock:
mutex_unlock(&xdomain_lock);
}
+static void start_handshake(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_INIT;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+/* Can be called from state_work */
+static void __stop_handshake(struct tb_xdomain *xd)
+{
+ cancel_delayed_work_sync(&xd->properties_changed_work);
+ xd->properties_changed_retries = 0;
+ xd->state_retries = 0;
+}
+
+static void stop_handshake(struct tb_xdomain *xd)
+{
+ cancel_delayed_work_sync(&xd->state_work);
+ __stop_handshake(xd);
+}
+
static void tb_xdp_handle_request(struct work_struct *work)
{
struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
@@ -765,6 +786,15 @@ static void tb_xdp_handle_request(struct work_struct *work)
case UUID_REQUEST:
tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
+ /*
+ * If we've stopped the discovery with an error such as
+ * timing out, we will restart the handshake now that we
+ * received a UUID request from the remote host.
+ */
+ if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
+ dev_dbg(&xd->dev, "restarting handshake\n");
+ start_handshake(xd);
+ }
break;
case LINK_STATE_STATUS_REQUEST:
@@ -1521,6 +1551,13 @@ static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
+static void tb_xdomain_failed(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_ERROR;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
static void tb_xdomain_state_work(struct work_struct *work)
{
struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
@@ -1547,7 +1584,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
if (ret) {
if (ret == -EAGAIN)
goto retry_state;
- xd->state = XDOMAIN_STATE_ERROR;
+ tb_xdomain_failed(xd);
} else {
tb_xdomain_queue_properties_changed(xd);
if (xd->bonding_possible)
@@ -1612,7 +1649,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
if (ret) {
if (ret == -EAGAIN)
goto retry_state;
- xd->state = XDOMAIN_STATE_ERROR;
+ tb_xdomain_failed(xd);
} else {
xd->state = XDOMAIN_STATE_ENUMERATED;
}
@@ -1623,6 +1660,8 @@ static void tb_xdomain_state_work(struct work_struct *work)
break;
case XDOMAIN_STATE_ERROR:
+ dev_dbg(&xd->dev, "discovery failed, stopping handshake\n");
+ __stop_handshake(xd);
break;
default:
@@ -1833,21 +1872,6 @@ static void tb_xdomain_release(struct device *dev)
kfree(xd);
}
-static void start_handshake(struct tb_xdomain *xd)
-{
- xd->state = XDOMAIN_STATE_INIT;
- queue_delayed_work(xd->tb->wq, &xd->state_work,
- msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
-}
-
-static void stop_handshake(struct tb_xdomain *xd)
-{
- cancel_delayed_work_sync(&xd->properties_changed_work);
- cancel_delayed_work_sync(&xd->state_work);
- xd->properties_changed_retries = 0;
- xd->state_retries = 0;
-}
-
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
stop_handshake(tb_to_xdomain(dev));
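The split between stop_handshake() and __stop_handshake() above exists so the error path inside tb_xdomain_state_work() can stop the retry machinery without cancelling state_work itself: synchronously cancelling the delayed work you are currently running inside would wait for your own completion. A generic sketch of the hazard being avoided (hypothetical names):

    static void my_state_work(struct work_struct *work)
    {
            struct ctx *c = container_of(work, struct ctx, state_work.work);

            /* WRONG here: would wait for my_state_work() to finish */
            /* cancel_delayed_work_sync(&c->state_work); */

            /* OK: only cancel the other delayed work from this context */
            cancel_delayed_work_sync(&c->other_work);
    }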
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 26dd089d8e82..ca972fd37725 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1617,7 +1617,7 @@ static int omap8250_suspend(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up = serial8250_get_port(priv->line);
- int err;
+ int err = 0;
serial8250_suspend_port(priv->line);
@@ -1627,7 +1627,8 @@ static int omap8250_suspend(struct device *dev)
if (!device_may_wakeup(dev))
priv->wer = 0;
serial_out(up, UART_OMAP_WER, priv->wer);
- err = pm_runtime_force_suspend(dev);
+ if (uart_console(&up->port) && console_suspend_enabled)
+ err = pm_runtime_force_suspend(dev);
flush_work(&priv->qos_work);
return err;
@@ -1636,11 +1637,15 @@ static int omap8250_suspend(struct device *dev)
static int omap8250_resume(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
+ struct uart_8250_port *up = serial8250_get_port(priv->line);
int err;
- err = pm_runtime_force_resume(dev);
- if (err)
- return err;
+ if (uart_console(&up->port) && console_suspend_enabled) {
+ err = pm_runtime_force_resume(dev);
+ if (err)
+ return err;
+ }
+
serial8250_resume_port(priv->line);
/* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */
pm_runtime_mark_last_busy(dev);
@@ -1717,16 +1722,6 @@ static int omap8250_runtime_suspend(struct device *dev)
if (priv->line >= 0)
up = serial8250_get_port(priv->line);
- /*
- * When using 'no_console_suspend', the console UART must not be
- * suspended. Since driver suspend is managed by runtime suspend,
- * preventing runtime suspend (by returning error) will keep device
- * active during suspend.
- */
- if (priv->is_suspending && !console_suspend_enabled) {
- if (up && uart_console(&up->port))
- return -EBUSY;
- }
if (priv->habit & UART_ERRATA_CLOCK_DISABLE) {
int ret;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7bdc21d5e13b..d5ba6e90bd95 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
* enabled, serial_port_runtime_resume() calls start_tx() again
* after enabling the device.
*/
- if (pm_runtime_active(&port_dev->dev))
+ if (!pm_runtime_enabled(port->dev) || pm_runtime_active(port->dev))
port->ops->start_tx(port);
pm_runtime_mark_last_busy(&port_dev->dev);
pm_runtime_put_autosuspend(&port_dev->dev);
@@ -1404,12 +1404,18 @@ static void uart_set_rs485_termination(struct uart_port *port,
static int uart_rs485_config(struct uart_port *port)
{
struct serial_rs485 *rs485 = &port->rs485;
+ unsigned long flags;
int ret;
+ if (!(rs485->flags & SER_RS485_ENABLED))
+ return 0;
+
uart_sanitize_serial_rs485(port, rs485);
uart_set_rs485_termination(port, rs485);
+ spin_lock_irqsave(&port->lock, flags);
ret = port->rs485_config(port, NULL, rs485);
+ spin_unlock_irqrestore(&port->lock, flags);
if (ret)
memset(rs485, 0, sizeof(*rs485));
@@ -2474,11 +2480,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
if (ret == 0) {
if (tty)
uart_change_line_settings(tty, state, NULL);
+ uart_rs485_config(uport);
spin_lock_irq(&uport->lock);
if (!(uport->rs485.flags & SER_RS485_ENABLED))
ops->set_mctrl(uport, uport->mctrl);
- else
- uart_rs485_config(uport);
ops->start_tx(uport);
spin_unlock_irq(&uport->lock);
tty_port_set_initialized(port, true);
@@ -2587,10 +2592,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
port->mctrl &= TIOCM_DTR;
if (!(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
- else
- uart_rs485_config(port);
spin_unlock_irqrestore(&port->lock, flags);
+ uart_rs485_config(port);
+
/*
* If this driver supports console, and it hasn't been
* successfully registered yet, try to re-register it.
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8a94e5a43c6d..d13d2f2e76c7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -818,7 +818,7 @@ static void tty_update_time(struct tty_struct *tty, bool mtime)
spin_lock(&tty->files_lock);
list_for_each_entry(priv, &tty->tty_files, list) {
struct inode *inode = file_inode(priv->file);
- struct timespec64 *time = mtime ? &inode->i_mtime : &inode->i_atime;
+ struct timespec64 time = mtime ? inode_get_mtime(inode) : inode_get_atime(inode);
/*
* We only care if the two values differ in anything other than the
@@ -826,8 +826,12 @@ static void tty_update_time(struct tty_struct *tty, bool mtime)
* the time of the tty device, otherwise it could be construed as a
* security leak to let userspace know the exact timing of the tty.
*/
- if ((sec ^ time->tv_sec) & ~7)
- time->tv_sec = sec;
+ if ((sec ^ time.tv_sec) & ~7) {
+ if (mtime)
+ inode_set_mtime(inode, sec, 0);
+ else
+ inode_set_atime(inode, sec, 0);
+ }
}
spin_unlock(&tty->files_lock);
}
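For context on the unchanged comparison: the XOR followed by & ~7 ignores the low three bits of the seconds values, so the tty inode timestamps are only rewritten when the old and new times fall into different 8-second buckets; the patch only switches the actual update to the inode_set_mtime()/inode_set_atime() helpers. A self-contained illustration of the mask arithmetic (arbitrary example values):

    #include <stdio.h>

    int main(void)
    {
            long old_sec = 1700000003, new_sec = 1700000007;

            /* same 8-second bucket: no timestamp update */
            printf("%d\n", ((old_sec ^ new_sec) & ~7L) != 0);  /* prints 0 */

            new_sec = 1700000011;  /* crosses into the next bucket */
            printf("%d\n", ((old_sec ^ new_sec) & ~7L) != 0);  /* prints 1 */
            return 0;
    }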
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index c2df07545f96..8382e8cfa414 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -6895,7 +6895,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
mask, 0, 1000, 1000);
dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
- tag, err ? "succeeded" : "failed");
+ tag, err < 0 ? "failed" : "succeeded");
out:
return err;
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index fff9ec9c391f..4b67749edb99 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -1125,6 +1125,9 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
unsigned long flags;
int ret;
+ if (request->status != -EINPROGRESS)
+ return 0;
+
if (!pep->endpoint.desc) {
dev_err(pdev->dev,
"%s: can't dequeue to disabled endpoint\n",
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 4a4dbc2c1561..81a9c9d6be08 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -131,8 +131,7 @@ void cdns_set_active(struct cdns *cdns, u8 set_active);
#else /* CONFIG_PM_SLEEP */
static inline int cdns_resume(struct cdns *cdns)
{ return 0; }
-static inline int cdns_set_active(struct cdns *cdns, u8 set_active)
-{ return 0; }
+static inline void cdns_set_active(struct cdns *cdns, u8 set_active) { }
static inline int cdns_suspend(struct cdns *cdns)
{ return 0; }
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4f68f6ef3cc1..3beb6a862e80 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -2642,21 +2642,24 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL\n", __func__);
ret = proc_control(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_BULK:
snoop(&dev->dev, "%s: BULK\n", __func__);
ret = proc_bulk(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_RESETEP:
snoop(&dev->dev, "%s: RESETEP\n", __func__);
ret = proc_resetep(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_RESET:
@@ -2668,7 +2671,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__);
ret = proc_clearhalt(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_GETDRIVER:
@@ -2695,7 +2699,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB\n", __func__);
ret = proc_submiturb(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
#ifdef CONFIG_COMPAT
@@ -2703,14 +2708,16 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL32\n", __func__);
ret = proc_control_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_BULK32:
snoop(&dev->dev, "%s: BULK32\n", __func__);
ret = proc_bulk_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_DISCSIGNAL32:
@@ -2722,7 +2729,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
ret = proc_submiturb_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_IOCTL32:
@@ -2804,7 +2812,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
done:
usb_unlock_device(dev);
if (ret >= 0)
- inode->i_atime = current_time(inode);
+ inode_set_atime_to_ts(inode, current_time(inode));
return ret;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 3c54b218301c..0ff47eeffb49 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -151,6 +151,10 @@ int usb_device_supports_lpm(struct usb_device *udev)
if (udev->quirks & USB_QUIRK_NO_LPM)
return 0;
+ /* Skip if the device BOS descriptor couldn't be read */
+ if (!udev->bos)
+ return 0;
+
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
*/
@@ -327,6 +331,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
return;
+ /* Skip if the device BOS descriptor couldn't be read */
+ if (!udev->bos)
+ return;
+
hub = usb_hub_to_struct_hub(udev->parent);
/* It doesn't take time to transition the roothub into U0, since it
* doesn't have an upstream link.
@@ -2704,13 +2712,17 @@ out_authorized:
static enum usb_ssp_rate get_port_ssp_rate(struct usb_device *hdev,
u32 ext_portstatus)
{
- struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
+ struct usb_ssp_cap_descriptor *ssp_cap;
u32 attr;
u8 speed_id;
u8 ssac;
u8 lanes;
int i;
+ if (!hdev->bos)
+ goto out;
+
+ ssp_cap = hdev->bos->ssp_cap;
if (!ssp_cap)
goto out;
@@ -4215,8 +4227,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
int timeout;
- __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
- __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
+ __u8 u1_mel;
+ __le16 u2_mel;
+
+ /* Skip if the device BOS descriptor couldn't be read */
+ if (!udev->bos)
+ return;
+
+ u1_mel = udev->bos->ss_cap->bU1devExitLat;
+ u2_mel = udev->bos->ss_cap->bU2DevExitLat;
/* If the device says it doesn't have *any* exit latency to come out of
* U1 or U2, it's probably lying. Assume it doesn't implement that link
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 37897afd1b64..d44dd7f6623e 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -153,7 +153,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev)
{
return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS &&
le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 &&
- hdev->bos->ssp_cap);
+ hdev->bos && hdev->bos->ssp_cap);
}
static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 9c6bf054f15d..343d2570189f 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -279,9 +279,46 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
* XHCI driver will reset the host block. If dwc3 was configured for
* host-only mode or current role is host, then we can return early.
*/
- if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
return 0;
+ /*
+ * If the dr_mode is host and the dwc->current_dr_role is not the
+ * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
+ * isn't executed yet. Ensure the phy is ready before the controller
+ * updates the GCTL.PRTCAPDIR or other settings by soft-resetting
+ * the phy.
+ *
+ * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
+ * is port index. If this is a multiport host, then we need to reset
+ * all active ports.
+ */
+ if (dwc->dr_mode == USB_DR_MODE_HOST) {
+ u32 usb3_port;
+ u32 usb2_port;
+
+ usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+
+ usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+
+ /* Small delay for phy reset assertion */
+ usleep_range(1000, 2000);
+
+ usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+
+ usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+
+ /* Wait for clock synchronization */
+ msleep(50);
+ return 0;
+ }
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg |= DWC3_DCTL_CSFTRST;
reg &= ~DWC3_DCTL_RUN_STOP;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6e9ef35a43a7..ec26df0306f2 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1383,8 +1383,8 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
inode->i_mode = perms->mode;
inode->i_uid = perms->uid;
inode->i_gid = perms->gid;
- inode->i_atime = ts;
- inode->i_mtime = ts;
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
inode->i_private = data;
if (fops)
inode->i_fop = fops;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index feccf4c8cc4f..e6ab8cc225ff 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1156,7 +1156,8 @@ static int ncm_unwrap_ntb(struct gether *port,
struct sk_buff_head *list)
{
struct f_ncm *ncm = func_to_ncm(&port->func);
- __le16 *tmp = (void *) skb->data;
+ unsigned char *ntb_ptr = skb->data;
+ __le16 *tmp;
unsigned index, index2;
int ndp_index;
unsigned dg_len, dg_len2;
@@ -1169,6 +1170,10 @@ static int ncm_unwrap_ntb(struct gether *port,
const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
int dgram_counter;
+ int to_process = skb->len;
+
+parse_ntb:
+ tmp = (__le16 *)ntb_ptr;
/* dwSignature */
if (get_unaligned_le32(tmp) != opts->nth_sign) {
@@ -1215,7 +1220,7 @@ static int ncm_unwrap_ntb(struct gether *port,
* walk through NDP
* dwSignature
*/
- tmp = (void *)(skb->data + ndp_index);
+ tmp = (__le16 *)(ntb_ptr + ndp_index);
if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
goto err;
@@ -1272,11 +1277,11 @@ static int ncm_unwrap_ntb(struct gether *port,
if (ncm->is_crc) {
uint32_t crc, crc2;
- crc = get_unaligned_le32(skb->data +
+ crc = get_unaligned_le32(ntb_ptr +
index + dg_len -
crc_len);
crc2 = ~crc32_le(~0,
- skb->data + index,
+ ntb_ptr + index,
dg_len - crc_len);
if (crc != crc2) {
INFO(port->func.config->cdev,
@@ -1303,7 +1308,7 @@ static int ncm_unwrap_ntb(struct gether *port,
dg_len - crc_len);
if (skb2 == NULL)
goto err;
- skb_put_data(skb2, skb->data + index,
+ skb_put_data(skb2, ntb_ptr + index,
dg_len - crc_len);
skb_queue_tail(list, skb2);
@@ -1316,10 +1321,17 @@ static int ncm_unwrap_ntb(struct gether *port,
} while (ndp_len > 2 * (opts->dgram_item_len * 2));
} while (ndp_index);
- dev_consume_skb_any(skb);
-
VDBG(port->func.config->cdev,
"Parsed NTB with %d frames\n", dgram_counter);
+
+ to_process -= block_len;
+ if (to_process != 0) {
+ ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
+ goto parse_ntb;
+ }
+
+ dev_consume_skb_any(skb);
+
return 0;
err:
skb_queue_purge(list);
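The reworked ncm_unwrap_ntb() above handles a USB transfer that packs several NTBs back to back: after one NTB of block_len bytes has been parsed from ntb_ptr, the remaining to_process bytes are parsed from the next offset, and the skb is consumed only once everything has been walked. Stripped of the per-NTB details, the control flow added by the patch amounts to the following skeleton, where parse_one_ntb() is a stand-in for the inline NTH/NDP/datagram parsing, not a real helper:

    unsigned char *ntb_ptr = skb->data;
    int to_process = skb->len;
    int block_len;

    do {
            block_len = parse_one_ntb(ntb_ptr, list);   /* queues datagrams */
            to_process -= block_len;
            ntb_ptr += block_len;
    } while (to_process != 0);

    dev_consume_skb_any(skb);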
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index ce9e31f3d26b..cdc0926100fd 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1969,7 +1969,7 @@ gadgetfs_make_inode (struct super_block *sb,
inode->i_mode = mode;
inode->i_uid = make_kuid(&init_user_ns, default_uid);
inode->i_gid = make_kgid(&init_user_ns, default_gid);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_private = data;
inode->i_fop = fops;
}
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 56b8286a8009..74590f93ea61 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -497,11 +497,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
/* Get the Buffer address and copy the transmit data.*/
eprambase = (u32 __force *)(udc->addr + ep->rambase);
if (ep->is_in) {
- memcpy(eprambase, bufferptr, bytestosend);
+ memcpy_toio((void __iomem *)eprambase, bufferptr,
+ bytestosend);
udc->write_fn(udc->addr, ep->offset +
XUSB_EP_BUF0COUNT_OFFSET, bufferlen);
} else {
- memcpy(bufferptr, eprambase, bytestosend);
+ memcpy_toio((void __iomem *)bufferptr, eprambase,
+ bytestosend);
}
/*
* Enable the buffer for transmission.
@@ -515,11 +517,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
eprambase = (u32 __force *)(udc->addr + ep->rambase +
ep->ep_usb.maxpacket);
if (ep->is_in) {
- memcpy(eprambase, bufferptr, bytestosend);
+ memcpy_toio((void __iomem *)eprambase, bufferptr,
+ bytestosend);
udc->write_fn(udc->addr, ep->offset +
XUSB_EP_BUF1COUNT_OFFSET, bufferlen);
} else {
- memcpy(bufferptr, eprambase, bytestosend);
+ memcpy_toio((void __iomem *)bufferptr, eprambase,
+ bytestosend);
}
/*
* Enable the buffer for transmission.
@@ -1021,7 +1025,7 @@ static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req)
udc->addr);
length = req->usb_req.actual = min_t(u32, length,
EP0_MAX_PACKET);
- memcpy(corebuf, req->usb_req.buf, length);
+ memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length);
udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length);
udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
} else {
@@ -1752,7 +1756,7 @@ static void xudc_handle_setup(struct xusb_udc *udc)
/* Load up the chapter 9 command buffer.*/
ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET);
- memcpy(&setup, ep0rambase, 8);
+ memcpy_toio((void __iomem *)&setup, ep0rambase, 8);
udc->setup = setup;
udc->setup.wValue = cpu_to_le16((u16 __force)setup.wValue);
@@ -1839,7 +1843,7 @@ static void xudc_ep0_out(struct xusb_udc *udc)
(ep0->rambase << 2));
buffer = req->usb_req.buf + req->usb_req.actual;
req->usb_req.actual = req->usb_req.actual + bytes_to_rx;
- memcpy(buffer, ep0rambase, bytes_to_rx);
+ memcpy_toio((void __iomem *)buffer, ep0rambase, bytes_to_rx);
if (req->usb_req.length == req->usb_req.actual) {
/* Data transfer completed get ready for Status stage */
@@ -1915,7 +1919,7 @@ static void xudc_ep0_in(struct xusb_udc *udc)
(ep0->rambase << 2));
buffer = req->usb_req.buf + req->usb_req.actual;
req->usb_req.actual = req->usb_req.actual + length;
- memcpy(ep0rambase, buffer, length);
+ memcpy_toio((void __iomem *)ep0rambase, buffer, length);
}
udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count);
udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0054d02239e2..0df5d807a77e 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1062,19 +1062,19 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status,
*status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
/* USB3 specific wPortStatus bits */
- if (portsc & PORT_POWER) {
+ if (portsc & PORT_POWER)
*status |= USB_SS_PORT_STAT_POWER;
- /* link state handling */
- if (link_state == XDEV_U0)
- bus_state->suspended_ports &= ~(1 << portnum);
- }
- /* remote wake resume signaling complete */
- if (bus_state->port_remote_wakeup & (1 << portnum) &&
+ /* no longer suspended or resuming */
+ if (link_state != XDEV_U3 &&
link_state != XDEV_RESUME &&
link_state != XDEV_RECOVERY) {
- bus_state->port_remote_wakeup &= ~(1 << portnum);
- usb_hcd_end_port_resume(&hcd->self, portnum);
+ /* remote wake resume signaling complete */
+ if (bus_state->port_remote_wakeup & (1 << portnum)) {
+ bus_state->port_remote_wakeup &= ~(1 << portnum);
+ usb_hcd_end_port_resume(&hcd->self, portnum);
+ }
+ bus_state->suspended_ports &= ~(1 << portnum);
}
xhci_hub_report_usb3_link_state(xhci, status, portsc);
@@ -1131,6 +1131,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
usb_hcd_end_port_resume(&port->rhub->hcd->self, portnum);
}
port->rexit_active = 0;
+ bus_state->suspended_ports &= ~(1 << portnum);
}
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 8714ab5bf04d..0a37f0d511cf 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2285,8 +2285,8 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
writel(erst_size, &ir->ir_set->erst_size);
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
- erst_base &= ERST_PTR_MASK;
- erst_base |= (ir->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+ erst_base &= ERST_BASE_RSVDP;
+ erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
/* Set the event ring dequeue address of this interrupter */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1dde53f6eb31..3e5dc0723a8f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -798,7 +798,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
struct xhci_ring *ring, struct xhci_td *td)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_segment *seg = td->bounce_seg;
struct urb *urb = td->urb;
size_t len;
@@ -2996,7 +2996,8 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
struct xhci_interrupter *ir,
- union xhci_trb *event_ring_deq)
+ union xhci_trb *event_ring_deq,
+ bool clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
@@ -3017,12 +3018,13 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
return;
/* Update HC event ring dequeue pointer */
- temp_64 &= ERST_PTR_MASK;
+ temp_64 &= ERST_DESI_MASK;
temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
}
/* Clear the event handler busy flag (RW1C) */
- temp_64 |= ERST_EHB;
+ if (clear_ehb)
+ temp_64 |= ERST_EHB;
xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
@@ -3103,7 +3105,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
while (xhci_handle_event(xhci, ir) > 0) {
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
+ xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
event_ring_deq = ir->event_ring->dequeue;
/* ring is half-full, force isoc trbs to interrupt more often */
@@ -3113,7 +3115,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
event_loop = 0;
}
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
+ xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
ret = IRQ_HANDLED;
out:
@@ -3469,7 +3471,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
u32 *trb_buff_len, struct xhci_segment *seg)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
unsigned int unalign;
unsigned int max_pkt;
u32 new_buff_len;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7e282b4522c0..5df370482521 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -514,7 +514,7 @@ struct xhci_intr_reg {
#define ERST_SIZE_MASK (0xffff << 16)
/* erst_base bitmasks */
-#define ERST_BASE_RSVDP (0x3f)
+#define ERST_BASE_RSVDP (GENMASK_ULL(5, 0))
/* erst_dequeue bitmasks */
/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
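The two ERST hunks belong together: ERST_BASE_RSVDP becomes a 64-bit mask of bits 5:0, and xhci_add_interrupter() keeps those reserved-and-preserved bits from the hardware value while writing the 64-byte-aligned segment-table DMA address into the remaining bits. Worked through with arbitrary example values:

    /* ERST_BASE_RSVDP == GENMASK_ULL(5, 0) == 0x3f */
    u64 hw  = 0x0000000012340005ULL;             /* value read back, RsvdP bits set */
    u64 dma = 0x00000000fedcba40ULL;             /* 64-byte aligned ERST address    */
    u64 out = (hw & 0x3fULL) | (dma & ~0x3fULL); /* == 0x00000000fedcba45           */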
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 3da1a4659c5f..57bbe1309094 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -434,6 +434,7 @@ static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index 4026ba64c592..2a4ab5ac0ebe 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -47,6 +47,7 @@ static const struct onboard_hub_pdata vialab_vl817_data = {
};
static const struct of_device_id onboard_hub_match[] = {
+ { .compatible = "usb424,2412", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 78c726a71b17..2d623284edf6 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -39,7 +39,7 @@ static const struct musb_register_map musb_regmap[] = {
{ "IntrUsbE", MUSB_INTRUSBE, 8 },
{ "DevCtl", MUSB_DEVCTL, 8 },
{ "VControl", 0x68, 32 },
- { "HWVers", 0x69, 16 },
+ { "HWVers", MUSB_HWVERS, 16 },
{ "LinkInfo", MUSB_LINKINFO, 8 },
{ "VPLen", MUSB_VPLEN, 8 },
{ "HS_EOF1", MUSB_HS_EOF1, 8 },
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index a02c29216955..bc4507781167 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -321,10 +321,16 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
musb_giveback(musb, urb, status);
qh->is_ready = ready;
+ /*
+ * musb->lock was released in musb_giveback, so qh may have
+ * been freed; look it up again
+ */
+ qh = musb_ep_get_qh(hw_ep, is_in);
+
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
* invalidate qh as soon as list_empty(&hep->urb_list)
*/
- if (list_empty(&qh->hep->urb_list)) {
+ if (qh && list_empty(&qh->hep->urb_list)) {
struct list_head *head;
struct dma_controller *dma = musb->dma_controller;
@@ -2398,6 +2404,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* and its URB list has emptied, recycle this qh.
*/
if (ready && list_empty(&qh->hep->urb_list)) {
+ musb_ep_set_qh(qh->hw_ep, is_in, NULL);
qh->hep->hcpriv = NULL;
list_del(&qh->ring);
kfree(qh);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7994a4549a6c..45dcfaadaf98 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -203,6 +203,9 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5829E_ESIM 0x81e4
#define DELL_PRODUCT_5829E 0x81e6
+#define DELL_PRODUCT_FM101R 0x8213
+#define DELL_PRODUCT_FM101R_ESIM 0x8215
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -1108,6 +1111,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
.driver_info = RSVD(0) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R_ESIM, 0xff) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1290,6 +1295,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
@@ -2262,6 +2268,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ } /* Terminating entry */
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 426c88a516e5..59e0218a8bc5 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -304,6 +304,11 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
typec_altmode_update_active(alt, false);
dp->data.status = 0;
dp->data.conf = 0;
+ if (dp->hpd) {
+ drm_connector_oob_hotplug_event(dp->connector_fwnode);
+ dp->hpd = false;
+ sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
+ }
break;
case DP_CMD_STATUS_UPDATE:
dp->data.status = *vdo;
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
index bb0b8479d80f..52c81378e36e 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -381,10 +381,6 @@ static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdph
struct device *dev = pmic_typec_pdphy->dev;
int ret;
- ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
- if (ret)
- return ret;
-
/* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
ret = regmap_update_bits(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
@@ -422,8 +418,6 @@ static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdp
ret = regmap_write(pmic_typec_pdphy->regmap,
pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
- regulator_disable(pmic_typec_pdphy->vdd_pdphy);
-
return ret;
}
@@ -447,6 +441,10 @@ int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
int i;
int ret;
+ ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
+ if (ret)
+ return ret;
+
pmic_typec_pdphy->tcpm_port = tcpm_port;
ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
@@ -467,6 +465,8 @@ void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
disable_irq(pmic_typec_pdphy->irq_data[i].irq);
qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
+
+ regulator_disable(pmic_typec_pdphy->vdd_pdphy);
}
struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
index 384b42267f1f..b35c6e07911e 100644
--- a/drivers/usb/typec/ucsi/psy.c
+++ b/drivers/usb/typec/ucsi/psy.c
@@ -37,6 +37,15 @@ static int ucsi_psy_get_scope(struct ucsi_connector *con,
struct device *dev = con->ucsi->dev;
device_property_read_u8(dev, "scope", &scope);
+ if (scope == POWER_SUPPLY_SCOPE_UNKNOWN) {
+ u32 mask = UCSI_CAP_ATTR_POWER_AC_SUPPLY |
+ UCSI_CAP_ATTR_BATTERY_CHARGING;
+
+ if (con->ucsi->cap.attributes & mask)
+ scope = POWER_SUPPLY_SCOPE_SYSTEM;
+ else
+ scope = POWER_SUPPLY_SCOPE_DEVICE;
+ }
val->intval = scope;
return 0;
}
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index c6dfe3dff346..61b64558f96c 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -787,6 +787,7 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
typec_set_mode(con->port, TYPEC_STATE_SAFE);
+ typec_partner_set_usb_power_delivery(con->partner, NULL);
ucsi_unregister_partner_pdos(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
typec_unregister_partner(con->partner);
@@ -884,6 +885,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (ret < 0) {
dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
__func__, ret);
+ clear_bit(EVENT_PENDING, &con->ucsi->flags);
goto out_unlock;
}
diff --git a/drivers/vdpa/mlx5/net/debug.c b/drivers/vdpa/mlx5/net/debug.c
index 60d6ac68cdc4..9c85162c19fc 100644
--- a/drivers/vdpa/mlx5/net/debug.c
+++ b/drivers/vdpa/mlx5/net/debug.c
@@ -146,7 +146,8 @@ void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev)
ndev->rx_dent = debugfs_create_dir("rx", ndev->debugfs);
}
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg)
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev)
{
- debugfs_remove_recursive(dbg);
+ debugfs_remove_recursive(ndev->debugfs);
+ ndev->debugfs = NULL;
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 40a03b08d7cf..946488b8989f 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -625,30 +625,70 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
}
+static int read_umem_params(struct mlx5_vdpa_net *ndev)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ u16 opmod = (MLX5_CAP_VDPA_EMULATION << 1) | (HCA_CAP_OPMOD_GET_CUR & 0x01);
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ int out_size;
+ void *caps;
+ void *out;
+ int err;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev,
+ "Failed reading vdpa umem capabilities with err %d\n", err);
+ goto out;
+ }
+
+ caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+ ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a);
+ ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b);
+
+ ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a);
+ ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b);
+
+ ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a);
+ ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b);
+
+out:
+ kfree(out);
+ return 0;
+}
+
static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
struct mlx5_vdpa_umem **umemp)
{
- struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
- int p_a;
- int p_b;
+ u32 p_a;
+ u32 p_b;
switch (num) {
case 1:
- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
+ p_a = ndev->umem_1_buffer_param_a;
+ p_b = ndev->umem_1_buffer_param_b;
*umemp = &mvq->umem1;
break;
case 2:
- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
+ p_a = ndev->umem_2_buffer_param_a;
+ p_b = ndev->umem_2_buffer_param_b;
*umemp = &mvq->umem2;
break;
case 3:
- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
+ p_a = ndev->umem_3_buffer_param_a;
+ p_b = ndev->umem_3_buffer_param_b;
*umemp = &mvq->umem3;
break;
}
+
(*umemp)->size = p_a * mvq->num_ent + p_b;
}
@@ -2679,6 +2719,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
goto out;
}
mlx5_vdpa_add_debugfs(ndev);
+
+ err = read_umem_params(ndev);
+ if (err)
+ goto err_setup;
+
err = setup_virtqueues(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
@@ -2713,7 +2758,7 @@ err_tir:
err_rqt:
teardown_virtqueues(ndev);
err_setup:
- mlx5_vdpa_remove_debugfs(ndev->debugfs);
+ mlx5_vdpa_remove_debugfs(ndev);
out:
return err;
}
@@ -2727,8 +2772,7 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
if (!ndev->setup)
return;
- mlx5_vdpa_remove_debugfs(ndev->debugfs);
- ndev->debugfs = NULL;
+ mlx5_vdpa_remove_debugfs(ndev);
teardown_steering(ndev);
destroy_tir(ndev);
destroy_rqt(ndev);
@@ -3489,8 +3533,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct workqueue_struct *wq;
- mlx5_vdpa_remove_debugfs(ndev->debugfs);
- ndev->debugfs = NULL;
unregister_link_notifier(ndev);
_vdpa_unregister_device(dev);
wq = mvdev->wq;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
index 36c44d9fdd16..90b556a57971 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.h
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
@@ -65,6 +65,15 @@ struct mlx5_vdpa_net {
struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
struct mlx5_vdpa_irq_pool irqp;
struct dentry *debugfs;
+
+ u32 umem_1_buffer_param_a;
+ u32 umem_1_buffer_param_b;
+
+ u32 umem_2_buffer_param_a;
+ u32 umem_2_buffer_param_b;
+
+ u32 umem_3_buffer_param_a;
+ u32 umem_3_buffer_param_b;
};
struct mlx5_vdpa_counter {
@@ -88,7 +97,7 @@ struct macvlan_node {
};
void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev);
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg);
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index 00d7d72713be..b3a3cb165795 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -499,12 +499,13 @@ static int __init vdpasim_blk_init(void)
GFP_KERNEL);
if (!shared_buffer) {
ret = -ENOMEM;
- goto parent_err;
+ goto mgmt_dev_err;
}
}
return 0;
-
+mgmt_dev_err:
+ vdpa_mgmtdev_unregister(&mgmt_dev);
parent_err:
device_unregister(&vdpasim_blk_mgmtdev);
return ret;
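The vdpa_sim_blk fix adds a mgmt_dev_err label so a failed shared_buffer allocation also unregisters the management device registered just before it. The sketch below shows the general unwind-in-reverse-order pattern the fix restores; all names are illustrative and not taken from vdpa_sim.

#include <errno.h>
#include <stdlib.h>

/* Illustrative stand-ins for the real registration steps. */
static int register_parent(void) { return 0; }
static void unregister_parent(void) { }
static int register_mgmtdev(void) { return 0; }
static void unregister_mgmtdev(void) { }

static int demo_init(void)
{
	void *shared;
	int ret;

	ret = register_parent();
	if (ret)
		return ret;

	ret = register_mgmtdev();
	if (ret)
		goto parent_err;

	shared = calloc(1, 4096);
	if (!shared) {
		ret = -ENOMEM;
		goto mgmt_dev_err;	/* undo everything done so far ... */
	}
	return 0;

mgmt_dev_err:
	unregister_mgmtdev();		/* ... in reverse order of setup */
parent_err:
	unregister_parent();
	return ret;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}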
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c71d573f1c94..e0c181ad17e3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1458,9 +1458,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
goto done;
}
- if ((msg.type == VHOST_IOTLB_UPDATE ||
- msg.type == VHOST_IOTLB_INVALIDATE) &&
- msg.size == 0) {
+ if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
ret = -EINVAL;
goto done;
}
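The vhost change narrows the early sanity check: a zero-size VHOST_IOTLB_UPDATE is still rejected, while a zero-size VHOST_IOTLB_INVALIDATE is now accepted and passed on to the backend. A condensed before/after view of the same logic as the hunk above (the real code sets ret and jumps to its done label rather than returning directly):

/* Before: both message types required a non-zero size. */
if ((msg.type == VHOST_IOTLB_UPDATE ||
     msg.type == VHOST_IOTLB_INVALIDATE) && msg.size == 0)
	return -EINVAL;

/* After: only an UPDATE with no payload is malformed; an INVALIDATE may
 * legitimately carry size == 0. */
if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0)
	return -EINVAL;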
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 5c87817a4f4c..3dcf83f5e7b4 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3440,11 +3440,15 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
}
info->fix.mmio_start = raddr;
+#if defined(__i386__) || defined(__ia64__)
/*
* By using strong UC we force the MTRR to never have an
* effect on the MMIO region on both non-PAT and PAT systems.
*/
par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
+#else
+ par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
+#endif
if (par->ati_regbase == NULL)
return -ENOMEM;
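atyfb now uses ioremap_uc() only on i386 and ia64, where a strong-UC mapping is needed so MTRRs can never make the MMIO window write-combining, and falls back to plain ioremap() everywhere else. A hedged sketch of wrapping that compile-time choice in a helper; this helper does not exist in the driver and only restates the #ifdef above.

static void __iomem *aty_map_mmio(resource_size_t start, size_t len)
{
#if defined(__i386__) || defined(__ia64__)
	/* Strong UC so an MTRR can never turn the MMIO window into WC. */
	return ioremap_uc(start, len);
#else
	return ioremap(start, len);
#endif
}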
diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
index 6d4bfeecee35..5b80bf3dae50 100644
--- a/drivers/video/fbdev/core/cfbcopyarea.c
+++ b/drivers/video/fbdev/core/cfbcopyarea.c
@@ -382,7 +382,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
u32 height = area->height, width = area->width;
- unsigned long const bits_per_line = p->fix.line_length*8u;
+ unsigned int const bits_per_line = p->fix.line_length * 8u;
unsigned long __iomem *base = NULL;
int bits = BITS_PER_LONG, bytes = bits >> 3;
unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index c1eda3190968..7b8bd3a2bedc 100644
--- a/drivers/video/fbdev/core/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
@@ -316,7 +316,7 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
u32 height = area->height, width = area->width;
- unsigned long const bits_per_line = p->fix.line_length*8u;
+ unsigned int const bits_per_line = p->fix.line_length * 8u;
unsigned long *base = NULL;
int bits = BITS_PER_LONG, bytes = bits >> 3;
unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
index 167585a889d3..719b99a9bc77 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
@@ -1406,7 +1406,7 @@ struct mmphw_ctrl {
/*pathes*/
int path_num;
- struct mmphw_path_plat path_plats[];
+ struct mmphw_path_plat path_plats[] __counted_by(path_num);
};
static inline int overlay_is_vid(struct mmp_overlay *overlay)
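__counted_by(path_num) tells the compiler (and FORTIFY_SOURCE) how many elements the flexible array path_plats[] holds, so out-of-bounds accesses can be caught at run time. The standalone sketch below shows the attribute on a hypothetical structure, with a fallback so it still compiles on toolchains that lack counted_by support.

#include <stdlib.h>

#ifdef __has_attribute
# if __has_attribute(counted_by)
#  define __counted_by(member) __attribute__((counted_by(member)))
# endif
#endif
#ifndef __counted_by
# define __counted_by(member)
#endif

struct path_plat { int id; };

struct ctrl {
	int path_num;
	struct path_plat path_plats[] __counted_by(path_num);
};

static struct ctrl *ctrl_alloc(int n)
{
	struct ctrl *c = malloc(sizeof(*c) + n * sizeof(c->path_plats[0]));

	if (c)
		c->path_num = n;	/* keep the counter and allocation in sync */
	return c;
}

int main(void)
{
	struct ctrl *c = ctrl_alloc(4);

	free(c);
	return 0;
}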
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index f28cb90947a3..42c96f1cfc93 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1645,13 +1645,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
}
fbdev->int_irq = platform_get_irq(pdev, 0);
if (fbdev->int_irq < 0) {
- r = ENXIO;
+ r = -ENXIO;
goto cleanup;
}
fbdev->ext_irq = platform_get_irq(pdev, 1);
if (fbdev->ext_irq < 0) {
- r = ENXIO;
+ r = -ENXIO;
goto cleanup;
}
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 3d76ce111488..cf0f706762b4 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1214,7 +1214,7 @@ static struct platform_driver sa1100fb_driver = {
},
};
-int __init sa1100fb_init(void)
+static int __init sa1100fb_init(void)
{
if (fb_get_options("sa1100fb", NULL))
return -ENODEV;
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index a1a67830fbbc..e1f421e91b4f 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1928,10 +1928,10 @@ static void uvesafb_exit(void)
}
}
- cn_del_callback(&uvesafb_cn_id);
driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
platform_device_unregister(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
+ cn_del_callback(&uvesafb_cn_id);
}
module_exit(uvesafb_exit);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 5b15936a5214..2d5d252ef419 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -395,7 +395,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
&num_pages);
- target = num_pages;
+ /*
+ * Aligned up to guest page size to avoid inflating and deflating
+ * balloon endlessly.
+ */
+ target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
return target - vb->num_pages;
}
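The balloon fix rounds the host-requested page count up to a whole guest page. num_pages is in 4 KiB units, so VIRTIO_BALLOON_PAGES_PER_PAGE is 16 on a 64 KiB-page guest; without the rounding the target could fall between guest page boundaries and the balloon would inflate and deflate forever. A runnable sketch of the rounding, with hypothetical numbers:

#include <stdio.h>
#include <stdint.h>

/* Same result as the kernel's ALIGN() for the power-of-two alignments used here. */
#define ALIGN_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	uint32_t num_pages = 1000;	/* hypothetical host request, 4 KiB units */
	uint32_t per_guest_page = 16;	/* VIRTIO_BALLOON_PAGES_PER_PAGE on 64 KiB pages */

	printf("target %u -> %u balloon pages\n",
	       num_pages, ALIGN_UP(num_pages, per_guest_page));	/* 1000 -> 1008 */
	return 0;
}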
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 97760f611295..59892a31cf76 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -631,14 +631,17 @@ static int virtio_mmio_probe(struct platform_device *pdev)
spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(vm_dev->base))
- return PTR_ERR(vm_dev->base);
+ if (IS_ERR(vm_dev->base)) {
+ rc = PTR_ERR(vm_dev->base);
+ goto free_vm_dev;
+ }
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
/* Check device version */
@@ -646,7 +649,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
if (vm_dev->version < 1 || vm_dev->version > 2) {
dev_err(&pdev->dev, "Version %ld not supported!\n",
vm_dev->version);
- return -ENXIO;
+ rc = -ENXIO;
+ goto free_vm_dev;
}
vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -655,7 +659,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
* virtio-mmio device with an ID 0 is a (dummy) placeholder
* with no function. End probing now with no error reported.
*/
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
@@ -685,6 +690,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
put_device(&vm_dev->vdev.dev);
return rc;
+
+free_vm_dev:
+ kfree(vm_dev);
+ return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)
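These hunks make every early failure in virtio_mmio_probe() free the vm_dev allocated at the top of the function instead of leaking it; once register_virtio_device() has been called, the error path keeps using put_device(), because the device core then owns the refcount. A sketch of that ownership split; do_early_setup() is a hypothetical placeholder for the magic/version/device-ID checks.

static int probe_sketch(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	int rc;

	vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	rc = do_early_setup(vm_dev);		/* hypothetical helper */
	if (rc)
		goto free_vm_dev;		/* probe still owns the memory */

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc) {
		put_device(&vm_dev->vdev.dev);	/* refcount owns it from here on */
		return rc;
	}
	return 0;

free_vm_dev:
	kfree(vm_dev);
	return rc;
}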
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index aad7d9296e77..9cb601e16688 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -291,7 +291,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
err = -EINVAL;
mdev->common = vp_modern_map_capability(mdev, common,
sizeof(struct virtio_pci_common_cfg), 4,
- 0, sizeof(struct virtio_pci_common_cfg),
+ 0, sizeof(struct virtio_pci_modern_common_cfg),
NULL, NULL);
if (!mdev->common)
goto err_map_common;
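The one-line vp_modern_probe() fix maps the common configuration capability with the length of the larger modern structure, so the registers added after struct virtio_pci_common_cfg are actually inside the mapping. For reference, the extended layout looks roughly like the sketch below; the field names follow the virtio 1.2 modern PCI layout as found in the kernel headers, and the exact definition should be treated as an assumption here.

struct virtio_pci_modern_common_cfg_sketch {
	struct virtio_pci_common_cfg cfg;	/* the original register block */

	__le16 queue_notify_data;		/* VIRTIO_F_NOTIF_CONFIG_DATA */
	__le16 queue_reset;			/* VIRTIO_F_RING_RESET */
};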
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 0d28ecf668d0..b845ee18a80b 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -260,7 +260,7 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
inode->i_blocks = 0;
inode->i_rdev = rdev;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mapping->a_ops = &v9fs_addr_operations;
inode->i_private = NULL;
@@ -1150,8 +1150,8 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
set_nlink(inode, 1);
- inode->i_atime.tv_sec = stat->atime;
- inode->i_mtime.tv_sec = stat->mtime;
+ inode_set_atime(inode, stat->atime, 0);
+ inode_set_mtime(inode, stat->mtime, 0);
inode_set_ctime(inode, stat->mtime, 0);
inode->i_uid = v9ses->dfltuid;
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 1312f68965ac..c7319af2f471 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -641,10 +641,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
struct v9fs_inode *v9inode = V9FS_I(inode);
if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
- inode->i_atime.tv_sec = stat->st_atime_sec;
- inode->i_atime.tv_nsec = stat->st_atime_nsec;
- inode->i_mtime.tv_sec = stat->st_mtime_sec;
- inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+ inode_set_atime(inode, stat->st_atime_sec,
+ stat->st_atime_nsec);
+ inode_set_mtime(inode, stat->st_mtime_sec,
+ stat->st_mtime_nsec);
inode_set_ctime(inode, stat->st_ctime_sec,
stat->st_ctime_nsec);
inode->i_uid = stat->st_uid;
@@ -660,12 +660,12 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
inode->i_blocks = stat->st_blocks;
} else {
if (stat->st_result_mask & P9_STATS_ATIME) {
- inode->i_atime.tv_sec = stat->st_atime_sec;
- inode->i_atime.tv_nsec = stat->st_atime_nsec;
+ inode_set_atime(inode, stat->st_atime_sec,
+ stat->st_atime_nsec);
}
if (stat->st_result_mask & P9_STATS_MTIME) {
- inode->i_mtime.tv_sec = stat->st_mtime_sec;
- inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+ inode_set_mtime(inode, stat->st_mtime_sec,
+ stat->st_mtime_nsec);
}
if (stat->st_result_mask & P9_STATS_CTIME) {
inode_set_ctime(inode, stat->st_ctime_sec,
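The 9p hunks, like most of the filesystem churn in this diff, replace direct writes to inode->i_atime / i_mtime with the inode_set_atime()/inode_set_mtime() accessors and read timestamps back through inode_get_*(). A condensed before/after sketch; the st_* variables stand in for whatever the filesystem read off the wire or disk.

/* Before: poke the struct fields directly. */
inode->i_atime.tv_sec  = st_atime_sec;
inode->i_atime.tv_nsec = st_atime_nsec;
inode->i_mtime.tv_sec  = st_mtime_sec;
inode->i_mtime.tv_nsec = st_mtime_nsec;

/* After: go through the accessors, which hide the representation. */
inode_set_atime(inode, st_atime_sec, st_atime_nsec);
inode_set_mtime(inode, st_mtime_sec, st_mtime_nsec);

/* Reads use the matching getters. */
struct timespec64 mtime = inode_get_mtime(inode);
time64_t mtime_sec = inode_get_mtime_sec(inode);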
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index e00cf8109b3f..053d1cef6e13 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -162,27 +162,27 @@ static int v9fs_xattr_handler_set(const struct xattr_handler *handler,
return v9fs_xattr_set(dentry, full_name, value, size, flags);
}
-static struct xattr_handler v9fs_xattr_user_handler = {
+static const struct xattr_handler v9fs_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.get = v9fs_xattr_handler_get,
.set = v9fs_xattr_handler_set,
};
-static struct xattr_handler v9fs_xattr_trusted_handler = {
+static const struct xattr_handler v9fs_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = v9fs_xattr_handler_get,
.set = v9fs_xattr_handler_set,
};
#ifdef CONFIG_9P_FS_SECURITY
-static struct xattr_handler v9fs_xattr_security_handler = {
+static const struct xattr_handler v9fs_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = v9fs_xattr_handler_get,
.set = v9fs_xattr_handler_set,
};
#endif
-const struct xattr_handler *v9fs_xattr_handlers[] = {
+const struct xattr_handler * const v9fs_xattr_handlers[] = {
&v9fs_xattr_user_handler,
&v9fs_xattr_trusted_handler,
#ifdef CONFIG_9P_FS_SECURITY
diff --git a/fs/9p/xattr.h b/fs/9p/xattr.h
index b5636e544c8a..3ad5a802352a 100644
--- a/fs/9p/xattr.h
+++ b/fs/9p/xattr.h
@@ -10,7 +10,7 @@
#include <net/9p/9p.h>
#include <net/9p/client.h>
-extern const struct xattr_handler *v9fs_xattr_handlers[];
+extern const struct xattr_handler * const v9fs_xattr_handlers[];
ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
void *buffer, size_t buffer_size);
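Both the individual xattr handlers and the handler table become const here; presumably this pairs with the VFS-side change that lets super_block::s_xattr point at a fully const array, so the whole table can live in read-only data. A sketch of the resulting declaration shape; the demo_* callbacks are hypothetical.

static const struct xattr_handler demo_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.get	= demo_xattr_get,	/* hypothetical callbacks */
	.set	= demo_xattr_set,
};

/* Const pointers in a const array: nothing in the table is writable. */
const struct xattr_handler * const demo_xattr_handlers[] = {
	&demo_user_handler,
	NULL,
};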
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 20963002578a..3081edb09e46 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -242,6 +242,7 @@ struct inode *
adfs_iget(struct super_block *sb, struct object_info *obj)
{
struct inode *inode;
+ struct timespec64 ts;
inode = new_inode(sb);
if (!inode)
@@ -268,9 +269,10 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->attr = obj->attr;
inode->i_mode = adfs_atts2mode(sb, inode);
- adfs_adfs2unix_time(&inode->i_mtime, inode);
- inode->i_atime = inode->i_mtime;
- inode_set_ctime_to_ts(inode, inode->i_mtime);
+ adfs_adfs2unix_time(&ts, inode);
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
+ inode_set_ctime_to_ts(inode, ts);
if (S_ISDIR(inode->i_mode)) {
inode->i_op = &adfs_dir_inode_operations;
@@ -321,7 +323,8 @@ adfs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
if (ia_valid & ATTR_MTIME && adfs_inode_is_stamped(inode)) {
adfs_unix2adfs_time(inode, &attr->ia_mtime);
- adfs_adfs2unix_time(&inode->i_mtime, inode);
+ adfs_adfs2unix_time(&attr->ia_mtime, inode);
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
}
/*
@@ -329,7 +332,7 @@ adfs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
* have the ability to represent them in our filesystem?
*/
if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 7ba93efc1143..fd669daa4e7b 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -60,7 +60,7 @@ affs_insert_hash(struct inode *dir, struct buffer_head *bh)
mark_buffer_dirty_inode(dir_bh, dir);
affs_brelse(dir_bh);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
mark_inode_dirty(dir);
@@ -114,7 +114,7 @@ affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh)
affs_brelse(bh);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
mark_inode_dirty(dir);
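A pattern that repeats throughout this diff: directory updates that used to assign dir->i_mtime from inode_set_ctime_current(dir) now route the returned timespec through inode_set_mtime_to_ts(), and freshly created inodes use simple_inode_init_ts() to stamp atime, mtime and ctime in one call. Condensed:

/* Old: assign the field directly with the fresh ctime. */
dir->i_mtime = inode_set_ctime_current(dir);

/* New: same value, same ordering, but through the setter. */
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

/* New inodes: one helper sets atime, mtime and ctime to "now" and returns
 * the timestamp for callers that also need it (e.g. btrfs otime). */
struct timespec64 now = simple_inode_init_ts(inode);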
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 060746c63151..0210df8d3500 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -149,13 +149,9 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
break;
}
- inode->i_mtime.tv_sec = inode->i_atime.tv_sec =
- inode_set_ctime(inode,
- (be32_to_cpu(tail->change.days) * 86400LL +
- be32_to_cpu(tail->change.mins) * 60 +
- be32_to_cpu(tail->change.ticks) / 50 + AFFS_EPOCH_DELTA)
- + sys_tz.tz_minuteswest * 60, 0).tv_sec;
- inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = 0;
+ inode_set_mtime(inode,
+ inode_set_atime(inode, inode_set_ctime(inode, (be32_to_cpu(tail->change.days) * 86400LL + be32_to_cpu(tail->change.mins) * 60 + be32_to_cpu(tail->change.ticks) / 50 + AFFS_EPOCH_DELTA) + sys_tz.tz_minuteswest * 60, 0).tv_sec, 0).tv_sec,
+ 0);
affs_brelse(bh);
unlock_new_inode(inode);
return inode;
@@ -187,12 +183,13 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc)
}
tail = AFFS_TAIL(sb, bh);
if (tail->stype == cpu_to_be32(ST_ROOT)) {
- affs_secs_to_datestamp(inode->i_mtime.tv_sec,
+ affs_secs_to_datestamp(inode_get_mtime_sec(inode),
&AFFS_ROOT_TAIL(sb, bh)->root_change);
} else {
tail->protect = cpu_to_be32(AFFS_I(inode)->i_protect);
tail->size = cpu_to_be32(inode->i_size);
- affs_secs_to_datestamp(inode->i_mtime.tv_sec, &tail->change);
+ affs_secs_to_datestamp(inode_get_mtime_sec(inode),
+ &tail->change);
if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) {
uid = i_uid_read(inode);
gid = i_gid_read(inode);
@@ -314,7 +311,7 @@ affs_new_inode(struct inode *dir)
inode->i_gid = current_fsgid();
inode->i_ino = block;
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
atomic_set(&AFFS_I(inode)->i_opencnt, 0);
AFFS_I(inode)->i_blkcnt = 0;
AFFS_I(inode)->i_lc = NULL;
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 95bcbd7654d1..4d04ef2d3ae7 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -88,7 +88,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
set_nlink(inode, 2);
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
inode->i_generation = 0;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 1c794a1896aa..78efc9719349 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -91,8 +91,8 @@ static int afs_inode_init_from_status(struct afs_operation *op,
t = status->mtime_client;
inode_set_ctime_to_ts(inode, t);
- inode->i_mtime = t;
- inode->i_atime = t;
+ inode_set_mtime_to_ts(inode, t);
+ inode_set_atime_to_ts(inode, t);
inode->i_flags |= S_NOATIME;
inode->i_uid = make_kuid(&init_user_ns, status->owner);
inode->i_gid = make_kgid(&init_user_ns, status->group);
@@ -204,7 +204,7 @@ static void afs_apply_status(struct afs_operation *op,
}
t = status->mtime_client;
- inode->i_mtime = t;
+ inode_set_mtime_to_ts(inode, t);
if (vp->update_ctime)
inode_set_ctime_to_ts(inode, op->ctime);
@@ -253,7 +253,7 @@ static void afs_apply_status(struct afs_operation *op,
if (change_size) {
afs_set_i_size(vnode, status->size);
inode_set_ctime_to_ts(inode, t);
- inode->i_atime = t;
+ inode_set_atime_to_ts(inode, t);
}
}
}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index da73b97e19a9..23e2cc4efe41 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -1541,7 +1541,7 @@ int afs_launder_folio(struct folio *);
/*
* xattr.c
*/
-extern const struct xattr_handler *afs_xattr_handlers[];
+extern const struct xattr_handler * const afs_xattr_handlers[];
/*
* yfsclient.c
diff --git a/fs/afs/write.c b/fs/afs/write.c
index e1c45341719b..4a168781936b 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -424,7 +424,7 @@ try_next_key:
op->store.write_iter = iter;
op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
- op->mtime = vnode->netfs.inode.i_mtime;
+ op->mtime = inode_get_mtime(&vnode->netfs.inode);
afs_wait_for_operation(op);
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index 9048d8ccc715..64b2c0224f62 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -353,7 +353,7 @@ static const struct xattr_handler afs_xattr_afs_volume_handler = {
.get = afs_xattr_get_volume,
};
-const struct xattr_handler *afs_xattr_handlers[] = {
+const struct xattr_handler * const afs_xattr_handlers[] = {
&afs_xattr_afs_acl_handler,
&afs_xattr_afs_cell_handler,
&afs_xattr_afs_fid_handler,
diff --git a/fs/attr.c b/fs/attr.c
index a8ae5f6d9b16..bdf5deb06ea9 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -308,9 +308,9 @@ void setattr_copy(struct mnt_idmap *idmap, struct inode *inode,
i_uid_update(idmap, attr, inode);
i_gid_update(idmap, attr, inode);
if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index d5a44fa88acf..8c1d587b3eef 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -25,6 +25,8 @@
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/magic.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
/* This is the range of ioctl() numbers we claim as ours */
#define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
@@ -205,20 +207,34 @@ static inline void managed_dentry_clear_managed(struct dentry *dentry)
/* Initializing function */
-int autofs_fill_super(struct super_block *, void *, int);
+extern const struct fs_parameter_spec autofs_param_specs[];
+int autofs_init_fs_context(struct fs_context *fc);
struct autofs_info *autofs_new_ino(struct autofs_sb_info *);
void autofs_clean_ino(struct autofs_info *);
-static inline int autofs_prepare_pipe(struct file *pipe)
+static inline int autofs_check_pipe(struct file *pipe)
{
if (!(pipe->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
if (!S_ISFIFO(file_inode(pipe)->i_mode))
return -EINVAL;
+ return 0;
+}
+
+static inline void autofs_set_packet_pipe_flags(struct file *pipe)
+{
/* We want a packet pipe */
pipe->f_flags |= O_DIRECT;
/* We don't expect -EAGAIN */
pipe->f_flags &= ~O_NONBLOCK;
+}
+
+static inline int autofs_prepare_pipe(struct file *pipe)
+{
+ int ret = autofs_check_pipe(pipe);
+ if (ret < 0)
+ return ret;
+ autofs_set_packet_pipe_flags(pipe);
return 0;
}
diff --git a/fs/autofs/init.c b/fs/autofs/init.c
index d3f55e874338..b5e4dfa04ed0 100644
--- a/fs/autofs/init.c
+++ b/fs/autofs/init.c
@@ -7,16 +7,11 @@
#include <linux/init.h>
#include "autofs_i.h"
-static struct dentry *autofs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_nodev(fs_type, flags, data, autofs_fill_super);
-}
-
struct file_system_type autofs_fs_type = {
.owner = THIS_MODULE,
.name = "autofs",
- .mount = autofs_mount,
+ .init_fs_context = autofs_init_fs_context,
+ .parameters = autofs_param_specs,
.kill_sb = autofs_kill_sb,
};
MODULE_ALIAS_FS("autofs");
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 2b49662ed237..a5083d447a62 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -6,7 +6,6 @@
#include <linux/seq_file.h>
#include <linux/pagemap.h>
-#include <linux/parser.h>
#include "autofs_i.h"
@@ -110,189 +109,179 @@ static const struct super_operations autofs_sops = {
.evict_inode = autofs_evict_inode,
};
-enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto,
- Opt_indirect, Opt_direct, Opt_offset, Opt_strictexpire,
- Opt_ignore};
-
-static const match_table_t tokens = {
- {Opt_fd, "fd=%u"},
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_pgrp, "pgrp=%u"},
- {Opt_minproto, "minproto=%u"},
- {Opt_maxproto, "maxproto=%u"},
- {Opt_indirect, "indirect"},
- {Opt_direct, "direct"},
- {Opt_offset, "offset"},
- {Opt_strictexpire, "strictexpire"},
- {Opt_ignore, "ignore"},
- {Opt_err, NULL}
+enum {
+ Opt_direct,
+ Opt_fd,
+ Opt_gid,
+ Opt_ignore,
+ Opt_indirect,
+ Opt_maxproto,
+ Opt_minproto,
+ Opt_offset,
+ Opt_pgrp,
+ Opt_strictexpire,
+ Opt_uid,
};
-static int parse_options(char *options,
- struct inode *root, int *pgrp, bool *pgrp_set,
- struct autofs_sb_info *sbi)
+const struct fs_parameter_spec autofs_param_specs[] = {
+ fsparam_flag ("direct", Opt_direct),
+ fsparam_fd ("fd", Opt_fd),
+ fsparam_u32 ("gid", Opt_gid),
+ fsparam_flag ("ignore", Opt_ignore),
+ fsparam_flag ("indirect", Opt_indirect),
+ fsparam_u32 ("maxproto", Opt_maxproto),
+ fsparam_u32 ("minproto", Opt_minproto),
+ fsparam_flag ("offset", Opt_offset),
+ fsparam_u32 ("pgrp", Opt_pgrp),
+ fsparam_flag ("strictexpire", Opt_strictexpire),
+ fsparam_u32 ("uid", Opt_uid),
+ {}
+};
+
+struct autofs_fs_context {
+ kuid_t uid;
+ kgid_t gid;
+ int pgrp;
+ bool pgrp_set;
+};
+
+/*
+ * Open the fd. We do it here rather than in get_tree so that it's done in the
+ * context of the system call that passed the data and not the one that
+ * triggered the superblock creation, lest the fd gets reassigned.
+ */
+static int autofs_parse_fd(struct fs_context *fc, struct autofs_sb_info *sbi,
+ struct fs_parameter *param,
+ struct fs_parse_result *result)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- int pipefd = -1;
- kuid_t uid;
- kgid_t gid;
+ struct file *pipe;
+ int ret;
- root->i_uid = current_uid();
- root->i_gid = current_gid();
+ if (param->type == fs_value_is_file) {
+ /* came through the new api */
+ pipe = param->file;
+ param->file = NULL;
+ } else {
+ pipe = fget(result->uint_32);
+ }
+ if (!pipe) {
+ errorf(fc, "could not open pipe file descriptor");
+ return -EBADF;
+ }
- sbi->min_proto = AUTOFS_MIN_PROTO_VERSION;
- sbi->max_proto = AUTOFS_MAX_PROTO_VERSION;
+ ret = autofs_check_pipe(pipe);
+ if (ret < 0) {
+ errorf(fc, "Invalid/unusable pipe");
+ if (param->type != fs_value_is_file)
+ fput(pipe);
+ return -EBADF;
+ }
- sbi->pipefd = -1;
+ autofs_set_packet_pipe_flags(pipe);
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_fd:
- if (match_int(args, &pipefd))
- return 1;
- sbi->pipefd = pipefd;
- break;
- case Opt_uid:
- if (match_int(args, &option))
- return 1;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return 1;
- root->i_uid = uid;
- break;
- case Opt_gid:
- if (match_int(args, &option))
- return 1;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return 1;
- root->i_gid = gid;
- break;
- case Opt_pgrp:
- if (match_int(args, &option))
- return 1;
- *pgrp = option;
- *pgrp_set = true;
- break;
- case Opt_minproto:
- if (match_int(args, &option))
- return 1;
- sbi->min_proto = option;
- break;
- case Opt_maxproto:
- if (match_int(args, &option))
- return 1;
- sbi->max_proto = option;
- break;
- case Opt_indirect:
- set_autofs_type_indirect(&sbi->type);
- break;
- case Opt_direct:
- set_autofs_type_direct(&sbi->type);
- break;
- case Opt_offset:
- set_autofs_type_offset(&sbi->type);
- break;
- case Opt_strictexpire:
- sbi->flags |= AUTOFS_SBI_STRICTEXPIRE;
- break;
- case Opt_ignore:
- sbi->flags |= AUTOFS_SBI_IGNORE;
- break;
- default:
- return 1;
- }
+ if (sbi->pipe)
+ fput(sbi->pipe);
+
+ sbi->pipefd = result->uint_32;
+ sbi->pipe = pipe;
+
+ return 0;
+}
+
+static int autofs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct autofs_fs_context *ctx = fc->fs_private;
+ struct autofs_sb_info *sbi = fc->s_fs_info;
+ struct fs_parse_result result;
+ kuid_t uid;
+ kgid_t gid;
+ int opt;
+
+ opt = fs_parse(fc, autofs_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_fd:
+ return autofs_parse_fd(fc, sbi, param, &result);
+ case Opt_uid:
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ return invalfc(fc, "Invalid uid");
+ ctx->uid = uid;
+ break;
+ case Opt_gid:
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ return invalfc(fc, "Invalid gid");
+ ctx->gid = gid;
+ break;
+ case Opt_pgrp:
+ ctx->pgrp = result.uint_32;
+ ctx->pgrp_set = true;
+ break;
+ case Opt_minproto:
+ sbi->min_proto = result.uint_32;
+ break;
+ case Opt_maxproto:
+ sbi->max_proto = result.uint_32;
+ break;
+ case Opt_indirect:
+ set_autofs_type_indirect(&sbi->type);
+ break;
+ case Opt_direct:
+ set_autofs_type_direct(&sbi->type);
+ break;
+ case Opt_offset:
+ set_autofs_type_offset(&sbi->type);
+ break;
+ case Opt_strictexpire:
+ sbi->flags |= AUTOFS_SBI_STRICTEXPIRE;
+ break;
+ case Opt_ignore:
+ sbi->flags |= AUTOFS_SBI_IGNORE;
}
- return (sbi->pipefd < 0);
+
+ return 0;
}
-int autofs_fill_super(struct super_block *s, void *data, int silent)
+static struct autofs_sb_info *autofs_alloc_sbi(void)
{
- struct inode *root_inode;
- struct dentry *root;
- struct file *pipe;
struct autofs_sb_info *sbi;
- struct autofs_info *ino;
- int pgrp = 0;
- bool pgrp_set = false;
- int ret = -EINVAL;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
- return -ENOMEM;
- pr_debug("starting up, sbi = %p\n", sbi);
+ return NULL;
- s->s_fs_info = sbi;
sbi->magic = AUTOFS_SBI_MAGIC;
- sbi->pipefd = -1;
- sbi->pipe = NULL;
- sbi->exp_timeout = 0;
- sbi->oz_pgrp = NULL;
- sbi->sb = s;
- sbi->version = 0;
- sbi->sub_version = 0;
sbi->flags = AUTOFS_SBI_CATATONIC;
+ sbi->min_proto = AUTOFS_MIN_PROTO_VERSION;
+ sbi->max_proto = AUTOFS_MAX_PROTO_VERSION;
+ sbi->pipefd = -1;
+
set_autofs_type_indirect(&sbi->type);
- sbi->min_proto = 0;
- sbi->max_proto = 0;
mutex_init(&sbi->wq_mutex);
mutex_init(&sbi->pipe_mutex);
spin_lock_init(&sbi->fs_lock);
- sbi->queues = NULL;
spin_lock_init(&sbi->lookup_lock);
INIT_LIST_HEAD(&sbi->active_list);
INIT_LIST_HEAD(&sbi->expiring_list);
- s->s_blocksize = 1024;
- s->s_blocksize_bits = 10;
- s->s_magic = AUTOFS_SUPER_MAGIC;
- s->s_op = &autofs_sops;
- s->s_d_op = &autofs_dentry_operations;
- s->s_time_gran = 1;
- /*
- * Get the root inode and dentry, but defer checking for errors.
- */
- ino = autofs_new_ino(sbi);
- if (!ino) {
- ret = -ENOMEM;
- goto fail_free;
- }
- root_inode = autofs_get_inode(s, S_IFDIR | 0755);
- root = d_make_root(root_inode);
- if (!root) {
- ret = -ENOMEM;
- goto fail_ino;
- }
- pipe = NULL;
-
- root->d_fsdata = ino;
+ return sbi;
+}
- /* Can this call block? */
- if (parse_options(data, root_inode, &pgrp, &pgrp_set, sbi)) {
- pr_err("called with bogus options\n");
- goto fail_dput;
- }
+static int autofs_validate_protocol(struct fs_context *fc)
+{
+ struct autofs_sb_info *sbi = fc->s_fs_info;
/* Test versions first */
if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
- pr_err("kernel does not match daemon version "
+ errorf(fc, "kernel does not match daemon version "
"daemon (%d, %d) kernel (%d, %d)\n",
sbi->min_proto, sbi->max_proto,
AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
- goto fail_dput;
+ return -EINVAL;
}
/* Establish highest kernel protocol version */
@@ -300,13 +289,62 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
sbi->version = AUTOFS_MAX_PROTO_VERSION;
else
sbi->version = sbi->max_proto;
- sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
- if (pgrp_set) {
- sbi->oz_pgrp = find_get_pid(pgrp);
+ switch (sbi->version) {
+ case 4:
+ sbi->sub_version = 7;
+ break;
+ case 5:
+ sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
+ break;
+ default:
+ sbi->sub_version = 0;
+ }
+
+ return 0;
+}
+
+static int autofs_fill_super(struct super_block *s, struct fs_context *fc)
+{
+ struct autofs_fs_context *ctx = fc->fs_private;
+ struct autofs_sb_info *sbi = s->s_fs_info;
+ struct inode *root_inode;
+ struct dentry *root;
+ struct autofs_info *ino;
+ int ret = -ENOMEM;
+
+ pr_debug("starting up, sbi = %p\n", sbi);
+
+ sbi->sb = s;
+ s->s_blocksize = 1024;
+ s->s_blocksize_bits = 10;
+ s->s_magic = AUTOFS_SUPER_MAGIC;
+ s->s_op = &autofs_sops;
+ s->s_d_op = &autofs_dentry_operations;
+ s->s_time_gran = 1;
+
+ /*
+ * Get the root inode and dentry, but defer checking for errors.
+ */
+ ino = autofs_new_ino(sbi);
+ if (!ino)
+ goto fail;
+
+ root_inode = autofs_get_inode(s, S_IFDIR | 0755);
+ root_inode->i_uid = ctx->uid;
+ root_inode->i_gid = ctx->gid;
+
+ root = d_make_root(root_inode);
+ if (!root)
+ goto fail_ino;
+
+ root->d_fsdata = ino;
+
+ if (ctx->pgrp_set) {
+ sbi->oz_pgrp = find_get_pid(ctx->pgrp);
if (!sbi->oz_pgrp) {
- pr_err("could not find process group %d\n",
- pgrp);
+ ret = invalf(fc, "Could not find process group %d",
+ ctx->pgrp);
goto fail_dput;
}
} else {
@@ -321,16 +359,7 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
pr_debug("pipe fd = %d, pgrp = %u\n",
sbi->pipefd, pid_nr(sbi->oz_pgrp));
- pipe = fget(sbi->pipefd);
- if (!pipe) {
- pr_err("could not open pipe file descriptor\n");
- goto fail_put_pid;
- }
- ret = autofs_prepare_pipe(pipe);
- if (ret < 0)
- goto fail_fput;
- sbi->pipe = pipe;
sbi->flags &= ~AUTOFS_SBI_CATATONIC;
/*
@@ -342,22 +371,82 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
/*
* Failure ... clean up.
*/
-fail_fput:
- pr_err("pipe file descriptor does not contain proper ops\n");
- fput(pipe);
-fail_put_pid:
- put_pid(sbi->oz_pgrp);
fail_dput:
dput(root);
- goto fail_free;
+ goto fail;
fail_ino:
autofs_free_ino(ino);
-fail_free:
- kfree(sbi);
- s->s_fs_info = NULL;
+fail:
return ret;
}
+/*
+ * Validate the parameters and then request a superblock.
+ */
+static int autofs_get_tree(struct fs_context *fc)
+{
+ struct autofs_sb_info *sbi = fc->s_fs_info;
+ int ret;
+
+ ret = autofs_validate_protocol(fc);
+ if (ret)
+ return ret;
+
+ if (sbi->pipefd < 0)
+ return invalf(fc, "No control pipe specified");
+
+ return get_tree_nodev(fc, autofs_fill_super);
+}
+
+static void autofs_free_fc(struct fs_context *fc)
+{
+ struct autofs_fs_context *ctx = fc->fs_private;
+ struct autofs_sb_info *sbi = fc->s_fs_info;
+
+ if (sbi) {
+ if (sbi->pipe)
+ fput(sbi->pipe);
+ kfree(sbi);
+ }
+ kfree(ctx);
+}
+
+static const struct fs_context_operations autofs_context_ops = {
+ .free = autofs_free_fc,
+ .parse_param = autofs_parse_param,
+ .get_tree = autofs_get_tree,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+int autofs_init_fs_context(struct fs_context *fc)
+{
+ struct autofs_fs_context *ctx;
+ struct autofs_sb_info *sbi;
+
+ ctx = kzalloc(sizeof(struct autofs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ goto nomem;
+
+ ctx->uid = current_uid();
+ ctx->gid = current_gid();
+
+ sbi = autofs_alloc_sbi();
+ if (!sbi)
+ goto nomem_ctx;
+
+ fc->fs_private = ctx;
+ fc->s_fs_info = sbi;
+ fc->ops = &autofs_context_ops;
+ return 0;
+
+nomem_ctx:
+ kfree(ctx);
+nomem:
+ return -ENOMEM;
+}
+
struct inode *autofs_get_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode = new_inode(sb);
@@ -370,7 +459,7 @@ struct inode *autofs_get_inode(struct super_block *sb, umode_t mode)
inode->i_uid = d_inode(sb->s_root)->i_uid;
inode->i_gid = d_inode(sb->s_root)->i_gid;
}
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_ino = get_next_ino();
if (S_ISDIR(mode)) {
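autofs is converted from the legacy mount_nodev()/parse_options() path to the new mount API: a const fs_parameter_spec table, an fs_context with parse_param/get_tree/free operations, and get_tree_nodev() to build the superblock. The sketch below shows the same shape for a hypothetical "demofs", trimmed to the moving parts; it is not the autofs code.

enum { Opt_mode };

static const struct fs_parameter_spec demo_param_specs[] = {
	fsparam_u32("mode", Opt_mode),
	{}
};

struct demo_fs_context { u32 mode; };

static int demo_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct demo_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	int opt = fs_parse(fc, demo_param_specs, param, &result);

	if (opt < 0)
		return opt;
	if (opt == Opt_mode)
		ctx->mode = result.uint_32;
	return 0;
}

static int demo_fill_super(struct super_block *s, struct fs_context *fc)
{
	/* set s_op, create the root dentry, etc. */
	return 0;
}

static int demo_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, demo_fill_super);
}

static void demo_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations demo_context_ops = {
	.free		= demo_free_fc,
	.parse_param	= demo_parse_param,
	.get_tree	= demo_get_tree,
};

static int demo_init_fs_context(struct fs_context *fc)
{
	fc->fs_private = kzalloc(sizeof(struct demo_fs_context), GFP_KERNEL);
	if (!fc->fs_private)
		return -ENOMEM;
	fc->ops = &demo_context_ops;
	return 0;
}

static struct file_system_type demo_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "demofs",
	.init_fs_context	= demo_init_fs_context,
	.parameters		= demo_param_specs,
	.kill_sb		= kill_anon_super,
};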
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 512b9a26c63d..530d18827e35 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -600,7 +600,7 @@ static int autofs_dir_symlink(struct mnt_idmap *idmap,
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
return 0;
}
@@ -633,7 +633,7 @@ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry)
d_inode(dentry)->i_size = 0;
clear_nlink(d_inode(dentry));
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
spin_lock(&sbi->lookup_lock);
__autofs_add_expiring(dentry);
@@ -749,7 +749,7 @@ static int autofs_dir_mkdir(struct mnt_idmap *idmap,
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
inc_nlink(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
return 0;
}
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 83f9566c973b..316d88da2ce1 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -208,7 +208,7 @@ void make_bad_inode(struct inode *inode)
remove_inode_hash(inode);
inode->i_mode = S_IFREG;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &bad_inode_ops;
inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &bad_file_ops;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 9a16a51fbb88..9acdec56f626 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -360,11 +360,11 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
* for indexing purposes. (PFD, page 54)
*/
- inode->i_mtime.tv_sec =
- fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16;
- inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */
- inode_set_ctime_to_ts(inode, inode->i_mtime);
- inode->i_atime = inode->i_mtime;
+ inode_set_mtime(inode,
+ fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16,
+ 0);/* lower 16 bits are not a time */
+ inode_set_ctime_to_ts(inode, inode_get_mtime(inode));
+ inode_set_atime_to_ts(inode, inode_get_mtime(inode));
befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num);
befs_ino->i_parent = fsrun_to_cpu(sb, raw_inode->parent);
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 12b8af04dcb3..fbc4ae80a4b2 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -97,7 +97,7 @@ static int bfs_create(struct mnt_idmap *idmap, struct inode *dir,
set_bit(ino, info->si_imap);
info->si_freei--;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
@@ -187,7 +187,7 @@ static int bfs_unlink(struct inode *dir, struct dentry *dentry)
}
de->ino = 0;
mark_buffer_dirty_inode(bh, dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
@@ -240,7 +240,7 @@ static int bfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
goto end_rename;
}
old_de->ino = 0;
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
mark_inode_dirty(old_dir);
if (new_inode) {
inode_set_ctime_current(new_inode);
@@ -294,7 +294,8 @@ static int bfs_add_entry(struct inode *dir, const struct qstr *child, int ino)
dir->i_size += BFS_DIRENT_SIZE;
inode_set_ctime_current(dir);
}
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_current(dir));
mark_inode_dirty(dir);
de->ino = cpu_to_le16((u16)ino);
for (i = 0; i < BFS_NAMELEN; i++)
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index e6a76ae9eb44..355957dbce39 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -80,11 +80,9 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
set_nlink(inode, le32_to_cpu(di->i_nlink));
inode->i_size = BFS_FILESIZE(di);
inode->i_blocks = BFS_FILEBLOCKS(di);
- inode->i_atime.tv_sec = le32_to_cpu(di->i_atime);
- inode->i_mtime.tv_sec = le32_to_cpu(di->i_mtime);
+ inode_set_atime(inode, le32_to_cpu(di->i_atime), 0);
+ inode_set_mtime(inode, le32_to_cpu(di->i_mtime), 0);
inode_set_ctime(inode, le32_to_cpu(di->i_ctime), 0);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
brelse(bh);
unlock_new_inode(inode);
@@ -140,9 +138,9 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
di->i_uid = cpu_to_le32(i_uid_read(inode));
di->i_gid = cpu_to_le32(i_gid_read(inode));
di->i_nlink = cpu_to_le32(inode->i_nlink);
- di->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
- di->i_ctime = cpu_to_le32(inode_get_ctime(inode).tv_sec);
+ di->i_atime = cpu_to_le32(inode_get_atime_sec(inode));
+ di->i_mtime = cpu_to_le32(inode_get_mtime_sec(inode));
+ di->i_ctime = cpu_to_le32(inode_get_ctime_sec(inode));
i_sblock = BFS_I(inode)->i_sblock;
di->i_sblock = cpu_to_le32(i_sblock);
di->i_eblock = cpu_to_le32(BFS_I(inode)->i_eblock);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index e0108d17b085..5d2be9b0a0a5 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -547,7 +547,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 0dc91bf654b5..beed7e459dab 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -3199,12 +3199,14 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
* We still need to do a tree search to find out the parents. This is for
* TREE_BLOCK_REF backref (keyed or inlined).
*
+ * @trans: Transaction handle.
* @ref_key: The same as @ref_key in handle_direct_tree_backref()
* @tree_key: The first key of this tree block.
* @path: A clean (released) path, to avoid allocating path every time
* the function get called.
*/
-static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
+static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_key *ref_key,
struct btrfs_key *tree_key,
@@ -3318,7 +3320,7 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
* If we know the block isn't shared we can avoid
* checking its backrefs.
*/
- if (btrfs_block_can_be_shared(root, eb))
+ if (btrfs_block_can_be_shared(trans, root, eb))
upper->checked = 0;
else
upper->checked = 1;
@@ -3366,11 +3368,13 @@ out:
* links aren't yet bi-directional. Needs to finish such links.
* Use btrfs_backref_finish_upper_links() to finish such linkage.
*
+ * @trans: Transaction handle.
* @path: Released path for indirect tree backref lookup
* @iter: Released backref iter for extent tree search
* @node_key: The first key of the tree block
*/
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_backref_iter *iter,
struct btrfs_key *node_key,
@@ -3470,8 +3474,8 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
* offset means the root objectid. We need to search
* the tree to get its parent bytenr.
*/
- ret = handle_indirect_tree_backref(cache, path, &key, node_key,
- cur);
+ ret = handle_indirect_tree_backref(trans, cache, path,
+ &key, node_key, cur);
if (ret < 0)
goto out;
}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 83a9a34e948e..ab4ca0eda605 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -540,7 +540,8 @@ static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
bytenr);
}
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_backref_iter *iter,
struct btrfs_key *node_key,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c0c5f2239820..2a9344a3fcee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -370,7 +370,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
/*
* check if the tree block can be shared by multiple trees
*/
-int btrfs_block_can_be_shared(struct btrfs_root *root,
+int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct extent_buffer *buf)
{
/*
@@ -379,11 +380,21 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
* not allocated by tree relocation, we know the block is not shared.
*/
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- buf != root->node && buf != root->commit_root &&
+ buf != root->node &&
(btrfs_header_generation(buf) <=
btrfs_root_last_snapshot(&root->root_item) ||
- btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
- return 1;
+ btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) {
+ if (buf != root->commit_root)
+ return 1;
+ /*
+ * An extent buffer that used to be the commit root may still be
+ * shared because the tree height may have increased and it
+ * became a child of a higher level root. This can happen when
+ * snapshotting a subvolume created in the current transaction.
+ */
+ if (btrfs_header_generation(buf) == trans->transid)
+ return 1;
+ }
return 0;
}
@@ -418,7 +429,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
* are only allowed for blocks use full backrefs.
*/
- if (btrfs_block_can_be_shared(root, buf)) {
+ if (btrfs_block_can_be_shared(trans, root, buf)) {
ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
btrfs_header_level(buf), 1,
&refs, &flags);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index c8f1d2d7c46c..196c005c31f6 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -561,7 +561,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer **cow_ret, u64 new_root_objectid);
-int btrfs_block_can_be_shared(struct btrfs_root *root,
+int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
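The ctree.c hunk passes the transaction into btrfs_block_can_be_shared() so that a buffer which used to be the commit root but was created in the current transaction (a snapshot of a subvolume made in that same transaction) is still treated as shared. The whole predicate, condensed from the hunk above into a single expression for readability:

bool can_be_shared =
	test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	buf != root->node &&
	(btrfs_header_generation(buf) <=
		 btrfs_root_last_snapshot(&root->root_item) ||
	 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	(buf != root->commit_root ||
	 btrfs_header_generation(buf) == trans->transid);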
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index c640f87038a6..7381241334e8 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1833,19 +1833,19 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_stack_inode_block_group(inode_item, 0);
btrfs_set_stack_timespec_sec(&inode_item->atime,
- inode->i_atime.tv_sec);
+ inode_get_atime_sec(inode));
btrfs_set_stack_timespec_nsec(&inode_item->atime,
- inode->i_atime.tv_nsec);
+ inode_get_atime_nsec(inode));
btrfs_set_stack_timespec_sec(&inode_item->mtime,
- inode->i_mtime.tv_sec);
+ inode_get_mtime_sec(inode));
btrfs_set_stack_timespec_nsec(&inode_item->mtime,
- inode->i_mtime.tv_nsec);
+ inode_get_mtime_nsec(inode));
btrfs_set_stack_timespec_sec(&inode_item->ctime,
- inode_get_ctime(inode).tv_sec);
+ inode_get_ctime_sec(inode));
btrfs_set_stack_timespec_nsec(&inode_item->ctime,
- inode_get_ctime(inode).tv_nsec);
+ inode_get_ctime_nsec(inode));
btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
@@ -1888,11 +1888,11 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
- inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
- inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
+ inode_set_atime(inode, btrfs_stack_timespec_sec(&inode_item->atime),
+ btrfs_stack_timespec_nsec(&inode_item->atime));
- inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
- inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
+ inode_set_mtime(inode, btrfs_stack_timespec_sec(&inode_item->mtime),
+ btrfs_stack_timespec_nsec(&inode_item->mtime));
inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
btrfs_stack_timespec_nsec(&inode_item->ctime));
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index d3998cad62c2..f9544fda38e9 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -246,6 +246,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
u64 devid = BTRFS_DEV_REPLACE_DEVID;
int ret = 0;
@@ -256,12 +257,13 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
return -EINVAL;
}
- bdev = blkdev_get_by_path(device_path, BLK_OPEN_WRITE,
- fs_info->bdev_holder, NULL);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_path(device_path, BLK_OPEN_WRITE,
+ fs_info->bdev_holder, NULL);
+ if (IS_ERR(bdev_handle)) {
btrfs_err(fs_info, "target device %s is invalid!", device_path);
- return PTR_ERR(bdev);
+ return PTR_ERR(bdev_handle);
}
+ bdev = bdev_handle->bdev;
if (!btrfs_check_device_zone_type(fs_info, bdev)) {
btrfs_err(fs_info,
@@ -312,9 +314,9 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
device->commit_bytes_used = device->bytes_used;
device->fs_info = fs_info;
device->bdev = bdev;
+ device->bdev_handle = bdev_handle;
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
- device->holder = fs_info->bdev_holder;
device->dev_stats_valid = 1;
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
device->fs_devices = fs_devices;
@@ -333,7 +335,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
return 0;
error:
- blkdev_put(bdev, fs_info->bdev_holder);
+ bdev_release(bdev_handle);
return ret;
}
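Here and in the ioctl.c and volumes.c hunks below, btrfs moves from blkdev_get_by_path()/blkdev_put() to bdev_open_by_path()/bdev_release(): the opened handle is kept next to the block device so releasing it no longer needs the holder argument. The pairing, reduced to its core:

struct bdev_handle *handle;
struct block_device *bdev;

handle = bdev_open_by_path(device_path, BLK_OPEN_WRITE, holder, NULL);
if (IS_ERR(handle))
	return PTR_ERR(handle);
bdev = handle->bdev;

/* ... use bdev; keep handle wherever the device state lives ... */

bdev_release(handle);		/* replaces blkdev_put(bdev, holder) */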
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 92419cb8508a..f47731c45bb5 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1111,17 +1111,18 @@ void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
static void update_time_for_write(struct inode *inode)
{
- struct timespec64 now, ctime;
+ struct timespec64 now, ts;
if (IS_NOCMTIME(inode))
return;
now = current_time(inode);
- if (!timespec64_equal(&inode->i_mtime, &now))
- inode->i_mtime = now;
+ ts = inode_get_mtime(inode);
+ if (!timespec64_equal(&ts, &now))
+ inode_set_mtime_to_ts(inode, now);
- ctime = inode_get_ctime(inode);
- if (!timespec64_equal(&ctime, &now))
+ ts = inode_get_ctime(inode);
+ if (!timespec64_equal(&ts, &now))
inode_set_ctime_to_ts(inode, now);
if (IS_I_VERSION(inode))
@@ -2476,7 +2477,8 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
inode_inc_iversion(&inode->vfs_inode);
if (!extent_info || extent_info->update_times)
- inode->vfs_inode.i_mtime = inode_set_ctime_current(&inode->vfs_inode);
+ inode_set_mtime_to_ts(&inode->vfs_inode,
+ inode_set_ctime_current(&inode->vfs_inode));
ret = btrfs_update_inode(trans, inode);
if (ret)
@@ -2717,7 +2719,7 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
ASSERT(trans != NULL);
inode_inc_iversion(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
updated_inode = true;
btrfs_end_transaction(trans);
@@ -2737,7 +2739,7 @@ out_only_mutex:
struct timespec64 now = inode_set_ctime_current(inode);
inode_inc_iversion(inode);
- inode->i_mtime = now;
+ inode_set_mtime_to_ts(inode, now);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b388505c91cc..5e3fccddde0c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3762,11 +3762,11 @@ static int btrfs_read_locked_inode(struct inode *inode,
btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
round_up(i_size_read(inode), fs_info->sectorsize));
- inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
- inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
+ inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
+ btrfs_timespec_nsec(leaf, &inode_item->atime));
- inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
- inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
+ inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
+ btrfs_timespec_nsec(leaf, &inode_item->mtime));
inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
btrfs_timespec_nsec(leaf, &inode_item->ctime));
@@ -3928,19 +3928,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
btrfs_set_token_timespec_sec(&token, &item->atime,
- inode->i_atime.tv_sec);
+ inode_get_atime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->atime,
- inode->i_atime.tv_nsec);
+ inode_get_atime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->mtime,
- inode->i_mtime.tv_sec);
+ inode_get_mtime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->mtime,
- inode->i_mtime.tv_nsec);
+ inode_get_mtime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode_get_ctime(inode).tv_sec);
+ inode_get_ctime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode_get_ctime(inode).tv_nsec);
+ inode_get_ctime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);
@@ -4135,8 +4135,7 @@ err:
btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
inode_inc_iversion(&inode->vfs_inode);
inode_inc_iversion(&dir->vfs_inode);
- inode_set_ctime_current(&inode->vfs_inode);
- dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
+ inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
ret = btrfs_update_inode(trans, dir);
out:
return ret;
@@ -4309,7 +4308,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
inode_inc_iversion(&dir->vfs_inode);
- dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
+ inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
ret = btrfs_update_inode_fallback(trans, dir);
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -4959,7 +4958,8 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
if (newsize != oldsize) {
inode_inc_iversion(inode);
if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
}
}
@@ -5585,6 +5585,7 @@ static struct inode *new_simple_dir(struct inode *dir,
struct btrfs_key *key,
struct btrfs_root *root)
{
+ struct timespec64 ts;
struct inode *inode = new_inode(dir->i_sb);
if (!inode)
@@ -5603,10 +5604,13 @@ static struct inode *new_simple_dir(struct inode *dir,
inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
- inode->i_mtime = inode_set_ctime_current(inode);
- inode->i_atime = dir->i_atime;
- BTRFS_I(inode)->i_otime_sec = inode->i_mtime.tv_sec;
- BTRFS_I(inode)->i_otime_nsec = inode->i_mtime.tv_nsec;
+
+ ts = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, ts);
+ inode_set_atime_to_ts(inode, inode_get_atime(dir));
+ BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
+ BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
+
inode->i_uid = dir->i_uid;
inode->i_gid = dir->i_gid;
@@ -6164,6 +6168,7 @@ static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_new_inode_args *args)
{
+ struct timespec64 ts;
struct inode *dir = args->dir;
struct inode *inode = args->inode;
const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
@@ -6281,10 +6286,9 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
goto discard;
}
- inode->i_mtime = inode_set_ctime_current(inode);
- inode->i_atime = inode->i_mtime;
- BTRFS_I(inode)->i_otime_sec = inode->i_mtime.tv_sec;
- BTRFS_I(inode)->i_otime_nsec = inode->i_mtime.tv_nsec;
+ ts = simple_inode_init_ts(inode);
+ BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
+ BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
/*
* We're going to fill the inode item now, so at this point the inode
@@ -6449,8 +6453,8 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
* values (the ones it had when the fsync was done).
*/
if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
- parent_inode->vfs_inode.i_mtime =
- inode_set_ctime_current(&parent_inode->vfs_inode);
+ inode_set_mtime_to_ts(&parent_inode->vfs_inode,
+ inode_set_ctime_current(&parent_inode->vfs_inode));
ret = btrfs_update_inode(trans, parent_inode);
if (ret)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 89cd212735ea..752acff2c734 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2682,8 +2682,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args_v2 *vol_args;
- struct block_device *bdev = NULL;
- void *holder;
+ struct bdev_handle *bdev_handle = NULL;
int ret;
bool cancel = false;
@@ -2720,7 +2719,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
goto err_drop;
/* Exclusive operation is now claimed */
- ret = btrfs_rm_device(fs_info, &args, &bdev, &holder);
+ ret = btrfs_rm_device(fs_info, &args, &bdev_handle);
btrfs_exclop_finish(fs_info);
@@ -2734,8 +2733,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
}
err_drop:
mnt_drop_write_file(file);
- if (bdev)
- blkdev_put(bdev, holder);
+ if (bdev_handle)
+ bdev_release(bdev_handle);
out:
btrfs_put_dev_args_from_path(&args);
kfree(vol_args);
@@ -2748,8 +2747,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args *vol_args;
- struct block_device *bdev = NULL;
- void *holder;
+ struct bdev_handle *bdev_handle = NULL;
int ret;
bool cancel = false;
@@ -2776,15 +2774,15 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
cancel);
if (ret == 0) {
- ret = btrfs_rm_device(fs_info, &args, &bdev, &holder);
+ ret = btrfs_rm_device(fs_info, &args, &bdev_handle);
if (!ret)
btrfs_info(fs_info, "disk deleted %s", vol_args->name);
btrfs_exclop_finish(fs_info);
}
mnt_drop_write_file(file);
- if (bdev)
- blkdev_put(bdev, holder);
+ if (bdev_handle)
+ bdev_release(bdev_handle);
out:
btrfs_put_dev_args_from_path(&args);
kfree(vol_args);
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index fabd856e5079..f88b0c2ac3fe 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -29,7 +29,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
inode_inc_iversion(inode);
if (!no_time_update) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}
/*
* We round up to the block size at eof when determining which
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index a896f22e138b..f5d9e5f74a52 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -461,6 +461,7 @@ static bool handle_useless_nodes(struct reloc_control *rc,
* cached.
*/
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
+ struct btrfs_trans_handle *trans,
struct reloc_control *rc, struct btrfs_key *node_key,
int level, u64 bytenr)
{
@@ -494,8 +495,8 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
/* Breadth-first search to build backref cache */
do {
- ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
- cur);
+ ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
+ node_key, cur);
if (ret < 0) {
err = ret;
goto out;
@@ -2802,7 +2803,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
/* Do tree relocation */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
- node = build_backref_tree(rc, &block->key,
+ node = build_backref_tree(trans, rc, &block->key,
block->level, block->bytenr);
if (IS_ERR(node)) {
err = PTR_ERR(node);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 9694a3ca1739..6e63816dddcb 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1911,7 +1911,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
fname.disk_name.len * 2);
- parent_inode->i_mtime = inode_set_ctime_current(parent_inode);
+ inode_set_mtime_to_ts(parent_inode,
+ inode_set_ctime_current(parent_inode));
ret = btrfs_update_inode_fallback(trans, BTRFS_I(parent_inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 6c7e7e723e3a..7d6729d9fd2f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4131,19 +4131,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
btrfs_set_token_timespec_sec(&token, &item->atime,
- inode->i_atime.tv_sec);
+ inode_get_atime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->atime,
- inode->i_atime.tv_nsec);
+ inode_get_atime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->mtime,
- inode->i_mtime.tv_sec);
+ inode_get_mtime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->mtime,
- inode->i_mtime.tv_nsec);
+ inode_get_mtime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode_get_ctime(inode).tv_sec);
+ inode_get_ctime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode_get_ctime(inode).tv_nsec);
+ inode_get_ctime_nsec(inode));
/*
* We do not need to set the nbytes field, in fact during a fast fsync
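
The tree-log.c hunk reads each timestamp through the split second/nanosecond accessors instead of dereferencing struct members. A sketch of the same serialization step, assuming a hypothetical little-endian on-disk layout; only the inode_get_*_sec()/_nsec() calls are taken from the hunk.

#include <linux/fs.h>

/* Hypothetical on-disk timestamp layout; the struct is illustrative. */
struct example_disk_times {
        __le64 mtime_sec;
        __le32 mtime_nsec;
        __le64 ctime_sec;
        __le32 ctime_nsec;
};

static void example_fill_times(struct inode *inode,
                               struct example_disk_times *it)
{
        it->mtime_sec  = cpu_to_le64(inode_get_mtime_sec(inode));
        it->mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
        it->ctime_sec  = cpu_to_le64(inode_get_ctime_sec(inode));
        it->ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
}
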
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1fdfa9153e30..c87e18827a0a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -457,37 +457,39 @@ static noinline struct btrfs_fs_devices *find_fsid(
static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
- int flush, struct block_device **bdev,
+ int flush, struct bdev_handle **bdev_handle,
struct btrfs_super_block **disk_super)
{
+ struct block_device *bdev;
int ret;
- *bdev = blkdev_get_by_path(device_path, flags, holder, NULL);
+ *bdev_handle = bdev_open_by_path(device_path, flags, holder, NULL);
- if (IS_ERR(*bdev)) {
- ret = PTR_ERR(*bdev);
+ if (IS_ERR(*bdev_handle)) {
+ ret = PTR_ERR(*bdev_handle);
goto error;
}
+ bdev = (*bdev_handle)->bdev;
if (flush)
- sync_blockdev(*bdev);
- ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
+ sync_blockdev(bdev);
+ ret = set_blocksize(bdev, BTRFS_BDEV_BLOCKSIZE);
if (ret) {
- blkdev_put(*bdev, holder);
+ bdev_release(*bdev_handle);
goto error;
}
- invalidate_bdev(*bdev);
- *disk_super = btrfs_read_dev_super(*bdev);
+ invalidate_bdev(bdev);
+ *disk_super = btrfs_read_dev_super(bdev);
if (IS_ERR(*disk_super)) {
ret = PTR_ERR(*disk_super);
- blkdev_put(*bdev, holder);
+ bdev_release(*bdev_handle);
goto error;
}
return 0;
error:
- *bdev = NULL;
+ *bdev_handle = NULL;
return ret;
}
@@ -630,7 +632,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *device, blk_mode_t flags,
void *holder)
{
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct btrfs_super_block *disk_super;
u64 devid;
int ret;
@@ -641,7 +643,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
return -EINVAL;
ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
- &bdev, &disk_super);
+ &bdev_handle, &disk_super);
if (ret)
return ret;
@@ -665,21 +667,21 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
fs_devices->seeding = true;
} else {
- if (bdev_read_only(bdev))
+ if (bdev_read_only(bdev_handle->bdev))
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
else
set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
}
- if (!bdev_nonrot(bdev))
+ if (!bdev_nonrot(bdev_handle->bdev))
fs_devices->rotating = true;
- if (bdev_max_discard_sectors(bdev))
+ if (bdev_max_discard_sectors(bdev_handle->bdev))
fs_devices->discardable = true;
- device->bdev = bdev;
+ device->bdev_handle = bdev_handle;
+ device->bdev = bdev_handle->bdev;
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
- device->holder = holder;
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
@@ -693,7 +695,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
error_free_page:
btrfs_release_disk_super(disk_super);
- blkdev_put(bdev, holder);
+ bdev_release(bdev_handle);
return -EINVAL;
}
@@ -1002,9 +1004,10 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
if (device->devid == BTRFS_DEV_REPLACE_DEVID)
continue;
- if (device->bdev) {
- blkdev_put(device->bdev, device->holder);
+ if (device->bdev_handle) {
+ bdev_release(device->bdev_handle);
device->bdev = NULL;
+ device->bdev_handle = NULL;
fs_devices->open_devices--;
}
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
@@ -1049,7 +1052,7 @@ static void btrfs_close_bdev(struct btrfs_device *device)
invalidate_bdev(device->bdev);
}
- blkdev_put(device->bdev, device->holder);
+ bdev_release(device->bdev_handle);
}
static void btrfs_close_one_device(struct btrfs_device *device)
@@ -1302,7 +1305,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
struct btrfs_super_block *disk_super;
bool new_device_added = false;
struct btrfs_device *device = NULL;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
u64 bytenr, bytenr_orig;
int ret;
@@ -1325,18 +1328,19 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
* values temporarily, as the device paths of the fsid are the only
* required information for assembling the volume.
*/
- bdev = blkdev_get_by_path(path, flags, NULL, NULL);
- if (IS_ERR(bdev))
- return ERR_CAST(bdev);
+ bdev_handle = bdev_open_by_path(path, flags, NULL, NULL);
+ if (IS_ERR(bdev_handle))
+ return ERR_CAST(bdev_handle);
bytenr_orig = btrfs_sb_offset(0);
- ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
+ ret = btrfs_sb_log_location_bdev(bdev_handle->bdev, 0, READ, &bytenr);
if (ret) {
device = ERR_PTR(ret);
goto error_bdev_put;
}
- disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
+ disk_super = btrfs_read_disk_super(bdev_handle->bdev, bytenr,
+ bytenr_orig);
if (IS_ERR(disk_super)) {
device = ERR_CAST(disk_super);
goto error_bdev_put;
@@ -1366,7 +1370,7 @@ free_disk_super:
btrfs_release_disk_super(disk_super);
error_bdev_put:
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
return device;
}
@@ -2043,7 +2047,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
struct btrfs_dev_lookup_args *args,
- struct block_device **bdev, void **holder)
+ struct bdev_handle **bdev_handle)
{
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
@@ -2152,7 +2156,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
btrfs_assign_next_active_device(device, NULL);
- if (device->bdev) {
+ if (device->bdev_handle) {
cur_devices->open_devices--;
/* remove sysfs entry */
btrfs_sysfs_remove_device(device);
@@ -2168,9 +2172,9 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
* free the device.
*
* We cannot call btrfs_close_bdev() here because we're holding the sb
- * write lock, and blkdev_put() will pull in the ->open_mutex on the
- * block device and it's dependencies. Instead just flush the device
- * and let the caller do the final blkdev_put.
+ * write lock, and bdev_release() will pull in the ->open_mutex on
+ * the block device and its dependencies. Instead just flush the
+ * device and let the caller do the final bdev_release.
*/
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
btrfs_scratch_superblocks(fs_info, device->bdev,
@@ -2181,8 +2185,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
}
}
- *bdev = device->bdev;
- *holder = device->holder;
+ *bdev_handle = device->bdev_handle;
synchronize_rcu();
btrfs_free_device(device);
@@ -2319,7 +2322,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
const char *path)
{
struct btrfs_super_block *disk_super;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
int ret;
if (!path || !path[0])
@@ -2337,7 +2340,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
}
ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0,
- &bdev, &disk_super);
+ &bdev_handle, &disk_super);
if (ret) {
btrfs_put_dev_args_from_path(args);
return ret;
@@ -2350,7 +2353,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
else
memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
btrfs_release_disk_super(disk_super);
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
return 0;
}
@@ -2570,7 +2573,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct super_block *sb = fs_info->sb;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_fs_devices *seed_devices = NULL;
@@ -2583,12 +2586,12 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
if (sb_rdonly(sb) && !fs_devices->seeding)
return -EROFS;
- bdev = blkdev_get_by_path(device_path, BLK_OPEN_WRITE,
- fs_info->bdev_holder, NULL);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_path(device_path, BLK_OPEN_WRITE,
+ fs_info->bdev_holder, NULL);
+ if (IS_ERR(bdev_handle))
+ return PTR_ERR(bdev_handle);
- if (!btrfs_check_device_zone_type(fs_info, bdev)) {
+ if (!btrfs_check_device_zone_type(fs_info, bdev_handle->bdev)) {
ret = -EINVAL;
goto error;
}
@@ -2600,11 +2603,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
locked = true;
}
- sync_blockdev(bdev);
+ sync_blockdev(bdev_handle->bdev);
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
- if (device->bdev == bdev) {
+ if (device->bdev == bdev_handle->bdev) {
ret = -EEXIST;
rcu_read_unlock();
goto error;
@@ -2620,7 +2623,8 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
device->fs_info = fs_info;
- device->bdev = bdev;
+ device->bdev_handle = bdev_handle;
+ device->bdev = bdev_handle->bdev;
ret = lookup_bdev(device_path, &device->devt);
if (ret)
goto error_free_device;
@@ -2641,12 +2645,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
device->io_align = fs_info->sectorsize;
device->sector_size = fs_info->sectorsize;
device->total_bytes =
- round_down(bdev_nr_bytes(bdev), fs_info->sectorsize);
+ round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize);
device->disk_total_bytes = device->total_bytes;
device->commit_total_bytes = device->total_bytes;
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
- device->holder = fs_info->bdev_holder;
device->dev_stats_valid = 1;
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
@@ -2682,7 +2685,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
- if (!bdev_nonrot(bdev))
+ if (!bdev_nonrot(device->bdev))
fs_devices->rotating = true;
orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
@@ -2804,7 +2807,7 @@ error_free_zone:
error_free_device:
btrfs_free_device(device);
error:
- blkdev_put(bdev, fs_info->bdev_holder);
+ bdev_release(bdev_handle);
if (locked) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
@@ -5081,7 +5084,7 @@ static void init_alloc_chunk_ctl_policy_regular(
ASSERT(space_info);
ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
- ctl->max_stripe_size = ctl->max_chunk_size;
+ ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);
if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
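
The volumes.c conversion above moves btrfs from blkdev_get_by_path()/blkdev_put() to bdev_open_by_path()/bdev_release(), keeping a struct bdev_handle alongside the raw block_device pointer. A minimal sketch of that open/use/release lifetime, assuming an arbitrary holder pointer and read-write access; error handling is reduced to the essentials.

#include <linux/blkdev.h>

static int example_open_device(const char *path, void *holder)
{
        struct bdev_handle *handle;
        struct block_device *bdev;

        handle = bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
                                   holder, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        /* The raw block_device is still needed for the I/O helpers. */
        bdev = handle->bdev;
        sync_blockdev(bdev);

        /* ... use bdev ... */

        /*
         * One call releases the open reference; no separate holder
         * argument is needed anymore.
         */
        bdev_release(handle);
        return 0;
}
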
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 5dd4ad775e5d..9cc374864a79 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -90,13 +90,11 @@ struct btrfs_device {
u64 generation;
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
struct btrfs_zoned_device_info *zone_info;
- /* block device holder for blkdev_get/put */
- void *holder;
-
/*
* Device's major-minor number. Must be set even if the device is not
* opened (bdev == NULL), unless the device is missing.
@@ -648,7 +646,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
struct btrfs_dev_lookup_args *args,
- struct block_device **bdev, void **holder);
+ struct bdev_handle **bdev_handle);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 76ff93b3eb27..3cf236fb40a4 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -442,7 +442,7 @@ static const struct xattr_handler btrfs_btrfs_xattr_handler = {
.set = btrfs_xattr_handler_set_prop,
};
-const struct xattr_handler *btrfs_xattr_handlers[] = {
+const struct xattr_handler * const btrfs_xattr_handlers[] = {
&btrfs_security_xattr_handler,
&btrfs_trusted_xattr_handler,
&btrfs_user_xattr_handler,
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index 1cd3fc0a8f17..118118ca3e1d 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -8,7 +8,7 @@
#include <linux/xattr.h>
-extern const struct xattr_handler *btrfs_xattr_handlers[];
+extern const struct xattr_handler * const btrfs_xattr_handlers[];
int btrfs_getxattr(struct inode *inode, const char *name,
void *buffer, size_t size);
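
The xattr table declarations above gain a second const so that the array of handler pointers, not just the handlers themselves, can live in read-only data. A short sketch of the pattern with a hypothetical handler (callbacks omitted):

#include <linux/xattr.h>

static const struct xattr_handler example_user_handler = {
        .prefix = "user.",
        /* .get / .set omitted in this sketch */
};

/*
 * Both the pointed-to handlers and the pointer array are const,
 * so the whole table can be placed in .rodata.
 */
static const struct xattr_handler * const example_handlers[] = {
        &example_user_handler,
        NULL,
};
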
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index f4863078f7fe..936b9e0b351d 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -750,7 +750,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
dout("writepage %llu~%llu (%llu bytes, %sencrypted)\n",
page_off, len, wlen, IS_ENCRYPTED(inode) ? "" : "not ");
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(osdc, req);
err = ceph_osdc_wait_request(osdc, req);
@@ -1327,7 +1327,7 @@ new_request:
pages = NULL;
}
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(&fsc->client->osdc, req);
req = NULL;
@@ -1875,7 +1875,7 @@ int ceph_uninline_data(struct file *file)
goto out_unlock;
}
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(&fsc->client->osdc, req);
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
ceph_osdc_put_request(req);
@@ -1917,7 +1917,7 @@ int ceph_uninline_data(struct file *file)
goto out_put_req;
}
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(&fsc->client->osdc, req);
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -2092,7 +2092,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
0, false, true);
ceph_osdc_start_request(&fsc->client->osdc, rd_req);
- wr_req->r_mtime = ci->netfs.inode.i_mtime;
+ wr_req->r_mtime = inode_get_mtime(&ci->netfs.inode);
ceph_osdc_start_request(&fsc->client->osdc, wr_req);
err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 14215ec646f7..a104669fcf4c 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1421,8 +1421,8 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
arg->old_xattr_buf = NULL;
}
- arg->mtime = inode->i_mtime;
- arg->atime = inode->i_atime;
+ arg->mtime = inode_get_mtime(inode);
+ arg->atime = inode_get_atime(inode);
arg->ctime = inode_get_ctime(inode);
arg->btime = ci->i_btime;
arg->change_attr = inode_peek_iversion_raw(inode);
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
index e1f31b86fd48..e3b1c3fab412 100644
--- a/fs/ceph/crypto.c
+++ b/fs/ceph/crypto.c
@@ -133,6 +133,7 @@ static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
}
static struct fscrypt_operations ceph_fscrypt_ops = {
+ .needs_bounce_pages = 1,
.get_context = ceph_crypt_get_context,
.set_context = ceph_crypt_set_context,
.get_dummy_policy = ceph_get_dummy_policy,
@@ -460,7 +461,7 @@ int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
out:
fscrypt_fname_free_buffer(&_tname);
out_inode:
- if ((dir != fname->dir) && !IS_ERR(dir)) {
+ if (dir != fname->dir) {
if ((dir->i_state & I_NEW))
discard_new_inode(dir);
else
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index b1da02f5dbe3..649600d0a7b6 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -2489,7 +2489,7 @@ static int ceph_zero_partial_object(struct inode *inode,
goto out;
}
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(&fsc->client->osdc, req);
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
if (ret == -ENOENT)
@@ -2969,7 +2969,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
ret = do_splice_direct(src_file, &src_off, dst_file,
&dst_off, src_objlen, flags);
/* Abort on short copies or on error */
- if (ret < src_objlen) {
+ if (ret < (long)src_objlen) {
dout("Failed partial copy (%zd)\n", ret);
goto out;
}
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 800ab7920513..2e2a303b9e64 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -185,9 +185,9 @@ struct inode *ceph_get_snapdir(struct inode *parent)
inode->i_mode = parent->i_mode;
inode->i_uid = parent->i_uid;
inode->i_gid = parent->i_gid;
- inode->i_mtime = parent->i_mtime;
+ inode_set_mtime_to_ts(inode, inode_get_mtime(parent));
inode_set_ctime_to_ts(inode, inode_get_ctime(parent));
- inode->i_atime = parent->i_atime;
+ inode_set_atime_to_ts(inode, inode_get_atime(parent));
ci->i_rbytes = 0;
ci->i_btime = ceph_inode(parent)->i_btime;
@@ -769,9 +769,7 @@ int ceph_fill_file_size(struct inode *inode, int issued,
ci->i_truncate_seq = truncate_seq;
/* the MDS should have revoked these caps */
- WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
- CEPH_CAP_FILE_RD |
- CEPH_CAP_FILE_WR |
+ WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
CEPH_CAP_FILE_LAZYIO));
/*
* If we hold relevant caps, or in the case where we're
@@ -837,28 +835,31 @@ void ceph_fill_file_time(struct inode *inode, int issued,
/* the MDS did a utimes() */
dout("mtime %lld.%09ld -> %lld.%09ld "
"tw %d -> %d\n",
- inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
+ inode_get_mtime_sec(inode),
+ inode_get_mtime_nsec(inode),
mtime->tv_sec, mtime->tv_nsec,
ci->i_time_warp_seq, (int)time_warp_seq);
- inode->i_mtime = *mtime;
- inode->i_atime = *atime;
+ inode_set_mtime_to_ts(inode, *mtime);
+ inode_set_atime_to_ts(inode, *atime);
ci->i_time_warp_seq = time_warp_seq;
} else if (time_warp_seq == ci->i_time_warp_seq) {
+ struct timespec64 ts;
+
/* nobody did utimes(); take the max */
- if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
+ ts = inode_get_mtime(inode);
+ if (timespec64_compare(mtime, &ts) > 0) {
dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
- inode->i_mtime.tv_sec,
- inode->i_mtime.tv_nsec,
+ ts.tv_sec, ts.tv_nsec,
mtime->tv_sec, mtime->tv_nsec);
- inode->i_mtime = *mtime;
+ inode_set_mtime_to_ts(inode, *mtime);
}
- if (timespec64_compare(atime, &inode->i_atime) > 0) {
+ ts = inode_get_atime(inode);
+ if (timespec64_compare(atime, &ts) > 0) {
dout("atime %lld.%09ld -> %lld.%09ld inc\n",
- inode->i_atime.tv_sec,
- inode->i_atime.tv_nsec,
+ ts.tv_sec, ts.tv_nsec,
atime->tv_sec, atime->tv_nsec);
- inode->i_atime = *atime;
+ inode_set_atime_to_ts(inode, *atime);
}
} else if (issued & CEPH_CAP_FILE_EXCL) {
/* we did a utimes(); ignore mds values */
@@ -869,8 +870,8 @@ void ceph_fill_file_time(struct inode *inode, int issued,
/* we have no write|excl caps; whatever the MDS says is true */
if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
inode_set_ctime_to_ts(inode, *ctime);
- inode->i_mtime = *mtime;
- inode->i_atime = *atime;
+ inode_set_mtime_to_ts(inode, *mtime);
+ inode_set_atime_to_ts(inode, *atime);
ci->i_time_warp_seq = time_warp_seq;
} else {
warn = 1;
@@ -2553,20 +2554,22 @@ retry:
}
if (ia_valid & ATTR_ATIME) {
+ struct timespec64 atime = inode_get_atime(inode);
+
dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
- inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
+ atime.tv_sec, atime.tv_nsec,
attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
ci->i_time_warp_seq++;
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
dirtied |= CEPH_CAP_FILE_EXCL;
} else if ((issued & CEPH_CAP_FILE_WR) &&
- timespec64_compare(&inode->i_atime,
- &attr->ia_atime) < 0) {
- inode->i_atime = attr->ia_atime;
+ timespec64_compare(&atime,
+ &attr->ia_atime) < 0) {
+ inode_set_atime_to_ts(inode, attr->ia_atime);
dirtied |= CEPH_CAP_FILE_WR;
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
- !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
+ !timespec64_equal(&atime, &attr->ia_atime)) {
ceph_encode_timespec64(&req->r_args.setattr.atime,
&attr->ia_atime);
mask |= CEPH_SETATTR_ATIME;
@@ -2626,20 +2629,21 @@ retry:
}
}
if (ia_valid & ATTR_MTIME) {
+ struct timespec64 mtime = inode_get_mtime(inode);
+
dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
- inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
+ mtime.tv_sec, mtime.tv_nsec,
attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
ci->i_time_warp_seq++;
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
dirtied |= CEPH_CAP_FILE_EXCL;
} else if ((issued & CEPH_CAP_FILE_WR) &&
- timespec64_compare(&inode->i_mtime,
- &attr->ia_mtime) < 0) {
- inode->i_mtime = attr->ia_mtime;
+ timespec64_compare(&mtime, &attr->ia_mtime) < 0) {
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
dirtied |= CEPH_CAP_FILE_WR;
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
- !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
+ !timespec64_equal(&mtime, &attr->ia_mtime)) {
ceph_encode_timespec64(&req->r_args.setattr.mtime,
&attr->ia_mtime);
mask |= CEPH_SETATTR_MTIME;
@@ -2653,8 +2657,8 @@ retry:
bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
- inode_get_ctime(inode).tv_sec,
- inode_get_ctime(inode).tv_nsec,
+ inode_get_ctime_sec(inode),
+ inode_get_ctime_nsec(inode),
attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
only ? "ctime only" : "ignored");
if (only) {
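
Since i_mtime and i_atime can no longer be passed by address, the ceph hunks copy the cached value into a local struct timespec64 first and compare against that. A minimal sketch of the same "only move the timestamp forward" logic, assuming the new value arrives from some metadata source:

#include <linux/fs.h>

static void example_take_newer_mtime(struct inode *inode,
                                     const struct timespec64 *mtime)
{
        struct timespec64 cur = inode_get_mtime(inode);

        /* Only advance the timestamp, as ceph_fill_file_time() does above. */
        if (timespec64_compare(mtime, &cur) > 0)
                inode_set_mtime_to_ts(inode, *mtime);
}
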
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 615db141b6c4..de798444bb97 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -861,8 +861,8 @@ int ceph_wait_on_conflict_unlink(struct dentry *dentry)
if (!d_same_name(udentry, pdentry, &dname))
goto next;
+ found = dget_dlock(udentry);
spin_unlock(&udentry->d_lock);
- found = dget(udentry);
break;
next:
spin_unlock(&udentry->d_lock);
@@ -4353,12 +4353,16 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
rec.v2.flock_len = (__force __le32)
((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
+ struct timespec64 ts;
+
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v1.issued = cpu_to_le32(cap->issued);
rec.v1.size = cpu_to_le64(i_size_read(inode));
- ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
- ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
+ ts = inode_get_mtime(inode);
+ ceph_encode_timespec64(&rec.v1.mtime, &ts);
+ ts = inode_get_atime(inode);
+ ceph_encode_timespec64(&rec.v1.atime, &ts);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v1.pathbase = cpu_to_le64(pathbase);
}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 813f21add992..6732e1ea97d9 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -658,8 +658,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
BUG_ON(capsnap->writing);
capsnap->size = i_size_read(inode);
- capsnap->mtime = inode->i_mtime;
- capsnap->atime = inode->i_atime;
+ capsnap->mtime = inode_get_mtime(inode);
+ capsnap->atime = inode_get_atime(inode);
capsnap->ctime = inode_get_ctime(inode);
capsnap->btime = ci->i_btime;
capsnap->change_attr = inode_peek_iversion_raw(inode);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 51c7f2b14f6f..98844fc8a2f7 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1119,7 +1119,7 @@ ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
-extern const struct xattr_handler *ceph_xattr_handlers[];
+extern const struct xattr_handler * const ceph_xattr_handlers[];
struct ceph_acl_sec_ctx {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 0deae4a0f5f1..097ce7f74073 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1446,7 +1446,7 @@ void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
* List of handlers for synthetic system.* attributes. Other
* attributes are handled directly.
*/
-const struct xattr_handler *ceph_xattr_handlers[] = {
+const struct xattr_handler * const ceph_xattr_handlers[] = {
&ceph_other_xattr_handler,
NULL,
};
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 950b6919fb87..6ba032442b39 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -350,7 +350,7 @@ static struct kobject *cdev_get(struct cdev *p)
struct module *owner = p->owner;
struct kobject *kobj;
- if (owner && !try_module_get(owner))
+ if (!try_module_get(owner))
return NULL;
kobj = kobject_get_unless_zero(&p->kobj);
if (!kobj)
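
The char_dev.c change relies on try_module_get(NULL) returning true, which makes the explicit owner check redundant. A small sketch of the resulting pin/unpin pattern; the function name is hypothetical:

#include <linux/module.h>

static bool example_pin_owner(struct module *owner)
{
        /* try_module_get(NULL) succeeds, so no "owner &&" check is needed. */
        if (!try_module_get(owner))
                return false;   /* owning module is being unloaded */

        /* ... use the object pinned by the module reference ... */

        module_put(owner);      /* module_put(NULL) is likewise a no-op */
        return true;
}
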
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index ae023853a98f..1d2dac95f86a 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -123,9 +123,11 @@ void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
if (attr->va_size != -1)
inode->i_blocks = (attr->va_size + 511) >> 9;
if (attr->va_atime.tv_sec != -1)
- inode->i_atime = coda_to_timespec64(attr->va_atime);
+ inode_set_atime_to_ts(inode,
+ coda_to_timespec64(attr->va_atime));
if (attr->va_mtime.tv_sec != -1)
- inode->i_mtime = coda_to_timespec64(attr->va_mtime);
+ inode_set_mtime_to_ts(inode,
+ coda_to_timespec64(attr->va_mtime));
if (attr->va_ctime.tv_sec != -1)
inode_set_ctime_to_ts(inode,
coda_to_timespec64(attr->va_ctime));
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index cb512b10473b..4e552ba7bd43 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -111,7 +111,7 @@ static inline void coda_dir_update_mtime(struct inode *dir)
/* optimistically we can also act as if our nose bleeds. The
* granularity of the mtime is coarse anyways so we might actually be
* right most of the time. Note: we only do this for directories. */
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
#endif
}
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 42346618b4ed..16acc58311ea 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -84,7 +84,7 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
coda_inode->i_size = file_inode(host_file)->i_size;
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
- coda_inode->i_mtime = inode_set_ctime_current(coda_inode);
+ inode_set_mtime_to_ts(coda_inode, inode_set_ctime_current(coda_inode));
inode_unlock(coda_inode);
file_end_write(host_file);
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index fbdcb3582926..dcc22f593e43 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -88,7 +88,7 @@ int configfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
{
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
@@ -96,8 +96,8 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
inode->i_mode = iattr->ia_mode;
inode->i_uid = iattr->ia_uid;
inode->i_gid = iattr->ia_gid;
- inode->i_atime = iattr->ia_atime;
- inode->i_mtime = iattr->ia_mtime;
+ inode_set_atime_to_ts(inode, iattr->ia_atime);
+ inode_set_mtime_to_ts(inode, iattr->ia_mtime);
inode_set_ctime_to_ts(inode, iattr->ia_ctime);
}
@@ -171,7 +171,7 @@ struct inode *configfs_create(struct dentry *dentry, umode_t mode)
return ERR_PTR(-ENOMEM);
p_inode = d_inode(dentry->d_parent);
- p_inode->i_mtime = inode_set_ctime_current(p_inode);
+ inode_set_mtime_to_ts(p_inode, inode_set_ctime_current(p_inode));
configfs_set_inode_lock_class(sd, inode);
return inode;
}
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 5ee7d7bbb361..60dbfa0f8805 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -133,8 +133,8 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
}
/* Struct copy intentional */
- inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
- zerotime);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, zerotime)));
/* inode->i_nlink is left 1 - arguably wrong for directories,
but it's the best we can do without reading the directory
contents. 1 yields the right result in GNU find, even
@@ -495,7 +495,7 @@ static void cramfs_kill_sb(struct super_block *sb)
sb->s_mtd = NULL;
} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
sync_blockdev(sb->s_bdev);
- blkdev_put(sb->s_bdev, sb);
+ bdev_release(sb->s_bdev_handle);
}
kfree(sbi);
}
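
The cramfs hunk exploits the fact that each timestamp setter returns the value it stored, so one nested expression copies the same time into ctime, atime and mtime. A minimal sketch of that chaining with a fixed zero timestamp:

#include <linux/fs.h>

static void example_set_all_times(struct inode *inode)
{
        struct timespec64 zerotime = { .tv_sec = 0, .tv_nsec = 0 };

        /* Each setter returns the value it stored, so the calls nest. */
        inode_set_mtime_to_ts(inode,
                inode_set_atime_to_ts(inode,
                        inode_set_ctime_to_ts(inode, zerotime)));
}
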
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 62e1a3dd8357..0ad8c30b8fa5 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -111,10 +111,14 @@ out:
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
- const unsigned int blockbits = inode->i_blkbits;
- const unsigned int blocksize = 1 << blockbits;
- const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
- const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ const unsigned int du_bits = ci->ci_data_unit_bits;
+ const unsigned int du_size = 1U << du_bits;
+ const unsigned int du_per_page_bits = PAGE_SHIFT - du_bits;
+ const unsigned int du_per_page = 1U << du_per_page_bits;
+ u64 du_index = (u64)lblk << (inode->i_blkbits - du_bits);
+ u64 du_remaining = (u64)len << (inode->i_blkbits - du_bits);
+ sector_t sector = pblk << (inode->i_blkbits - SECTOR_SHIFT);
struct page *pages[16]; /* write up to 16 pages at a time */
unsigned int nr_pages;
unsigned int i;
@@ -130,8 +134,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
len);
BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
- nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
- (len + blocks_per_page - 1) >> blocks_per_page_bits);
+ nr_pages = min_t(u64, ARRAY_SIZE(pages),
+ (du_remaining + du_per_page - 1) >> du_per_page_bits);
/*
* We need at least one page for ciphertext. Allocate the first one
@@ -154,21 +158,22 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);
do {
- bio->bi_iter.bi_sector = pblk << (blockbits - 9);
+ bio->bi_iter.bi_sector = sector;
i = 0;
offset = 0;
do {
- err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
- ZERO_PAGE(0), pages[i],
- blocksize, offset, GFP_NOFS);
+ err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, du_index,
+ ZERO_PAGE(0), pages[i],
+ du_size, offset,
+ GFP_NOFS);
if (err)
goto out;
- lblk++;
- pblk++;
- len--;
- offset += blocksize;
- if (offset == PAGE_SIZE || len == 0) {
+ du_index++;
+ sector += 1U << (du_bits - SECTOR_SHIFT);
+ du_remaining--;
+ offset += du_size;
+ if (offset == PAGE_SIZE || du_remaining == 0) {
ret = bio_add_page(bio, pages[i++], offset, 0);
if (WARN_ON_ONCE(ret != offset)) {
err = -EIO;
@@ -176,13 +181,13 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
}
offset = 0;
}
- } while (i != nr_pages && len != 0);
+ } while (i != nr_pages && du_remaining != 0);
err = submit_bio_wait(bio);
if (err)
goto out;
bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
- } while (len != 0);
+ } while (du_remaining != 0);
err = 0;
out:
bio_put(bio);
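
The rewritten fscrypt_zeroout_range() iterates in crypto data units rather than filesystem blocks, converting the logical block and physical block arguments up front. A self-contained userspace check of that shift arithmetic, assuming a 4096-byte block and a 1024-byte data unit:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
        const unsigned int blkbits = 12;   /* 4096-byte filesystem block */
        const unsigned int du_bits = 10;   /* 1024-byte crypto data unit */
        uint64_t lblk = 7, pblk = 1000, len_blocks = 3;

        /* Same shifts as in fscrypt_zeroout_range() above. */
        uint64_t du_index = lblk << (blkbits - du_bits);
        uint64_t du_remaining = len_blocks << (blkbits - du_bits);
        uint64_t sector = pblk << (blkbits - SECTOR_SHIFT);

        printf("first data unit index: %llu\n",
               (unsigned long long)du_index);        /* 28 */
        printf("data units to zero:    %llu\n",
               (unsigned long long)du_remaining);    /* 12 */
        printf("starting sector:       %llu\n",
               (unsigned long long)sector);          /* 8000 */
        return 0;
}
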
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 6a837e4b80dc..328470d40dec 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -39,7 +39,7 @@ static mempool_t *fscrypt_bounce_page_pool = NULL;
static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
-struct kmem_cache *fscrypt_info_cachep;
+struct kmem_cache *fscrypt_inode_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
@@ -49,6 +49,13 @@ EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
+ if (WARN_ON_ONCE(!fscrypt_bounce_page_pool)) {
+ /*
+ * Oops, the filesystem called a function that uses the bounce
+ * page pool, but it didn't set needs_bounce_pages.
+ */
+ return NULL;
+ }
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}
@@ -70,44 +77,44 @@ void fscrypt_free_bounce_page(struct page *bounce_page)
EXPORT_SYMBOL(fscrypt_free_bounce_page);
/*
- * Generate the IV for the given logical block number within the given file.
- * For filenames encryption, lblk_num == 0.
+ * Generate the IV for the given data unit index within the given file.
+ * For filenames encryption, index == 0.
*
* Keep this in sync with fscrypt_limit_io_blocks(). fscrypt_limit_io_blocks()
* needs to know about any IV generation methods where the low bits of IV don't
- * simply contain the lblk_num (e.g., IV_INO_LBLK_32).
+ * simply contain the data unit index (e.g., IV_INO_LBLK_32).
*/
-void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
- const struct fscrypt_info *ci)
+void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
+ const struct fscrypt_inode_info *ci)
{
u8 flags = fscrypt_policy_flags(&ci->ci_policy);
memset(iv, 0, ci->ci_mode->ivsize);
if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
- WARN_ON_ONCE(lblk_num > U32_MAX);
+ WARN_ON_ONCE(index > U32_MAX);
WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
- lblk_num |= (u64)ci->ci_inode->i_ino << 32;
+ index |= (u64)ci->ci_inode->i_ino << 32;
} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
- WARN_ON_ONCE(lblk_num > U32_MAX);
- lblk_num = (u32)(ci->ci_hashed_ino + lblk_num);
+ WARN_ON_ONCE(index > U32_MAX);
+ index = (u32)(ci->ci_hashed_ino + index);
} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
}
- iv->lblk_num = cpu_to_le64(lblk_num);
+ iv->index = cpu_to_le64(index);
}
-/* Encrypt or decrypt a single filesystem block of file contents */
-int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
- u64 lblk_num, struct page *src_page,
- struct page *dest_page, unsigned int len,
- unsigned int offs, gfp_t gfp_flags)
+/* Encrypt or decrypt a single "data unit" of file contents. */
+int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
+ fscrypt_direction_t rw, u64 index,
+ struct page *src_page, struct page *dest_page,
+ unsigned int len, unsigned int offs,
+ gfp_t gfp_flags)
{
union fscrypt_iv iv;
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
- struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
int res = 0;
@@ -116,7 +123,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
return -EINVAL;
- fscrypt_generate_iv(&iv, lblk_num, ci);
+ fscrypt_generate_iv(&iv, index, ci);
req = skcipher_request_alloc(tfm, gfp_flags);
if (!req)
@@ -137,28 +144,29 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res) {
- fscrypt_err(inode, "%scryption failed for block %llu: %d",
- (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
+ fscrypt_err(ci->ci_inode,
+ "%scryption failed for data unit %llu: %d",
+ (rw == FS_DECRYPT ? "De" : "En"), index, res);
return res;
}
return 0;
}
/**
- * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a
- * pagecache page
- * @page: The locked pagecache page containing the block(s) to encrypt
- * @len: Total size of the block(s) to encrypt. Must be a nonzero
- * multiple of the filesystem's block size.
- * @offs: Byte offset within @page of the first block to encrypt. Must be
- * a multiple of the filesystem's block size.
- * @gfp_flags: Memory allocation flags. See details below.
+ * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache page
+ * @page: the locked pagecache page containing the data to encrypt
+ * @len: size of the data to encrypt, in bytes
+ * @offs: offset within @page of the data to encrypt, in bytes
+ * @gfp_flags: memory allocation flags; see details below
+ *
+ * This allocates a new bounce page and encrypts the given data into it. The
+ * length and offset of the data must be aligned to the file's crypto data unit
+ * size. Alignment to the filesystem block size fulfills this requirement, as
+ * the filesystem block size is always a multiple of the data unit size.
*
- * A new bounce page is allocated, and the specified block(s) are encrypted into
- * it. In the bounce page, the ciphertext block(s) will be located at the same
- * offsets at which the plaintext block(s) were located in the source page; any
- * other parts of the bounce page will be left uninitialized. However, normally
- * blocksize == PAGE_SIZE and the whole page is encrypted at once.
+ * In the bounce page, the ciphertext data will be located at the same offset at
+ * which the plaintext data was located in the source page. Any other parts of
+ * the bounce page will be left uninitialized.
*
* This is for use by the filesystem's ->writepages() method.
*
@@ -176,28 +184,29 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
{
const struct inode *inode = page->mapping->host;
- const unsigned int blockbits = inode->i_blkbits;
- const unsigned int blocksize = 1 << blockbits;
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ const unsigned int du_bits = ci->ci_data_unit_bits;
+ const unsigned int du_size = 1U << du_bits;
struct page *ciphertext_page;
- u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
- (offs >> blockbits);
+ u64 index = ((u64)page->index << (PAGE_SHIFT - du_bits)) +
+ (offs >> du_bits);
unsigned int i;
int err;
if (WARN_ON_ONCE(!PageLocked(page)))
return ERR_PTR(-EINVAL);
- if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
+ if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
return ERR_PTR(-EINVAL);
ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
if (!ciphertext_page)
return ERR_PTR(-ENOMEM);
- for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
- err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
- page, ciphertext_page,
- blocksize, i, gfp_flags);
+ for (i = offs; i < offs + len; i += du_size, index++) {
+ err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
+ page, ciphertext_page,
+ du_size, i, gfp_flags);
if (err) {
fscrypt_free_bounce_page(ciphertext_page);
return ERR_PTR(err);
@@ -224,30 +233,33 @@ EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
+ * This is not compatible with fscrypt_operations::supports_subblock_data_units.
+ *
* Return: 0 on success; -errno on failure
*/
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num, gfp_t gfp_flags)
{
- return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
- len, offs, gfp_flags);
+ if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
+ return -EOPNOTSUPP;
+ return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_ENCRYPT,
+ lblk_num, page, page, len, offs,
+ gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
/**
- * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a
- * pagecache folio
- * @folio: The locked pagecache folio containing the block(s) to decrypt
- * @len: Total size of the block(s) to decrypt. Must be a nonzero
- * multiple of the filesystem's block size.
- * @offs: Byte offset within @folio of the first block to decrypt. Must be
- * a multiple of the filesystem's block size.
+ * fscrypt_decrypt_pagecache_blocks() - Decrypt data from a pagecache folio
+ * @folio: the pagecache folio containing the data to decrypt
+ * @len: size of the data to decrypt, in bytes
+ * @offs: offset within @folio of the data to decrypt, in bytes
*
- * The specified block(s) are decrypted in-place within the pagecache folio,
- * which must still be locked and not uptodate.
- *
- * This is for use by the filesystem's ->readahead() method.
+ * Decrypt data that has just been read from an encrypted file. The data must
+ * be located in a pagecache folio that is still locked and not yet uptodate.
+ * The length and offset of the data must be aligned to the file's crypto data
+ * unit size. Alignment to the filesystem block size fulfills this requirement,
+ * as the filesystem block size is always a multiple of the data unit size.
*
* Return: 0 on success; -errno on failure
*/
@@ -255,25 +267,26 @@ int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
size_t offs)
{
const struct inode *inode = folio->mapping->host;
- const unsigned int blockbits = inode->i_blkbits;
- const unsigned int blocksize = 1 << blockbits;
- u64 lblk_num = ((u64)folio->index << (PAGE_SHIFT - blockbits)) +
- (offs >> blockbits);
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ const unsigned int du_bits = ci->ci_data_unit_bits;
+ const unsigned int du_size = 1U << du_bits;
+ u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
+ (offs >> du_bits);
size_t i;
int err;
if (WARN_ON_ONCE(!folio_test_locked(folio)))
return -EINVAL;
- if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
+ if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
return -EINVAL;
- for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
+ for (i = offs; i < offs + len; i += du_size, index++) {
struct page *page = folio_page(folio, i >> PAGE_SHIFT);
- err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
- page, blocksize, i & ~PAGE_MASK,
- GFP_NOFS);
+ err = fscrypt_crypt_data_unit(ci, FS_DECRYPT, index, page,
+ page, du_size, i & ~PAGE_MASK,
+ GFP_NOFS);
if (err)
return err;
}
@@ -295,14 +308,19 @@ EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
+ * This is not compatible with fscrypt_operations::supports_subblock_data_units.
+ *
* Return: 0 on success; -errno on failure
*/
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
{
- return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
- len, offs, GFP_NOFS);
+ if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
+ return -EOPNOTSUPP;
+ return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_DECRYPT,
+ lblk_num, page, page, len, offs,
+ GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
@@ -325,7 +343,7 @@ int fscrypt_initialize(struct super_block *sb)
return 0;
/* No need to allocate a bounce page pool if this FS won't use it. */
- if (sb->s_cop->flags & FS_CFLG_OWN_PAGES)
+ if (!sb->s_cop->needs_bounce_pages)
return 0;
mutex_lock(&fscrypt_init_mutex);
@@ -391,18 +409,19 @@ static int __init fscrypt_init(void)
if (!fscrypt_read_workqueue)
goto fail;
- fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
- if (!fscrypt_info_cachep)
+ fscrypt_inode_info_cachep = KMEM_CACHE(fscrypt_inode_info,
+ SLAB_RECLAIM_ACCOUNT);
+ if (!fscrypt_inode_info_cachep)
goto fail_free_queue;
err = fscrypt_init_keyring();
if (err)
- goto fail_free_info;
+ goto fail_free_inode_info;
return 0;
-fail_free_info:
- kmem_cache_destroy(fscrypt_info_cachep);
+fail_free_inode_info:
+ kmem_cache_destroy(fscrypt_inode_info_cachep);
fail_free_queue:
destroy_workqueue(fscrypt_read_workqueue);
fail:
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 6eae3f12ad50..7b3fc189593a 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -100,7 +100,7 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
- const struct fscrypt_info *ci = inode->i_crypt_info;
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
union fscrypt_iv iv;
struct scatterlist sg;
@@ -157,7 +157,7 @@ static int fname_decrypt(const struct inode *inode,
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
- const struct fscrypt_info *ci = inode->i_crypt_info;
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
union fscrypt_iv iv;
int res;
@@ -568,7 +568,7 @@ EXPORT_SYMBOL_GPL(fscrypt_match_name);
*/
u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name)
{
- const struct fscrypt_info *ci = dir->i_crypt_info;
+ const struct fscrypt_inode_info *ci = dir->i_crypt_info;
WARN_ON_ONCE(!ci->ci_dirhash_key_initialized);
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 2d63da48635a..1892356cf924 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -47,7 +47,8 @@ struct fscrypt_context_v2 {
u8 contents_encryption_mode;
u8 filenames_encryption_mode;
u8 flags;
- u8 __reserved[4];
+ u8 log2_data_unit_size;
+ u8 __reserved[3];
u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
u8 nonce[FSCRYPT_FILE_NONCE_SIZE];
};
@@ -165,6 +166,26 @@ fscrypt_policy_flags(const union fscrypt_policy *policy)
BUG();
}
+static inline int
+fscrypt_policy_v2_du_bits(const struct fscrypt_policy_v2 *policy,
+ const struct inode *inode)
+{
+ return policy->log2_data_unit_size ?: inode->i_blkbits;
+}
+
+static inline int
+fscrypt_policy_du_bits(const union fscrypt_policy *policy,
+ const struct inode *inode)
+{
+ switch (policy->version) {
+ case FSCRYPT_POLICY_V1:
+ return inode->i_blkbits;
+ case FSCRYPT_POLICY_V2:
+ return fscrypt_policy_v2_du_bits(&policy->v2, inode);
+ }
+ BUG();
+}
+
/*
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
@@ -189,18 +210,18 @@ struct fscrypt_prepared_key {
};
/*
- * fscrypt_info - the "encryption key" for an inode
+ * fscrypt_inode_info - the "encryption key" for an inode
*
* When an encrypted file's key is made available, an instance of this struct is
* allocated and stored in ->i_crypt_info. Once created, it remains until the
* inode is evicted.
*/
-struct fscrypt_info {
+struct fscrypt_inode_info {
/* The key in a form prepared for actual encryption/decryption */
struct fscrypt_prepared_key ci_enc_key;
- /* True if ci_enc_key should be freed when this fscrypt_info is freed */
+ /* True if ci_enc_key should be freed when this struct is freed */
bool ci_owns_key;
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
@@ -212,6 +233,16 @@ struct fscrypt_info {
#endif
/*
+ * log2 of the data unit size (granularity of contents encryption) of
+ * this file. This is computable from ci_policy and ci_inode but is
+ * cached here for efficiency. Only used for regular files.
+ */
+ u8 ci_data_unit_bits;
+
+ /* Cached value: log2 of number of data units per FS block */
+ u8 ci_data_units_per_block_bits;
+
+ /*
* Encryption mode used for this inode. It corresponds to either the
* contents or filenames encryption mode, depending on the inode type.
*/
@@ -263,12 +294,13 @@ typedef enum {
} fscrypt_direction_t;
/* crypto.c */
-extern struct kmem_cache *fscrypt_info_cachep;
+extern struct kmem_cache *fscrypt_inode_info_cachep;
int fscrypt_initialize(struct super_block *sb);
-int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
- u64 lblk_num, struct page *src_page,
- struct page *dest_page, unsigned int len,
- unsigned int offs, gfp_t gfp_flags);
+int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
+ fscrypt_direction_t rw, u64 index,
+ struct page *src_page, struct page *dest_page,
+ unsigned int len, unsigned int offs,
+ gfp_t gfp_flags);
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
void __printf(3, 4) __cold
@@ -283,8 +315,8 @@ fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...);
union fscrypt_iv {
struct {
- /* logical block number within the file */
- __le64 lblk_num;
+ /* zero-based index of data unit within the file */
+ __le64 index;
/* per-file nonce; only set in DIRECT_KEY mode */
u8 nonce[FSCRYPT_FILE_NONCE_SIZE];
@@ -293,8 +325,18 @@ union fscrypt_iv {
__le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)];
};
-void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
- const struct fscrypt_info *ci);
+void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
+ const struct fscrypt_inode_info *ci);
+
+/*
+ * Return the number of bits used by the maximum file data unit index that is
+ * possible on the given filesystem, using the given log2 data unit size.
+ */
+static inline int
+fscrypt_max_file_dun_bits(const struct super_block *sb, int du_bits)
+{
+ return fls64(sb->s_maxbytes - 1) - du_bits;
+}
/* fname.c */
bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
@@ -332,17 +374,17 @@ void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf);
/* inline_crypt.c */
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci);
+int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci);
static inline bool
-fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
+fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci)
{
return ci->ci_inlinecrypt;
}
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
- const struct fscrypt_info *ci);
+ const struct fscrypt_inode_info *ci);
void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
struct fscrypt_prepared_key *prep_key);
@@ -353,7 +395,7 @@ void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
*/
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
/*
* The two smp_load_acquire()'s here pair with the smp_store_release()'s
@@ -370,13 +412,13 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
-static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+static inline int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci)
{
return 0;
}
static inline bool
-fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
+fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci)
{
return false;
}
@@ -384,7 +426,7 @@ fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
static inline int
fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
WARN_ON_ONCE(1);
return -EOPNOTSUPP;
@@ -398,7 +440,7 @@ fscrypt_destroy_inline_crypt_key(struct super_block *sb,
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
return smp_load_acquire(&prep_key->tfm) != NULL;
}
@@ -433,8 +475,28 @@ struct fscrypt_master_key_secret {
* fscrypt_master_key - an in-use master key
*
* This represents a master encryption key which has been added to the
- * filesystem and can be used to "unlock" the encrypted files which were
- * encrypted with it.
+ * filesystem. There are three high-level states that a key can be in:
+ *
+ * FSCRYPT_KEY_STATUS_PRESENT
+ * Key is fully usable; it can be used to unlock inodes that are encrypted
+ * with it (this includes being able to create new inodes). ->mk_present
+ * indicates whether the key is in this state. ->mk_secret exists, the key
+ * is in the keyring, and ->mk_active_refs > 0 due to ->mk_present.
+ *
+ * FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED
+ * Removal of this key has been initiated, but some inodes that were
+ * unlocked with it are still in-use. Like ABSENT, ->mk_secret is wiped,
+ * and the key can no longer be used to unlock inodes. Unlike ABSENT, the
+ * key is still in the keyring; ->mk_decrypted_inodes is nonempty; and
+ * ->mk_active_refs > 0, being equal to the size of ->mk_decrypted_inodes.
+ *
+ * This state transitions to ABSENT if ->mk_decrypted_inodes becomes empty,
+ * or to PRESENT if FS_IOC_ADD_ENCRYPTION_KEY is called again for this key.
+ *
+ * FSCRYPT_KEY_STATUS_ABSENT
+ * Key is fully removed. The key is no longer in the keyring,
+ * ->mk_decrypted_inodes is empty, ->mk_active_refs == 0, ->mk_secret is
+ * wiped, and the key can no longer be used to unlock inodes.
*/
struct fscrypt_master_key {
@@ -444,7 +506,7 @@ struct fscrypt_master_key {
*/
struct hlist_node mk_node;
- /* Semaphore that protects ->mk_secret and ->mk_users */
+ /* Semaphore that protects ->mk_secret, ->mk_users, and ->mk_present */
struct rw_semaphore mk_sem;
/*
@@ -454,8 +516,8 @@ struct fscrypt_master_key {
* ->mk_direct_keys) that have been prepared continue to exist.
* A structural ref only guarantees that the struct continues to exist.
*
- * There is one active ref associated with ->mk_secret being present,
- * and one active ref for each inode in ->mk_decrypted_inodes.
+ * There is one active ref associated with ->mk_present being true, and
+ * one active ref for each inode in ->mk_decrypted_inodes.
*
* There is one structural ref associated with the active refcount being
* nonzero. Finding a key in the keyring also takes a structural ref,
@@ -467,17 +529,10 @@ struct fscrypt_master_key {
struct rcu_head mk_rcu_head;
/*
- * The secret key material. After FS_IOC_REMOVE_ENCRYPTION_KEY is
- * executed, this is wiped and no new inodes can be unlocked with this
- * key; however, there may still be inodes in ->mk_decrypted_inodes
- * which could not be evicted. As long as some inodes still remain,
- * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or
- * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again.
+ * The secret key material. Wiped as soon as it is no longer needed;
+ * for details, see the fscrypt_master_key struct comment.
*
- * While ->mk_secret is present, one ref in ->mk_active_refs is held.
- *
- * Locking: protected by ->mk_sem. The manipulation of ->mk_active_refs
- * associated with this field is protected by ->mk_sem as well.
+ * Locking: protected by ->mk_sem.
*/
struct fscrypt_master_key_secret mk_secret;
@@ -500,7 +555,7 @@ struct fscrypt_master_key {
*
* Locking: protected by ->mk_sem. (We don't just rely on the keyrings
* subsystem semaphore ->mk_users->sem, as we need support for atomic
- * search+insert along with proper synchronization with ->mk_secret.)
+ * search+insert along with proper synchronization with other fields.)
*/
struct key *mk_users;
@@ -523,20 +578,17 @@ struct fscrypt_master_key {
siphash_key_t mk_ino_hash_key;
bool mk_ino_hash_key_initialized;
-} __randomize_layout;
-
-static inline bool
-is_master_key_secret_present(const struct fscrypt_master_key_secret *secret)
-{
/*
- * The READ_ONCE() is only necessary for fscrypt_drop_inode().
- * fscrypt_drop_inode() runs in atomic context, so it can't take the key
- * semaphore and thus 'secret' can change concurrently which would be a
- * data race. But fscrypt_drop_inode() only need to know whether the
- * secret *was* present at the time of check, so READ_ONCE() suffices.
+ * Whether this key is in the "present" state, i.e. fully usable. For
+ * details, see the fscrypt_master_key struct comment.
+ *
+ * Locking: protected by ->mk_sem, but can be read locklessly using
+ * READ_ONCE(). Writers must use WRITE_ONCE() when concurrent readers
+ * are possible.
*/
- return READ_ONCE(secret->size) != 0;
-}
+ bool mk_present;
+
+} __randomize_layout;
static inline const char *master_key_spec_type(
const struct fscrypt_key_specifier *spec)
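
The locking rule documented above for ->mk_present (writers hold ->mk_sem and use WRITE_ONCE(); a reader that cannot sleep reads the flag with READ_ONCE()) is the usual pattern for a flag with one lockless reader. A kernel-context sketch, with a hypothetical struct demo_key standing in for the real fscrypt_master_key:

    #include <linux/rwsem.h>
    #include <linux/compiler.h>
    #include <linux/types.h>

    struct demo_key {
            struct rw_semaphore sem;   /* plays the role of ->mk_sem */
            bool present;              /* plays the role of ->mk_present */
    };

    static void demo_mark_removed(struct demo_key *k)
    {
            down_write(&k->sem);
            WRITE_ONCE(k->present, false);  /* pairs with READ_ONCE() below */
            up_write(&k->sem);
    }

    /* Safe in atomic context, where k->sem cannot be taken (cf. fscrypt_drop_inode()). */
    static bool demo_key_is_present(const struct demo_key *k)
    {
            return READ_ONCE(k->present);
    }
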
@@ -598,17 +650,18 @@ struct fscrypt_mode {
extern struct fscrypt_mode fscrypt_modes[];
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key, const struct fscrypt_info *ci);
+ const u8 *raw_key, const struct fscrypt_inode_info *ci);
void fscrypt_destroy_prepared_key(struct super_block *sb,
struct fscrypt_prepared_key *prep_key);
-int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key);
+int fscrypt_set_per_file_enc_key(struct fscrypt_inode_info *ci,
+ const u8 *raw_key);
-int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
+int fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk);
-void fscrypt_hash_inode_number(struct fscrypt_info *ci,
+void fscrypt_hash_inode_number(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk);
int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported);
@@ -643,10 +696,11 @@ static inline int fscrypt_require_key(struct inode *inode)
void fscrypt_put_direct_key(struct fscrypt_direct_key *dk);
-int fscrypt_setup_v1_file_key(struct fscrypt_info *ci,
+int fscrypt_setup_v1_file_key(struct fscrypt_inode_info *ci,
const u8 *raw_master_key);
-int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci);
+int fscrypt_setup_v1_file_key_via_subscribed_keyrings(
+ struct fscrypt_inode_info *ci);
/* policy.c */
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 6238dbcadcad..52504dd478d3 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(__fscrypt_prepare_setattr);
int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags, unsigned int flags)
{
- struct fscrypt_info *ci;
+ struct fscrypt_inode_info *ci;
struct fscrypt_master_key *mk;
int err;
@@ -187,7 +187,7 @@ int fscrypt_prepare_setflags(struct inode *inode,
return -EINVAL;
mk = ci->ci_master_key;
down_read(&mk->mk_sem);
- if (is_master_key_secret_present(&mk->mk_secret))
+ if (mk->mk_present)
err = fscrypt_derive_dirhash_key(ci, mk);
else
err = -ENOKEY;
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index 8bfb3ce86476..b4002aea7cdb 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -39,11 +39,11 @@ static struct block_device **fscrypt_get_devices(struct super_block *sb,
return devs;
}
-static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
+static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_inode_info *ci)
{
- struct super_block *sb = ci->ci_inode->i_sb;
+ const struct super_block *sb = ci->ci_inode->i_sb;
unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
- int ino_bits = 64, lblk_bits = 64;
+ int dun_bits;
if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
return offsetofend(union fscrypt_iv, nonce);
@@ -54,10 +54,9 @@ static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
return sizeof(__le32);
- /* Default case: IVs are just the file logical block number */
- if (sb->s_cop->get_ino_and_lblk_bits)
- sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
- return DIV_ROUND_UP(lblk_bits, 8);
+ /* Default case: IVs are just the file data unit index */
+ dun_bits = fscrypt_max_file_dun_bits(sb, ci->ci_data_unit_bits);
+ return DIV_ROUND_UP(dun_bits, 8);
}
/*
@@ -90,7 +89,7 @@ static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
}
/* Enable inline encryption for this file if supported. */
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
@@ -129,7 +128,7 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
* crypto configuration that the file would use.
*/
crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
- crypto_cfg.data_unit_size = sb->s_blocksize;
+ crypto_cfg.data_unit_size = 1U << ci->ci_data_unit_bits;
crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
devs = fscrypt_get_devices(sb, &num_devs);
@@ -152,7 +151,7 @@ out_free_devs:
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
@@ -168,7 +167,8 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
return -ENOMEM;
err = blk_crypto_init_key(blk_key, raw_key, crypto_mode,
- fscrypt_get_dun_bytes(ci), sb->s_blocksize);
+ fscrypt_get_dun_bytes(ci),
+ 1U << ci->ci_data_unit_bits);
if (err) {
fscrypt_err(inode, "error %d initializing blk-crypto key", err);
goto fail;
@@ -232,13 +232,15 @@ bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);
-static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
+static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
+ u64 lblk_num,
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
+ u64 index = lblk_num << ci->ci_data_units_per_block_bits;
union fscrypt_iv iv;
int i;
- fscrypt_generate_iv(&iv, lblk_num, ci);
+ fscrypt_generate_iv(&iv, index, ci);
BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
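
With sub-block data units, the DUN for a block is no longer the logical block number itself: the hunk above shifts lblk_num left by ci_data_units_per_block_bits to get the index of the block's first data unit. A standalone arithmetic check (the 4096-byte block and 512-byte data unit sizes are assumed example values, not taken from the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int blkbits = 12;              /* 4096-byte filesystem block */
            unsigned int data_unit_bits = 9;        /* 512-byte crypto data unit */
            unsigned int du_per_block_bits = blkbits - data_unit_bits;      /* 3 */
            uint64_t lblk_num = 5;

            /* Block 5 covers data unit indices 40..47 when there are 8 per block. */
            uint64_t first_index = lblk_num << du_per_block_bits;

            printf("data units per block: %u\n", 1u << du_per_block_bits);  /* 8 */
            printf("first DUN index of block %llu: %llu\n",
                   (unsigned long long)lblk_num, (unsigned long long)first_index); /* 40 */
            return 0;
    }
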
@@ -265,7 +267,7 @@ static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
u64 first_lblk, gfp_t gfp_mask)
{
- const struct fscrypt_info *ci;
+ const struct fscrypt_inode_info *ci;
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
if (!fscrypt_inode_uses_inline_crypto(inode))
@@ -456,7 +458,7 @@ EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
*/
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
- const struct fscrypt_info *ci;
+ const struct fscrypt_inode_info *ci;
u32 dun;
if (!fscrypt_inode_uses_inline_crypto(inode))
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 7cbb1fd872ac..f34a9b0b9e92 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -99,10 +99,10 @@ void fscrypt_put_master_key_activeref(struct super_block *sb,
spin_unlock(&sb->s_master_keys->lock);
/*
- * ->mk_active_refs == 0 implies that ->mk_secret is not present and
- * that ->mk_decrypted_inodes is empty.
+ * ->mk_active_refs == 0 implies that ->mk_present is false and
+ * ->mk_decrypted_inodes is empty.
*/
- WARN_ON_ONCE(is_master_key_secret_present(&mk->mk_secret));
+ WARN_ON_ONCE(mk->mk_present);
WARN_ON_ONCE(!list_empty(&mk->mk_decrypted_inodes));
for (i = 0; i <= FSCRYPT_MODE_MAX; i++) {
@@ -121,6 +121,18 @@ void fscrypt_put_master_key_activeref(struct super_block *sb,
fscrypt_put_master_key(mk);
}
+/*
+ * This transitions the key state from present to incompletely removed, and then
+ * potentially to absent (depending on whether inodes remain).
+ */
+static void fscrypt_initiate_key_removal(struct super_block *sb,
+ struct fscrypt_master_key *mk)
+{
+ WRITE_ONCE(mk->mk_present, false);
+ wipe_master_key_secret(&mk->mk_secret);
+ fscrypt_put_master_key_activeref(sb, mk);
+}
+
static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec)
{
if (spec->__reserved)
@@ -234,14 +246,13 @@ void fscrypt_destroy_keyring(struct super_block *sb)
* evicted, every key remaining in the keyring should
* have an empty inode list, and should only still be in
* the keyring due to the single active ref associated
- * with ->mk_secret. There should be no structural refs
- * beyond the one associated with the active ref.
+ * with ->mk_present. There should be no structural
+ * refs beyond the one associated with the active ref.
*/
WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 1);
WARN_ON_ONCE(refcount_read(&mk->mk_struct_refs) != 1);
- WARN_ON_ONCE(!is_master_key_secret_present(&mk->mk_secret));
- wipe_master_key_secret(&mk->mk_secret);
- fscrypt_put_master_key_activeref(sb, mk);
+ WARN_ON_ONCE(!mk->mk_present);
+ fscrypt_initiate_key_removal(sb, mk);
}
}
kfree_sensitive(keyring);
@@ -439,7 +450,8 @@ static int add_new_master_key(struct super_block *sb,
}
move_master_key_secret(&mk->mk_secret, secret);
- refcount_set(&mk->mk_active_refs, 1); /* ->mk_secret is present */
+ mk->mk_present = true;
+ refcount_set(&mk->mk_active_refs, 1); /* ->mk_present is true */
spin_lock(&keyring->lock);
hlist_add_head_rcu(&mk->mk_node,
@@ -478,11 +490,18 @@ static int add_existing_master_key(struct fscrypt_master_key *mk,
return err;
}
- /* Re-add the secret if needed. */
- if (!is_master_key_secret_present(&mk->mk_secret)) {
- if (!refcount_inc_not_zero(&mk->mk_active_refs))
+ /* If the key is incompletely removed, make it present again. */
+ if (!mk->mk_present) {
+ if (!refcount_inc_not_zero(&mk->mk_active_refs)) {
+ /*
+ * Raced with the last active ref being dropped, so the
+ * key has become, or is about to become, "absent".
+ * Therefore, we need to allocate a new key struct.
+ */
return KEY_DEAD;
+ }
move_master_key_secret(&mk->mk_secret, secret);
+ WRITE_ONCE(mk->mk_present, true);
}
return 0;
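
The KEY_DEAD path above relies on refcount_inc_not_zero(): reviving an incompletely removed key is only allowed if its active refcount has not already dropped to zero; otherwise the caller must allocate a fresh key struct. A kernel-context sketch of that pattern, using a hypothetical struct demo_obj:

    #include <linux/refcount.h>
    #include <linux/types.h>

    struct demo_obj {
            refcount_t active_refs;
    };

    /* Returns true if a new active reference was taken; false if the object is dying. */
    static bool demo_try_revive(struct demo_obj *obj)
    {
            if (!refcount_inc_not_zero(&obj->active_refs))
                    return false;   /* raced with teardown; caller must rebuild */
            /* ... safe to make the object usable again ... */
            return true;
    }
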
@@ -506,8 +525,8 @@ static int do_add_master_key(struct super_block *sb,
err = add_new_master_key(sb, secret, mk_spec);
} else {
/*
- * Found the key in ->s_master_keys. Re-add the secret if
- * needed, and add the user to ->mk_users if needed.
+ * Found the key in ->s_master_keys. Add the user to ->mk_users
+ * if needed, and make the key "present" again if possible.
*/
down_write(&mk->mk_sem);
err = add_existing_master_key(mk, secret);
@@ -867,7 +886,7 @@ static void shrink_dcache_inode(struct inode *inode)
static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk)
{
- struct fscrypt_info *ci;
+ struct fscrypt_inode_info *ci;
struct inode *inode;
struct inode *toput_inode = NULL;
@@ -917,7 +936,7 @@ static int check_for_busy_inodes(struct super_block *sb,
/* select an example file to show for debugging purposes */
struct inode *inode =
list_first_entry(&mk->mk_decrypted_inodes,
- struct fscrypt_info,
+ struct fscrypt_inode_info,
ci_master_key_link)->ci_inode;
ino = inode->i_ino;
}
@@ -989,9 +1008,8 @@ static int try_to_lock_encrypted_files(struct super_block *sb,
*
* If all inodes were evicted, then we unlink the fscrypt_master_key from the
* keyring. Otherwise it remains in the keyring in the "incompletely removed"
- * state (without the actual secret key) where it tracks the list of remaining
- * inodes. Userspace can execute the ioctl again later to retry eviction, or
- * alternatively can re-add the secret key again.
+ * state where it tracks the list of remaining inodes. Userspace can execute
+ * the ioctl again later to retry eviction, or alternatively can re-add the key.
*
* For more details, see the "Removing keys" section of
* Documentation/filesystems/fscrypt.rst.
@@ -1053,11 +1071,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
}
}
- /* No user claims remaining. Go ahead and wipe the secret. */
+ /* No user claims remaining. Initiate removal of the key. */
err = -ENOKEY;
- if (is_master_key_secret_present(&mk->mk_secret)) {
- wipe_master_key_secret(&mk->mk_secret);
- fscrypt_put_master_key_activeref(sb, mk);
+ if (mk->mk_present) {
+ fscrypt_initiate_key_removal(sb, mk);
err = 0;
}
inodes_remain = refcount_read(&mk->mk_active_refs) > 0;
@@ -1074,9 +1091,9 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
}
/*
* We return 0 if we successfully did something: removed a claim to the
- * key, wiped the secret, or tried locking the files again. Users need
- * to check the informational status flags if they care whether the key
- * has been fully removed including all files locked.
+ * key, initiated removal of the key, or tried locking the files again.
+ * Users need to check the informational status flags if they care
+ * whether the key has been fully removed including all files locked.
*/
out_put_key:
fscrypt_put_master_key(mk);
@@ -1103,12 +1120,11 @@ EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key_all_users);
* Retrieve the status of an fscrypt master encryption key.
*
* We set ->status to indicate whether the key is absent, present, or
- * incompletely removed. "Incompletely removed" means that the master key
- * secret has been removed, but some files which had been unlocked with it are
- * still in use. This field allows applications to easily determine the state
- * of an encrypted directory without using a hack such as trying to open a
- * regular file in it (which can confuse the "incompletely removed" state with
- * absent or present).
+ * incompletely removed. (For an explanation of what these statuses mean and
+ * how they are represented internally, see struct fscrypt_master_key.) This
+ * field allows applications to easily determine the status of an encrypted
+ * directory without using a hack such as trying to open a regular file in it
+ * (which can confuse the "incompletely removed" status with absent or present).
*
* In addition, for v2 policy keys we allow applications to determine, via
* ->status_flags and ->user_count, whether the key has been added by the
@@ -1150,7 +1166,7 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
}
down_read(&mk->mk_sem);
- if (!is_master_key_secret_present(&mk->mk_secret)) {
+ if (!mk->mk_present) {
arg.status = refcount_read(&mk->mk_active_refs) > 0 ?
FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED :
FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */;
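
From userspace, the status computed in the hunk above is read with FS_IOC_GET_ENCRYPTION_KEY_STATUS. A minimal sketch assuming a v2 policy key whose 16-byte identifier is already known; the helper name is illustrative and error handling is reduced to perror():

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fscrypt.h>

    static void demo_print_key_status(const char *mnt_path, const __u8 identifier[16])
    {
            struct fscrypt_get_key_status_arg arg;
            int fd = open(mnt_path, O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return;
            }
            memset(&arg, 0, sizeof(arg));
            arg.key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
            memcpy(arg.key_spec.u.identifier, identifier, 16);

            if (ioctl(fd, FS_IOC_GET_ENCRYPTION_KEY_STATUS, &arg) == 0) {
                    if (arg.status == FSCRYPT_KEY_STATUS_PRESENT)
                            printf("present (user_count=%u)\n", arg.user_count);
                    else if (arg.status == FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED)
                            printf("incompletely removed\n");
                    else
                            printf("absent\n");
            } else {
                    perror("FS_IOC_GET_ENCRYPTION_KEY_STATUS");
            }
            close(fd);
    }
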
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index 361f41ef46c7..d71f7c799e79 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -148,7 +148,7 @@ err_free_tfm:
* and IV generation method (@ci->ci_policy.flags).
*/
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key, const struct fscrypt_info *ci)
+ const u8 *raw_key, const struct fscrypt_inode_info *ci)
{
struct crypto_skcipher *tfm;
@@ -178,13 +178,14 @@ void fscrypt_destroy_prepared_key(struct super_block *sb,
}
/* Given a per-file encryption key, set up the file's crypto transform object */
-int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key)
+int fscrypt_set_per_file_enc_key(struct fscrypt_inode_info *ci,
+ const u8 *raw_key)
{
ci->ci_owns_key = true;
return fscrypt_prepare_key(&ci->ci_enc_key, raw_key, ci);
}
-static int setup_per_mode_enc_key(struct fscrypt_info *ci,
+static int setup_per_mode_enc_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk,
struct fscrypt_prepared_key *keys,
u8 hkdf_context, bool include_fs_uuid)
@@ -265,7 +266,7 @@ static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
return 0;
}
-int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
+int fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk)
{
int err;
@@ -279,7 +280,7 @@ int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
return 0;
}
-void fscrypt_hash_inode_number(struct fscrypt_info *ci,
+void fscrypt_hash_inode_number(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk)
{
WARN_ON_ONCE(ci->ci_inode->i_ino == 0);
@@ -289,7 +290,7 @@ void fscrypt_hash_inode_number(struct fscrypt_info *ci,
&mk->mk_ino_hash_key);
}
-static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci,
+static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk)
{
int err;
@@ -329,7 +330,7 @@ unlock:
return 0;
}
-static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
+static int fscrypt_setup_v2_file_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk,
bool need_dirhash_key)
{
@@ -404,7 +405,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
* still allow 512-bit master keys if the user chooses to use them, though.)
*/
static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
unsigned int min_keysize;
@@ -430,11 +431,12 @@ static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk,
*
* If the master key is found in the filesystem-level keyring, then it is
* returned in *mk_ret with its semaphore read-locked. This is needed to ensure
- * that only one task links the fscrypt_info into ->mk_decrypted_inodes (as
- * multiple tasks may race to create an fscrypt_info for the same inode), and to
- * synchronize the master key being removed with a new inode starting to use it.
+ * that only one task links the fscrypt_inode_info into ->mk_decrypted_inodes
+ * (as multiple tasks may race to create an fscrypt_inode_info for the same
+ * inode), and to synchronize the master key being removed with a new inode
+ * starting to use it.
*/
-static int setup_file_encryption_key(struct fscrypt_info *ci,
+static int setup_file_encryption_key(struct fscrypt_inode_info *ci,
bool need_dirhash_key,
struct fscrypt_master_key **mk_ret)
{
@@ -484,8 +486,8 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
}
down_read(&mk->mk_sem);
- /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */
- if (!is_master_key_secret_present(&mk->mk_secret)) {
+ if (!mk->mk_present) {
+ /* FS_IOC_REMOVE_ENCRYPTION_KEY has been executed on this key */
err = -ENOKEY;
goto out_release_key;
}
@@ -519,7 +521,7 @@ out_release_key:
return err;
}
-static void put_crypt_info(struct fscrypt_info *ci)
+static void put_crypt_info(struct fscrypt_inode_info *ci)
{
struct fscrypt_master_key *mk;
@@ -537,8 +539,8 @@ static void put_crypt_info(struct fscrypt_info *ci)
/*
* Remove this inode from the list of inodes that were unlocked
* with the master key. In addition, if we're removing the last
- * inode from a master key struct that already had its secret
- * removed, then complete the full removal of the struct.
+ * inode from an incompletely removed key, then complete the
+ * full removal of the key.
*/
spin_lock(&mk->mk_decrypted_inodes_lock);
list_del(&ci->ci_master_key_link);
@@ -546,7 +548,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
fscrypt_put_master_key_activeref(ci->ci_inode->i_sb, mk);
}
memzero_explicit(ci, sizeof(*ci));
- kmem_cache_free(fscrypt_info_cachep, ci);
+ kmem_cache_free(fscrypt_inode_info_cachep, ci);
}
static int
@@ -555,7 +557,7 @@ fscrypt_setup_encryption_info(struct inode *inode,
const u8 nonce[FSCRYPT_FILE_NONCE_SIZE],
bool need_dirhash_key)
{
- struct fscrypt_info *crypt_info;
+ struct fscrypt_inode_info *crypt_info;
struct fscrypt_mode *mode;
struct fscrypt_master_key *mk = NULL;
int res;
@@ -564,7 +566,7 @@ fscrypt_setup_encryption_info(struct inode *inode,
if (res)
return res;
- crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_KERNEL);
+ crypt_info = kmem_cache_zalloc(fscrypt_inode_info_cachep, GFP_KERNEL);
if (!crypt_info)
return -ENOMEM;
@@ -580,6 +582,11 @@ fscrypt_setup_encryption_info(struct inode *inode,
WARN_ON_ONCE(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
crypt_info->ci_mode = mode;
+ crypt_info->ci_data_unit_bits =
+ fscrypt_policy_du_bits(&crypt_info->ci_policy, inode);
+ crypt_info->ci_data_units_per_block_bits =
+ inode->i_blkbits - crypt_info->ci_data_unit_bits;
+
res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk);
if (res)
goto out;
@@ -587,8 +594,8 @@ fscrypt_setup_encryption_info(struct inode *inode,
/*
* For existing inodes, multiple tasks may race to set ->i_crypt_info.
* So use cmpxchg_release(). This pairs with the smp_load_acquire() in
- * fscrypt_get_info(). I.e., here we publish ->i_crypt_info with a
- * RELEASE barrier so that other tasks can ACQUIRE it.
+ * fscrypt_get_inode_info(). I.e., here we publish ->i_crypt_info with
+ * a RELEASE barrier so that other tasks can ACQUIRE it.
*/
if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) {
/*
@@ -735,8 +742,8 @@ EXPORT_SYMBOL_GPL(fscrypt_prepare_new_inode);
* fscrypt_put_encryption_info() - free most of an inode's fscrypt data
* @inode: an inode being evicted
*
- * Free the inode's fscrypt_info. Filesystems must call this when the inode is
- * being evicted. An RCU grace period need not have elapsed yet.
+ * Free the inode's fscrypt_inode_info. Filesystems must call this when the
+ * inode is being evicted. An RCU grace period need not have elapsed yet.
*/
void fscrypt_put_encryption_info(struct inode *inode)
{
@@ -773,7 +780,7 @@ EXPORT_SYMBOL(fscrypt_free_inode);
*/
int fscrypt_drop_inode(struct inode *inode)
{
- const struct fscrypt_info *ci = fscrypt_get_info(inode);
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info(inode);
/*
* If ci is NULL, then the inode doesn't have an encryption key set up
@@ -794,13 +801,14 @@ int fscrypt_drop_inode(struct inode *inode)
return 0;
/*
- * Note: since we aren't holding the key semaphore, the result here can
+ * We can't take ->mk_sem here, since this runs in atomic context.
+ * Therefore, ->mk_present can change concurrently, and our result may
* immediately become outdated. But there's no correctness problem with
* unnecessarily evicting. Nor is there a correctness problem with not
* evicting while iput() is racing with the key being removed, since
* then the thread removing the key will either evict the inode itself
* or will correctly detect that it wasn't evicted due to the race.
*/
- return !is_master_key_secret_present(&ci->ci_master_key->mk_secret);
+ return !READ_ONCE(ci->ci_master_key->mk_present);
}
EXPORT_SYMBOL_GPL(fscrypt_drop_inode);
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index 75dabd9b27f9..a10710bc8123 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -178,7 +178,8 @@ void fscrypt_put_direct_key(struct fscrypt_direct_key *dk)
*/
static struct fscrypt_direct_key *
find_or_insert_direct_key(struct fscrypt_direct_key *to_insert,
- const u8 *raw_key, const struct fscrypt_info *ci)
+ const u8 *raw_key,
+ const struct fscrypt_inode_info *ci)
{
unsigned long hash_key;
struct fscrypt_direct_key *dk;
@@ -218,7 +219,7 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert,
/* Prepare to encrypt directly using the master key in the given mode */
static struct fscrypt_direct_key *
-fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key)
+fscrypt_get_direct_key(const struct fscrypt_inode_info *ci, const u8 *raw_key)
{
struct fscrypt_direct_key *dk;
int err;
@@ -250,7 +251,7 @@ err_free_dk:
}
/* v1 policy, DIRECT_KEY: use the master key directly */
-static int setup_v1_file_key_direct(struct fscrypt_info *ci,
+static int setup_v1_file_key_direct(struct fscrypt_inode_info *ci,
const u8 *raw_master_key)
{
struct fscrypt_direct_key *dk;
@@ -264,7 +265,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci,
}
/* v1 policy, !DIRECT_KEY: derive the file's encryption key */
-static int setup_v1_file_key_derived(struct fscrypt_info *ci,
+static int setup_v1_file_key_derived(struct fscrypt_inode_info *ci,
const u8 *raw_master_key)
{
u8 *derived_key;
@@ -289,7 +290,8 @@ out:
return err;
}
-int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key)
+int fscrypt_setup_v1_file_key(struct fscrypt_inode_info *ci,
+ const u8 *raw_master_key)
{
if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
return setup_v1_file_key_direct(ci, raw_master_key);
@@ -297,8 +299,10 @@ int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key)
return setup_v1_file_key_derived(ci, raw_master_key);
}
-int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci)
+int
+fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_inode_info *ci)
{
+ const struct super_block *sb = ci->ci_inode->i_sb;
struct key *key;
const struct fscrypt_key *payload;
int err;
@@ -306,8 +310,8 @@ int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci)
key = find_and_lock_process_key(FSCRYPT_KEY_DESC_PREFIX,
ci->ci_policy.v1.master_key_descriptor,
ci->ci_mode->keysize, &payload);
- if (key == ERR_PTR(-ENOKEY) && ci->ci_inode->i_sb->s_cop->key_prefix) {
- key = find_and_lock_process_key(ci->ci_inode->i_sb->s_cop->key_prefix,
+ if (key == ERR_PTR(-ENOKEY) && sb->s_cop->legacy_key_prefix) {
+ key = find_and_lock_process_key(sb->s_cop->legacy_key_prefix,
ci->ci_policy.v1.master_key_descriptor,
ci->ci_mode->keysize, &payload);
}
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index f4456ecb3f87..701259991277 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -118,12 +118,11 @@ static bool supported_direct_key_modes(const struct inode *inode,
}
static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy,
- const struct inode *inode,
- const char *type,
- int max_ino_bits, int max_lblk_bits)
+ const struct inode *inode)
{
+ const char *type = (policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
+ ? "IV_INO_LBLK_64" : "IV_INO_LBLK_32";
struct super_block *sb = inode->i_sb;
- int ino_bits = 64, lblk_bits = 64;
/*
* IV_INO_LBLK_* exist only because of hardware limitations, and
@@ -150,17 +149,29 @@ static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy,
type, sb->s_id);
return false;
}
- if (sb->s_cop->get_ino_and_lblk_bits)
- sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
- if (ino_bits > max_ino_bits) {
+
+ /*
+ * IV_INO_LBLK_64 and IV_INO_LBLK_32 both require that inode numbers fit
+ * in 32 bits. In principle, IV_INO_LBLK_32 could support longer inode
+ * numbers because it hashes the inode number; however, currently the
+ * inode number is gotten from inode::i_ino which is 'unsigned long'.
+ * So for now the implementation limit is 32 bits.
+ */
+ if (!sb->s_cop->has_32bit_inodes) {
fscrypt_warn(inode,
"Can't use %s policy on filesystem '%s' because its inode numbers are too long",
type, sb->s_id);
return false;
}
- if (lblk_bits > max_lblk_bits) {
+
+ /*
+ * IV_INO_LBLK_64 and IV_INO_LBLK_32 both require that file data unit
+ * indices fit in 32 bits.
+ */
+ if (fscrypt_max_file_dun_bits(sb,
+ fscrypt_policy_v2_du_bits(policy, inode)) > 32) {
fscrypt_warn(inode,
- "Can't use %s policy on filesystem '%s' because its block numbers are too long",
+ "Can't use %s policy on filesystem '%s' because its maximum file size is too large",
type, sb->s_id);
return false;
}
@@ -233,25 +244,39 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
return false;
}
+ if (policy->log2_data_unit_size) {
+ if (!inode->i_sb->s_cop->supports_subblock_data_units) {
+ fscrypt_warn(inode,
+ "Filesystem does not support configuring crypto data unit size");
+ return false;
+ }
+ if (policy->log2_data_unit_size > inode->i_blkbits ||
+ policy->log2_data_unit_size < SECTOR_SHIFT /* 9 */) {
+ fscrypt_warn(inode,
+ "Unsupported log2_data_unit_size in encryption policy: %d",
+ policy->log2_data_unit_size);
+ return false;
+ }
+ if (policy->log2_data_unit_size != inode->i_blkbits &&
+ (policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) {
+ /*
+ * Not safe to enable yet, as we need to ensure that DUN
+ * wraparound can only occur on a FS block boundary.
+ */
+ fscrypt_warn(inode,
+ "Sub-block data units not yet supported with IV_INO_LBLK_32");
+ return false;
+ }
+ }
+
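
The range check added above accepts a nonzero log2_data_unit_size only between SECTOR_SHIFT (9) and the inode's block size bits; zero keeps the data unit equal to the filesystem block size. A standalone illustration of the accepted range (the 4096-byte block size is an assumed example):

    #include <stdio.h>

    #define DEMO_SECTOR_SHIFT 9

    static int demo_du_size_ok(int log2_du_size, int i_blkbits)
    {
            if (log2_du_size == 0)
                    return 1;       /* 0 means "data unit size == filesystem block size" */
            return log2_du_size >= DEMO_SECTOR_SHIFT && log2_du_size <= i_blkbits;
    }

    int main(void)
    {
            int i_blkbits = 12;     /* 4096-byte blocks */

            for (int bits = 0; bits <= 13; bits++)
                    printf("log2_data_unit_size=%2d -> %s\n", bits,
                           demo_du_size_ok(bits, i_blkbits) ? "accepted" : "rejected");
            return 0;       /* accepts 0 and 9..12, i.e. 512 to 4096 bytes */
    }
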
if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) &&
!supported_direct_key_modes(inode, policy->contents_encryption_mode,
policy->filenames_encryption_mode))
return false;
- if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
- !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_64",
- 32, 32))
- return false;
-
- /*
- * IV_INO_LBLK_32 hashes the inode number, so in principle it can
- * support any ino_bits. However, currently the inode number is gotten
- * from inode::i_ino which is 'unsigned long'. So for now the
- * implementation limit is 32 bits.
- */
- if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
- !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32",
- 32, 32))
+ if ((policy->flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 |
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) &&
+ !supported_iv_ino_lblk_policy(policy, inode))
return false;
if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
@@ -330,6 +355,7 @@ static int fscrypt_new_context(union fscrypt_context *ctx_u,
ctx->filenames_encryption_mode =
policy->filenames_encryption_mode;
ctx->flags = policy->flags;
+ ctx->log2_data_unit_size = policy->log2_data_unit_size;
memcpy(ctx->master_key_identifier,
policy->master_key_identifier,
sizeof(ctx->master_key_identifier));
@@ -390,6 +416,7 @@ int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
policy->filenames_encryption_mode =
ctx->filenames_encryption_mode;
policy->flags = ctx->flags;
+ policy->log2_data_unit_size = ctx->log2_data_unit_size;
memcpy(policy->__reserved, ctx->__reserved,
sizeof(policy->__reserved));
memcpy(policy->master_key_identifier,
@@ -405,11 +432,11 @@ int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
/* Retrieve an inode's encryption policy */
static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy)
{
- const struct fscrypt_info *ci;
+ const struct fscrypt_inode_info *ci;
union fscrypt_context ctx;
int ret;
- ci = fscrypt_get_info(inode);
+ ci = fscrypt_get_inode_info(inode);
if (ci) {
/* key available, use the cached policy */
*policy = ci->ci_policy;
@@ -647,7 +674,7 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
/*
* Both parent and child are encrypted, so verify they use the same
- * encryption policy. Compare the fscrypt_info structs if the keys are
+ * encryption policy. Compare the cached policies if the keys are
* available, otherwise retrieve and compare the fscrypt_contexts.
*
* Note that the fscrypt_context retrieval will be required frequently
@@ -717,7 +744,7 @@ const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir)
*/
int fscrypt_context_for_new_inode(void *ctx, struct inode *inode)
{
- struct fscrypt_info *ci = inode->i_crypt_info;
+ struct fscrypt_inode_info *ci = inode->i_crypt_info;
BUILD_BUG_ON(sizeof(union fscrypt_context) !=
FSCRYPT_SET_CONTEXT_MAX_SIZE);
@@ -742,7 +769,7 @@ EXPORT_SYMBOL_GPL(fscrypt_context_for_new_inode);
*/
int fscrypt_set_context(struct inode *inode, void *fs_data)
{
- struct fscrypt_info *ci = inode->i_crypt_info;
+ struct fscrypt_inode_info *ci = inode->i_crypt_info;
union fscrypt_context ctx;
int ctxsize;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 83e57e9f9fa0..5d41765e0c77 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -72,7 +72,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
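
This and the following filesystem hunks are the same mechanical conversion: direct stores to inode->i_atime/i_mtime are replaced with the timestamp accessors, and the common "set atime, mtime, and ctime to now" case becomes simple_inode_init_ts(). A kernel-context sketch of the resulting idioms (the demo_touch_inode() caller is hypothetical):

    #include <linux/fs.h>

    static void demo_touch_inode(struct inode *inode, bool newly_created)
    {
            struct timespec64 atime;

            if (newly_created) {
                    /* atime, mtime, and ctime all set to the current time */
                    simple_inode_init_ts(inode);
                    return;
            }

            /* replaces "inode->i_mtime = inode_set_ctime_current(inode)" */
            inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));

            /* timestamps are read back through accessors as well */
            atime = inode_get_atime(inode);
            inode_set_atime_to_ts(inode, atime);
    }
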
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 299c295a27a0..c830261aa883 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -338,7 +338,7 @@ static int mknod_ptmx(struct super_block *sb)
}
inode->i_ino = 2;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
mode = S_IFCHR|opts->ptmxmode;
init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
@@ -451,7 +451,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
if (!inode)
goto fail;
inode->i_ino = 1;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
@@ -560,7 +560,7 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
inode->i_ino = index + 3;
inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
sprintf(s, "%d", index);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index f2ed0c0266cb..c586c5db18b5 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -702,6 +702,6 @@ int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
loff_t offset);
-extern const struct xattr_handler *ecryptfs_xattr_handlers[];
+extern const struct xattr_handler * const ecryptfs_xattr_handlers[];
#endif /* #ifndef ECRYPTFS_KERNEL_H */
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 992d9c7e64ae..a25dd3d20008 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -1210,7 +1210,7 @@ static const struct xattr_handler ecryptfs_xattr_handler = {
.set = ecryptfs_xattr_set,
};
-const struct xattr_handler *ecryptfs_xattr_handlers[] = {
+const struct xattr_handler * const ecryptfs_xattr_handlers[] = {
&ecryptfs_xattr_handler,
NULL
};
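
The ecryptfs, erofs, and ext2 xattr hunks in this series all add the same second const: with an array of const pointers, the table itself can be placed in read-only data rather than merely pointing at const handlers. A standalone illustration (the struct and names are made up for the example):

    #include <stdio.h>

    struct demo_handler { const char *prefix; };

    static const struct demo_handler user_handler = { .prefix = "user." };

    /* Array of const pointers to const handlers: neither slot nor target is writable. */
    static const struct demo_handler *const handlers[] = {
            &user_handler,
            NULL,
    };

    int main(void)
    {
            /* handlers[0] = NULL; */       /* would not compile: the slots are const */
            printf("%s\n", handlers[0]->prefix);
            return 0;
    }
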
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 59b52718a3a2..7e9961639802 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -51,7 +51,7 @@ static ssize_t efivarfs_file_write(struct file *file,
} else {
inode_lock(inode);
i_size_write(inode, datasize + sizeof(attributes));
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
inode_unlock(inode);
}
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index db9231f0e77b..76dd3c7295d9 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -25,7 +25,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
switch (mode & S_IFMT) {
case S_IFREG:
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 3789d22ba501..7844ab24b813 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -103,10 +103,9 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
i_uid_write(inode, (uid_t)be16_to_cpu(efs_inode->di_uid));
i_gid_write(inode, (gid_t)be16_to_cpu(efs_inode->di_gid));
inode->i_size = be32_to_cpu(efs_inode->di_size);
- inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime);
- inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime);
+ inode_set_atime(inode, be32_to_cpu(efs_inode->di_atime), 0);
+ inode_set_mtime(inode, be32_to_cpu(efs_inode->di_mtime), 0);
inode_set_ctime(inode, be32_to_cpu(efs_inode->di_ctime), 0);
- inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
/* this is the number of blocks in the file */
if (inode->i_size == 0) {
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 0c2c99c58b5e..f6a0a1748521 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -222,7 +222,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
up_read(&devs->rwsem);
return 0;
}
- map->m_bdev = dif->bdev;
+ map->m_bdev = dif->bdev_handle->bdev;
map->m_daxdev = dif->dax_dev;
map->m_dax_part_off = dif->dax_part_off;
map->m_fscache = dif->fscache;
@@ -240,7 +240,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
if (map->m_pa >= startoff &&
map->m_pa < startoff + length) {
map->m_pa -= startoff;
- map->m_bdev = dif->bdev;
+ map->m_bdev = dif->bdev_handle->bdev;
map->m_daxdev = dif->dax_dev;
map->m_dax_part_off = dif->dax_part_off;
map->m_fscache = dif->fscache;
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index edc8ec7581b8..b8ad05b4509d 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -175,7 +175,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
vi->chunkbits = sb->s_blocksize_bits +
(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
}
- inode->i_mtime = inode->i_atime = inode_get_ctime(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_get_ctime(inode)));
inode->i_flags &= ~S_DAX;
if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 4ff88d0dd980..cf04e21bfda2 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -47,7 +47,7 @@ typedef u32 erofs_blk_t;
struct erofs_device_info {
char *path;
struct erofs_fscache *fscache;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct dax_device *dax_dev;
u64 dax_part_off;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 3700af9ee173..6fd04781fec5 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -227,7 +227,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_fscache *fscache;
struct erofs_deviceslot *dis;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
void *ptr;
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
@@ -251,13 +251,13 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
return PTR_ERR(fscache);
dif->fscache = fscache;
} else if (!sbi->devs->flatdev) {
- bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type,
- NULL);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
- dif->bdev = bdev;
- dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
- NULL, NULL);
+ bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ,
+ sb->s_type, NULL);
+ if (IS_ERR(bdev_handle))
+ return PTR_ERR(bdev_handle);
+ dif->bdev_handle = bdev_handle;
+ dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev,
+ &dif->dax_part_off, NULL, NULL);
}
dif->blocks = le32_to_cpu(dis->blocks);
@@ -806,8 +806,8 @@ static int erofs_release_device_info(int id, void *ptr, void *data)
struct erofs_device_info *dif = ptr;
fs_put_dax(dif->dax_dev, NULL);
- if (dif->bdev)
- blkdev_put(dif->bdev, &erofs_fs_type);
+ if (dif->bdev_handle)
+ bdev_release(dif->bdev_handle);
erofs_fscache_unregister_cookie(dif->fscache);
dif->fscache = NULL;
kfree(dif->path);
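
The erofs changes above (and the matching ext4 s_journal_bdev_handle change later in this diff) follow one pattern: bdev_open_by_path() replaces blkdev_get_by_path(), the struct block_device is reached through the handle's ->bdev member, and bdev_release() on the handle replaces blkdev_put(). A kernel-context sketch of the open/release pairing (the demo_* helpers are hypothetical):

    #include <linux/blkdev.h>
    #include <linux/err.h>

    static struct block_device *demo_open_extra_dev(const char *path, void *holder,
                                                    struct bdev_handle **handle_ret)
    {
            struct bdev_handle *handle;

            handle = bdev_open_by_path(path, BLK_OPEN_READ, holder, NULL);
            if (IS_ERR(handle))
                    return ERR_CAST(handle);

            *handle_ret = handle;   /* the handle, not the bdev, is what gets released */
            return handle->bdev;
    }

    static void demo_close_extra_dev(struct bdev_handle *handle)
    {
            if (handle)
                    bdev_release(handle);
    }
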
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index 09d341675e89..b58316b49a43 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -168,7 +168,7 @@ const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
};
#endif
-const struct xattr_handler *erofs_xattr_handlers[] = {
+const struct xattr_handler * const erofs_xattr_handlers[] = {
&erofs_xattr_user_handler,
&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index f16283cb8c93..b246cd0e135e 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -23,7 +23,7 @@ static inline const char *erofs_xattr_prefix(unsigned int idx,
{
const struct xattr_handler *handler = NULL;
- static const struct xattr_handler *xattr_handler_map[] = {
+ static const struct xattr_handler * const xattr_handler_map[] = {
[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
@@ -44,7 +44,7 @@ static inline const char *erofs_xattr_prefix(unsigned int idx,
return xattr_prefix(handler);
}
-extern const struct xattr_handler *erofs_xattr_handlers[];
+extern const struct xattr_handler * const erofs_xattr_handlers[];
int erofs_xattr_prefixes_init(struct super_block *sb);
void erofs_xattr_prefixes_cleanup(struct super_block *sb);
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index f55498e5c23d..f78b614f44dc 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -549,6 +549,7 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
u8 tz, __le16 time, __le16 date, u8 time_cs);
void exfat_truncate_atime(struct timespec64 *ts);
+void exfat_truncate_inode_atime(struct inode *inode);
void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
u8 *tz, __le16 *time, __le16 *date, u8 *time_cs);
u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type);
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 32395ef686a2..30ee2c8d36a5 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -22,7 +22,7 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
if (err)
return err;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
if (!IS_SYNC(inode))
@@ -290,10 +290,9 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
}
if (attr->ia_valid & ATTR_SIZE)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- setattr_copy(&nop_mnt_idmap, inode, attr);
- exfat_truncate_atime(&inode->i_atime);
+ exfat_truncate_inode_atime(inode);
if (attr->ia_valid & ATTR_SIZE) {
error = exfat_block_truncate_page(inode, attr->ia_size);
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 13329baeafbc..a2185e6f0548 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -26,6 +26,7 @@ int __exfat_write_inode(struct inode *inode, int sync)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
bool is_dir = (ei->type == TYPE_DIR) ? true : false;
+ struct timespec64 ts;
if (inode->i_ino == EXFAT_ROOT_INO)
return 0;
@@ -55,16 +56,18 @@ int __exfat_write_inode(struct inode *inode, int sync)
&ep->dentry.file.create_time,
&ep->dentry.file.create_date,
&ep->dentry.file.create_time_cs);
- exfat_set_entry_time(sbi, &inode->i_mtime,
- &ep->dentry.file.modify_tz,
- &ep->dentry.file.modify_time,
- &ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_cs);
- exfat_set_entry_time(sbi, &inode->i_atime,
- &ep->dentry.file.access_tz,
- &ep->dentry.file.access_time,
- &ep->dentry.file.access_date,
- NULL);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.modify_tz,
+ &ep->dentry.file.modify_time,
+ &ep->dentry.file.modify_date,
+ &ep->dentry.file.modify_time_cs);
+ inode_set_mtime_to_ts(inode, ts);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.access_tz,
+ &ep->dentry.file.access_time,
+ &ep->dentry.file.access_date,
+ NULL);
+ inode_set_atime_to_ts(inode, ts);
/* File size should be zero if there is no cluster allocated */
on_disk_size = i_size_read(inode);
@@ -355,7 +358,7 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
if (to > i_size_read(inode)) {
truncate_pagecache(inode, i_size_read(inode));
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
exfat_truncate(inode);
}
}
@@ -398,7 +401,7 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
exfat_write_failed(mapping, pos+len);
if (!(err < 0) && !(ei->attr & ATTR_ARCHIVE)) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ei->attr |= ATTR_ARCHIVE;
mark_inode_dirty(inode);
}
@@ -576,10 +579,10 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
exfat_save_attr(inode, info->attr);
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
- inode->i_mtime = info->mtime;
+ inode_set_mtime_to_ts(inode, info->mtime);
inode_set_ctime_to_ts(inode, info->mtime);
ei->i_crtime = info->crtime;
- inode->i_atime = info->atime;
+ inode_set_atime_to_ts(inode, info->atime);
return 0;
}
diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c
index 2e1a1a6b1021..fa8459828046 100644
--- a/fs/exfat/misc.c
+++ b/fs/exfat/misc.c
@@ -126,6 +126,14 @@ void exfat_truncate_atime(struct timespec64 *ts)
ts->tv_nsec = 0;
}
+void exfat_truncate_inode_atime(struct inode *inode)
+{
+ struct timespec64 atime = inode_get_atime(inode);
+
+ exfat_truncate_atime(&atime);
+ inode_set_atime_to_ts(inode, atime);
+}
+
u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type)
{
int i;
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 1b9f587f6cca..b92e46916dea 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -569,7 +569,7 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
@@ -582,8 +582,9 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = EXFAT_I(inode)->i_crtime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ EXFAT_I(inode)->i_crtime = simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
+
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -816,16 +817,16 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
ei->dir.dir = DIR_DELETED;
inode_inc_iversion(dir);
- dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
- exfat_truncate_atime(&dir->i_atime);
+ simple_inode_init_ts(dir);
+ exfat_truncate_inode_atime(dir);
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
mark_inode_dirty(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
exfat_unhash_inode(inode);
exfat_d_version_set(dentry, inode_query_iversion(dir));
unlock:
@@ -851,7 +852,7 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
@@ -865,8 +866,8 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = EXFAT_I(inode)->i_crtime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ EXFAT_I(inode)->i_crtime = simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -977,8 +978,8 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
ei->dir.dir = DIR_DELETED;
inode_inc_iversion(dir);
- dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
- exfat_truncate_atime(&dir->i_atime);
+ simple_inode_init_ts(dir);
+ exfat_truncate_inode_atime(dir);
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
@@ -986,8 +987,8 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
exfat_unhash_inode(inode);
exfat_d_version_set(dentry, inode_query_iversion(dir));
unlock:
@@ -1312,7 +1313,7 @@ static int exfat_rename(struct mnt_idmap *idmap,
inode_inc_iversion(new_dir);
simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
EXFAT_I(new_dir)->i_crtime = current_time(new_dir);
- exfat_truncate_atime(&new_dir->i_atime);
+ exfat_truncate_inode_atime(new_dir);
if (IS_DIRSYNC(new_dir))
exfat_sync_inode(new_dir);
else
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 2778bd9b631e..e919a68bf4a1 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -370,8 +370,8 @@ static int exfat_read_root(struct inode *inode)
ei->i_size_ondisk = i_size_read(inode);
exfat_save_attr(inode, ATTR_SUBDIR);
- inode->i_mtime = inode->i_atime = ei->i_crtime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ ei->i_crtime = simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
return 0;
}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index b335f17f682f..c7900868171b 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -468,7 +468,7 @@ int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
ext2_set_de_type(de, inode);
ext2_commit_chunk(page, pos, len);
if (update_times)
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(dir);
return ext2_handle_dirsync(dir);
@@ -555,7 +555,7 @@ got_it:
de->inode = cpu_to_le32(inode->i_ino);
ext2_set_de_type (de, inode);
ext2_commit_chunk(page, pos, rec_len);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(dir);
err = ext2_handle_dirsync(dir);
@@ -606,7 +606,7 @@ int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page)
pde->rec_len = ext2_rec_len_to_disk(to - from);
dir->inode = 0;
ext2_commit_chunk(page, pos, to - from);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(inode);
return ext2_handle_dirsync(inode);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index c24d0de95a83..fdf63e9c6e7c 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -546,7 +546,7 @@ got:
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_flags =
ext2_mask_flags(mode, EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 314b415ee518..464faf6c217e 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1291,7 +1291,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
__ext2_truncate_blocks(inode, newsize);
filemap_invalidate_unlock(inode->i_mapping);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (inode_needs_sync(inode)) {
sync_mapping_buffers(inode->i_mapping);
sync_inode_metadata(inode, 1);
@@ -1412,10 +1412,9 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
i_gid_write(inode, i_gid);
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
inode->i_size = le32_to_cpu(raw_inode->i_size);
- inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
+ inode_set_atime(inode, (signed)le32_to_cpu(raw_inode->i_atime), 0);
inode_set_ctime(inode, (signed)le32_to_cpu(raw_inode->i_ctime), 0);
- inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
- inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
+ inode_set_mtime(inode, (signed)le32_to_cpu(raw_inode->i_mtime), 0);
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
* This is needed because nfsd might try to access dead inodes
@@ -1544,9 +1543,9 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le32(inode->i_size);
- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- raw_inode->i_ctime = cpu_to_le32(inode_get_ctime(inode).tv_sec);
- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+ raw_inode->i_atime = cpu_to_le32(inode_get_atime_sec(inode));
+ raw_inode->i_ctime = cpu_to_le32(inode_get_ctime_sec(inode));
+ raw_inode->i_mtime = cpu_to_le32(inode_get_mtime_sec(inode));
raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index aaf3e3e88cb2..645ee6142f69 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1572,7 +1572,7 @@ out:
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
inode_inc_iversion(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return len - towrite;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 20f741184673..e849241ebb8f 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -98,7 +98,7 @@ static struct buffer_head *ext2_xattr_cache_find(struct inode *,
static void ext2_xattr_rehash(struct ext2_xattr_header *,
struct ext2_xattr_entry *);
-static const struct xattr_handler *ext2_xattr_handler_map[] = {
+static const struct xattr_handler * const ext2_xattr_handler_map[] = {
[EXT2_XATTR_INDEX_USER] = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
@@ -110,7 +110,7 @@ static const struct xattr_handler *ext2_xattr_handler_map[] = {
#endif
};
-const struct xattr_handler *ext2_xattr_handlers[] = {
+const struct xattr_handler * const ext2_xattr_handlers[] = {
&ext2_xattr_user_handler,
&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h
index 7925f596e8e2..6a4966949047 100644
--- a/fs/ext2/xattr.h
+++ b/fs/ext2/xattr.h
@@ -72,7 +72,7 @@ extern void ext2_xattr_delete_inode(struct inode *);
extern struct mb_cache *ext2_xattr_create_cache(void);
extern void ext2_xattr_destroy_cache(struct mb_cache *cache);
-extern const struct xattr_handler *ext2_xattr_handlers[];
+extern const struct xattr_handler * const ext2_xattr_handlers[];
# else /* CONFIG_EXT2_FS_XATTR */
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 453d4da5de52..7ae0b61258a7 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -232,19 +232,14 @@ static bool ext4_has_stable_inodes(struct super_block *sb)
return ext4_has_feature_stable_inodes(sb);
}
-static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret)
-{
- *ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
- *lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
-}
-
const struct fscrypt_operations ext4_cryptops = {
- .key_prefix = "ext4:",
+ .needs_bounce_pages = 1,
+ .has_32bit_inodes = 1,
+ .supports_subblock_data_units = 1,
+ .legacy_key_prefix = "ext4:",
.get_context = ext4_get_context,
.set_context = ext4_set_context,
.get_dummy_policy = ext4_get_dummy_policy,
.empty_dir = ext4_empty_dir,
.has_stable_inodes = ext4_has_stable_inodes,
- .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits,
};
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 9418359b1d9d..8da5fb680210 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -891,10 +891,13 @@ do { \
(raw_inode)->xtime = cpu_to_le32(clamp_t(int32_t, (ts).tv_sec, S32_MIN, S32_MAX)); \
} while (0)
-#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
- EXT4_INODE_SET_XTIME_VAL(xtime, inode, raw_inode, (inode)->xtime)
+#define EXT4_INODE_SET_ATIME(inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(i_atime, inode, raw_inode, inode_get_atime(inode))
-#define EXT4_INODE_SET_CTIME(inode, raw_inode) \
+#define EXT4_INODE_SET_MTIME(inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(i_mtime, inode, raw_inode, inode_get_mtime(inode))
+
+#define EXT4_INODE_SET_CTIME(inode, raw_inode) \
EXT4_INODE_SET_XTIME_VAL(i_ctime, inode, raw_inode, inode_get_ctime(inode))
#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \
@@ -910,9 +913,16 @@ do { \
.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime) \
})
-#define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode) \
+#define EXT4_INODE_GET_ATIME(inode, raw_inode) \
+do { \
+ inode_set_atime_to_ts(inode, \
+ EXT4_INODE_GET_XTIME_VAL(i_atime, inode, raw_inode)); \
+} while (0)
+
+#define EXT4_INODE_GET_MTIME(inode, raw_inode) \
do { \
- (inode)->xtime = EXT4_INODE_GET_XTIME_VAL(xtime, inode, raw_inode); \
+ inode_set_mtime_to_ts(inode, \
+ EXT4_INODE_GET_XTIME_VAL(i_mtime, inode, raw_inode)); \
} while (0)
#define EXT4_INODE_GET_CTIME(inode, raw_inode) \
@@ -1537,7 +1547,7 @@ struct ext4_sb_info {
unsigned long s_commit_interval;
u32 s_max_batch_time;
u32 s_min_batch_time;
- struct block_device *s_journal_bdev;
+ struct bdev_handle *s_journal_bdev_handle;
#ifdef CONFIG_QUOTA
/* Names of quota files with journalled quota */
char __rcu *s_qf_names[EXT4_MAXQUOTAS];
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 202c76996b62..4c4176ee1749 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4481,7 +4481,8 @@ retry:
if (epos > new_size)
epos = new_size;
if (ext4_update_inode_size(inode, epos) & 0x1)
- inode->i_mtime = inode_get_ctime(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_get_ctime(inode));
}
ret2 = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -4617,7 +4618,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
/* Now release the pages and zero block aligned part of pages */
truncate_pagecache_range(inode, start, end - 1);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags);
@@ -4642,7 +4643,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
goto out_mutex;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (new_size)
ext4_update_inode_size(inode, new_size);
ret = ext4_mark_inode_dirty(handle, inode);
@@ -5378,7 +5379,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -5488,7 +5489,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
EXT4_I(inode)->i_disksize += len;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ext4_mark_inode_dirty(handle, inode);
if (ret)
goto out_stop;
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
index cdf9bfe10137..11e6f33677a2 100644
--- a/fs/ext4/fsmap.c
+++ b/fs/ext4/fsmap.c
@@ -576,8 +576,9 @@ static bool ext4_getfsmap_is_valid_device(struct super_block *sb,
if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
fm->fmr_device == new_encode_dev(sb->s_bdev->bd_dev))
return true;
- if (EXT4_SB(sb)->s_journal_bdev &&
- fm->fmr_device == new_encode_dev(EXT4_SB(sb)->s_journal_bdev->bd_dev))
+ if (EXT4_SB(sb)->s_journal_bdev_handle &&
+ fm->fmr_device ==
+ new_encode_dev(EXT4_SB(sb)->s_journal_bdev_handle->bdev->bd_dev))
return true;
return false;
}
@@ -647,9 +648,9 @@ int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
memset(handlers, 0, sizeof(handlers));
handlers[0].gfd_dev = new_encode_dev(sb->s_bdev->bd_dev);
handlers[0].gfd_fn = ext4_getfsmap_datadev;
- if (EXT4_SB(sb)->s_journal_bdev) {
+ if (EXT4_SB(sb)->s_journal_bdev_handle) {
handlers[1].gfd_dev = new_encode_dev(
- EXT4_SB(sb)->s_journal_bdev->bd_dev);
+ EXT4_SB(sb)->s_journal_bdev_handle->bdev->bd_dev);
handlers[1].gfd_fn = ext4_getfsmap_logdev;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index b65058d972f9..e9bbb1da2d0a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1250,8 +1250,8 @@ got:
inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- ei->i_crtime = inode->i_mtime;
+ simple_inode_init_ts(inode);
+ ei->i_crtime = inode_get_mtime(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_dir_start_lookup = 0;
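Here the open-coded "atime = mtime = ctime = now" initialization of a brand-new inode is replaced by a single simple_inode_init_ts() call, with i_crtime then read back through inode_get_mtime(). The helper's implementation is not part of this diff; judging only from the converted call sites, it is expected to behave roughly like the sketch below (my_inode_init_ts is a hypothetical stand-in, not the VFS definition):

	/* Sketch inferred from the converted call sites. */
	static inline struct timespec64 my_inode_init_ts(struct inode *inode)
	{
		struct timespec64 ts = inode_set_ctime_current(inode);

		inode_set_atime_to_ts(inode, ts);
		inode_set_mtime_to_ts(inode, ts);
		return ts;
	}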
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 012d9259ff53..9a84a5f9fef4 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1037,7 +1037,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
return 1;
@@ -1991,7 +1991,7 @@ out:
ext4_orphan_del(handle, inode);
if (err == 0) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
err = ext4_mark_inode_dirty(handle, inode);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4ce35f1c8b0a..08cb5c0e0d51 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4020,7 +4020,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret2 = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret2))
ret = ret2;
@@ -4180,7 +4180,7 @@ out_stop:
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
err2 = ext4_mark_inode_dirty(handle, inode);
if (unlikely(err2 && !err))
err = err2;
@@ -4284,8 +4284,8 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
EXT4_INODE_SET_CTIME(inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ EXT4_INODE_SET_MTIME(inode, raw_inode);
+ EXT4_INODE_SET_ATIME(inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
@@ -4893,8 +4893,8 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
}
EXT4_INODE_GET_CTIME(inode, raw_inode);
- EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
- EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
+ EXT4_INODE_GET_ATIME(inode, raw_inode);
+ EXT4_INODE_GET_MTIME(inode, raw_inode);
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
@@ -5019,8 +5019,8 @@ static void __ext4_update_other_inode_time(struct super_block *sb,
spin_lock(&ei->i_raw_lock);
EXT4_INODE_SET_CTIME(inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ EXT4_INODE_SET_MTIME(inode, raw_inode);
+ EXT4_INODE_SET_ATIME(inode, raw_inode);
ext4_inode_csum_set(inode, raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
trace_ext4_other_inode_update_time(inode, orig_ino);
@@ -5413,7 +5413,8 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
* update c/mtime in shrink case below
*/
if (!shrink)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
if (shrink)
ext4_fc_track_range(handle, inode,
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0bfe2ce589e2..4f931f80cb34 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -312,13 +312,22 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
struct ext4_inode_info *ei1;
struct ext4_inode_info *ei2;
unsigned long tmp;
+ struct timespec64 ts1, ts2;
ei1 = EXT4_I(inode1);
ei2 = EXT4_I(inode2);
swap(inode1->i_version, inode2->i_version);
- swap(inode1->i_atime, inode2->i_atime);
- swap(inode1->i_mtime, inode2->i_mtime);
+
+ ts1 = inode_get_atime(inode1);
+ ts2 = inode_get_atime(inode2);
+ inode_set_atime_to_ts(inode1, ts2);
+ inode_set_atime_to_ts(inode2, ts1);
+
+ ts1 = inode_get_mtime(inode1);
+ ts2 = inode_get_mtime(inode2);
+ inode_set_mtime_to_ts(inode1, ts2);
+ inode_set_mtime_to_ts(inode2, ts1);
memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index bbda587f76b8..057d74467293 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2207,7 +2207,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
err2 = ext4_mark_inode_dirty(handle, dir);
@@ -3202,7 +3202,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
* recovery. */
inode->i_size = 0;
ext4_orphan_add(handle, inode);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_set_ctime_current(inode);
retval = ext4_mark_inode_dirty(handle, inode);
if (retval)
@@ -3277,7 +3277,7 @@ int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto out_handle;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
ext4_update_dx_flag(dir);
retval = ext4_mark_inode_dirty(handle, dir);
if (retval)
@@ -3648,7 +3648,7 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
if (ext4_has_feature_filetype(ent->dir->i_sb))
ent->de->file_type = file_type;
inode_inc_iversion(ent->dir);
- ent->dir->i_mtime = inode_set_ctime_current(ent->dir);
+ inode_set_mtime_to_ts(ent->dir, inode_set_ctime_current(ent->dir));
retval = ext4_mark_inode_dirty(handle, ent->dir);
BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
if (!ent->inlined) {
@@ -3963,7 +3963,7 @@ static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir,
ext4_dec_count(new.inode);
inode_set_ctime_current(new.inode);
}
- old.dir->i_mtime = inode_set_ctime_current(old.dir);
+ inode_set_mtime_to_ts(old.dir, inode_set_ctime_current(old.dir));
ext4_update_dx_flag(old.dir);
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index dbebd8b3127e..42a44990d99c 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1351,14 +1351,14 @@ static void ext4_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
invalidate_bdev(sb->s_bdev);
- if (sbi->s_journal_bdev) {
+ if (sbi->s_journal_bdev_handle) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
* be hotswapped, and it breaks the `ro-after' testing code.
*/
- sync_blockdev(sbi->s_journal_bdev);
- invalidate_bdev(sbi->s_journal_bdev);
+ sync_blockdev(sbi->s_journal_bdev_handle->bdev);
+ invalidate_bdev(sbi->s_journal_bdev_handle->bdev);
}
ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
@@ -4233,7 +4233,7 @@ int ext4_calculate_overhead(struct super_block *sb)
* Add the internal journal blocks whether the journal has been
* loaded or not
*/
- if (sbi->s_journal && !sbi->s_journal_bdev)
+ if (sbi->s_journal && !sbi->s_journal_bdev_handle)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
/* j_inum for internal journal is non-zero */
@@ -5670,9 +5670,9 @@ failed_mount:
#endif
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
brelse(sbi->s_sbh);
- if (sbi->s_journal_bdev) {
- invalidate_bdev(sbi->s_journal_bdev);
- blkdev_put(sbi->s_journal_bdev, sb);
+ if (sbi->s_journal_bdev_handle) {
+ invalidate_bdev(sbi->s_journal_bdev_handle->bdev);
+ bdev_release(sbi->s_journal_bdev_handle);
}
out_fail:
invalidate_bdev(sb->s_bdev);
@@ -5842,12 +5842,13 @@ static journal_t *ext4_open_inode_journal(struct super_block *sb,
return journal;
}
-static struct block_device *ext4_get_journal_blkdev(struct super_block *sb,
+static struct bdev_handle *ext4_get_journal_blkdev(struct super_block *sb,
dev_t j_dev, ext4_fsblk_t *j_start,
ext4_fsblk_t *j_len)
{
struct buffer_head *bh;
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
int hblock, blocksize;
ext4_fsblk_t sb_block;
unsigned long offset;
@@ -5856,16 +5857,17 @@ static struct block_device *ext4_get_journal_blkdev(struct super_block *sb,
/* see get_tree_bdev for why this is needed and safe */
up_write(&sb->s_umount);
- bdev = blkdev_get_by_dev(j_dev, BLK_OPEN_READ | BLK_OPEN_WRITE, sb,
- &fs_holder_ops);
+ bdev_handle = bdev_open_by_dev(j_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ sb, &fs_holder_ops);
down_write(&sb->s_umount);
- if (IS_ERR(bdev)) {
+ if (IS_ERR(bdev_handle)) {
ext4_msg(sb, KERN_ERR,
"failed to open journal device unknown-block(%u,%u) %ld",
- MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev));
- return ERR_CAST(bdev);
+ MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_handle));
+ return bdev_handle;
}
+ bdev = bdev_handle->bdev;
blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
@@ -5912,12 +5914,12 @@ static struct block_device *ext4_get_journal_blkdev(struct super_block *sb,
*j_start = sb_block + 1;
*j_len = ext4_blocks_count(es);
brelse(bh);
- return bdev;
+ return bdev_handle;
out_bh:
brelse(bh);
out_bdev:
- blkdev_put(bdev, sb);
+ bdev_release(bdev_handle);
return ERR_PTR(errno);
}
@@ -5927,14 +5929,14 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
journal_t *journal;
ext4_fsblk_t j_start;
ext4_fsblk_t j_len;
- struct block_device *journal_bdev;
+ struct bdev_handle *bdev_handle;
int errno = 0;
- journal_bdev = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
- if (IS_ERR(journal_bdev))
- return ERR_CAST(journal_bdev);
+ bdev_handle = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
+ if (IS_ERR(bdev_handle))
+ return ERR_CAST(bdev_handle);
- journal = jbd2_journal_init_dev(journal_bdev, sb->s_bdev, j_start,
+ journal = jbd2_journal_init_dev(bdev_handle->bdev, sb->s_bdev, j_start,
j_len, sb->s_blocksize);
if (IS_ERR(journal)) {
ext4_msg(sb, KERN_ERR, "failed to create device journal");
@@ -5949,14 +5951,14 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
goto out_journal;
}
journal->j_private = sb;
- EXT4_SB(sb)->s_journal_bdev = journal_bdev;
+ EXT4_SB(sb)->s_journal_bdev_handle = bdev_handle;
ext4_init_journal_params(sb, journal);
return journal;
out_journal:
jbd2_journal_destroy(journal);
out_bdev:
- blkdev_put(journal_bdev, sb);
+ bdev_release(bdev_handle);
return ERR_PTR(errno);
}
@@ -7127,7 +7129,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
}
EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
err = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out_unlock:
@@ -7300,12 +7302,12 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
static void ext4_kill_sb(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct block_device *journal_bdev = sbi ? sbi->s_journal_bdev : NULL;
+ struct bdev_handle *handle = sbi ? sbi->s_journal_bdev_handle : NULL;
kill_block_super(sb);
- if (journal_bdev)
- blkdev_put(journal_bdev, sb);
+ if (handle)
+ bdev_release(handle);
}
static struct file_system_type ext4_fs_type = {
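The fs/ext4/super.c hunks above switch the external journal device from a bare struct block_device obtained with blkdev_get_by_dev() to a struct bdev_handle obtained with bdev_open_by_dev() and released with bdev_release(); the device itself is reached through handle->bdev. A minimal open/use/release sketch of that pattern, assuming the same calls as the conversion above; "myfs" is a placeholder:

	#include <linux/blkdev.h>

	static int myfs_open_extra_dev(struct super_block *sb, dev_t devt)
	{
		struct bdev_handle *handle;

		handle = bdev_open_by_dev(devt, BLK_OPEN_READ | BLK_OPEN_WRITE,
					  sb, &fs_holder_ops);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		/* use the device through the handle */
		sync_blockdev(handle->bdev);

		/* one bdev_release() replaces the old blkdev_put(bdev, holder) */
		bdev_release(handle);
		return 0;
	}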
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 92ba28cebac6..82dc5e673d5c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -98,7 +98,7 @@ static const struct xattr_handler * const ext4_xattr_handler_map[] = {
[EXT4_XATTR_INDEX_HURD] = &ext4_xattr_hurd_handler,
};
-const struct xattr_handler *ext4_xattr_handlers[] = {
+const struct xattr_handler * const ext4_xattr_handlers[] = {
&ext4_xattr_user_handler,
&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
@@ -356,7 +356,7 @@ ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
- return ((u64) inode_get_ctime(ea_inode).tv_sec << 32) |
+ return ((u64) inode_get_ctime_sec(ea_inode) << 32) |
(u32) inode_peek_iversion_raw(ea_inode);
}
@@ -368,12 +368,12 @@ static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
{
- return (u32)ea_inode->i_atime.tv_sec;
+ return (u32) inode_get_atime_sec(ea_inode);
}
static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
{
- ea_inode->i_atime.tv_sec = hash;
+ inode_set_atime(ea_inode, hash, 0);
}
/*
@@ -418,7 +418,7 @@ free_bhs:
return ret;
}
-#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)
+#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode_get_mtime_sec(inode)))
static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
u32 ea_inode_hash, struct inode **ea_inode)
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 824faf0b15a8..bd97c4aa8177 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -193,7 +193,7 @@ extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle);
extern void ext4_evict_ea_inode(struct inode *inode);
-extern const struct xattr_handler *ext4_xattr_handlers[];
+extern const struct xattr_handler * const ext4_xattr_handlers[];
extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 8aa29fe2e87b..042593aed1ec 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -455,7 +455,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
de->file_type = fs_umode_to_ftype(inode->i_mode);
set_page_dirty(page);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
f2fs_put_page(page, 1);
}
@@ -609,7 +609,7 @@ void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
f2fs_i_links_write(dir, true);
clear_inode_flag(inode, FI_NEW_INODE);
}
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
if (F2FS_I(dir)->i_current_depth != current_depth)
@@ -919,7 +919,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
}
f2fs_put_page(page, 1);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 6d688e42d89c..9043cedfa12b 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1234,6 +1234,7 @@ struct f2fs_bio_info {
#define FDEV(i) (sbi->devs[i])
#define RDEV(i) (raw_super->devs[i])
struct f2fs_dev_info {
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
char path[MAX_PATH_LEN];
unsigned int total_segments;
@@ -3317,13 +3318,15 @@ static inline void clear_file(struct inode *inode, int type)
static inline bool f2fs_is_time_consistent(struct inode *inode)
{
- struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 ts = inode_get_atime(inode);
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts))
return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ctime))
+ ts = inode_get_ctime(inode);
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts))
return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
+ ts = inode_get_mtime(inode);
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts))
return false;
return true;
}
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ca5904129b16..dd99abbb7186 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -798,7 +798,7 @@ int f2fs_truncate(struct inode *inode)
if (err)
return err;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
@@ -905,9 +905,9 @@ static void __setattr_copy(struct mnt_idmap *idmap,
i_uid_update(idmap, attr, inode);
i_gid_update(idmap, attr, inode);
if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
@@ -1012,7 +1012,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
return err;
spin_lock(&F2FS_I(inode)->i_size_lock);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
F2FS_I(inode)->last_disk_size = i_size_read(inode);
spin_unlock(&F2FS_I(inode)->i_size_lock);
}
@@ -1840,7 +1840,7 @@ static long f2fs_fallocate(struct file *file, int mode,
}
if (!ret) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
@@ -2888,10 +2888,10 @@ out_src:
if (ret)
goto out_unlock;
- src->i_mtime = inode_set_ctime_current(src);
+ inode_set_mtime_to_ts(src, inode_set_ctime_current(src));
f2fs_mark_inode_dirty_sync(src, false);
if (src != dst) {
- dst->i_mtime = inode_set_ctime_current(dst);
+ inode_set_mtime_to_ts(dst, inode_set_ctime_current(dst));
f2fs_mark_inode_dirty_sync(dst, false);
}
f2fs_update_time(sbi, REQ_TIME);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 2fe25619ccb5..ac00423f117b 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -699,7 +699,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
set_page_dirty(page);
f2fs_put_page(page, 1);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index cde243840abd..5779c7edd49b 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -386,9 +386,9 @@ static void init_idisk_time(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- fi->i_disk_time[0] = inode->i_atime;
+ fi->i_disk_time[0] = inode_get_atime(inode);
fi->i_disk_time[1] = inode_get_ctime(inode);
- fi->i_disk_time[2] = inode->i_mtime;
+ fi->i_disk_time[2] = inode_get_mtime(inode);
}
static int do_read_inode(struct inode *inode)
@@ -417,12 +417,12 @@ static int do_read_inode(struct inode *inode)
inode->i_size = le64_to_cpu(ri->i_size);
inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);
- inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
+ inode_set_atime(inode, le64_to_cpu(ri->i_atime),
+ le32_to_cpu(ri->i_atime_nsec));
inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
le32_to_cpu(ri->i_ctime_nsec));
- inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
- inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
+ inode_set_mtime(inode, le64_to_cpu(ri->i_mtime),
+ le32_to_cpu(ri->i_mtime_nsec));
inode->i_generation = le32_to_cpu(ri->i_generation);
if (S_ISDIR(inode->i_mode))
fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
@@ -698,12 +698,12 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
}
set_raw_inline(inode, ri);
- ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
- ri->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
- ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
- ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ ri->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
+ ri->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ ri->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
+ ri->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
+ ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ri->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
if (S_ISDIR(inode->i_mode))
ri->i_current_depth =
cpu_to_le32(F2FS_I(inode)->i_current_depth);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 193b22a2d6bf..d0053b0284d8 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -243,8 +243,8 @@ static struct inode *f2fs_new_inode(struct mnt_idmap *idmap,
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- F2FS_I(inode)->i_crtime = inode->i_mtime;
+ simple_inode_init_ts(inode);
+ F2FS_I(inode)->i_crtime = inode_get_mtime(inode);
inode->i_generation = get_random_u32();
if (S_ISDIR(inode->i_mode))
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 7be60df277a5..b56d0f1078a7 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -320,12 +320,12 @@ static int recover_inode(struct inode *inode, struct page *page)
}
f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
- inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
+ inode_set_atime(inode, le64_to_cpu(raw->i_atime),
+ le32_to_cpu(raw->i_atime_nsec));
inode_set_ctime(inode, le64_to_cpu(raw->i_ctime),
le32_to_cpu(raw->i_ctime_nsec));
- inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
- inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ inode_set_mtime(inode, le64_to_cpu(raw->i_mtime),
+ le32_to_cpu(raw->i_mtime_nsec));
F2FS_I(inode)->i_advise = raw->i_advise;
F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a8c8232852bb..be17d77513d5 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1562,7 +1562,7 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
for (i = 0; i < sbi->s_ndevs; i++) {
if (i > 0)
- blkdev_put(FDEV(i).bdev, sbi->sb);
+ bdev_release(FDEV(i).bdev_handle);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
@@ -2710,7 +2710,7 @@ retry:
if (len == towrite)
return err;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
return len - towrite;
}
@@ -3203,13 +3203,6 @@ static bool f2fs_has_stable_inodes(struct super_block *sb)
return true;
}
-static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret)
-{
- *ino_bits_ret = 8 * sizeof(nid_t);
- *lblk_bits_ret = 8 * sizeof(block_t);
-}
-
static struct block_device **f2fs_get_devices(struct super_block *sb,
unsigned int *num_devs)
{
@@ -3231,13 +3224,15 @@ static struct block_device **f2fs_get_devices(struct super_block *sb,
}
static const struct fscrypt_operations f2fs_cryptops = {
- .key_prefix = "f2fs:",
+ .needs_bounce_pages = 1,
+ .has_32bit_inodes = 1,
+ .supports_subblock_data_units = 1,
+ .legacy_key_prefix = "f2fs:",
.get_context = f2fs_get_context,
.set_context = f2fs_set_context,
.get_dummy_policy = f2fs_get_dummy_policy,
.empty_dir = f2fs_empty_dir,
.has_stable_inodes = f2fs_has_stable_inodes,
- .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
.get_devices = f2fs_get_devices,
};
#endif
@@ -4198,7 +4193,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
for (i = 0; i < max_devices; i++) {
if (i == 0)
- FDEV(0).bdev = sbi->sb->s_bdev;
+ FDEV(0).bdev_handle = sbi->sb->s_bdev_handle;
else if (!RDEV(i).path[0])
break;
@@ -4218,13 +4213,14 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
FDEV(i).end_blk = FDEV(i).start_blk +
(FDEV(i).total_segments <<
sbi->log_blocks_per_seg) - 1;
- FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
- mode, sbi->sb, NULL);
+ FDEV(i).bdev_handle = bdev_open_by_path(
+ FDEV(i).path, mode, sbi->sb, NULL);
}
}
- if (IS_ERR(FDEV(i).bdev))
- return PTR_ERR(FDEV(i).bdev);
+ if (IS_ERR(FDEV(i).bdev_handle))
+ return PTR_ERR(FDEV(i).bdev_handle);
+ FDEV(i).bdev = FDEV(i).bdev_handle->bdev;
/* to release errored devices */
sbi->s_ndevs = i + 1;
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index a657284faee3..4314456854f6 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -189,7 +189,7 @@ const struct xattr_handler f2fs_xattr_security_handler = {
.set = f2fs_xattr_generic_set,
};
-static const struct xattr_handler *f2fs_xattr_handler_map[] = {
+static const struct xattr_handler * const f2fs_xattr_handler_map[] = {
[F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
[F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
@@ -202,7 +202,7 @@ static const struct xattr_handler *f2fs_xattr_handler_map[] = {
[F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
};
-const struct xattr_handler *f2fs_xattr_handlers[] = {
+const struct xattr_handler * const f2fs_xattr_handlers[] = {
&f2fs_xattr_user_handler,
&f2fs_xattr_trusted_handler,
#ifdef CONFIG_F2FS_FS_SECURITY
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index b1811c392e6f..a005ffdcf717 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -125,7 +125,7 @@ extern const struct xattr_handler f2fs_xattr_trusted_handler;
extern const struct xattr_handler f2fs_xattr_advise_handler;
extern const struct xattr_handler f2fs_xattr_security_handler;
-extern const struct xattr_handler *f2fs_xattr_handlers[];
+extern const struct xattr_handler * const f2fs_xattr_handlers[];
extern int f2fs_setxattr(struct inode *, int, const char *,
const void *, size_t, struct page *, int);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index cdd39b6020f3..1fac3dabf130 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -512,6 +512,7 @@ static int fat_validate_dir(struct inode *dir)
int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+ struct timespec64 mtime;
int error;
MSDOS_I(inode)->i_pos = 0;
@@ -561,14 +562,18 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
- fat_time_fat2unix(sbi, &inode->i_mtime, de->time, de->date, 0);
- inode_set_ctime_to_ts(inode, inode->i_mtime);
+ fat_time_fat2unix(sbi, &mtime, de->time, de->date, 0);
+ inode_set_mtime_to_ts(inode, mtime);
+ inode_set_ctime_to_ts(inode, mtime);
if (sbi->options.isvfat) {
- fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
+ struct timespec64 atime;
+
+ fat_time_fat2unix(sbi, &atime, 0, de->adate, 0);
+ inode_set_atime_to_ts(inode, atime);
fat_time_fat2unix(sbi, &MSDOS_I(inode)->i_crtime, de->ctime,
de->cdate, de->ctime_cs);
} else
- inode->i_atime = fat_truncate_atime(sbi, &inode->i_mtime);
+ inode_set_atime_to_ts(inode, fat_truncate_atime(sbi, &mtime));
return 0;
}
@@ -849,6 +854,7 @@ static int __fat_write_inode(struct inode *inode, int wait)
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
struct msdos_dir_entry *raw_entry;
+ struct timespec64 mtime;
loff_t i_pos;
sector_t blocknr;
int err, offset;
@@ -882,12 +888,14 @@ retry:
raw_entry->size = cpu_to_le32(inode->i_size);
raw_entry->attr = fat_make_attrs(inode);
fat_set_start(raw_entry, MSDOS_I(inode)->i_logstart);
- fat_time_unix2fat(sbi, &inode->i_mtime, &raw_entry->time,
+ mtime = inode_get_mtime(inode);
+ fat_time_unix2fat(sbi, &mtime, &raw_entry->time,
&raw_entry->date, NULL);
if (sbi->options.isvfat) {
+ struct timespec64 ts = inode_get_atime(inode);
__le16 atime;
- fat_time_unix2fat(sbi, &inode->i_atime, &atime,
- &raw_entry->adate, NULL);
+
+ fat_time_unix2fat(sbi, &ts, &atime, &raw_entry->adate, NULL);
fat_time_unix2fat(sbi, &MSDOS_I(inode)->i_crtime, &raw_entry->ctime,
&raw_entry->cdate, &raw_entry->ctime_cs);
}
@@ -1407,7 +1415,8 @@ static int fat_read_root(struct inode *inode)
MSDOS_I(inode)->mmu_private = inode->i_size;
fat_save_attrs(inode, ATTR_DIR);
- inode->i_mtime = inode->i_atime = inode_set_ctime(inode, 0, 0);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime(inode, 0, 0)));
set_nlink(inode, fat_subdirs(inode)+2);
return 0;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index f2304a1054aa..c7a2d27120ba 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -325,15 +325,15 @@ int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
}
if (flags & S_ATIME)
- inode->i_atime = fat_truncate_atime(sbi, now);
+ inode_set_atime_to_ts(inode, fat_truncate_atime(sbi, now));
/*
* ctime and mtime share the same on-disk field, and should be
* identical in memory. all mtime updates will be applied to ctime,
* but ctime updates are ignored.
*/
if (flags & S_MTIME)
- inode->i_mtime = inode_set_ctime_to_ts(inode,
- fat_truncate_mtime(sbi, now));
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, fat_truncate_mtime(sbi, now)));
return 0;
}
diff --git a/fs/file.c b/fs/file.c
index 3e4a4dfa38fc..5fb0b146e79e 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -604,6 +604,9 @@ void fd_install(unsigned int fd, struct file *file)
struct files_struct *files = current->files;
struct fdtable *fdt;
+ if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
+ return;
+
rcu_read_lock_sched();
if (unlikely(files->resize_in_progress)) {
@@ -853,8 +856,104 @@ void do_close_on_exec(struct files_struct *files)
spin_unlock(&files->file_lock);
}
+static struct file *__get_file_rcu(struct file __rcu **f)
+{
+ struct file __rcu *file;
+ struct file __rcu *file_reloaded;
+ struct file __rcu *file_reloaded_cmp;
+
+ file = rcu_dereference_raw(*f);
+ if (!file)
+ return NULL;
+
+ if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
+ return ERR_PTR(-EAGAIN);
+
+ file_reloaded = rcu_dereference_raw(*f);
+
+ /*
+ * Ensure that all accesses have a dependency on the load from
+ * rcu_dereference_raw() above so we get correct ordering
+ * between reuse/allocation and the pointer check below.
+ */
+ file_reloaded_cmp = file_reloaded;
+ OPTIMIZER_HIDE_VAR(file_reloaded_cmp);
+
+ /*
+ * atomic_long_inc_not_zero() above provided a full memory
+ * barrier when we acquired a reference.
+ *
+ * This is paired with the write barrier from assigning to the
+ * __rcu protected file pointer so that if that pointer still
+ * matches the current file, we know we have successfully
+ * acquired a reference to the right file.
+ *
+ * If the pointers don't match the file has been reallocated by
+ * SLAB_TYPESAFE_BY_RCU.
+ */
+ if (file == file_reloaded_cmp)
+ return file_reloaded;
+
+ fput(file);
+ return ERR_PTR(-EAGAIN);
+}
+
+/**
+ * get_file_rcu - try to get a reference to a file under rcu
+ * @f: the file to get a reference on
+ *
+ * This function tries to get a reference on @f carefully verifying that
+ * @f hasn't been reused.
+ *
+ * This function should rarely have to be used and only by users who
+ * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
+ *
+ * Return: Returns @f with the reference count increased or NULL.
+ */
+struct file *get_file_rcu(struct file __rcu **f)
+{
+ for (;;) {
+ struct file __rcu *file;
+
+ file = __get_file_rcu(f);
+ if (unlikely(!file))
+ return NULL;
+
+ if (unlikely(IS_ERR(file)))
+ continue;
+
+ return file;
+ }
+}
+EXPORT_SYMBOL_GPL(get_file_rcu);
+
+/**
+ * get_file_active - try to get a reference to a file
+ * @f: the file to get a reference on
+ *
+ * In contrast to get_file_rcu(), the pointer itself isn't part of the
+ * reference counting.
+ *
+ * This function should rarely have to be used and only by users who
+ * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
+ *
+ * Return: Returns @f with the reference count increased or NULL.
+ */
+struct file *get_file_active(struct file **f)
+{
+ struct file __rcu *file;
+
+ rcu_read_lock();
+ file = __get_file_rcu(f);
+ rcu_read_unlock();
+ if (IS_ERR(file))
+ file = NULL;
+ return file;
+}
+EXPORT_SYMBOL_GPL(get_file_active);
+
static inline struct file *__fget_files_rcu(struct files_struct *files,
- unsigned int fd, fmode_t mask)
+ unsigned int fd, fmode_t mask)
{
for (;;) {
struct file *file;
@@ -865,12 +964,6 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
return NULL;
fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
- file = rcu_dereference_raw(*fdentry);
- if (unlikely(!file))
- return NULL;
-
- if (unlikely(file->f_mode & mask))
- return NULL;
/*
* Ok, we have a file pointer. However, because we do
@@ -879,10 +972,15 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
*
* Such a race can take two forms:
*
- * (a) the file ref already went down to zero,
- * and get_file_rcu() fails. Just try again:
+ * (a) the file ref already went down to zero and the
+ * file hasn't been reused yet or the file count
+ * isn't zero but the file has already been reused.
*/
- if (unlikely(!get_file_rcu(file)))
+ file = __get_file_rcu(fdentry);
+ if (unlikely(!file))
+ return NULL;
+
+ if (unlikely(IS_ERR(file)))
continue;
/*
@@ -893,13 +991,21 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
*
* If so, we need to put our ref and try again.
*/
- if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
- unlikely(rcu_dereference_raw(*fdentry) != file)) {
+ if (unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
fput(file);
continue;
}
/*
+ * This isn't the file we're looking for or we're not
+ * allowed to get a reference to it.
+ */
+ if (unlikely(file->f_mode & mask)) {
+ fput(file);
+ return NULL;
+ }
+
+ /*
* Ok, we have a ref to the file, and checked that it
* still exists.
*/
@@ -948,7 +1054,14 @@ struct file *fget_task(struct task_struct *task, unsigned int fd)
return file;
}
-struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
+struct file *lookup_fdget_rcu(unsigned int fd)
+{
+ return __fget_files_rcu(current->files, fd, 0);
+
+}
+EXPORT_SYMBOL_GPL(lookup_fdget_rcu);
+
+struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd)
{
/* Must be called with rcu_read_lock held */
struct files_struct *files;
@@ -957,13 +1070,13 @@ struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
task_lock(task);
files = task->files;
if (files)
- file = files_lookup_fd_rcu(files, fd);
+ file = __fget_files_rcu(files, fd, 0);
task_unlock(task);
return file;
}
-struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
+struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *ret_fd)
{
/* Must be called with rcu_read_lock held */
struct files_struct *files;
@@ -974,7 +1087,7 @@ struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret
files = task->files;
if (files) {
for (; fd < files_fdtable(files)->max_fds; fd++) {
- file = files_lookup_fd_rcu(files, fd);
+ file = __fget_files_rcu(files, fd, 0);
if (file)
break;
}
@@ -983,7 +1096,7 @@ struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret
*ret_fd = fd;
return file;
}
-EXPORT_SYMBOL(task_lookup_next_fd_rcu);
+EXPORT_SYMBOL(task_lookup_next_fdget_rcu);
/*
* Lightweight file lookup - no refcnt increment if fd table isn't shared.
@@ -1272,12 +1385,16 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
if (unlikely(newfd == oldfd)) { /* corner case */
struct files_struct *files = current->files;
+ struct file *f;
int retval = oldfd;
rcu_read_lock();
- if (!files_lookup_fd_rcu(files, oldfd))
+ f = __fget_files_rcu(files, oldfd, 0);
+ if (!f)
retval = -EBADF;
rcu_read_unlock();
+ if (f)
+ fput(f);
return retval;
}
return ksys_dup3(oldfd, newfd, 0);
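With the fs/file.c changes above, the RCU fd-table helpers hand back a file with its reference count already elevated (the *_fdget_rcu() names reflect that), so callers pair the lookup with fput() instead of a separate get_file_rcu() on the result. A minimal caller sketch under that assumption; fd_is_append_only() is a hypothetical example:

	#include <linux/fdtable.h>
	#include <linux/file.h>

	static bool fd_is_append_only(unsigned int fd)
	{
		struct file *file;
		bool ret = false;

		rcu_read_lock();
		file = lookup_fdget_rcu(fd);	/* referenced file or NULL */
		rcu_read_unlock();

		if (file) {
			ret = file->f_flags & O_APPEND;
			fput(file);		/* drop the reference the lookup took */
		}
		return ret;
	}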
diff --git a/fs/file_table.c b/fs/file_table.c
index ee21b3da9d08..fa92743ba6a9 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -44,10 +44,10 @@ static struct kmem_cache *filp_cachep __read_mostly;
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
-/* Container for backing file with optional real path */
+/* Container for backing file with optional user path */
struct backing_file {
struct file file;
- struct path real_path;
+ struct path user_path;
};
static inline struct backing_file *backing_file(struct file *f)
@@ -55,31 +55,36 @@ static inline struct backing_file *backing_file(struct file *f)
return container_of(f, struct backing_file, file);
}
-struct path *backing_file_real_path(struct file *f)
+struct path *backing_file_user_path(struct file *f)
{
- return &backing_file(f)->real_path;
+ return &backing_file(f)->user_path;
}
-EXPORT_SYMBOL_GPL(backing_file_real_path);
+EXPORT_SYMBOL_GPL(backing_file_user_path);
-static void file_free_rcu(struct rcu_head *head)
+static inline void file_free(struct file *f)
{
- struct file *f = container_of(head, struct file, f_rcuhead);
-
+ security_file_free(f);
+ if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
+ percpu_counter_dec(&nr_files);
put_cred(f->f_cred);
- if (unlikely(f->f_mode & FMODE_BACKING))
+ if (unlikely(f->f_mode & FMODE_BACKING)) {
+ path_put(backing_file_user_path(f));
kfree(backing_file(f));
- else
+ } else {
kmem_cache_free(filp_cachep, f);
+ }
}
-static inline void file_free(struct file *f)
+void release_empty_file(struct file *f)
{
- security_file_free(f);
- if (unlikely(f->f_mode & FMODE_BACKING))
- path_put(backing_file_real_path(f));
- if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
- percpu_counter_dec(&nr_files);
- call_rcu(&f->f_rcuhead, file_free_rcu);
+ WARN_ON_ONCE(f->f_mode & (FMODE_BACKING | FMODE_OPENED));
+ if (atomic_long_dec_and_test(&f->f_count)) {
+ security_file_free(f);
+ put_cred(f->f_cred);
+ if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
+ percpu_counter_dec(&nr_files);
+ kmem_cache_free(filp_cachep, f);
+ }
}
/*
@@ -164,7 +169,6 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
return error;
}
- atomic_long_set(&f->f_count, 1);
rwlock_init(&f->f_owner.lock);
spin_lock_init(&f->f_lock);
mutex_init(&f->f_pos_lock);
@@ -172,6 +176,12 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
f->f_mode = OPEN_FMODE(flags);
/* f->f_version: 0 */
+ /*
+ * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While
+ * fget-rcu pattern users need to be able to handle spurious
+ * refcount bumps, we should reinitialize the reused file first.
+ */
+ atomic_long_set(&f->f_count, 1);
return 0;
}
@@ -471,7 +481,8 @@ EXPORT_SYMBOL(__fput_sync);
void __init files_init(void)
{
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
+ SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
+ SLAB_PANIC | SLAB_ACCOUNT, NULL);
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index ac5d43b164b5..20600e9ea202 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -109,11 +109,9 @@ static inline void dip2vip_cpy(struct vxfs_sb_info *sbi,
set_nlink(inode, vip->vii_nlink);
inode->i_size = vip->vii_size;
- inode->i_atime.tv_sec = vip->vii_atime;
+ inode_set_atime(inode, vip->vii_atime, 0);
inode_set_ctime(inode, vip->vii_ctime, 0);
- inode->i_mtime.tv_sec = vip->vii_mtime;
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
+ inode_set_mtime(inode, vip->vii_mtime, 0);
inode->i_blocks = vip->vii_blocks;
inode->i_generation = vip->vii_gen;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c1af01b2c42d..1767493dffda 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -613,6 +613,24 @@ out_free:
kfree(isw);
}
+static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw,
+ struct list_head *list, int *nr)
+{
+ struct inode *inode;
+
+ list_for_each_entry(inode, list, i_io_list) {
+ if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+ continue;
+
+ isw->inodes[*nr] = inode;
+ (*nr)++;
+
+ if (*nr >= WB_MAX_INODES_PER_ISW - 1)
+ return true;
+ }
+ return false;
+}
+
/**
* cleanup_offline_cgwb - detach associated inodes
* @wb: target wb
@@ -625,7 +643,6 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
{
struct cgroup_subsys_state *memcg_css;
struct inode_switch_wbs_context *isw;
- struct inode *inode;
int nr;
bool restart = false;
@@ -647,17 +664,17 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
nr = 0;
spin_lock(&wb->list_lock);
- list_for_each_entry(inode, &wb->b_attached, i_io_list) {
- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
- continue;
-
- isw->inodes[nr++] = inode;
-
- if (nr >= WB_MAX_INODES_PER_ISW - 1) {
- restart = true;
- break;
- }
- }
+ /*
+ * In addition to the inodes that have completed writeback, also switch
+ * cgwbs for those inodes with only dirty timestamps. Otherwise, those
+ * inodes won't be written back for a long time when lazytime is
+ * enabled, thus pinning the dying cgwbs. It won't break the
+ * bandwidth restrictions, as writeback of inode metadata is not
+ * accounted for.
+ */
+ restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
+ if (!restart)
+ restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
spin_unlock(&wb->list_lock);
/* no attached inodes? bail out */
diff --git a/fs/fs_context.c b/fs/fs_context.c
index a0ad7a0c4680..98589aae5208 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -192,17 +192,19 @@ int vfs_parse_fs_string(struct fs_context *fc, const char *key,
EXPORT_SYMBOL(vfs_parse_fs_string);
/**
- * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+ * vfs_parse_monolithic_sep - Parse key[=val][,key[=val]]* mount data
* @fc: The superblock configuration to fill in.
* @data: The data to parse
+ * @sep: callback for separating next option
*
- * Parse a blob of data that's in key[=val][,key[=val]]* form. This can be
- * called from the ->monolithic_mount_data() fs_context operation.
+ * Parse a blob of data that's in key[=val][,key[=val]]* form with a custom
+ * option separator callback.
*
* Returns 0 on success or the error returned by the ->parse_option() fs_context
* operation on failure.
*/
-int generic_parse_monolithic(struct fs_context *fc, void *data)
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+ char *(*sep)(char **))
{
char *options = data, *key;
int ret = 0;
@@ -214,7 +216,7 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
if (ret)
return ret;
- while ((key = strsep(&options, ",")) != NULL) {
+ while ((key = sep(&options)) != NULL) {
if (*key) {
size_t v_len = 0;
char *value = strchr(key, '=');
@@ -233,6 +235,28 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
return ret;
}
+EXPORT_SYMBOL(vfs_parse_monolithic_sep);
+
+static char *vfs_parse_comma_sep(char **s)
+{
+ return strsep(s, ",");
+}
+
+/**
+ * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+ * @fc: The superblock configuration to fill in.
+ * @data: The data to parse
+ *
+ * Parse a blob of data that's in key[=val][,key[=val]]* form. This can be
+ * called from the ->monolithic_mount_data() fs_context operation.
+ *
+ * Returns 0 on success or the error returned by the ->parse_option() fs_context
+ * operation on failure.
+ */
+int generic_parse_monolithic(struct fs_context *fc, void *data)
+{
+ return vfs_parse_monolithic_sep(fc, data, vfs_parse_comma_sep);
+}
EXPORT_SYMBOL(generic_parse_monolithic);
/**
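vfs_parse_monolithic_sep() above generalizes the old comma-only splitting: a filesystem whose option values may themselves contain the separator can pass its own callback, while generic_parse_monolithic() keeps the plain strsep-on-',' behaviour. A sketch of such a callback, splitting on ',' but honouring backslash-escaped commas; myfs_next_opt() is hypothetical, only the vfs_parse_monolithic_sep() signature comes from the hunk above:

	static char *myfs_next_opt(char **s)
	{
		char *sbegin = *s;
		char *p;

		if (!sbegin)
			return NULL;

		for (p = sbegin; *p; p++) {
			if (*p == '\\' && p[1]) {
				p++;			/* skip the escaped character */
			} else if (*p == ',') {
				*p = '\0';
				*s = p + 1;
				return sbegin;
			}
		}
		*s = NULL;
		return sbegin;
	}

	static int myfs_parse_monolithic(struct fs_context *fc, void *data)
	{
		return vfs_parse_monolithic_sep(fc, data, myfs_next_opt);
	}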
diff --git a/fs/fsopen.c b/fs/fsopen.c
index ce03f6521c88..6593ae518115 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -465,6 +465,7 @@ SYSCALL_DEFINE5(fsconfig,
param.file = fget(aux);
if (!param.file)
goto out_key;
+ param.dirfd = aux;
break;
default:
break;
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index ab62e4624256..284a35006462 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -235,7 +235,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
inode->i_mode = mode;
inode->i_uid = fc->user_id;
inode->i_gid = fc->group_id;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
/* setting ->i_op to NULL is not allowed */
if (iop)
inode->i_op = iop;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index d707e6987da9..d19cbf34c634 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1812,12 +1812,12 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
memset(&outarg, 0, sizeof(outarg));
inarg.valid = FATTR_MTIME;
- inarg.mtime = inode->i_mtime.tv_sec;
- inarg.mtimensec = inode->i_mtime.tv_nsec;
+ inarg.mtime = inode_get_mtime_sec(inode);
+ inarg.mtimensec = inode_get_mtime_nsec(inode);
if (fm->fc->minor >= 23) {
inarg.valid |= FATTR_CTIME;
- inarg.ctime = inode_get_ctime(inode).tv_sec;
- inarg.ctimensec = inode_get_ctime(inode).tv_nsec;
+ inarg.ctime = inode_get_ctime_sec(inode);
+ inarg.ctimensec = inode_get_ctime_nsec(inode);
}
if (ff) {
inarg.valid |= FATTR_FH;
@@ -1956,7 +1956,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
/* the kernel maintains i_mtime locally */
if (trust_local_cmtime) {
if (attr->ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (attr->ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
/* FIXME: clear I_DIRTY_SYNC? */
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index bf0b85d0b95c..6e6e721f421b 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -1284,7 +1284,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
size_t size);
ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size);
int fuse_removexattr(struct inode *inode, const char *name);
-extern const struct xattr_handler *fuse_xattr_handlers[];
+extern const struct xattr_handler * const fuse_xattr_handlers[];
struct posix_acl;
struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 2e4eb7cf26fb..caa8121ad99c 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -188,12 +188,10 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);
- inode->i_atime.tv_sec = attr->atime;
- inode->i_atime.tv_nsec = attr->atimensec;
+ inode_set_atime(inode, attr->atime, attr->atimensec);
/* mtime from server may be stale due to local buffered write */
if (!(cache_mask & STATX_MTIME)) {
- inode->i_mtime.tv_sec = attr->mtime;
- inode->i_mtime.tv_nsec = attr->mtimensec;
+ inode_set_mtime(inode, attr->mtime, attr->mtimensec);
}
if (!(cache_mask & STATX_CTIME)) {
inode_set_ctime(inode, attr->ctime, attr->ctimensec);
@@ -276,12 +274,12 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
attr->size = i_size_read(inode);
if (cache_mask & STATX_MTIME) {
- attr->mtime = inode->i_mtime.tv_sec;
- attr->mtimensec = inode->i_mtime.tv_nsec;
+ attr->mtime = inode_get_mtime_sec(inode);
+ attr->mtimensec = inode_get_mtime_nsec(inode);
}
if (cache_mask & STATX_CTIME) {
- attr->ctime = inode_get_ctime(inode).tv_sec;
- attr->ctimensec = inode_get_ctime(inode).tv_nsec;
+ attr->ctime = inode_get_ctime_sec(inode);
+ attr->ctimensec = inode_get_ctime_nsec(inode);
}
if ((attr_version != 0 && fi->attr_version > attr_version) ||
@@ -290,7 +288,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
return;
}
- old_mtime = inode->i_mtime;
+ old_mtime = inode_get_mtime(inode);
fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask);
oldsize = inode->i_size;
@@ -337,8 +335,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
{
inode->i_mode = attr->mode & S_IFMT;
inode->i_size = attr->size;
- inode->i_mtime.tv_sec = attr->mtime;
- inode->i_mtime.tv_nsec = attr->mtimensec;
+ inode_set_mtime(inode, attr->mtime, attr->mtimensec);
inode_set_ctime(inode, attr->ctime, attr->ctimensec);
if (S_ISREG(inode->i_mode)) {
fuse_init_common(inode);
@@ -1423,17 +1420,19 @@ EXPORT_SYMBOL_GPL(fuse_dev_free);
static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
const struct fuse_inode *fi)
{
+ struct timespec64 atime = inode_get_atime(&fi->inode);
+ struct timespec64 mtime = inode_get_mtime(&fi->inode);
struct timespec64 ctime = inode_get_ctime(&fi->inode);
*attr = (struct fuse_attr){
.ino = fi->inode.i_ino,
.size = fi->inode.i_size,
.blocks = fi->inode.i_blocks,
- .atime = fi->inode.i_atime.tv_sec,
- .mtime = fi->inode.i_mtime.tv_sec,
+ .atime = atime.tv_sec,
+ .mtime = mtime.tv_sec,
.ctime = ctime.tv_sec,
- .atimensec = fi->inode.i_atime.tv_nsec,
- .mtimensec = fi->inode.i_mtime.tv_nsec,
+ .atimensec = atime.tv_nsec,
+ .mtimensec = mtime.tv_nsec,
.ctimensec = ctime.tv_nsec,
.mode = fi->inode.i_mode,
.nlink = fi->inode.i_nlink,
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index 9e6d587b3e67..c66a54d6c7d3 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -476,7 +476,7 @@ retry_locked:
if (!fi->rdc.cached) {
/* Starting cache? Set cache mtime. */
if (!ctx->pos && !fi->rdc.size) {
- fi->rdc.mtime = inode->i_mtime;
+ fi->rdc.mtime = inode_get_mtime(inode);
fi->rdc.iversion = inode_query_iversion(inode);
}
spin_unlock(&fi->rdc.lock);
@@ -488,8 +488,10 @@ retry_locked:
* changed, and reset the cache if so.
*/
if (!ctx->pos) {
+ struct timespec64 mtime = inode_get_mtime(inode);
+
if (inode_peek_iversion(inode) != fi->rdc.iversion ||
- !timespec64_equal(&fi->rdc.mtime, &inode->i_mtime)) {
+ !timespec64_equal(&fi->rdc.mtime, &mtime)) {
fuse_rdc_reset(inode);
goto retry_locked;
}
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index 49c01559580f..5b423fdbb13f 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -209,7 +209,7 @@ static const struct xattr_handler fuse_xattr_handler = {
.set = fuse_xattr_set,
};
-const struct xattr_handler *fuse_xattr_handlers[] = {
+const struct xattr_handler * const fuse_xattr_handlers[] = {
&fuse_xattr_handler,
NULL
};
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index ef7017fb6951..011cd992e0e6 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1386,7 +1386,7 @@ static int trunc_start(struct inode *inode, u64 newsize)
ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
i_size_write(inode, newsize);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_dinode_out(ip, dibh->b_data);
if (journaled)
@@ -1583,7 +1583,7 @@ out_unlock:
/* Every transaction boundary, we rewrite the dinode
to keep its di_blocks current in case of failure. */
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
@@ -1949,7 +1949,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
gfs2_statfs_change(sdp, 0, +btotal, 0);
gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
ip->i_inode.i_gid);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
up_write(&ip->i_rw_mutex);
@@ -1992,7 +1992,7 @@ static int trunc_end(struct gfs2_inode *ip)
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
gfs2_ordered_del_inode(ip);
}
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
gfs2_trans_add_meta(ip->i_gl, dibh);
@@ -2093,7 +2093,7 @@ static int do_grow(struct inode *inode, u64 size)
goto do_end_trans;
truncate_setsize(inode, size);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 1a2afa88f8be..61ddd03ea111 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -130,7 +130,7 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
if (ip->i_inode.i_size < offset + size)
i_size_write(&ip->i_inode, offset + size);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
@@ -227,7 +227,7 @@ out:
if (ip->i_inode.i_size < offset + copied)
i_size_write(&ip->i_inode, offset + copied);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
@@ -1825,7 +1825,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
da->bh = NULL;
brelse(bh);
ip->i_entries++;
- ip->i_inode.i_mtime = tv;
+ inode_set_mtime_to_ts(&ip->i_inode, tv);
if (S_ISDIR(nip->i_inode.i_mode))
inc_nlink(&ip->i_inode);
mark_inode_dirty(inode);
@@ -1911,7 +1911,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
if (!dip->i_entries)
gfs2_consist_inode(dip);
dip->i_entries--;
- dip->i_inode.i_mtime = tv;
+ inode_set_mtime_to_ts(&dip->i_inode, tv);
if (d_is_dir(dentry))
drop_nlink(&dip->i_inode);
mark_inode_dirty(&dip->i_inode);
@@ -1952,7 +1952,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
dent->de_type = cpu_to_be16(new_type);
brelse(bh);
- dip->i_inode.i_mtime = inode_set_ctime_current(&dip->i_inode);
+ inode_set_mtime_to_ts(&dip->i_inode, inode_set_ctime_current(&dip->i_inode));
mark_inode_dirty_sync(&dip->i_inode);
return 0;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4a280be229a6..3772a5d9e85c 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -2719,16 +2719,19 @@ static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
for(;; i->fd++) {
struct inode *inode;
- i->file = task_lookup_next_fd_rcu(i->task, &i->fd);
+ i->file = task_lookup_next_fdget_rcu(i->task, &i->fd);
if (!i->file) {
i->fd = 0;
break;
}
+
inode = file_inode(i->file);
- if (inode->i_sb != i->sb)
- continue;
- if (get_file_rcu(i->file))
+ if (inode->i_sb == i->sb)
break;
+
+ rcu_read_unlock();
+ fput(i->file);
+ rcu_read_lock();
}
rcu_read_unlock();
return i->file;
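
Editor's note: the glock.c hunk swaps task_lookup_next_fd_rcu() plus get_file_rcu() for task_lookup_next_fdget_rcu(), which already returns the file with a reference held. Files belonging to a different superblock therefore have to be released again, and the hunk does so with the RCU read lock dropped around the fput(). A sketch of that release step, assuming only the calls visible in the hunk (the helper name is made up):

#include <linux/fs.h>
#include <linux/rcupdate.h>

/* Mirror of the pattern above: leave the RCU read section around the
 * fput() of an unwanted file, then re-enter it before the scan continues.
 */
static void example_drop_file_outside_rcu(struct file *file)
{
	rcu_read_unlock();
	fput(file);
	rcu_read_lock();
}
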
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index f41ca89d216b..e7d334c277a1 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -403,7 +403,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
const struct gfs2_dinode *str = buf;
- struct timespec64 atime;
+ struct timespec64 atime, iatime;
u16 height, depth;
umode_t mode = be32_to_cpu(str->di_mode);
struct inode *inode = &ip->i_inode;
@@ -433,10 +433,11 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
atime.tv_sec = be64_to_cpu(str->di_atime);
atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
- if (timespec64_compare(&inode->i_atime, &atime) < 0)
- inode->i_atime = atime;
- inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
- inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
+ iatime = inode_get_atime(inode);
+ if (timespec64_compare(&iatime, &atime) < 0)
+ inode_set_atime_to_ts(inode, atime);
+ inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
+ be32_to_cpu(str->di_mtime_nsec));
inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
be32_to_cpu(str->di_ctime_nsec));
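
Editor's note: since i_atime can no longer be compared in place, gfs2_dinode_in() copies the in-core value into a local struct timespec64 first. A compact sketch of the same compare-then-update step (hypothetical helper, on-disk value supplied by the caller):

#include <linux/fs.h>
#include <linux/time64.h>

/* Only advance the cached atime if the on-disk value is newer. */
static void example_merge_disk_atime(struct inode *inode,
				     struct timespec64 disk_atime)
{
	struct timespec64 cur = inode_get_atime(inode);

	if (timespec64_compare(&cur, &disk_atime) < 0)
		inode_set_atime_to_ts(inode, disk_atime);
}
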
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 0eac04507904..7fe77bc771e5 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -185,8 +185,9 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
- inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
- inode->i_atime.tv_nsec = 0;
+ inode_set_atime(inode,
+ 1LL << (8 * sizeof(inode_get_atime_sec(inode)) - 1),
+ 0);
glock_set_object(ip->i_gl, ip);
@@ -696,7 +697,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
inode->i_rdev = dev;
inode->i_size = size;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
munge_mode_uid_gid(dip, inode);
check_and_update_goal(dip);
ip->i_goal = dip->i_goal;
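
Editor's note: call sites that used to write inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode) now call simple_inode_init_ts(). In terms of the accessors used elsewhere in this series, the call amounts to the following (an illustrative sketch, not the libfs implementation quoted verbatim):

#include <linux/fs.h>

/* Stamp ctime with the current time and mirror it into atime and
 * mtime, which is exactly what the replaced triple assignment did.
 */
static struct timespec64 example_init_timestamps(struct inode *inode)
{
	struct timespec64 ts = inode_set_ctime_current(inode);

	inode_set_atime_to_ts(inode, ts);
	inode_set_mtime_to_ts(inode, ts);
	return ts;
}
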
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 171b2713d2e5..d9854aece15b 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -886,7 +886,7 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
size = loc + sizeof(struct gfs2_quota);
if (size > inode->i_size)
i_size_write(inode, size);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
set_bit(QDF_REFRESH, &qd->qd_flags);
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 02d93da21b2b..52a878fa7139 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -410,9 +410,9 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_nlink = cpu_to_be32(inode->i_nlink);
str->di_size = cpu_to_be64(i_size_read(inode));
str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
- str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
- str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
- str->di_ctime = cpu_to_be64(inode_get_ctime(inode).tv_sec);
+ str->di_atime = cpu_to_be64(inode_get_atime_sec(inode));
+ str->di_mtime = cpu_to_be64(inode_get_mtime_sec(inode));
+ str->di_ctime = cpu_to_be64(inode_get_ctime_sec(inode));
str->di_goal_meta = cpu_to_be64(ip->i_goal);
str->di_goal_data = cpu_to_be64(ip->i_goal);
@@ -427,9 +427,9 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_entries = cpu_to_be32(ip->i_entries);
str->di_eattr = cpu_to_be64(ip->i_eattr);
- str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
- str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
- str->di_ctime_nsec = cpu_to_be32(inode_get_ctime(inode).tv_nsec);
+ str->di_atime_nsec = cpu_to_be32(inode_get_atime_nsec(inode));
+ str->di_mtime_nsec = cpu_to_be32(inode_get_mtime_nsec(inode));
+ str->di_ctime_nsec = cpu_to_be32(inode_get_ctime_nsec(inode));
}
/**
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index ab9c83106932..b4ddf6244586 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -60,8 +60,8 @@ extern const struct export_operations gfs2_export_ops;
extern const struct super_operations gfs2_super_ops;
extern const struct dentry_operations gfs2_dops;
-extern const struct xattr_handler *gfs2_xattr_handlers_max[];
-extern const struct xattr_handler **gfs2_xattr_handlers_min;
+extern const struct xattr_handler * const gfs2_xattr_handlers_max[];
+extern const struct xattr_handler * const *gfs2_xattr_handlers_min;
#endif /* __SUPER_DOT_H__ */
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 4fea70c0fe3d..79d5c5559512 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1494,7 +1494,7 @@ static const struct xattr_handler gfs2_xattr_trusted_handler = {
.set = gfs2_xattr_set,
};
-const struct xattr_handler *gfs2_xattr_handlers_max[] = {
+const struct xattr_handler * const gfs2_xattr_handlers_max[] = {
/* GFS2_FS_FORMAT_MAX */
&gfs2_xattr_trusted_handler,
@@ -1504,4 +1504,4 @@ const struct xattr_handler *gfs2_xattr_handlers_max[] = {
NULL,
};
-const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
+const struct xattr_handler * const *gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
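
Editor's note: the xattr changes in gfs2, hfs, hfsplus, jffs2, jfs and kernfs are the same constification throughout; the handler tables become arrays of const pointers so both the array and its entries can live in read-only data. The pattern, with placeholder handler names:

#include <linux/xattr.h>

static const struct xattr_handler example_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	/* .get and .set callbacks omitted in this sketch */
};

/* Both the array and the pointers it holds are now const. */
static const struct xattr_handler * const example_xattr_handlers[] = {
	&example_user_handler,
	NULL,
};
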
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index 6341bb248247..f8395cdd1adf 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -146,7 +146,7 @@ static const struct xattr_handler hfs_type_handler = {
.set = hfs_xattr_set,
};
-const struct xattr_handler *hfs_xattr_handlers[] = {
+const struct xattr_handler * const hfs_xattr_handlers[] = {
&hfs_creator_handler,
&hfs_type_handler,
NULL
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 632c226a3972..d63880e7d9d6 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -133,7 +133,7 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
goto err1;
dir->i_size++;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
hfs_find_exit(&fd);
return 0;
@@ -269,7 +269,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str)
}
dir->i_size--;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
res = 0;
out:
@@ -337,7 +337,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
if (err)
goto out;
dst_dir->i_size++;
- dst_dir->i_mtime = inode_set_ctime_current(dst_dir);
+ inode_set_mtime_to_ts(dst_dir, inode_set_ctime_current(dst_dir));
mark_inode_dirty(dst_dir);
/* finally remove the old entry */
@@ -349,7 +349,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
if (err)
goto out;
src_dir->i_size--;
- src_dir->i_mtime = inode_set_ctime_current(src_dir);
+ inode_set_mtime_to_ts(src_dir, inode_set_ctime_current(src_dir));
mark_inode_dirty(src_dir);
type = entry.type;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 49d02524e667..b5a6ad5df357 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -215,7 +215,7 @@ extern void hfs_evict_inode(struct inode *);
extern void hfs_delete_inode(struct inode *);
/* attr.c */
-extern const struct xattr_handler *hfs_xattr_handlers[];
+extern const struct xattr_handler * const hfs_xattr_handlers[];
/* mdb.c */
extern int hfs_mdb_get(struct super_block *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index ee349b72cfb3..a7bc4690a780 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -200,7 +200,7 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
HFS_I(inode)->flags = 0;
HFS_I(inode)->rsrc_inode = NULL;
HFS_I(inode)->fs_blocks = 0;
@@ -355,8 +355,8 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_mode |= S_IWUGO;
inode->i_mode &= ~hsb->s_file_umask;
inode->i_mode |= S_IFREG;
- inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode,
- hfs_m_to_utime(rec->file.MdDat));
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->file.MdDat))));
inode->i_op = &hfs_file_inode_operations;
inode->i_fop = &hfs_file_operations;
inode->i_mapping->a_ops = &hfs_aops;
@@ -366,8 +366,8 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
HFS_I(inode)->fs_blocks = 0;
inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
- inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode,
- hfs_m_to_utime(rec->dir.MdDat));
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->dir.MdDat))));
inode->i_op = &hfs_dir_inode_operations;
inode->i_fop = &hfs_dir_operations;
break;
@@ -474,7 +474,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
}
- rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime);
+ rec.dir.MdDat = hfs_u_to_mtime(inode_get_mtime(inode));
rec.dir.Val = cpu_to_be16(inode->i_size - 2);
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
@@ -502,7 +502,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
else
rec.file.Flags |= HFS_FIL_LOCK;
hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
- rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime);
+ rec.file.MdDat = hfs_u_to_mtime(inode_get_mtime(inode));
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
@@ -654,7 +654,7 @@ int hfs_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
truncate_setsize(inode, attr->ia_size);
hfs_file_truncate(inode);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
setattr_copy(&nop_mnt_idmap, inode, attr);
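
Editor's note: hfs (and isofs and hpfs further down) set all three timestamps to the same on-disk value by nesting the setters, which works because each *_to_ts() helper returns the timespec64 it stored. A sketch of the chained form that replaces the old a = b = c = ... assignment:

#include <linux/fs.h>

/* Fan one timestamp out to ctime, atime and mtime in a single
 * expression, as the converted hunks above do.
 */
static void example_set_all_timestamps(struct inode *inode,
				       struct timespec64 ts)
{
	inode_set_mtime_to_ts(inode,
		inode_set_atime_to_ts(inode,
			inode_set_ctime_to_ts(inode, ts)));
}
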
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index dc27d418fbcd..76fa02e3835b 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -28,11 +28,13 @@ static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
/* fix up inode on a timezone change */
diff = sys_tz.tz_minuteswest * 60 - HFS_I(inode)->tz_secondswest;
if (diff) {
- struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 ts = inode_get_ctime(inode);
- inode_set_ctime(inode, ctime.tv_sec + diff, ctime.tv_nsec);
- inode->i_atime.tv_sec += diff;
- inode->i_mtime.tv_sec += diff;
+ inode_set_ctime(inode, ts.tv_sec + diff, ts.tv_nsec);
+ ts = inode_get_atime(inode);
+ inode_set_atime(inode, ts.tv_sec + diff, ts.tv_nsec);
+ ts = inode_get_mtime(inode);
+ inode_set_mtime(inode, ts.tv_sec + diff, ts.tv_nsec);
HFS_I(inode)->tz_secondswest += diff;
}
return 1;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index e71ae2537eaa..1995bafee839 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -312,7 +312,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
dir->i_size++;
if (S_ISDIR(inode->i_mode))
hfsplus_subfolders_inc(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
hfs_find_exit(&fd);
@@ -417,7 +417,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
if (type == HFSPLUS_FILE || type == HFSPLUS_FOLDER) {
@@ -494,7 +494,7 @@ int hfsplus_rename_cat(u32 cnid,
dst_dir->i_size++;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_inc(dst_dir);
- dst_dir->i_mtime = inode_set_ctime_current(dst_dir);
+ inode_set_mtime_to_ts(dst_dir, inode_set_ctime_current(dst_dir));
/* finally remove the old entry */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
@@ -511,7 +511,7 @@ int hfsplus_rename_cat(u32 cnid,
src_dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(src_dir);
- src_dir->i_mtime = inode_set_ctime_current(src_dir);
+ inode_set_mtime_to_ts(src_dir, inode_set_ctime_current(src_dir));
/* remove old thread entry */
hfsplus_cat_build_key_with_cnid(sb, src_fd.search_key, cnid);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index c65c8c4b03dd..702a0663b1d8 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -267,7 +267,7 @@ static int hfsplus_setattr(struct mnt_idmap *idmap,
}
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}
setattr_copy(&nop_mnt_idmap, inode, attr);
@@ -392,7 +392,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
inode->i_ino = sbi->next_cnid++;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
hip = HFSPLUS_I(inode);
INIT_LIST_HEAD(&hip->open_dir_list);
@@ -521,8 +521,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
hfsplus_get_perms(inode, &folder->permissions, 1);
set_nlink(inode, 1);
inode->i_size = 2 + be32_to_cpu(folder->valence);
- inode->i_atime = hfsp_mt2ut(folder->access_date);
- inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
+ inode_set_atime_to_ts(inode, hfsp_mt2ut(folder->access_date));
+ inode_set_mtime_to_ts(inode,
+ hfsp_mt2ut(folder->content_mod_date));
inode_set_ctime_to_ts(inode,
hfsp_mt2ut(folder->attribute_mod_date));
HFSPLUS_I(inode)->create_date = folder->create_date;
@@ -563,8 +564,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
init_special_inode(inode, inode->i_mode,
be32_to_cpu(file->permissions.dev));
}
- inode->i_atime = hfsp_mt2ut(file->access_date);
- inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
+ inode_set_atime_to_ts(inode, hfsp_mt2ut(file->access_date));
+ inode_set_mtime_to_ts(inode,
+ hfsp_mt2ut(file->content_mod_date));
inode_set_ctime_to_ts(inode,
hfsp_mt2ut(file->attribute_mod_date));
HFSPLUS_I(inode)->create_date = file->create_date;
@@ -609,8 +611,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
sizeof(struct hfsplus_cat_folder));
/* simple node checks? */
hfsplus_cat_set_perms(inode, &folder->permissions);
- folder->access_date = hfsp_ut2mt(inode->i_atime);
- folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
+ folder->access_date = hfsp_ut2mt(inode_get_atime(inode));
+ folder->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
folder->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
folder->valence = cpu_to_be32(inode->i_size - 2);
if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
@@ -644,8 +646,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
else
file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
- file->access_date = hfsp_ut2mt(inode->i_atime);
- file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
+ file->access_date = hfsp_ut2mt(inode_get_atime(inode));
+ file->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
file->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 58021e73c00b..9c9ff6b8c6f7 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -13,7 +13,7 @@
static int hfsplus_removexattr(struct inode *inode, const char *name);
-const struct xattr_handler *hfsplus_xattr_handlers[] = {
+const struct xattr_handler * const hfsplus_xattr_handlers[] = {
&hfsplus_xattr_osx_handler,
&hfsplus_xattr_user_handler,
&hfsplus_xattr_trusted_handler,
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index d14e362b3eba..15cc55e41410 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -17,7 +17,7 @@ extern const struct xattr_handler hfsplus_xattr_user_handler;
extern const struct xattr_handler hfsplus_xattr_trusted_handler;
extern const struct xattr_handler hfsplus_xattr_security_handler;
-extern const struct xattr_handler *hfsplus_xattr_handlers[];
+extern const struct xattr_handler * const hfsplus_xattr_handlers[];
int __hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index dc5a5cea5fae..ea87f24c6c3f 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -513,10 +513,14 @@ static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st)
set_nlink(ino, st->nlink);
i_uid_write(ino, st->uid);
i_gid_write(ino, st->gid);
- ino->i_atime =
- (struct timespec64){ st->atime.tv_sec, st->atime.tv_nsec };
- ino->i_mtime =
- (struct timespec64){ st->mtime.tv_sec, st->mtime.tv_nsec };
+ inode_set_atime_to_ts(ino, (struct timespec64){
+ st->atime.tv_sec,
+ st->atime.tv_nsec,
+ });
+ inode_set_mtime_to_ts(ino, (struct timespec64){
+ st->mtime.tv_sec,
+ st->mtime.tv_nsec,
+ });
inode_set_ctime(ino, st->ctime.tv_sec, st->ctime.tv_nsec);
ino->i_size = st->size;
ino->i_blocks = st->blocks;
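
Editor's note: the series uses two equivalent spellings of the setters, both visible around here; hostfs builds a struct timespec64 and passes it to inode_set_atime_to_ts(), while isofs, hpfs and jfs use the sec/nsec variant inode_set_atime(). A small sketch of both forms (the timestamp value is made up):

#include <linux/fs.h>

static void example_two_setter_forms(struct inode *inode)
{
	struct timespec64 ts = { .tv_sec = 1700000000, .tv_nsec = 0 };

	/* sec/nsec variant ... */
	inode_set_atime(inode, ts.tv_sec, ts.tv_nsec);
	/* ... and the timespec64 variant; both store the same value. */
	inode_set_atime_to_ts(inode, ts);
}
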
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index f36566d61215..49dd585c2b17 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -277,14 +277,16 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
* inode.
*/
- if (!inode_get_ctime(result).tv_sec) {
+ if (!inode_get_ctime_sec(result)) {
time64_t csec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date));
inode_set_ctime(result, csec ? csec : 1, 0);
- result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date));
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date));
- result->i_atime.tv_nsec = 0;
+ inode_set_mtime(result,
+ local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date)),
+ 0);
+ inode_set_atime(result,
+ local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date)),
+ 0);
hpfs_result->i_ea_size = le32_to_cpu(de->ea_size);
if (!hpfs_result->i_ea_mode && de->read_only)
result->i_mode &= ~0222;
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 479166378bae..a59e8fa630db 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -37,8 +37,8 @@ void hpfs_init_inode(struct inode *i)
hpfs_inode->i_dirty = 0;
inode_set_ctime(i, 0, 0);
- i->i_mtime.tv_sec = i->i_mtime.tv_nsec = 0;
- i->i_atime.tv_sec = i->i_atime.tv_nsec = 0;
+ inode_set_mtime(i, 0, 0);
+ inode_set_atime(i, 0, 0);
}
void hpfs_read_inode(struct inode *i)
@@ -230,9 +230,9 @@ void hpfs_write_inode_nolock(struct inode *i)
}
hpfs_write_inode_ea(i, fnode);
if (de) {
- de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
- de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
- de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime(i).tv_sec));
+ de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_mtime_sec(i)));
+ de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_atime_sec(i)));
+ de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime_sec(i)));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size);
hpfs_mark_4buffers_dirty(&qbh);
@@ -240,9 +240,9 @@ void hpfs_write_inode_nolock(struct inode *i)
}
if (S_ISDIR(i->i_mode)) {
if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) {
- de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
- de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
- de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime(i).tv_sec));
+ de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_mtime_sec(i)));
+ de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_atime_sec(i)));
+ de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime_sec(i)));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0);
de->file_size = cpu_to_le32(0);
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index f4eb8d6f5989..9184b4584b01 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -12,10 +12,10 @@
static void hpfs_update_directory_times(struct inode *dir)
{
time64_t t = local_to_gmt(dir->i_sb, local_get_seconds(dir->i_sb));
- if (t == dir->i_mtime.tv_sec &&
- t == inode_get_ctime(dir).tv_sec)
+ if (t == inode_get_mtime_sec(dir) &&
+ t == inode_get_ctime_sec(dir))
return;
- dir->i_mtime = inode_set_ctime(dir, t, 0);
+ inode_set_mtime_to_ts(dir, inode_set_ctime(dir, t, 0));
hpfs_write_inode_nolock(dir);
}
@@ -58,8 +58,8 @@ static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
hpfs_i(result)->i_dno = dno;
- result->i_mtime = result->i_atime =
- inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
result->i_mode |= S_IFDIR;
result->i_op = &hpfs_dir_iops;
@@ -164,8 +164,8 @@ static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir,
result->i_fop = &hpfs_file_ops;
set_nlink(result, 1);
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_mtime = result->i_atime =
- inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
if (dee.read_only)
result->i_mode &= ~0222;
@@ -245,8 +245,8 @@ static int hpfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_mtime = result->i_atime =
- inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
result->i_uid = current_fsuid();
result->i_gid = current_fsgid();
@@ -319,8 +319,8 @@ static int hpfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
result->i_ino = fno;
hpfs_init_inode(result);
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_mtime = result->i_atime =
- inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
result->i_mode = S_IFLNK | 0777;
result->i_uid = current_fsuid();
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 758a51564124..6b0ba3c1efba 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -725,10 +725,12 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
if (!de)
hpfs_error(s, "unable to find root dir");
else {
- root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date));
- root->i_atime.tv_nsec = 0;
- root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date));
- root->i_mtime.tv_nsec = 0;
+ inode_set_atime(root,
+ local_to_gmt(s, le32_to_cpu(de->read_date)),
+ 0);
+ inode_set_mtime(root,
+ local_to_gmt(s, le32_to_cpu(de->write_date)),
+ 0);
inode_set_ctime(root,
local_to_gmt(s, le32_to_cpu(de->creation_date)),
0);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 316c4cebd3f3..da217eaba102 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -980,7 +980,7 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
inode->i_mode = S_IFDIR | ctx->mode;
inode->i_uid = ctx->uid;
inode->i_gid = ctx->gid;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -1024,7 +1024,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
&hugetlbfs_i_mmap_rwsem_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mapping->private_data = resv_map;
info->seals = F_SEAL_SEAL;
switch (mode & S_IFMT) {
@@ -1067,7 +1067,7 @@ static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
if (!inode)
return -ENOSPC;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
d_instantiate(dentry, inode);
dget(dentry);/* Extra count - pin the dentry in core */
return 0;
@@ -1099,7 +1099,7 @@ static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
if (!inode)
return -ENOSPC;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
d_tmpfile(file, inode);
return finish_open_simple(file, 0);
}
@@ -1121,7 +1121,7 @@ static int hugetlbfs_symlink(struct mnt_idmap *idmap,
} else
iput(inode);
}
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
return error;
}
diff --git a/fs/init.c b/fs/init.c
index 9684406a8416..e9387b6c4f30 100644
--- a/fs/init.c
+++ b/fs/init.c
@@ -153,8 +153,7 @@ int __init init_mknod(const char *filename, umode_t mode, unsigned int dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
+ mode = mode_strip_umask(d_inode(path.dentry), mode);
error = security_path_mknod(&path, dentry, mode, dev);
if (!error)
error = vfs_mknod(mnt_idmap(path.mnt), path.dentry->d_inode,
@@ -229,8 +228,7 @@ int __init init_mkdir(const char *pathname, umode_t mode)
dentry = kern_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
+ mode = mode_strip_umask(d_inode(path.dentry), mode);
error = security_path_mkdir(&path, dentry, mode);
if (!error)
error = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
diff --git a/fs/inode.c b/fs/inode.c
index 84bc3c76e5cc..4f8984b97df0 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1837,27 +1837,29 @@ EXPORT_SYMBOL(bmap);
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
struct timespec64 now)
{
- struct timespec64 ctime;
+ struct timespec64 atime, mtime, ctime;
if (!(mnt->mnt_flags & MNT_RELATIME))
return 1;
/*
* Is mtime younger than or equal to atime? If yes, update atime:
*/
- if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
+ atime = inode_get_atime(inode);
+ mtime = inode_get_mtime(inode);
+ if (timespec64_compare(&mtime, &atime) >= 0)
return 1;
/*
* Is ctime younger than or equal to atime? If yes, update atime:
*/
ctime = inode_get_ctime(inode);
- if (timespec64_compare(&ctime, &inode->i_atime) >= 0)
+ if (timespec64_compare(&ctime, &atime) >= 0)
return 1;
/*
* Is the previous atime value older than a day? If yes,
* update atime:
*/
- if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
+ if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
return 1;
/*
* Good, we can skip the atime update:
@@ -1888,12 +1890,13 @@ int inode_update_timestamps(struct inode *inode, int flags)
if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
now = inode_set_ctime_current(inode);
if (!timespec64_equal(&now, &ctime))
updated |= S_CTIME;
- if (!timespec64_equal(&now, &inode->i_mtime)) {
- inode->i_mtime = now;
+ if (!timespec64_equal(&now, &mtime)) {
+ inode_set_mtime_to_ts(inode, now);
updated |= S_MTIME;
}
if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
@@ -1903,8 +1906,10 @@ int inode_update_timestamps(struct inode *inode, int flags)
}
if (flags & S_ATIME) {
- if (!timespec64_equal(&now, &inode->i_atime)) {
- inode->i_atime = now;
+ struct timespec64 atime = inode_get_atime(inode);
+
+ if (!timespec64_equal(&now, &atime)) {
+ inode_set_atime_to_ts(inode, now);
updated |= S_ATIME;
}
}
@@ -1963,7 +1968,7 @@ EXPORT_SYMBOL(inode_update_time);
bool atime_needs_update(const struct path *path, struct inode *inode)
{
struct vfsmount *mnt = path->mnt;
- struct timespec64 now;
+ struct timespec64 now, atime;
if (inode->i_flags & S_NOATIME)
return false;
@@ -1989,7 +1994,8 @@ bool atime_needs_update(const struct path *path, struct inode *inode)
if (!relatime_need_update(mnt, inode, now))
return false;
- if (timespec64_equal(&inode->i_atime, &now))
+ atime = inode_get_atime(inode);
+ if (timespec64_equal(&atime, &now))
return false;
return true;
@@ -2006,7 +2012,7 @@ void touch_atime(const struct path *path)
if (!sb_start_write_trylock(inode->i_sb))
return;
- if (__mnt_want_write(mnt) != 0)
+ if (mnt_get_write_access(mnt) != 0)
goto skip_update;
/*
* File systems can error out when updating inodes if they need to
@@ -2018,7 +2024,7 @@ void touch_atime(const struct path *path)
* of the fs read only, e.g. subvolumes in Btrfs.
*/
inode_update_time(inode, S_ATIME);
- __mnt_drop_write(mnt);
+ mnt_put_write_access(mnt);
skip_update:
sb_end_write(inode->i_sb);
}
@@ -2106,17 +2112,18 @@ static int inode_needs_update_time(struct inode *inode)
{
int sync_it = 0;
struct timespec64 now = current_time(inode);
- struct timespec64 ctime;
+ struct timespec64 ts;
/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return 0;
- if (!timespec64_equal(&inode->i_mtime, &now))
+ ts = inode_get_mtime(inode);
+ if (!timespec64_equal(&ts, &now))
sync_it = S_MTIME;
- ctime = inode_get_ctime(inode);
- if (!timespec64_equal(&ctime, &now))
+ ts = inode_get_ctime(inode);
+ if (!timespec64_equal(&ts, &now))
sync_it |= S_CTIME;
if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
@@ -2131,9 +2138,9 @@ static int __file_update_time(struct file *file, int sync_mode)
struct inode *inode = file_inode(file);
/* try to update time settings */
- if (!__mnt_want_write_file(file)) {
+ if (!mnt_get_write_access_file(file)) {
ret = inode_update_time(inode, sync_mode);
- __mnt_drop_write_file(file);
+ mnt_put_write_access_file(file);
}
return ret;
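
Editor's note: relatime_need_update(), inode_update_timestamps() and atime_needs_update() now snapshot the timestamps into locals via inode_get_atime()/inode_get_mtime(), since timespec64_compare() needs addresses and &inode->i_atime is no longer usable. A compact sketch of the relatime rule as restructured above (hypothetical helper, same three conditions):

#include <linux/fs.h>
#include <linux/time64.h>

/* atime wants an update if it is older than mtime or ctime, or if the
 * previous value is more than a day old; otherwise it can be skipped.
 */
static bool example_relatime_wants_update(struct inode *inode,
					  struct timespec64 now)
{
	struct timespec64 atime = inode_get_atime(inode);
	struct timespec64 mtime = inode_get_mtime(inode);
	struct timespec64 ctime = inode_get_ctime(inode);

	return timespec64_compare(&mtime, &atime) >= 0 ||
	       timespec64_compare(&ctime, &atime) >= 0 ||
	       (long)(now.tv_sec - atime.tv_sec) >= 24 * 60 * 60;
}
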
diff --git a/fs/internal.h b/fs/internal.h
index d64ae03998cc..58e43341aebf 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -73,8 +73,8 @@ extern int sb_prepare_remount_readonly(struct super_block *);
extern void __init mnt_init(void);
-extern int __mnt_want_write_file(struct file *);
-extern void __mnt_drop_write_file(struct file *);
+int mnt_get_write_access_file(struct file *file);
+void mnt_put_write_access_file(struct file *file);
extern void dissolve_on_fput(struct vfsmount *);
extern bool may_mount(void);
@@ -94,14 +94,22 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
struct file *alloc_empty_file(int flags, const struct cred *cred);
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred);
struct file *alloc_empty_backing_file(int flags, const struct cred *cred);
+void release_empty_file(struct file *f);
+
+static inline void file_put_write_access(struct file *file)
+{
+ put_write_access(file->f_inode);
+ mnt_put_write_access(file->f_path.mnt);
+ if (unlikely(file->f_mode & FMODE_BACKING))
+ mnt_put_write_access(backing_file_user_path(file)->mnt);
+}
static inline void put_file_access(struct file *file)
{
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
i_readcount_dec(file->f_inode);
} else if (file->f_mode & FMODE_WRITER) {
- put_write_access(file->f_inode);
- __mnt_drop_write(file->f_path.mnt);
+ file_put_write_access(file);
}
}
@@ -130,9 +138,9 @@ static inline void sb_start_ro_state_change(struct super_block *sb)
* mnt_is_readonly() making sure if mnt_is_readonly() sees SB_RDONLY
* cleared, it will see s_readonly_remount set.
* For RW->RO transition, the barrier pairs with the barrier in
- * __mnt_want_write() before the mnt_is_readonly() check. The barrier
- * makes sure if __mnt_want_write() sees MNT_WRITE_HOLD already
- * cleared, it will see s_readonly_remount set.
+ * mnt_get_write_access() before the mnt_is_readonly() check.
+ * The barrier makes sure if mnt_get_write_access() sees MNT_WRITE_HOLD
+ * already cleared, it will see s_readonly_remount set.
*/
smp_wmb();
}
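
Editor's note: touch_atime() and __file_update_time() above switch to the renamed mnt_get_write_access()/mnt_put_write_access() pair (previously __mnt_want_write()/__mnt_drop_write()); the behaviour is unchanged, only the names move to a get/put convention. A sketch of the pairing around a timestamp update, assuming the declarations are reachable through linux/mount.h (the caller is hypothetical):

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>

static void example_update_atime(const struct path *path)
{
	struct inode *inode = d_inode(path->dentry);

	/* Renamed get/put pair around the update, as in touch_atime(). */
	if (mnt_get_write_access(path->mnt) != 0)
		return;
	inode_update_time(inode, S_ATIME);
	mnt_put_write_access(path->mnt);
}
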
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 5db54ca29a35..2bc0aa23fde3 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -881,8 +881,10 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
size_t bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
+ bytes = iov_iter_count(i);
+retry:
offset = pos & (chunk - 1);
- bytes = min(chunk - offset, iov_iter_count(i));
+ bytes = min(chunk - offset, bytes);
status = balance_dirty_pages_ratelimited_flags(mapping,
bdp_flags);
if (unlikely(status))
@@ -933,10 +935,12 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
* halfway through, might be a race with munmap,
* might be severe memory pressure.
*/
- if (copied)
- bytes = copied;
if (chunk > PAGE_SIZE)
chunk /= 2;
+ if (copied) {
+ bytes = copied;
+ goto retry;
+ }
} else {
pos += status;
written += status;
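
Editor's note: the iomap change restructures the short-copy path; instead of clamping bytes from the iterator on the next pass, a short but non-zero copy now jumps back and retries the same position with exactly the amount that did copy, while oversized chunks are still halved first. A toy model of that retry shape in plain C (not iomap code, just the control flow):

#include <stddef.h>

/* Keep retrying the same offset, shrinking the attempt to whatever
 * actually copied, until a full or zero-length copy ends the loop.
 */
static size_t example_copy_with_retry(size_t requested,
				      size_t (*try_copy)(size_t bytes))
{
	size_t bytes = requested;

	for (;;) {
		size_t copied = try_copy(bytes);

		if (copied == bytes || copied == 0)
			return copied;
		bytes = copied;
	}
}
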
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 2ee21286ac8f..3e4d53e26f94 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1422,8 +1422,8 @@ static int isofs_read_inode(struct inode *inode, int relocated)
inode->i_ino, de->flags[-high_sierra]);
}
#endif
- inode->i_mtime = inode->i_atime =
- inode_set_ctime(inode, iso_date(de->date, high_sierra), 0);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime(inode, iso_date(de->date, high_sierra), 0)));
ei->i_first_extent = (isonum_733(de->extent) +
isonum_711(de->ext_attr_length));
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 348783a70f57..d6c17ad69dee 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -426,16 +426,14 @@ repeat:
0);
}
if (rr->u.TF.flags & TF_MODIFY) {
- inode->i_mtime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_mtime.tv_nsec = 0;
+ inode_set_mtime(inode,
+ iso_date(rr->u.TF.times[cnt++].time, 0),
+ 0);
}
if (rr->u.TF.flags & TF_ACCESS) {
- inode->i_atime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_atime.tv_nsec = 0;
+ inode_set_atime(inode,
+ iso_date(rr->u.TF.times[cnt++].time, 0),
+ 0);
}
if (rr->u.TF.flags & TF_ATTRIBUTES) {
inode_set_ctime(inode,
@@ -531,9 +529,9 @@ repeat:
inode->i_rdev = reloc->i_rdev;
inode->i_size = reloc->i_size;
inode->i_blocks = reloc->i_blocks;
- inode->i_atime = reloc->i_atime;
+ inode_set_atime_to_ts(inode, inode_get_atime(reloc));
inode_set_ctime_to_ts(inode, inode_get_ctime(reloc));
- inode->i_mtime = reloc->i_mtime;
+ inode_set_mtime_to_ts(inode, inode_get_mtime(reloc));
iput(reloc);
break;
#ifdef CONFIG_ZISOFS
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 091ab0eaabbe..2b2938970da3 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -204,8 +204,8 @@ static int jffs2_create(struct mnt_idmap *idmap, struct inode *dir_i,
if (ret)
goto fail;
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
- ITIME(je32_to_cpu(ri->ctime)));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(ri->ctime))));
jffs2_free_raw_inode(ri);
@@ -238,7 +238,8 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
if (dead_f->inocache)
set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink);
if (!ret)
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(now)));
return ret;
}
/***********************************************************************/
@@ -272,7 +273,8 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink);
mutex_unlock(&f->sem);
d_instantiate(dentry, d_inode(old_dentry));
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(now)));
ihold(d_inode(old_dentry));
}
return ret;
@@ -423,8 +425,8 @@ static int jffs2_symlink (struct mnt_idmap *idmap, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
- ITIME(je32_to_cpu(rd->mctime)));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))));
jffs2_free_raw_dirent(rd);
@@ -568,8 +570,8 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
- ITIME(je32_to_cpu(rd->mctime)));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))));
inc_nlink(dir_i);
jffs2_free_raw_dirent(rd);
@@ -610,7 +612,8 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, f, now);
if (!ret) {
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(now)));
clear_nlink(d_inode(dentry));
drop_nlink(dir_i);
}
@@ -746,8 +749,8 @@ static int jffs2_mknod (struct mnt_idmap *idmap, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
- ITIME(je32_to_cpu(rd->mctime)));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))));
jffs2_free_raw_dirent(rd);
@@ -868,16 +871,18 @@ static int jffs2_rename (struct mnt_idmap *idmap,
* caller won't do it on its own since we are returning an error.
*/
d_invalidate(new_dentry);
- new_dir_i->i_mtime = inode_set_ctime_to_ts(new_dir_i,
- ITIME(now));
+ inode_set_mtime_to_ts(new_dir_i,
+ inode_set_ctime_to_ts(new_dir_i, ITIME(now)));
return ret;
}
if (d_is_dir(old_dentry))
drop_nlink(old_dir_i);
- old_dir_i->i_mtime = inode_set_ctime_to_ts(old_dir_i, ITIME(now));
- new_dir_i->i_mtime = inode_set_ctime_to_ts(new_dir_i, ITIME(now));
+ inode_set_mtime_to_ts(old_dir_i,
+ inode_set_ctime_to_ts(old_dir_i, ITIME(now)));
+ inode_set_mtime_to_ts(new_dir_i,
+ inode_set_ctime_to_ts(new_dir_i, ITIME(now)));
return 0;
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 11c66793960e..62ea76da7fdf 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -317,8 +317,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
inode->i_size = pos + writtenlen;
inode->i_blocks = (inode->i_size + 511) >> 9;
- inode->i_mtime = inode_set_ctime_to_ts(inode,
- ITIME(je32_to_cpu(ri->ctime)));
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime))));
}
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 0403efab4089..d175cccb7c55 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -113,8 +113,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
- ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
- ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
+ ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode_get_atime(inode)));
+ ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode_get_mtime(inode)));
ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode_get_ctime(inode)));
ri->offset = cpu_to_je32(0);
@@ -147,9 +147,9 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
return PTR_ERR(new_metadata);
}
/* It worked. Update the inode */
- inode->i_atime = ITIME(je32_to_cpu(ri->atime));
+ inode_set_atime_to_ts(inode, ITIME(je32_to_cpu(ri->atime)));
inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime)));
- inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
+ inode_set_mtime_to_ts(inode, ITIME(je32_to_cpu(ri->mtime)));
inode->i_mode = jemode_to_cpu(ri->mode);
i_uid_write(inode, je16_to_cpu(ri->uid));
i_gid_write(inode, je16_to_cpu(ri->gid));
@@ -282,8 +282,8 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
i_uid_write(inode, je16_to_cpu(latest_node.uid));
i_gid_write(inode, je16_to_cpu(latest_node.gid));
inode->i_size = je32_to_cpu(latest_node.isize);
- inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
- inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
+ inode_set_atime_to_ts(inode, ITIME(je32_to_cpu(latest_node.atime)));
+ inode_set_mtime_to_ts(inode, ITIME(je32_to_cpu(latest_node.mtime)));
inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(latest_node.ctime)));
set_nlink(inode, f->inocache->pino_nlink);
@@ -386,8 +386,8 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
iattr.ia_mode = inode->i_mode;
iattr.ia_uid = inode->i_uid;
iattr.ia_gid = inode->i_gid;
- iattr.ia_atime = inode->i_atime;
- iattr.ia_mtime = inode->i_mtime;
+ iattr.ia_atime = inode_get_atime(inode);
+ iattr.ia_mtime = inode_get_mtime(inode);
iattr.ia_ctime = inode_get_ctime(inode);
jffs2_do_setattr(inode, &iattr);
@@ -475,8 +475,8 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
inode->i_mode = jemode_to_cpu(ri->mode);
i_gid_write(inode, je16_to_cpu(ri->gid));
i_uid_write(inode, je16_to_cpu(ri->uid));
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
- ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
+ simple_inode_init_ts(inode);
+ ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode_get_mtime(inode)));
inode->i_blocks = 0;
inode->i_size = 0;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 50727a1ff931..86ab014a349c 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -36,8 +36,8 @@ struct kvec;
#define JFFS2_NOW() JFFS2_CLAMP_TIME(ktime_get_real_seconds())
#define I_SEC(tv) JFFS2_CLAMP_TIME((tv).tv_sec)
#define JFFS2_F_I_CTIME(f) I_SEC(inode_get_ctime(OFNI_EDONI_2SFFJ(f)))
-#define JFFS2_F_I_MTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_mtime)
-#define JFFS2_F_I_ATIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_atime)
+#define JFFS2_F_I_MTIME(f) I_SEC(inode_get_mtime(OFNI_EDONI_2SFFJ(f)))
+#define JFFS2_F_I_ATIME(f) I_SEC(inode_get_atime(OFNI_EDONI_2SFFJ(f)))
#define sleep_on_spinunlock(wq, s) \
do { \
DECLARE_WAITQUEUE(__wait, current); \
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 3b6bdc9a49e1..00224f3a8d6e 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -920,7 +920,7 @@ struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
* do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags)
* is an implementation of setxattr handler on jffs2.
* -------------------------------------------------- */
-const struct xattr_handler *jffs2_xattr_handlers[] = {
+const struct xattr_handler * const jffs2_xattr_handlers[] = {
&jffs2_user_xattr_handler,
#ifdef CONFIG_JFFS2_FS_SECURITY
&jffs2_security_xattr_handler,
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 1b5030a3349d..7e7de093ec0a 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -94,7 +94,7 @@ extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname
extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
const char *buffer, size_t size, int flags);
-extern const struct xattr_handler *jffs2_xattr_handlers[];
+extern const struct xattr_handler * const jffs2_xattr_handlers[];
extern const struct xattr_handler jffs2_user_xattr_handler;
extern const struct xattr_handler jffs2_trusted_xattr_handler;
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 920d58a1566b..1a6b5921d17a 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -393,7 +393,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length)
break;
}
- ip->i_mtime = inode_set_ctime_current(ip);
+ inode_set_mtime_to_ts(ip, inode_set_ctime_current(ip));
mark_inode_dirty(ip);
txCommit(tid, 1, &ip, 0);
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 923a58422c46..8e87264e56ce 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -3061,10 +3061,10 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
}
ip->i_size = le64_to_cpu(dip->di_size);
- ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
- ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
- ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec);
- ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
+ inode_set_atime(ip, le32_to_cpu(dip->di_atime.tv_sec),
+ le32_to_cpu(dip->di_atime.tv_nsec));
+ inode_set_mtime(ip, le32_to_cpu(dip->di_mtime.tv_sec),
+ le32_to_cpu(dip->di_mtime.tv_nsec));
inode_set_ctime(ip, le32_to_cpu(dip->di_ctime.tv_sec),
le32_to_cpu(dip->di_ctime.tv_nsec));
ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
@@ -3138,12 +3138,12 @@ static void copy_to_dinode(struct dinode * dip, struct inode *ip)
else /* Leave the original permissions alone */
dip->di_mode = cpu_to_le32(jfs_ip->mode2);
- dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
- dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
- dip->di_ctime.tv_sec = cpu_to_le32(inode_get_ctime(ip).tv_sec);
- dip->di_ctime.tv_nsec = cpu_to_le32(inode_get_ctime(ip).tv_nsec);
- dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec);
- dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec);
+ dip->di_atime.tv_sec = cpu_to_le32(inode_get_atime_sec(ip));
+ dip->di_atime.tv_nsec = cpu_to_le32(inode_get_atime_nsec(ip));
+ dip->di_ctime.tv_sec = cpu_to_le32(inode_get_ctime_sec(ip));
+ dip->di_ctime.tv_nsec = cpu_to_le32(inode_get_ctime_nsec(ip));
+ dip->di_mtime.tv_sec = cpu_to_le32(inode_get_mtime_sec(ip));
+ dip->di_mtime.tv_nsec = cpu_to_le32(inode_get_mtime_nsec(ip));
dip->di_ixpxd = jfs_ip->ixpxd; /* in-memory pxd's are little-endian */
dip->di_acl = jfs_ip->acl; /* as are dxd's */
dip->di_ea = jfs_ip->ea;
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 87594efa7f7c..f10f295d1502 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -97,8 +97,8 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
jfs_inode->mode2 |= inode->i_mode;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- jfs_inode->otime = inode_get_ctime(inode).tv_sec;
+ simple_inode_init_ts(inode);
+ jfs_inode->otime = inode_get_ctime_sec(inode);
inode->i_generation = JFS_SBI(sb)->gengen++;
jfs_inode->cflag = 0;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index e855b8fde76c..cb6d1fda66a7 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1058,7 +1058,7 @@ void jfs_syncpt(struct jfs_log *log, int hard_sync)
int lmLogOpen(struct super_block *sb)
{
int rc;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct jfs_log *log;
struct jfs_sb_info *sbi = JFS_SBI(sb);
@@ -1070,7 +1070,7 @@ int lmLogOpen(struct super_block *sb)
mutex_lock(&jfs_log_mutex);
list_for_each_entry(log, &jfs_external_logs, journal_list) {
- if (log->bdev->bd_dev == sbi->logdev) {
+ if (log->bdev_handle->bdev->bd_dev == sbi->logdev) {
if (!uuid_equal(&log->uuid, &sbi->loguuid)) {
jfs_warn("wrong uuid on JFS journal");
mutex_unlock(&jfs_log_mutex);
@@ -1100,14 +1100,14 @@ int lmLogOpen(struct super_block *sb)
* file systems to log may have n-to-1 relationship;
*/
- bdev = blkdev_get_by_dev(sbi->logdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- log, NULL);
- if (IS_ERR(bdev)) {
- rc = PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_dev(sbi->logdev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, log, NULL);
+ if (IS_ERR(bdev_handle)) {
+ rc = PTR_ERR(bdev_handle);
goto free;
}
- log->bdev = bdev;
+ log->bdev_handle = bdev_handle;
uuid_copy(&log->uuid, &sbi->loguuid);
/*
@@ -1141,7 +1141,7 @@ journal_found:
lbmLogShutdown(log);
close: /* close external log device */
- blkdev_put(bdev, log);
+ bdev_release(bdev_handle);
free: /* free log descriptor */
mutex_unlock(&jfs_log_mutex);
@@ -1162,7 +1162,7 @@ static int open_inline_log(struct super_block *sb)
init_waitqueue_head(&log->syncwait);
set_bit(log_INLINELOG, &log->flag);
- log->bdev = sb->s_bdev;
+ log->bdev_handle = sb->s_bdev_handle;
log->base = addressPXD(&JFS_SBI(sb)->logpxd);
log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
(L2LOGPSIZE - sb->s_blocksize_bits);
@@ -1436,7 +1436,7 @@ int lmLogClose(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_log *log = sbi->log;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
int rc = 0;
jfs_info("lmLogClose: log:0x%p", log);
@@ -1482,10 +1482,10 @@ int lmLogClose(struct super_block *sb)
* external log as separate logical volume
*/
list_del(&log->journal_list);
- bdev = log->bdev;
+ bdev_handle = log->bdev_handle;
rc = lmLogShutdown(log);
- blkdev_put(bdev, log);
+ bdev_release(bdev_handle);
kfree(log);
@@ -1972,7 +1972,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bp->l_flag |= lbmREAD;
- bio = bio_alloc(log->bdev, 1, REQ_OP_READ, GFP_NOFS);
+ bio = bio_alloc(log->bdev_handle->bdev, 1, REQ_OP_READ, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
__bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
@@ -2110,10 +2110,15 @@ static void lbmStartIO(struct lbuf * bp)
{
struct bio *bio;
struct jfs_log *log = bp->l_log;
+ struct block_device *bdev = NULL;
jfs_info("lbmStartIO");
- bio = bio_alloc(log->bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
+ if (!log->no_integrity)
+ bdev = log->bdev_handle->bdev;
+
+ bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC,
+ GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
__bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
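
Editor's note: the jfs log changes move from blkdev_get_by_dev()/blkdev_put() to the bdev_handle API; bdev_open_by_dev() returns a struct bdev_handle, the underlying block_device is reached through handle->bdev, and bdev_release() closes it. A sketch of the open/close pairing as used above (helper names and the holder argument are placeholders):

#include <linux/blkdev.h>
#include <linux/err.h>

static struct bdev_handle *example_open_log_device(dev_t devt, void *holder)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_dev(devt, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  holder, NULL);
	if (IS_ERR(handle))
		return handle;

	/* The block_device itself now lives behind handle->bdev,
	 * e.g. handle->bdev->bd_dev as in updateSuper() below.
	 */
	return handle;
}

static void example_close_log_device(struct bdev_handle *handle)
{
	bdev_release(handle);
}
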
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 805877ce5020..84aa2d253907 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -356,7 +356,7 @@ struct jfs_log {
* before writing syncpt.
*/
struct list_head journal_list; /* Global list */
- struct block_device *bdev; /* 4: log lv pointer */
+ struct bdev_handle *bdev_handle; /* 4: log lv pointer */
int serial; /* 4: log mount serial number */
s64 base; /* @8: log extent address (inline log ) */
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index b83aae56a1f2..415eb65a36ff 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -430,7 +430,8 @@ int updateSuper(struct super_block *sb, uint state)
if (state == FM_MOUNT) {
/* record log's dev_t and mount serial number */
- j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
+ j_sb->s_logdev = cpu_to_le32(
+ new_encode_dev(sbi->log->bdev_handle->bdev->bd_dev));
j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
} else if (state == FM_CLEAN) {
/*
diff --git a/fs/jfs/jfs_xattr.h b/fs/jfs/jfs_xattr.h
index 0d33816d251d..ec67d8554d2c 100644
--- a/fs/jfs/jfs_xattr.h
+++ b/fs/jfs/jfs_xattr.h
@@ -46,7 +46,7 @@ extern int __jfs_setxattr(tid_t, struct inode *, const char *, const void *,
extern ssize_t __jfs_getxattr(struct inode *, const char *, void *, size_t);
extern ssize_t jfs_listxattr(struct dentry *, char *, size_t);
-extern const struct xattr_handler *jfs_xattr_handlers[];
+extern const struct xattr_handler * const jfs_xattr_handlers[];
#ifdef CONFIG_JFS_SECURITY
extern int jfs_init_security(tid_t, struct inode *, struct inode *,
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 57d7a4300210..d68a4e6ac345 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -149,7 +149,7 @@ static int jfs_create(struct mnt_idmap *idmap, struct inode *dip,
mark_inode_dirty(ip);
- dip->i_mtime = inode_set_ctime_current(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
mark_inode_dirty(dip);
@@ -284,7 +284,7 @@ static int jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
/* update parent directory inode */
inc_nlink(dip); /* for '..' from child directory */
- dip->i_mtime = inode_set_ctime_current(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
mark_inode_dirty(dip);
rc = txCommit(tid, 2, &iplist[0], 0);
@@ -390,7 +390,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
/* update parent directory's link count corresponding
* to ".." entry of the target directory deleted
*/
- dip->i_mtime = inode_set_ctime_current(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
inode_dec_link_count(dip);
/*
@@ -512,7 +512,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
ASSERT(ip->i_nlink);
- dip->i_mtime = inode_set_ctime_to_ts(dip, inode_set_ctime_current(ip));
+ inode_set_mtime_to_ts(dip,
+ inode_set_ctime_to_ts(dip, inode_set_ctime_current(ip)));
mark_inode_dirty(dip);
/* update target's inode */
@@ -828,7 +829,7 @@ static int jfs_link(struct dentry *old_dentry,
/* update object inode */
inc_nlink(ip); /* for new link */
inode_set_ctime_current(ip);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
ihold(ip);
@@ -1028,7 +1029,7 @@ static int jfs_symlink(struct mnt_idmap *idmap, struct inode *dip,
mark_inode_dirty(ip);
- dip->i_mtime = inode_set_ctime_current(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
mark_inode_dirty(dip);
/*
* commit update of parent directory and link object
@@ -1271,7 +1272,7 @@ static int jfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
inode_set_ctime_current(old_ip);
mark_inode_dirty(old_ip);
- new_dir->i_mtime = inode_set_ctime_current(new_dir);
+ inode_set_mtime_to_ts(new_dir, inode_set_ctime_current(new_dir));
mark_inode_dirty(new_dir);
/* Build list of inodes modified by this transaction */
@@ -1283,7 +1284,8 @@ static int jfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (old_dir != new_dir) {
iplist[ipcount++] = new_dir;
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ inode_set_mtime_to_ts(old_dir,
+ inode_set_ctime_current(old_dir));
mark_inode_dirty(old_dir);
}
@@ -1416,7 +1418,7 @@ static int jfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
mark_inode_dirty(ip);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 2e2f7f6d36a0..966826c394ee 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -818,7 +818,7 @@ out:
}
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
inode_unlock(inode);
return len - towrite;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 8577ad494e05..0fb7afac298e 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -985,7 +985,7 @@ static const struct xattr_handler jfs_trusted_xattr_handler = {
.set = jfs_xattr_set,
};
-const struct xattr_handler *jfs_xattr_handlers[] = {
+const struct xattr_handler * const jfs_xattr_handlers[] = {
&jfs_os2_xattr_handler,
&jfs_user_xattr_handler,
&jfs_security_xattr_handler,
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 922719a343a7..b83054da68b3 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -151,7 +151,7 @@ ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
{
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
static inline void set_inode_attr(struct inode *inode,
@@ -159,8 +159,8 @@ static inline void set_inode_attr(struct inode *inode,
{
inode->i_uid = attrs->ia_uid;
inode->i_gid = attrs->ia_gid;
- inode->i_atime = attrs->ia_atime;
- inode->i_mtime = attrs->ia_mtime;
+ inode_set_atime_to_ts(inode, attrs->ia_atime);
+ inode_set_mtime_to_ts(inode, attrs->ia_mtime);
inode_set_ctime_to_ts(inode, attrs->ia_ctime);
}
@@ -445,7 +445,7 @@ static const struct xattr_handler kernfs_user_xattr_handler = {
.set = kernfs_vfs_user_xattr_set,
};
-const struct xattr_handler *kernfs_xattr_handlers[] = {
+const struct xattr_handler * const kernfs_xattr_handlers[] = {
&kernfs_trusted_xattr_handler,
&kernfs_security_xattr_handler,
&kernfs_user_xattr_handler,
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index a9b854cdfdb5..237f2764b941 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -127,7 +127,7 @@ extern struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
/*
* inode.c
*/
-extern const struct xattr_handler *kernfs_xattr_handlers[];
+extern const struct xattr_handler * const kernfs_xattr_handlers[];
void kernfs_evict_inode(struct inode *inode);
int kernfs_iop_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask);
diff --git a/fs/libfs.c b/fs/libfs.c
index 37f2d34ee090..abe2b5a40ba1 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -541,7 +541,8 @@ void simple_recursive_removal(struct dentry *dentry,
dput(victim); // unpin it
}
if (victim == dentry) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
if (d_is_dir(dentry))
drop_nlink(inode);
inode_unlock(inode);
@@ -582,7 +583,7 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
*/
root->i_ino = 1;
root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
- root->i_atime = root->i_mtime = inode_set_ctime_current(root);
+ simple_inode_init_ts(root);
s->s_root = d_make_root(root);
if (!s->s_root)
return -ENOMEM;
@@ -638,8 +639,8 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
{
struct inode *inode = d_inode(old_dentry);
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
inc_nlink(inode);
ihold(inode);
dget(dentry);
@@ -673,8 +674,8 @@ int simple_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
drop_nlink(inode);
dput(dentry);
return 0;
@@ -709,9 +710,10 @@ void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
{
struct inode *newino = d_inode(new_dentry);
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
if (new_dir != old_dir)
- new_dir->i_mtime = inode_set_ctime_current(new_dir);
+ inode_set_mtime_to_ts(new_dir,
+ inode_set_ctime_current(new_dir));
inode_set_ctime_current(d_inode(old_dentry));
if (newino)
inode_set_ctime_current(newino);
@@ -926,7 +928,7 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
*/
inode->i_ino = 1;
inode->i_mode = S_IFDIR | 0755;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
set_nlink(inode, 2);
@@ -952,7 +954,7 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
goto out;
}
inode->i_mode = S_IFREG | files->mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_fop = files->ops;
inode->i_ino = i;
d_add(dentry, inode);
@@ -1520,7 +1522,7 @@ struct inode *alloc_anon_inode(struct super_block *s)
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_flags |= S_PRIVATE;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
return inode;
}
EXPORT_SYMBOL(alloc_anon_inode);
@@ -1912,3 +1914,20 @@ ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
return direct_written + buffered_written;
}
EXPORT_SYMBOL_GPL(direct_write_fallback);
+
+/**
+ * simple_inode_init_ts - initialize the timestamps for a new inode
+ * @inode: inode to be initialized
+ *
+ * When a new inode is created, most filesystems set the timestamps to the
+ * current time. This helper does that and returns the timestamp that was set.
+ */
+struct timespec64 simple_inode_init_ts(struct inode *inode)
+{
+ struct timespec64 ts = inode_set_ctime_current(inode);
+
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
+ return ts;
+}
+EXPORT_SYMBOL(simple_inode_init_ts);
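The new helper covers the common "set all three timestamps to now" pattern at inode creation. A minimal sketch of how a simple in-memory filesystem could use it follows; example_get_inode() is a hypothetical name, and only new_inode(), get_next_ino() and simple_inode_init_ts() are real interfaces here.

/* Sketch: a ramfs-style inode constructor using simple_inode_init_ts(). */
static struct inode *example_get_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	/* atime, mtime and ctime all start out at the current time. */
	simple_inode_init_ts(inode);
	return inode;
}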
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 6579948070a4..81be07c1d3d1 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -24,7 +24,6 @@
#include <linux/uio.h>
#include <linux/smp.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/inetdevice.h>
@@ -135,11 +134,11 @@ lockd(void *vrqstp)
* The main request loop. We don't terminate until the last
* NFS mount or NFS daemon has gone away.
*/
- while (!kthread_should_stop()) {
+ while (!svc_thread_should_stop(rqstp)) {
/* update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nlm_max_connections;
- nlmsvc_retry_blocked();
+ nlmsvc_retry_blocked(rqstp);
svc_recv(rqstp);
}
if (nlmsvc_ops)
@@ -373,7 +372,9 @@ static void lockd_put(void)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
+ svc_get(nlmsvc_serv);
svc_set_num_threads(nlmsvc_serv, NULL, 0);
+ svc_put(nlmsvc_serv);
timer_delete_sync(&nlmsvc_retry);
nlmsvc_serv = NULL;
dprintk("lockd_down: service destroyed\n");
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 43aeba9de55c..2dc10900ad1c 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -30,7 +30,6 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
-#include <linux/kthread.h>
#include <linux/exportfs.h>
#define NLMDBG_FACILITY NLMDBG_SVCLOCK
@@ -481,9 +480,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock, int wait,
struct nlm_cookie *cookie, int reclaim)
{
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct inode *inode = nlmsvc_file_inode(file);
-#endif
struct nlm_block *block = NULL;
int error;
int mode;
@@ -497,7 +494,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
(long long)lock->fl.fl_end,
wait);
- if (nlmsvc_file_file(file)->f_op->lock) {
+ if (!exportfs_lock_op_is_async(inode->i_sb->s_export_op)) {
async_block = wait;
wait = 0;
}
@@ -543,6 +540,25 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
}
+ spin_lock(&nlm_blocked_lock);
+ /*
+ * If this request duplicates an already pending lock request,
+ * return nlm_lck_blocked without calling vfs_lock_file() again.
+ * Otherwise we would have two pending requests on the underlying
+ * ->lock() implementation but only one nlm_block to be granted
+ * by lm_grant().
+ */
+ if (exportfs_lock_op_is_async(inode->i_sb->s_export_op) &&
+ !list_empty(&block->b_list)) {
+ spin_unlock(&nlm_blocked_lock);
+ ret = nlm_lck_blocked;
+ goto out;
+ }
+
+ /* Append to list of blocked */
+ nlmsvc_insert_block_locked(block, NLM_NEVER);
+ spin_unlock(&nlm_blocked_lock);
+
if (!wait)
lock->fl.fl_flags &= ~FL_SLEEP;
mode = lock_to_openmode(&lock->fl);
@@ -552,16 +568,12 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
dprintk("lockd: vfs_lock_file returned %d\n", error);
switch (error) {
case 0:
+ nlmsvc_remove_block(block);
ret = nlm_granted;
goto out;
case -EAGAIN:
- /*
- * If this is a blocking request for an
- * already pending lock request then we need
- * to put it back on lockd's block list
- */
- if (wait)
- break;
+ if (!wait)
+ nlmsvc_remove_block(block);
ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
goto out;
case FILE_LOCK_DEFERRED:
@@ -572,17 +584,16 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
ret = nlmsvc_defer_lock_rqst(rqstp, block);
goto out;
case -EDEADLK:
+ nlmsvc_remove_block(block);
ret = nlm_deadlock;
goto out;
default: /* includes ENOLCK */
+ nlmsvc_remove_block(block);
ret = nlm_lck_denied_nolocks;
goto out;
}
ret = nlm_lck_blocked;
-
- /* Append to list of blocked */
- nlmsvc_insert_block(block, NLM_NEVER);
out:
mutex_unlock(&file->f_mutex);
nlmsvc_release_block(block);
@@ -1020,13 +1031,13 @@ retry_deferred_block(struct nlm_block *block)
* be retransmitted.
*/
void
-nlmsvc_retry_blocked(void)
+nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
struct nlm_block *block;
spin_lock(&nlm_blocked_lock);
- while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
+ while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
if (block->b_when == NLM_NEVER)
diff --git a/fs/locks.c b/fs/locks.c
index 76ad05f8070a..d4e49a990a8d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2264,11 +2264,13 @@ out:
* To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
* locks, the ->lock() interface may return asynchronously, before the lock has
* been granted or denied by the underlying filesystem, if (and only if)
- * lm_grant is set. Callers expecting ->lock() to return asynchronously
- * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
- * the request is for a blocking lock. When ->lock() does return asynchronously,
- * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
- * request completes.
+ * lm_grant is set. Additionally, EXPORT_OP_ASYNC_LOCK must be set in the
+ * export_operations flags.
+ *
+ * Callers expecting ->lock() to return asynchronously will only use F_SETLK,
+ * not F_SETLKW; they will set FL_SLEEP if (and only if) the request is for a
+ * blocking lock. When ->lock() does return asynchronously, it must return
+ * FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock request completes.
* If the request is for non-blocking lock the file system should return
* FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
* with the result. If the request timed out the callback routine will return a
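The reworded comment spells out the asynchronous ->lock() contract: the filesystem must both provide lm_grant-capable locking and advertise EXPORT_OP_ASYNC_LOCK. A minimal sketch of what such an exporting filesystem might declare, with all example_* names hypothetical:

/* Sketch: advertising asynchronous lock support to lockd/nfsd. */
static const struct export_operations example_export_ops = {
	.flags	= EXPORT_OP_ASYNC_LOCK,
	/* fh_to_dentry() and the other export methods go here */
};

static int example_lock(struct file *file, int cmd, struct file_lock *fl)
{
	/*
	 * Queue the request and return immediately; once the backend
	 * decides, report the outcome via fl->fl_lmops->lm_grant().
	 */
	return FILE_LOCK_DEFERRED;
}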
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 25c08fbfcb9d..7da66ca184f4 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -251,7 +251,7 @@ struct inode *minix_new_inode(const struct inode *dir, umode_t mode)
}
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = j;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
insert_inode_hash(inode);
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 20f23e6e58ad..62c313fc9a49 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -281,7 +281,7 @@ got_it:
de->inode = inode->i_ino;
}
dir_commit_chunk(page, pos, sbi->s_dirsize);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = minix_handle_dirsync(dir);
out_put:
@@ -313,7 +313,7 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
else
de->inode = 0;
dir_commit_chunk(page, pos, len);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return minix_handle_dirsync(inode);
}
@@ -436,7 +436,7 @@ int minix_set_link(struct minix_dir_entry *de, struct page *page,
else
de->inode = inode->i_ino;
dir_commit_chunk(page, pos, sbi->s_dirsize);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
return minix_handle_dirsync(dir);
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index df575473c1cc..f8af6c3ae336 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -501,7 +501,8 @@ static struct inode *V1_minix_iget(struct inode *inode)
i_gid_write(inode, raw_inode->i_gid);
set_nlink(inode, raw_inode->i_nlinks);
inode->i_size = raw_inode->i_size;
- inode->i_mtime = inode->i_atime = inode_set_ctime(inode, raw_inode->i_time, 0);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime(inode, raw_inode->i_time, 0)));
inode->i_blocks = 0;
for (i = 0; i < 9; i++)
minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
@@ -538,11 +539,9 @@ static struct inode *V2_minix_iget(struct inode *inode)
i_gid_write(inode, raw_inode->i_gid);
set_nlink(inode, raw_inode->i_nlinks);
inode->i_size = raw_inode->i_size;
- inode->i_mtime.tv_sec = raw_inode->i_mtime;
- inode->i_atime.tv_sec = raw_inode->i_atime;
+ inode_set_mtime(inode, raw_inode->i_mtime, 0);
+ inode_set_atime(inode, raw_inode->i_atime, 0);
inode_set_ctime(inode, raw_inode->i_ctime, 0);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
inode->i_blocks = 0;
for (i = 0; i < 10; i++)
minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
@@ -589,7 +588,7 @@ static struct buffer_head * V1_minix_update_inode(struct inode * inode)
raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
raw_inode->i_nlinks = inode->i_nlink;
raw_inode->i_size = inode->i_size;
- raw_inode->i_time = inode->i_mtime.tv_sec;
+ raw_inode->i_time = inode_get_mtime_sec(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
else for (i = 0; i < 9; i++)
@@ -616,9 +615,9 @@ static struct buffer_head * V2_minix_update_inode(struct inode * inode)
raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
raw_inode->i_nlinks = inode->i_nlink;
raw_inode->i_size = inode->i_size;
- raw_inode->i_mtime = inode->i_mtime.tv_sec;
- raw_inode->i_atime = inode->i_atime.tv_sec;
- raw_inode->i_ctime = inode_get_ctime(inode).tv_sec;
+ raw_inode->i_mtime = inode_get_mtime_sec(inode);
+ raw_inode->i_atime = inode_get_atime_sec(inode);
+ raw_inode->i_ctime = inode_get_ctime_sec(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
else for (i = 0; i < 10; i++)
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
index ce18ae37c29d..dad131e30c05 100644
--- a/fs/minix/itree_common.c
+++ b/fs/minix/itree_common.c
@@ -350,7 +350,7 @@ do_indirects:
}
first_whole++;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
}
diff --git a/fs/namei.c b/fs/namei.c
index 567ee547492b..71c13b2990b4 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -188,7 +188,7 @@ getname_flags(const char __user *filename, int flags, int *empty)
}
}
- result->refcnt = 1;
+ atomic_set(&result->refcnt, 1);
/* The empty path is special. */
if (unlikely(!len)) {
if (empty)
@@ -249,7 +249,7 @@ getname_kernel(const char * filename)
memcpy((char *)result->name, filename, len);
result->uptr = NULL;
result->aname = NULL;
- result->refcnt = 1;
+ atomic_set(&result->refcnt, 1);
audit_getname(result);
return result;
@@ -261,9 +261,10 @@ void putname(struct filename *name)
if (IS_ERR(name))
return;
- BUG_ON(name->refcnt <= 0);
+ if (WARN_ON_ONCE(!atomic_read(&name->refcnt)))
+ return;
- if (--name->refcnt > 0)
+ if (!atomic_dec_and_test(&name->refcnt))
return;
if (name->name != name->iname) {
@@ -3104,25 +3105,6 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
EXPORT_SYMBOL(unlock_rename);
/**
- * mode_strip_umask - handle vfs umask stripping
- * @dir: parent directory of the new inode
- * @mode: mode of the new inode to be created in @dir
- *
- * Umask stripping depends on whether or not the filesystem supports POSIX
- * ACLs. If the filesystem doesn't support it umask stripping is done directly
- * in here. If the filesystem does support POSIX ACLs umask stripping is
- * deferred until the filesystem calls posix_acl_create().
- *
- * Returns: mode
- */
-static inline umode_t mode_strip_umask(const struct inode *dir, umode_t mode)
-{
- if (!IS_POSIXACL(dir))
- mode &= ~current_umask();
- return mode;
-}
-
-/**
* vfs_prepare_mode - prepare the mode to be used for a new inode
* @idmap: idmap of the mount the inode was found from
* @dir: parent directory of the new inode
@@ -3535,7 +3517,8 @@ static const char *open_last_lookups(struct nameidata *nd,
if (likely(dentry))
goto finish_lookup;
- BUG_ON(nd->flags & LOOKUP_RCU);
+ if (WARN_ON_ONCE(nd->flags & LOOKUP_RCU))
+ return ERR_PTR(-ECHILD);
} else {
/* create side of things */
if (nd->flags & LOOKUP_RCU) {
@@ -3802,7 +3785,10 @@ static struct file *path_openat(struct nameidata *nd,
WARN_ON(1);
error = -EINVAL;
}
- fput(file);
+ if (unlikely(file->f_mode & FMODE_OPENED))
+ fput(file);
+ else
+ release_empty_file(file);
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
@@ -4386,11 +4372,9 @@ retry_deleg:
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
- if (last.name[last.len])
+ if (last.name[last.len] || d_is_negative(dentry))
goto slashes;
inode = dentry->d_inode;
- if (d_is_negative(dentry))
- goto slashes;
ihold(inode);
error = security_path_unlink(&path, dentry);
if (error)
diff --git a/fs/namespace.c b/fs/namespace.c
index e157efc54023..6bde71735efa 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -330,16 +330,16 @@ static int mnt_is_readonly(struct vfsmount *mnt)
* can determine when writes are able to occur to a filesystem.
*/
/**
- * __mnt_want_write - get write access to a mount without freeze protection
+ * mnt_get_write_access - get write access to a mount without freeze protection
* @m: the mount on which to take a write
*
* This tells the low-level filesystem that a write is about to be performed to
* it, and makes sure that writes are allowed (mnt it read-write) before
* returning success. This operation does not protect against filesystem being
- * frozen. When the write operation is finished, __mnt_drop_write() must be
+ * frozen. When the write operation is finished, mnt_put_write_access() must be
* called. This is effectively a refcount.
*/
-int __mnt_want_write(struct vfsmount *m)
+int mnt_get_write_access(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
int ret = 0;
@@ -386,6 +386,7 @@ int __mnt_want_write(struct vfsmount *m)
return ret;
}
+EXPORT_SYMBOL_GPL(mnt_get_write_access);
/**
* mnt_want_write - get write access to a mount
@@ -401,7 +402,7 @@ int mnt_want_write(struct vfsmount *m)
int ret;
sb_start_write(m->mnt_sb);
- ret = __mnt_want_write(m);
+ ret = mnt_get_write_access(m);
if (ret)
sb_end_write(m->mnt_sb);
return ret;
@@ -409,15 +410,15 @@ int mnt_want_write(struct vfsmount *m)
EXPORT_SYMBOL_GPL(mnt_want_write);
/**
- * __mnt_want_write_file - get write access to a file's mount
+ * mnt_get_write_access_file - get write access to a file's mount
* @file: the file who's mount on which to take a write
*
- * This is like __mnt_want_write, but if the file is already open for writing it
+ * This is like mnt_get_write_access, but if @file is already open for writing, it
* skips incrementing mnt_writers (since the open file already has a reference)
* and instead only does the check for emergency r/o remounts. This must be
- * paired with __mnt_drop_write_file.
+ * paired with mnt_put_write_access_file.
*/
-int __mnt_want_write_file(struct file *file)
+int mnt_get_write_access_file(struct file *file)
{
if (file->f_mode & FMODE_WRITER) {
/*
@@ -428,7 +429,7 @@ int __mnt_want_write_file(struct file *file)
return -EROFS;
return 0;
}
- return __mnt_want_write(file->f_path.mnt);
+ return mnt_get_write_access(file->f_path.mnt);
}
/**
@@ -445,7 +446,7 @@ int mnt_want_write_file(struct file *file)
int ret;
sb_start_write(file_inode(file)->i_sb);
- ret = __mnt_want_write_file(file);
+ ret = mnt_get_write_access_file(file);
if (ret)
sb_end_write(file_inode(file)->i_sb);
return ret;
@@ -453,19 +454,20 @@ int mnt_want_write_file(struct file *file)
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
- * __mnt_drop_write - give up write access to a mount
+ * mnt_put_write_access - give up write access to a mount
* @mnt: the mount on which to give up write access
*
* Tells the low-level filesystem that we are done
* performing writes to it. Must be matched with
- * __mnt_want_write() call above.
+ * mnt_get_write_access() call above.
*/
-void __mnt_drop_write(struct vfsmount *mnt)
+void mnt_put_write_access(struct vfsmount *mnt)
{
preempt_disable();
mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
+EXPORT_SYMBOL_GPL(mnt_put_write_access);
/**
* mnt_drop_write - give up write access to a mount
@@ -477,20 +479,20 @@ void __mnt_drop_write(struct vfsmount *mnt)
*/
void mnt_drop_write(struct vfsmount *mnt)
{
- __mnt_drop_write(mnt);
+ mnt_put_write_access(mnt);
sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
-void __mnt_drop_write_file(struct file *file)
+void mnt_put_write_access_file(struct file *file)
{
if (!(file->f_mode & FMODE_WRITER))
- __mnt_drop_write(file->f_path.mnt);
+ mnt_put_write_access(file->f_path.mnt);
}
void mnt_drop_write_file(struct file *file)
{
- __mnt_drop_write_file(file);
+ mnt_put_write_access_file(file);
sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
@@ -1344,9 +1346,9 @@ void mntput(struct vfsmount *mnt)
{
if (mnt) {
struct mount *m = real_mount(mnt);
- /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
+ /* avoid cacheline pingpong */
if (unlikely(m->mnt_expiry_mark))
- m->mnt_expiry_mark = 0;
+ WRITE_ONCE(m->mnt_expiry_mark, 0);
mntput_no_expire(m);
}
}
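The rename leaves the pairing rules unchanged: mnt_want_write()/mnt_drop_write() still wrap the raw helpers with freeze protection. A minimal sketch of the expected usage, with example_modify() hypothetical:

/* Sketch: taking and dropping write access on a mount. */
static int example_modify(struct vfsmount *mnt)
{
	int err;

	err = mnt_want_write(mnt);	/* freeze protection + write access */
	if (err)
		return err;
	/* ... modify something on the mount ... */
	mnt_drop_write(mnt);

	/*
	 * Callers that already hold sb_start_write() pair the raw helpers
	 * directly: mnt_get_write_access(mnt) / mnt_put_write_access(mnt).
	 */
	return 0;
}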
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 716bc75e9ed2..b4294a8aa2d4 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -108,7 +108,7 @@ struct pnfs_block_dev {
struct pnfs_block_dev *children;
u64 chunk_size;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
u64 disk_offset;
u64 pr_key;
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index 65cbb5607a5f..f318a05a80e1 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -25,17 +25,17 @@ bl_free_device(struct pnfs_block_dev *dev)
} else {
if (dev->pr_registered) {
const struct pr_ops *ops =
- dev->bdev->bd_disk->fops->pr_ops;
+ dev->bdev_handle->bdev->bd_disk->fops->pr_ops;
int error;
- error = ops->pr_register(dev->bdev, dev->pr_key, 0,
- false);
+ error = ops->pr_register(dev->bdev_handle->bdev,
+ dev->pr_key, 0, false);
if (error)
pr_err("failed to unregister PR key.\n");
}
- if (dev->bdev)
- blkdev_put(dev->bdev, NULL);
+ if (dev->bdev_handle)
+ bdev_release(dev->bdev_handle);
}
}
@@ -169,7 +169,7 @@ static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset,
map->start = dev->start;
map->len = dev->len;
map->disk_offset = dev->disk_offset;
- map->bdev = dev->bdev;
+ map->bdev = dev->bdev_handle->bdev;
return true;
}
@@ -236,28 +236,26 @@ bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
struct pnfs_block_volume *v = &volumes[idx];
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
dev_t dev;
dev = bl_resolve_deviceid(server, v, gfp_mask);
if (!dev)
return -EIO;
- bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE, NULL,
- NULL);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ NULL, NULL);
+ if (IS_ERR(bdev_handle)) {
printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
- MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
- return PTR_ERR(bdev);
+ MAJOR(dev), MINOR(dev), PTR_ERR(bdev_handle));
+ return PTR_ERR(bdev_handle);
}
- d->bdev = bdev;
-
-
- d->len = bdev_nr_bytes(d->bdev);
+ d->bdev_handle = bdev_handle;
+ d->len = bdev_nr_bytes(bdev_handle->bdev);
d->map = bl_map_simple;
printk(KERN_INFO "pNFS: using block device %s\n",
- d->bdev->bd_disk->disk_name);
+ bdev_handle->bdev->bd_disk->disk_name);
return 0;
}
@@ -302,10 +300,10 @@ bl_validate_designator(struct pnfs_block_volume *v)
}
}
-static struct block_device *
+static struct bdev_handle *
bl_open_path(struct pnfs_block_volume *v, const char *prefix)
{
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
const char *devname;
devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/%s%*phN",
@@ -313,15 +311,15 @@ bl_open_path(struct pnfs_block_volume *v, const char *prefix)
if (!devname)
return ERR_PTR(-ENOMEM);
- bdev = blkdev_get_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE, NULL,
- NULL);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ NULL, NULL);
+ if (IS_ERR(bdev_handle)) {
pr_warn("pNFS: failed to open device %s (%ld)\n",
- devname, PTR_ERR(bdev));
+ devname, PTR_ERR(bdev_handle));
}
kfree(devname);
- return bdev;
+ return bdev_handle;
}
static int
@@ -329,7 +327,7 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
struct pnfs_block_volume *v = &volumes[idx];
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
const struct pr_ops *ops;
int error;
@@ -342,32 +340,32 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
* On other distributions like Debian, the default SCSI by-id path will
* point to the dm-multipath device if one exists.
*/
- bdev = bl_open_path(v, "dm-uuid-mpath-0x");
- if (IS_ERR(bdev))
- bdev = bl_open_path(v, "wwn-0x");
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
- d->bdev = bdev;
-
- d->len = bdev_nr_bytes(d->bdev);
+ bdev_handle = bl_open_path(v, "dm-uuid-mpath-0x");
+ if (IS_ERR(bdev_handle))
+ bdev_handle = bl_open_path(v, "wwn-0x");
+ if (IS_ERR(bdev_handle))
+ return PTR_ERR(bdev_handle);
+ d->bdev_handle = bdev_handle;
+
+ d->len = bdev_nr_bytes(d->bdev_handle->bdev);
d->map = bl_map_simple;
d->pr_key = v->scsi.pr_key;
pr_info("pNFS: using block device %s (reservation key 0x%llx)\n",
- d->bdev->bd_disk->disk_name, d->pr_key);
+ d->bdev_handle->bdev->bd_disk->disk_name, d->pr_key);
- ops = d->bdev->bd_disk->fops->pr_ops;
+ ops = d->bdev_handle->bdev->bd_disk->fops->pr_ops;
if (!ops) {
pr_err("pNFS: block device %s does not support reservations.",
- d->bdev->bd_disk->disk_name);
+ d->bdev_handle->bdev->bd_disk->disk_name);
error = -EINVAL;
goto out_blkdev_put;
}
- error = ops->pr_register(d->bdev, 0, d->pr_key, true);
+ error = ops->pr_register(d->bdev_handle->bdev, 0, d->pr_key, true);
if (error) {
pr_err("pNFS: failed to register key for block device %s.",
- d->bdev->bd_disk->disk_name);
+ d->bdev_handle->bdev->bd_disk->disk_name);
goto out_blkdev_put;
}
@@ -375,7 +373,7 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
return 0;
out_blkdev_put:
- blkdev_put(d->bdev, NULL);
+ bdev_release(d->bdev_handle);
return error;
}
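The pNFS block layout client now tracks a struct bdev_handle instead of a bare block_device, making open and release symmetric. A minimal sketch of the pattern, with example_open_bdev() hypothetical:

/* Sketch: open a block device by dev_t and release it via its handle. */
static int example_open_bdev(dev_t dev)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  NULL, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* The underlying block_device is reachable as handle->bdev. */
	pr_info("opened %pg, %llu bytes\n", handle->bdev,
		(unsigned long long)bdev_nr_bytes(handle->bdev));

	bdev_release(handle);
	return 0;
}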
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 466ebf1d41b2..4ffa1f469e90 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -78,7 +78,7 @@ nfs4_callback_svc(void *vrqstp)
set_freezable();
- while (!kthread_freezable_should_stop(NULL))
+ while (!svc_thread_should_stop(rqstp))
svc_recv(rqstp);
svc_exit_thread(rqstp);
@@ -86,45 +86,6 @@ nfs4_callback_svc(void *vrqstp)
}
#if defined(CONFIG_NFS_V4_1)
-/*
- * The callback service for NFSv4.1 callbacks
- */
-static int
-nfs41_callback_svc(void *vrqstp)
-{
- struct svc_rqst *rqstp = vrqstp;
- struct svc_serv *serv = rqstp->rq_server;
- struct rpc_rqst *req;
- int error;
- DEFINE_WAIT(wq);
-
- set_freezable();
-
- while (!kthread_freezable_should_stop(NULL)) {
- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_IDLE);
- spin_lock_bh(&serv->sv_cb_lock);
- if (!list_empty(&serv->sv_cb_list)) {
- req = list_first_entry(&serv->sv_cb_list,
- struct rpc_rqst, rq_bc_list);
- list_del(&req->rq_bc_list);
- spin_unlock_bh(&serv->sv_cb_lock);
- finish_wait(&serv->sv_cb_waitq, &wq);
- dprintk("Invoking bc_svc_process()\n");
- error = bc_svc_process(serv, req, rqstp);
- dprintk("bc_svc_process() returned w/ error code= %d\n",
- error);
- } else {
- spin_unlock_bh(&serv->sv_cb_lock);
- if (!kthread_should_stop())
- schedule();
- finish_wait(&serv->sv_cb_waitq, &wq);
- }
- }
-
- svc_exit_thread(rqstp);
- return 0;
-}
-
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct svc_serv *serv)
{
@@ -237,10 +198,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
cb_info->users);
threadfn = nfs4_callback_svc;
-#if defined(CONFIG_NFS_V4_1)
- if (minorversion)
- threadfn = nfs41_callback_svc;
-#else
+#if !defined(CONFIG_NFS_V4_1)
if (minorversion)
return ERR_PTR(-ENOTSUPP);
#endif
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 6bed1394d748..96a4923080ae 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -60,7 +60,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
if (nfs_have_writebacks(inode))
res->change_attr++;
res->ctime = inode_get_ctime(inode);
- res->mtime = inode->i_mtime;
+ res->mtime = inode_get_mtime(inode);
res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
args->bitmap[0];
res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index a1dc33864906..ef817a0475ff 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -2520,9 +2520,9 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
return i;
}
-static int
-ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
+static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
+ struct pnfs_layout_hdr *lo;
struct nfs4_flexfile_layout *ff_layout;
const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
@@ -2533,11 +2533,14 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
return -ENOMEM;
spin_lock(&args->inode->i_lock);
- ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
- args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
- &args->devinfo[0],
- dev_count,
- NFS4_FF_OP_LAYOUTSTATS);
+ lo = NFS_I(args->inode)->layout;
+ if (lo && pnfs_layout_is_valid(lo)) {
+ ff_layout = FF_LAYOUT_FROM_HDR(lo);
+ args->num_dev = ff_layout_mirror_prepare_stats(
+ &ff_layout->generic_hdr, &args->devinfo[0], dev_count,
+ NFS4_FF_OP_LAYOUTSTATS);
+ } else
+ args->num_dev = 0;
spin_unlock(&args->inode->i_lock);
if (!args->num_dev) {
kfree(args->devinfo);
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 2dc64454492b..5407ab8c8783 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -114,8 +114,8 @@ static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *
struct inode *inode)
{
memset(auxdata, 0, sizeof(*auxdata));
- auxdata->mtime_sec = inode->i_mtime.tv_sec;
- auxdata->mtime_nsec = inode->i_mtime.tv_nsec;
+ auxdata->mtime_sec = inode_get_mtime(inode).tv_sec;
+ auxdata->mtime_nsec = inode_get_mtime(inode).tv_nsec;
auxdata->ctime_sec = inode_get_ctime(inode).tv_sec;
auxdata->ctime_nsec = inode_get_ctime(inode).tv_nsec;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e21c073158e5..ebb8d60e1152 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -512,8 +512,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
} else
init_special_inode(inode, inode->i_mode, fattr->rdev);
- memset(&inode->i_atime, 0, sizeof(inode->i_atime));
- memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
+ inode_set_atime(inode, 0, 0);
+ inode_set_mtime(inode, 0, 0);
inode_set_ctime(inode, 0, 0);
inode_set_iversion_raw(inode, 0);
inode->i_size = 0;
@@ -527,11 +527,11 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
nfsi->read_cache_jiffies = fattr->time_start;
nfsi->attr_gencount = fattr->gencount;
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
- inode->i_atime = fattr->atime;
+ inode_set_atime_to_ts(inode, fattr->atime);
else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
@@ -742,9 +742,9 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_ATIME
| NFS_INO_INVALID_CTIME);
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
- inode->i_atime = fattr->atime;
+ inode_set_atime_to_ts(inode, fattr->atime);
else if (attr->ia_valid & ATTR_ATIME_SET)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
@@ -758,9 +758,9 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_MTIME
| NFS_INO_INVALID_CTIME);
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
else if (attr->ia_valid & ATTR_MTIME_SET)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
@@ -1451,11 +1451,11 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
inode_set_ctime_to_ts(inode, fattr->ctime);
}
- ts = inode->i_mtime;
+ ts = inode_get_mtime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
&& (fattr->valid & NFS_ATTR_FATTR_MTIME)
&& timespec64_equal(&ts, &fattr->pre_mtime)) {
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
}
if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
@@ -1506,7 +1506,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
invalid |= NFS_INO_INVALID_CHANGE;
- ts = inode->i_mtime;
+ ts = inode_get_mtime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec64_equal(&ts, &fattr->mtime))
invalid |= NFS_INO_INVALID_MTIME;
@@ -1534,7 +1534,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
invalid |= NFS_INO_INVALID_NLINK;
- ts = inode->i_atime;
+ ts = inode_get_atime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_ATIME) && !timespec64_equal(&ts, &fattr->atime))
invalid |= NFS_INO_INVALID_ATIME;
@@ -2002,7 +2002,7 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fa
}
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PREMTIME) == 0) {
- fattr->pre_mtime = inode->i_mtime;
+ fattr->pre_mtime = inode_get_mtime(inode);
fattr->valid |= NFS_ATTR_FATTR_PREMTIME;
}
if ((fattr->valid & NFS_ATTR_FATTR_SIZE) != 0 &&
@@ -2184,7 +2184,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
}
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_MTIME;
@@ -2220,7 +2220,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
save_cache_validity & NFS_INO_INVALID_SIZE;
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
- inode->i_atime = fattr->atime;
+ inode_set_atime_to_ts(inode, fattr->atime);
else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_ATIME;
diff --git a/fs/nfs/nfs.h b/fs/nfs/nfs.h
index 5ba00610aede..0d3ce0460e35 100644
--- a/fs/nfs/nfs.h
+++ b/fs/nfs/nfs.h
@@ -18,7 +18,7 @@ struct nfs_subversion {
const struct rpc_version *rpc_vers; /* NFS version information */
const struct nfs_rpc_ops *rpc_ops; /* NFS operations */
const struct super_operations *sops; /* NFS Super operations */
- const struct xattr_handler **xattr; /* NFS xattr handlers */
+ const struct xattr_handler * const *xattr; /* NFS xattr handlers */
struct list_head list; /* List of NFS versions */
};
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 063e00aff87e..28704f924612 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -81,7 +81,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
if (status == 0) {
if (nfs_should_remove_suid(inode)) {
spin_lock(&inode->i_lock);
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ nfs_set_cache_invalid(inode,
+ NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
spin_unlock(&inode->i_lock);
}
status = nfs_post_op_update_inode_force_wcc(inode,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 47c5c1f86d66..827d00e2f094 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -315,7 +315,7 @@ extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *,
struct nfs_fh *,
struct nfs_fattr *);
extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
-extern const struct xattr_handler *nfs4_xattr_handlers[];
+extern const struct xattr_handler * const nfs4_xattr_handlers[];
extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
const struct nfs_open_context *ctx,
const struct nfs_lock_context *l_ctx,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7016eaadf555..a654d7234f51 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -8870,8 +8870,6 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
/* Save the EXCHANGE_ID verifier session trunk tests */
memcpy(clp->cl_confirm.data, argp->verifier.data,
sizeof(clp->cl_confirm.data));
- if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS)
- set_bit(NFS_CS_DS, &clp->cl_flags);
out:
trace_nfs4_exchange_id(clp, status);
rpc_put_task(task);
@@ -10739,7 +10737,7 @@ static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
};
#endif
-const struct xattr_handler *nfs4_xattr_handlers[] = {
+const struct xattr_handler * const nfs4_xattr_handlers[] = {
&nfs4_xattr_nfs4_acl_handler,
#if defined(CONFIG_NFS_V4_1)
&nfs4_xattr_nfs4_dacl_handler,
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 306cba0b9e69..84343aefbbd6 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2634,31 +2634,44 @@ pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
return mode == 0;
}
-static int
-pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
+static int pnfs_layout_return_unused_byserver(struct nfs_server *server,
+ void *data)
{
const struct pnfs_layout_range *range = data;
+ const struct cred *cred;
struct pnfs_layout_hdr *lo;
struct inode *inode;
+ nfs4_stateid stateid;
+ enum pnfs_iomode iomode;
+
restart:
rcu_read_lock();
list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
- if (!pnfs_layout_can_be_returned(lo) ||
+ inode = lo->plh_inode;
+ if (!inode || !pnfs_layout_can_be_returned(lo) ||
test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
continue;
- inode = lo->plh_inode;
spin_lock(&inode->i_lock);
- if (!pnfs_should_return_unused_layout(lo, range)) {
+ if (!lo->plh_inode ||
+ !pnfs_should_return_unused_layout(lo, range)) {
spin_unlock(&inode->i_lock);
continue;
}
+ pnfs_get_layout_hdr(lo);
+ pnfs_set_plh_return_info(lo, range->iomode, 0);
+ if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+ range, 0) != 0 ||
+ !pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode)) {
+ spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
+ pnfs_put_layout_hdr(lo);
+ cond_resched();
+ goto restart;
+ }
spin_unlock(&inode->i_lock);
- inode = pnfs_grab_inode_layout_hdr(lo);
- if (!inode)
- continue;
rcu_read_unlock();
- pnfs_mark_layout_for_return(inode, range);
- iput(inode);
+ pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
+ pnfs_put_layout_hdr(lo);
cond_resched();
goto restart;
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 0d6473cb00cb..9b1cfca8112a 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1071,7 +1071,7 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
sb->s_export_op = &nfs_export_ops;
break;
case 4:
- sb->s_flags |= SB_POSIXACL;
+ sb->s_iflags |= SB_I_NOUMASK;
sb->s_time_gran = 1;
sb->s_time_min = S64_MIN;
sb->s_time_max = S64_MAX;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7720b5e43014..9d82d50ce0b1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -788,6 +788,8 @@ static void nfs_inode_add_request(struct nfs_page *req)
*/
static void nfs_inode_remove_request(struct nfs_page *req)
{
+ struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
+
if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
struct folio *folio = nfs_page_to_folio(req->wb_head);
struct address_space *mapping = folio_file_mapping(folio);
@@ -802,7 +804,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
}
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
- atomic_long_dec(&NFS_I(nfs_page_to_inode(req))->nrequests);
+ atomic_long_dec(&nfsi->nrequests);
nfs_release_request(req);
}
}
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index 6fffc8f03f74..b8736a82e57c 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -12,7 +12,8 @@ nfsd-y += trace.o
nfsd-y += nfssvc.o nfsctl.o nfsfh.o vfs.o \
export.o auth.o lockd.o nfscache.o \
- stats.o filecache.o nfs3proc.o nfs3xdr.o
+ stats.o filecache.o nfs3proc.o nfs3xdr.o \
+ netlink.o
nfsd-$(CONFIG_NFSD_V2) += nfsproc.o nfsxdr.o
nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 01d7fd108cf3..46fd74d91ea9 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -117,12 +117,13 @@ static __be32
nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
struct iomap *iomaps, int nr_iomaps)
{
+ struct timespec64 mtime = inode_get_mtime(inode);
loff_t new_size = lcp->lc_last_wr + 1;
struct iattr iattr = { .ia_valid = 0 };
int error;
if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
- timespec64_compare(&lcp->lc_mtime, &inode->i_mtime) < 0)
+ timespec64_compare(&lcp->lc_mtime, &mtime) < 0)
lcp->lc_mtime = current_time(inode);
iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 1ed2f691ebb9..ce78f74715ee 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -16,9 +16,9 @@
__be32
nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp)
+ const struct nfsd4_layoutget *lgp)
{
- struct pnfs_block_extent *b = lgp->lg_content;
+ const struct pnfs_block_extent *b = lgp->lg_content;
int len = sizeof(__be32) + 5 * sizeof(__be64) + sizeof(__be32);
__be32 *p;
@@ -77,7 +77,7 @@ nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
__be32
nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp)
+ const struct nfsd4_getdeviceinfo *gdp)
{
struct pnfs_block_deviceaddr *dev = gdp->gd_device;
int len = sizeof(__be32), ret, i;
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
index bc5166bfe46b..b0361e8aa9a7 100644
--- a/fs/nfsd/blocklayoutxdr.h
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -51,9 +51,9 @@ struct pnfs_block_deviceaddr {
};
__be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp);
+ const struct nfsd4_getdeviceinfo *gdp);
__be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp);
+ const struct nfsd4_layoutget *lgp);
int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
u32 block_size);
int nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 11a0eaa2f914..b7da17e53007 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -339,12 +339,16 @@ static int export_stats_init(struct export_stats *stats)
static void export_stats_reset(struct export_stats *stats)
{
- nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
+ if (stats)
+ nfsd_percpu_counters_reset(stats->counter,
+ EXP_STATS_COUNTERS_NUM);
}
static void export_stats_destroy(struct export_stats *stats)
{
- nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
+ if (stats)
+ nfsd_percpu_counters_destroy(stats->counter,
+ EXP_STATS_COUNTERS_NUM);
}
static void svc_export_put(struct kref *ref)
@@ -353,7 +357,8 @@ static void svc_export_put(struct kref *ref)
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
- export_stats_destroy(&exp->ex_stats);
+ export_stats_destroy(exp->ex_stats);
+ kfree(exp->ex_stats);
kfree(exp->ex_uuid);
kfree_rcu(exp, ex_rcu);
}
@@ -767,13 +772,15 @@ static int svc_export_show(struct seq_file *m,
seq_putc(m, '\t');
seq_escape(m, exp->ex_client->name, " \t\n\\");
if (export_stats) {
- seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
+ struct percpu_counter *counter = exp->ex_stats->counter;
+
+ seq_printf(m, "\t%lld\n", exp->ex_stats->start_time);
seq_printf(m, "\tfh_stale: %lld\n",
- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
+ percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE]));
seq_printf(m, "\tio_read: %lld\n",
- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
+ percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ]));
seq_printf(m, "\tio_write: %lld\n",
- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
+ percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE]));
seq_putc(m, '\n');
return 0;
}
@@ -819,7 +826,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_layout_types = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
- export_stats_reset(&new->ex_stats);
+ export_stats_reset(new->ex_stats);
}
static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -856,7 +863,14 @@ static struct cache_head *svc_export_alloc(void)
if (!i)
return NULL;
- if (export_stats_init(&i->ex_stats)) {
+ i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL);
+ if (!i->ex_stats) {
+ kfree(i);
+ return NULL;
+ }
+
+ if (export_stats_init(i->ex_stats)) {
+ kfree(i->ex_stats);
kfree(i);
return NULL;
}
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 2df8ae25aad3..ca9dc230ae3d 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -64,10 +64,10 @@ struct svc_export {
struct cache_head h;
struct auth_domain * ex_client;
int ex_flags;
+ int ex_fsid;
struct path ex_path;
kuid_t ex_anon_uid;
kgid_t ex_anon_gid;
- int ex_fsid;
unsigned char * ex_uuid; /* 16 byte fsid */
struct nfsd4_fs_locations ex_fslocs;
uint32_t ex_nflavors;
@@ -76,8 +76,8 @@ struct svc_export {
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
struct rcu_head ex_rcu;
- struct export_stats ex_stats;
unsigned long ex_xprtsec_modes;
+ struct export_stats *ex_stats;
};
/* an "export key" (expkey) maps a filehandlefragement to an
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index ee9c923192e0..07bf219f9ae4 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -989,22 +989,21 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
struct net *net = SVC_NET(rqstp);
struct nfsd_file *new, *nf;
- const struct cred *cred;
+ bool stale_retry = true;
bool open_retry = true;
struct inode *inode;
__be32 status;
int ret;
+retry:
status = fh_verify(rqstp, fhp, S_IFREG,
may_flags|NFSD_MAY_OWNER_OVERRIDE);
if (status != nfs_ok)
return status;
inode = d_inode(fhp->fh_dentry);
- cred = get_current_cred();
-retry:
rcu_read_lock();
- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
+ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
rcu_read_unlock();
if (nf) {
@@ -1026,7 +1025,7 @@ retry:
rcu_read_lock();
spin_lock(&inode->i_lock);
- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
+ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
if (unlikely(nf)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
@@ -1058,6 +1057,7 @@ wait_for_construction:
goto construction_err;
}
open_retry = false;
+ fh_put(fhp);
goto retry;
}
this_cpu_inc(nfsd_file_cache_hits);
@@ -1074,7 +1074,6 @@ out:
nfsd_file_check_write_error(nf);
*pnf = nf;
}
- put_cred(cred);
trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
return status;
@@ -1088,8 +1087,20 @@ open_file:
status = nfs_ok;
trace_nfsd_file_opened(nf, status);
} else {
- status = nfsd_open_verified(rqstp, fhp, may_flags,
- &nf->nf_file);
+ ret = nfsd_open_verified(rqstp, fhp, may_flags,
+ &nf->nf_file);
+ if (ret == -EOPENSTALE && stale_retry) {
+ stale_retry = false;
+ nfsd_file_unhash(nf);
+ clear_and_wake_up_bit(NFSD_FILE_PENDING,
+ &nf->nf_flags);
+ if (refcount_dec_and_test(&nf->nf_ref))
+ nfsd_file_free(nf);
+ nf = NULL;
+ fh_put(fhp);
+ goto retry;
+ }
+ status = nfserrno(ret);
trace_nfsd_file_open(nf, status);
}
} else
diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
index bb205328e043..aeb71c10ff1b 100644
--- a/fs/nfsd/flexfilelayoutxdr.c
+++ b/fs/nfsd/flexfilelayoutxdr.c
@@ -17,9 +17,9 @@ struct ff_idmap {
__be32
nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp)
+ const struct nfsd4_layoutget *lgp)
{
- struct pnfs_ff_layout *fl = lgp->lg_content;
+ const struct pnfs_ff_layout *fl = lgp->lg_content;
int len, mirror_len, ds_len, fh_len;
__be32 *p;
@@ -77,7 +77,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
__be32
nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp)
+ const struct nfsd4_getdeviceinfo *gdp)
{
struct pnfs_ff_device_addr *da = gdp->gd_device;
int len;
diff --git a/fs/nfsd/flexfilelayoutxdr.h b/fs/nfsd/flexfilelayoutxdr.h
index 8e195aeca023..6d5a1066a903 100644
--- a/fs/nfsd/flexfilelayoutxdr.h
+++ b/fs/nfsd/flexfilelayoutxdr.h
@@ -43,8 +43,8 @@ struct pnfs_ff_layout {
};
__be32 nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp);
+ const struct nfsd4_getdeviceinfo *gdp);
__be32 nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp);
+ const struct nfsd4_layoutget *lgp);
#endif /* _NFSD_FLEXFILELAYOUTXDR_H */
diff --git a/fs/nfsd/netlink.c b/fs/nfsd/netlink.c
new file mode 100644
index 000000000000..0e1d635ec5f9
--- /dev/null
+++ b/fs/nfsd/netlink.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink.h"
+
+#include <uapi/linux/nfsd_netlink.h>
+
+/* Ops table for nfsd */
+static const struct genl_split_ops nfsd_nl_ops[] = {
+ {
+ .cmd = NFSD_CMD_RPC_STATUS_GET,
+ .start = nfsd_nl_rpc_status_get_start,
+ .dumpit = nfsd_nl_rpc_status_get_dumpit,
+ .done = nfsd_nl_rpc_status_get_done,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+};
+
+struct genl_family nfsd_nl_family __ro_after_init = {
+ .name = NFSD_FAMILY_NAME,
+ .version = NFSD_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = nfsd_nl_ops,
+ .n_split_ops = ARRAY_SIZE(nfsd_nl_ops),
+};
diff --git a/fs/nfsd/netlink.h b/fs/nfsd/netlink.h
new file mode 100644
index 000000000000..d83dd6bdee92
--- /dev/null
+++ b/fs/nfsd/netlink.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_NFSD_GEN_H
+#define _LINUX_NFSD_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/nfsd_netlink.h>
+
+int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb);
+int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb);
+
+int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+extern struct genl_family nfsd_nl_family;
+
+#endif /* _LINUX_NFSD_GEN_H */
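The generated ops table and family definition still have to be hooked into generic netlink; presumably nfsd does this from its module init and exit paths, roughly as sketched below (the example_* names are hypothetical):

/* Sketch: registering the generated nfsd generic netlink family. */
static int __init example_nfsd_genl_init(void)
{
	return genl_register_family(&nfsd_nl_family);
}

static void example_nfsd_genl_exit(void)
{
	genl_unregister_family(&nfsd_nl_family);
}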
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 268ef57751c4..b78eceebd945 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -171,7 +171,8 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
* + 1 (xdr opaque byte count) = 26
*/
resp->count = argp->count;
- svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4);
+ svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3) << 2) +
+ resp->count + 4);
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
@@ -194,7 +195,7 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
SVCFH_fmt(&argp->fh),
argp->len,
(unsigned long long) argp->offset,
- argp->stable? " stable" : "");
+ argp->stable ? " stable" : "");
resp->status = nfserr_fbig;
if (argp->offset > (u64)OFFSET_MAX ||
@@ -294,8 +295,8 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
status = nfserr_exist;
break;
case NFS3_CREATE_EXCLUSIVE:
- if (d_inode(child)->i_mtime.tv_sec == v_mtime &&
- d_inode(child)->i_atime.tv_sec == v_atime &&
+ if (inode_get_mtime_sec(d_inode(child)) == v_mtime &&
+ inode_get_atime_sec(d_inode(child)) == v_atime &&
d_inode(child)->i_size == 0) {
break;
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 4039ffcf90ba..92bc109dabe6 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -84,7 +84,21 @@ static void encode_uint32(struct xdr_stream *xdr, u32 n)
static void encode_bitmap4(struct xdr_stream *xdr, const __u32 *bitmap,
size_t len)
{
- WARN_ON_ONCE(xdr_stream_encode_uint32_array(xdr, bitmap, len) < 0);
+ xdr_stream_encode_uint32_array(xdr, bitmap, len);
+}
+
+static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs4_cb_fattr *fattr)
+{
+ fattr->ncf_cb_change = 0;
+ fattr->ncf_cb_fsize = 0;
+ if (bitmap[0] & FATTR4_WORD0_CHANGE)
+ if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0)
+ return -NFSERR_BAD_XDR;
+ if (bitmap[0] & FATTR4_WORD0_SIZE)
+ if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0)
+ return -NFSERR_BAD_XDR;
+ return 0;
}
/*
@@ -358,6 +372,30 @@ encode_cb_recallany4args(struct xdr_stream *xdr,
}
/*
+ * CB_GETATTR4args
+ * struct CB_GETATTR4args {
+ * nfs_fh4 fh;
+ * bitmap4 attr_request;
+ * };
+ *
+ * The size and change attributes are the only ones
+ * guaranteed to be serviced by the client.
+ */
+static void
+encode_cb_getattr4args(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr,
+ struct nfs4_cb_fattr *fattr)
+{
+ struct nfs4_delegation *dp =
+ container_of(fattr, struct nfs4_delegation, dl_cb_fattr);
+ struct knfsd_fh *fh = &dp->dl_stid.sc_file->fi_fhandle;
+
+ encode_nfs_cb_opnum4(xdr, OP_CB_GETATTR);
+ encode_nfs_fh4(xdr, fh);
+ encode_bitmap4(xdr, fattr->ncf_cb_bmap, ARRAY_SIZE(fattr->ncf_cb_bmap));
+ hdr->nops++;
+}
+
+/*
* CB_SEQUENCE4args
*
* struct CB_SEQUENCE4args {
@@ -493,6 +531,26 @@ static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
}
/*
+ * 20.1. Operation 3: CB_GETATTR - Get Attributes
+ */
+static void nfs4_xdr_enc_cb_getattr(struct rpc_rqst *req,
+ struct xdr_stream *xdr, const void *data)
+{
+ const struct nfsd4_callback *cb = data;
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+ struct nfs4_cb_compound_hdr hdr = {
+ .ident = cb->cb_clp->cl_cb_ident,
+ .minorversion = cb->cb_clp->cl_minorversion,
+ };
+
+ encode_cb_compound4args(xdr, &hdr);
+ encode_cb_sequence4args(xdr, cb, &hdr);
+ encode_cb_getattr4args(xdr, &hdr, ncf);
+ encode_cb_nops(&hdr);
+}
+
+/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -548,6 +606,42 @@ static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
}
/*
+ * 20.1. Operation 3: CB_GETATTR - Get Attributes
+ */
+static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfsd4_callback *cb = data;
+ struct nfs4_cb_compound_hdr hdr;
+ int status;
+ u32 bitmap[3] = {0};
+ u32 attrlen;
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+
+ status = decode_cb_compound4res(xdr, &hdr);
+ if (unlikely(status))
+ return status;
+
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
+
+ status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status);
+ if (status)
+ return status;
+ if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
+ return -NFSERR_BAD_XDR;
+ if (xdr_stream_decode_u32(xdr, &attrlen) < 0)
+ return -NFSERR_BAD_XDR;
+ if (attrlen > (sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize)))
+ return -NFSERR_BAD_XDR;
+ status = decode_cb_fattr4(xdr, bitmap, ncf);
+ return status;
+}
+
+/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
@@ -855,6 +949,7 @@ static const struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload),
PROC(CB_RECALL_ANY, COMPOUND, cb_recall_any, cb_recall_any),
+ PROC(CB_GETATTR, COMPOUND, cb_getattr, cb_getattr),
};
static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index e8a80052cb1b..5e8096bc5eaa 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -515,11 +515,11 @@ nfsd4_return_file_layouts(struct svc_rqst *rqstp,
if (!list_empty(&ls->ls_layouts)) {
if (found)
nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
- lrp->lrs_present = 1;
+ lrp->lrs_present = true;
} else {
trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
nfs4_unhash_stid(&ls->ls_stid);
- lrp->lrs_present = 0;
+ lrp->lrs_present = false;
}
spin_unlock(&ls->ls_lock);
@@ -539,7 +539,7 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp,
struct nfs4_layout *lp, *t;
LIST_HEAD(reaplist);
- lrp->lrs_present = 0;
+ lrp->lrs_present = false;
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 4199ede0583c..6f2d4aa4970d 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -322,8 +322,8 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
status = nfserr_exist;
break;
case NFS4_CREATE_EXCLUSIVE:
- if (d_inode(child)->i_mtime.tv_sec == v_mtime &&
- d_inode(child)->i_atime.tv_sec == v_atime &&
+ if (inode_get_mtime_sec(d_inode(child)) == v_mtime &&
+ inode_get_atime_sec(d_inode(child)) == v_atime &&
d_inode(child)->i_size == 0) {
open->op_created = true;
break; /* subtle */
@@ -331,8 +331,8 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
status = nfserr_exist;
break;
case NFS4_CREATE_EXCLUSIVE4_1:
- if (d_inode(child)->i_mtime.tv_sec == v_mtime &&
- d_inode(child)->i_atime.tv_sec == v_atime &&
+ if (inode_get_mtime_sec(d_inode(child)) == v_mtime &&
+ inode_get_atime_sec(d_inode(child)) == v_atime &&
d_inode(child)->i_size == 0) {
open->op_created = true;
goto set_attr; /* subtle */
@@ -1329,7 +1329,8 @@ extern void nfs_sb_deactive(struct super_block *sb);
* setup a work entry in the ssc delayed unmount list.
*/
static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
- struct nfsd4_ssc_umount_item **nsui)
+ struct nfsd4_ssc_umount_item **nsui,
+ struct svc_rqst *rqstp)
{
struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd4_ssc_umount_item *work = NULL;
@@ -1351,7 +1352,7 @@ try_again:
spin_unlock(&nn->nfsd_ssc_lock);
/* allow 20secs for mount/unmount for now - revisit */
- if (kthread_should_stop() ||
+ if (svc_thread_should_stop(rqstp) ||
(schedule_timeout(20*HZ) == 0)) {
finish_wait(&nn->nfsd_ssc_waitq, &wait);
kfree(work);
@@ -1467,7 +1468,7 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
goto out_free_rawdata;
snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
- status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui);
+ status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui, rqstp);
if (status)
goto out_free_devname;
if ((*nsui)->nsui_vfsmount)
@@ -1642,6 +1643,7 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
if (bytes_total == 0)
bytes_total = ULLONG_MAX;
do {
+ /* Only async copies can be stopped here */
if (kthread_should_stop())
break;
bytes_copied = nfsd_copy_file_range(src, src_pos, dst, dst_pos,
@@ -1760,6 +1762,7 @@ static int nfsd4_do_async_copy(void *data)
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
__be32 nfserr;
+ trace_nfsd_copy_do_async(copy);
if (nfsd4_ssc_is_inter(copy)) {
struct file *filp;
@@ -1798,21 +1801,27 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd4_copy *async_copy = NULL;
+ copy->cp_clp = cstate->clp;
if (nfsd4_ssc_is_inter(copy)) {
+ trace_nfsd_copy_inter(copy);
if (!inter_copy_offload_enable || nfsd4_copy_is_sync(copy)) {
status = nfserr_notsupp;
goto out;
}
status = nfsd4_setup_inter_ssc(rqstp, cstate, copy);
- if (status)
+ if (status) {
+ trace_nfsd_copy_done(copy, status);
return nfserr_offload_denied;
+ }
} else {
+ trace_nfsd_copy_intra(copy);
status = nfsd4_setup_intra_ssc(rqstp, cstate, copy);
- if (status)
+ if (status) {
+ trace_nfsd_copy_done(copy, status);
return status;
+ }
}
- copy->cp_clp = cstate->clp;
memcpy(&copy->fh, &cstate->current_fh.fh_handle,
sizeof(struct knfsd_fh));
if (nfsd4_copy_is_async(copy)) {
@@ -1847,6 +1856,7 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
copy->nf_dst->nf_file, true);
}
out:
+ trace_nfsd_copy_done(copy, status);
release_copy_files(copy);
return status;
out_err:
@@ -1929,8 +1939,8 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
return status;
- cn->cpn_sec = nn->nfsd4_lease;
- cn->cpn_nsec = 0;
+ cn->cpn_lease_time.tv_sec = nn->nfsd4_lease;
+ cn->cpn_lease_time.tv_nsec = 0;
status = nfserrno(-ENOMEM);
cps = nfs4_alloc_init_cpntf_state(nn, stid);
@@ -2347,10 +2357,10 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
mutex_unlock(&ls->ls_mutex);
if (new_size > i_size_read(inode)) {
- lcp->lc_size_chg = 1;
+ lcp->lc_size_chg = true;
lcp->lc_newsize = new_size;
} else {
- lcp->lc_size_chg = 0;
+ lcp->lc_size_chg = false;
}
nfserr = ops->proc_layoutcommit(inode, lcp);
@@ -3200,6 +3210,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LOCK] = {
.op_func = nfsd4_lock,
+ .op_release = nfsd4_lock_release,
.op_flags = OP_MODIFIES_SOMETHING |
OP_NONTRIVIAL_ERROR_ENCODE,
.op_name = "OP_LOCK",
@@ -3208,6 +3219,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LOCKT] = {
.op_func = nfsd4_lockt,
+ .op_release = nfsd4_lockt_release,
.op_flags = OP_NONTRIVIAL_ERROR_ENCODE,
.op_name = "OP_LOCKT",
.op_rsize_bop = nfsd4_lock_rsize,
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8534693eb6a4..65fd5510323a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -59,7 +59,7 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
-#define all_ones {{~0,~0},~0}
+#define all_ones {{ ~0, ~0}, ~0}
static const stateid_t one_stateid = {
.si_generation = ~0,
.si_opaque = all_ones,
@@ -127,6 +127,7 @@ static void free_session(struct nfsd4_session *);
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
+static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops;
static struct workqueue_struct *laundry_wq;
@@ -297,7 +298,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
nbl = find_blocked_lock(lo, fh, nn);
if (!nbl) {
- nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
+ nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
if (nbl) {
INIT_LIST_HEAD(&nbl->nbl_list);
INIT_LIST_HEAD(&nbl->nbl_lru);
@@ -1159,6 +1160,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
struct nfs4_clnt_odstate *odstate, u32 dl_type)
{
struct nfs4_delegation *dp;
+ struct nfs4_stid *stid;
long n;
dprintk("NFSD alloc_init_deleg\n");
@@ -1167,9 +1169,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
goto out_dec;
if (delegation_blocked(&fp->fi_fhandle))
goto out_dec;
- dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
- if (dp == NULL)
+ stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg);
+ if (stid == NULL)
goto out_dec;
+ dp = delegstateid(stid);
/*
* delegation seqid's are never incremented. The 4.1 special
@@ -1187,6 +1190,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
dp->dl_recalled = false;
nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
&nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
+ nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client,
+ &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR);
+ dp->dl_cb_fattr.ncf_file_modified = false;
+ dp->dl_cb_fattr.ncf_cb_bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE;
get_nfs4_file(fp);
dp->dl_stid.sc_file = fp;
return dp;
@@ -2894,11 +2901,56 @@ nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
spin_unlock(&nn->client_lock);
}
+static int
+nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
+{
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+
+ ncf->ncf_cb_status = task->tk_status;
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ rpc_delay(task, 2 * HZ);
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+static void
+nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
+{
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+
+ nfs4_put_stid(&dp->dl_stid);
+ clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
+ wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);
+}
+
static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
.done = nfsd4_cb_recall_any_done,
.release = nfsd4_cb_recall_any_release,
};
+static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = {
+ .done = nfsd4_cb_getattr_done,
+ .release = nfsd4_cb_getattr_release,
+};
+
+void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
+{
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+
+ if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags))
+ return;
+ refcount_inc(&dp->dl_stid.sc_count);
+ nfsd4_run_cb(&ncf->ncf_getattr);
+}
+
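The CB_GETATTR_BUSY handling above follows a common pattern: test_and_set_bit() ensures only one callback is in flight, the GETATTR path sleeps in wait_on_bit(), and the release path clears the bit and calls wake_up_bit(). Below is a minimal user-space sketch of the same idea, substituting a mutex and condition variable for the kernel bit-wait API; the timing and names are illustrative.

/* User-space sketch of the CB_GETATTR_BUSY pattern used above:
 * only one callback runs at a time, the GETATTR path waits for it to
 * finish, and the release path clears the flag and wakes any waiters.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static bool cb_busy;

static void *callback_worker(void *arg)
{
	(void)arg;
	sleep(1);			/* pretend the client replied */
	pthread_mutex_lock(&lock);
	cb_busy = false;		/* "release": clear the flag ... */
	pthread_cond_broadcast(&done);	/* ... and wake waiters */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void start_callback(pthread_t *tid)
{
	pthread_mutex_lock(&lock);
	if (cb_busy) {			/* test_and_set_bit() returned 1 */
		pthread_mutex_unlock(&lock);
		return;
	}
	cb_busy = true;
	pthread_mutex_unlock(&lock);
	pthread_create(tid, NULL, callback_worker, NULL);
}

int main(void)
{
	pthread_t tid;

	start_callback(&tid);
	pthread_mutex_lock(&lock);	/* GETATTR path: wait_on_bit() */
	while (cb_busy)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
	puts("callback finished; safe to inspect the cached attributes");
	pthread_join(tid, NULL);
	return 0;
}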
static struct nfs4_client *create_client(struct xdr_netobj name,
struct svc_rqst *rqstp, nfs4_verifier *verf)
{
@@ -5634,13 +5686,15 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *parent = NULL;
int cb_up;
int status = 0;
+ struct kstat stat;
+ struct path path;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
- open->op_recall = 0;
+ open->op_recall = false;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_PREVIOUS:
if (!cb_up)
- open->op_recall = 1;
+ open->op_recall = true;
break;
case NFS4_OPEN_CLAIM_NULL:
parent = currentfh;
@@ -5671,6 +5725,18 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
+ path.mnt = currentfh->fh_export->ex_path.mnt;
+ path.dentry = currentfh->fh_dentry;
+ if (vfs_getattr(&path, &stat,
+ (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
+ AT_STATX_SYNC_AS_STAT)) {
+ nfs4_put_stid(&dp->dl_stid);
+ destroy_delegation(dp);
+ goto out_no_deleg;
+ }
+ dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
+ dp->dl_cb_fattr.ncf_initial_cinfo =
+ nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry));
} else {
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
@@ -5682,7 +5748,7 @@ out_no_deleg:
if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
dprintk("NFSD: WARNING: refusing delegation reclaim\n");
- open->op_recall = 1;
+ open->op_recall = true;
}
/* 4.1 client asking for a delegation? */
@@ -7487,6 +7553,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_blocked_lock *nbl = NULL;
struct file_lock *file_lock = NULL;
struct file_lock *conflock = NULL;
+ struct super_block *sb;
__be32 status = 0;
int lkflg;
int err;
@@ -7508,6 +7575,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("NFSD: nfsd4_lock: permission denied!\n");
return status;
}
+ sb = cstate->current_fh.fh_dentry->d_sb;
if (lock->lk_is_new) {
if (nfsd4_has_session(cstate))
@@ -7559,7 +7627,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fp = lock_stp->st_stid.sc_file;
switch (lock->lk_type) {
case NFS4_READW_LT:
- if (nfsd4_has_session(cstate))
+ if (nfsd4_has_session(cstate) ||
+ exportfs_lock_op_is_async(sb->s_export_op))
fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_READ_LT:
@@ -7571,7 +7640,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fl_type = F_RDLCK;
break;
case NFS4_WRITEW_LT:
- if (nfsd4_has_session(cstate))
+ if (nfsd4_has_session(cstate) ||
+ exportfs_lock_op_is_async(sb->s_export_op))
fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_WRITE_LT:
@@ -7599,7 +7669,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* for file locks), so don't attempt blocking lock notifications
* on those filesystems:
*/
- if (nf->nf_file->f_op->lock)
+ if (!exportfs_lock_op_is_async(sb->s_export_op))
fl_flags &= ~FL_SLEEP;
nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
@@ -7705,6 +7775,14 @@ out:
return status;
}
+void nfsd4_lock_release(union nfsd4_op_u *u)
+{
+ struct nfsd4_lock *lock = &u->lock;
+ struct nfsd4_lock_denied *deny = &lock->lk_denied;
+
+ kfree(deny->ld_owner.data);
+}
+
/*
* The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
* so we do a temporary open here just to get an open file to pass to
@@ -7810,6 +7888,14 @@ out:
return status;
}
+void nfsd4_lockt_release(union nfsd4_op_u *u)
+{
+ struct nfsd4_lockt *lockt = &u->lockt;
+ struct nfsd4_lock_denied *deny = &lockt->lt_denied;
+
+ kfree(deny->ld_owner.data);
+}
+
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -8403,6 +8489,8 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
* nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
* @rqstp: RPC transaction context
* @inode: file to be checked for a conflict
+ * @modified: set to true if the file was modified by the client
+ * @size: new size of the file if @modified is true
*
* This function is called when there is a conflict between a write
* delegation and a change/size GETATTR from another client. The server
@@ -8411,21 +8499,23 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
* delegation before replying to the GETATTR. See RFC 8881 section
* 18.7.4.
*
- * The current implementation does not support CB_GETATTR yet. However
- * this can avoid recalling the delegation could be added in follow up
- * work.
- *
* Returns 0 if there is no conflict; otherwise an nfs_stat
* code is returned.
*/
__be32
-nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
+nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+ bool *modified, u64 *size)
{
- __be32 status;
struct file_lock_context *ctx;
- struct file_lock *fl;
struct nfs4_delegation *dp;
+ struct nfs4_cb_fattr *ncf;
+ struct file_lock *fl;
+ struct iattr attrs;
+ __be32 status;
+ might_sleep();
+
+ *modified = false;
ctx = locks_inode_context(inode);
if (!ctx)
return 0;
@@ -8452,10 +8542,34 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
break_lease:
spin_unlock(&ctx->flc_lock);
nfsd_stats_wdeleg_getattr_inc();
- status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
- if (status != nfserr_jukebox ||
- !nfsd_wait_for_delegreturn(rqstp, inode))
- return status;
+
+ dp = fl->fl_owner;
+ ncf = &dp->dl_cb_fattr;
+ nfs4_cb_getattr(&dp->dl_cb_fattr);
+ wait_on_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY, TASK_INTERRUPTIBLE);
+ if (ncf->ncf_cb_status) {
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+ return status;
+ }
+ if (!ncf->ncf_file_modified &&
+ (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
+ ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
+ ncf->ncf_file_modified = true;
+ if (ncf->ncf_file_modified) {
+ /*
+ * The server does not store the client's reported size in the
+ * file's metadata; only the mtime and ctime are updated here.
+ */
+ attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
+ attrs.ia_valid = ATTR_MTIME | ATTR_CTIME;
+ setattr_copy(&nop_mnt_idmap, inode, &attrs);
+ mark_inode_dirty(inode);
+ ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
+ *size = ncf->ncf_cur_fsize;
+ *modified = true;
+ }
return 0;
}
break;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 92c7dde148a4..ec4ed6206df1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2530,66 +2530,62 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
return true;
}
-static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
- struct svc_export *exp)
+static __be32 nfsd4_encode_nfs_fh4(struct xdr_stream *xdr,
+ struct knfsd_fh *fh_handle)
{
- if (exp->ex_flags & NFSEXP_V4ROOT) {
- *p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
- *p++ = 0;
- } else
- p = xdr_encode_hyper(p, nfsd4_change_attribute(stat, inode));
- return p;
+ return nfsd4_encode_opaque(xdr, fh_handle->fh_raw, fh_handle->fh_size);
}
+/* This is a frequently-encoded type; open-coded for speed */
static __be32 nfsd4_encode_nfstime4(struct xdr_stream *xdr,
- struct timespec64 *tv)
+ const struct timespec64 *tv)
{
__be32 *p;
p = xdr_reserve_space(xdr, XDR_UNIT * 3);
if (!p)
return nfserr_resource;
-
- p = xdr_encode_hyper(p, (s64)tv->tv_sec);
+ p = xdr_encode_hyper(p, tv->tv_sec);
*p = cpu_to_be32(tv->tv_nsec);
return nfs_ok;
}
-/*
- * ctime (in NFSv4, time_metadata) is not writeable, and the client
- * doesn't really care what resolution could theoretically be stored by
- * the filesystem.
- *
- * The client cares how close together changes can be while still
- * guaranteeing ctime changes. For most filesystems (which have
- * timestamps with nanosecond fields) that is limited by the resolution
- * of the time returned from current_time() (which I'm assuming to be
- * 1/HZ).
- */
-static __be32 *encode_time_delta(__be32 *p, struct inode *inode)
+static __be32 nfsd4_encode_specdata4(struct xdr_stream *xdr,
+ unsigned int major, unsigned int minor)
{
- struct timespec64 ts;
- u32 ns;
+ __be32 status;
- ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
- ts = ns_to_timespec64(ns);
+ status = nfsd4_encode_uint32_t(xdr, major);
+ if (status != nfs_ok)
+ return status;
+ return nfsd4_encode_uint32_t(xdr, minor);
+}
- p = xdr_encode_hyper(p, ts.tv_sec);
- *p++ = cpu_to_be32(ts.tv_nsec);
+static __be32
+nfsd4_encode_change_info4(struct xdr_stream *xdr, const struct nfsd4_change_info *c)
+{
+ __be32 status;
- return p;
+ status = nfsd4_encode_bool(xdr, c->atomic);
+ if (status != nfs_ok)
+ return status;
+ status = nfsd4_encode_changeid4(xdr, c->before_change);
+ if (status != nfs_ok)
+ return status;
+ return nfsd4_encode_changeid4(xdr, c->after_change);
}
-static __be32
-nfsd4_encode_change_info4(struct xdr_stream *xdr, struct nfsd4_change_info *c)
+static __be32 nfsd4_encode_netaddr4(struct xdr_stream *xdr,
+ const struct nfs42_netaddr *addr)
{
- if (xdr_stream_encode_bool(xdr, c->atomic) < 0)
- return nfserr_resource;
- if (xdr_stream_encode_u64(xdr, c->before_change) < 0)
- return nfserr_resource;
- if (xdr_stream_encode_u64(xdr, c->after_change) < 0)
- return nfserr_resource;
- return nfs_ok;
+ __be32 status;
+
+ /* na_r_netid */
+ status = nfsd4_encode_opaque(xdr, addr->netid, addr->netid_len);
+ if (status != nfs_ok)
+ return status;
+ /* na_r_addr */
+ return nfsd4_encode_opaque(xdr, addr->addr, addr->addr_len);
}
/* Encode as an array of strings the string given with components
@@ -2661,9 +2657,6 @@ static __be32 nfsd4_encode_components(struct xdr_stream *xdr, char sep,
return nfsd4_encode_components_esc(xdr, sep, components, 0, 0);
}
-/*
- * encode a location element of a fs_locations structure
- */
static __be32 nfsd4_encode_fs_location4(struct xdr_stream *xdr,
struct nfsd4_fs_location *location)
{
@@ -2676,15 +2669,12 @@ static __be32 nfsd4_encode_fs_location4(struct xdr_stream *xdr,
status = nfsd4_encode_components(xdr, '/', location->path);
if (status)
return status;
- return 0;
+ return nfs_ok;
}
-/*
- * Encode a path in RFC3530 'pathname4' format
- */
-static __be32 nfsd4_encode_path(struct xdr_stream *xdr,
- const struct path *root,
- const struct path *path)
+static __be32 nfsd4_encode_pathname4(struct xdr_stream *xdr,
+ const struct path *root,
+ const struct path *path)
{
struct path cur = *path;
__be32 *p;
@@ -2752,89 +2742,59 @@ out_free:
return err;
}
-static __be32 nfsd4_encode_fsloc_fsroot(struct xdr_stream *xdr,
- struct svc_rqst *rqstp, const struct path *path)
+static __be32 nfsd4_encode_fs_locations4(struct xdr_stream *xdr,
+ struct svc_rqst *rqstp,
+ struct svc_export *exp)
{
+ struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
struct svc_export *exp_ps;
- __be32 res;
+ unsigned int i;
+ __be32 status;
+ /* fs_root */
exp_ps = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp_ps))
return nfserrno(PTR_ERR(exp_ps));
- res = nfsd4_encode_path(xdr, &exp_ps->ex_path, path);
+ status = nfsd4_encode_pathname4(xdr, &exp_ps->ex_path, &exp->ex_path);
exp_put(exp_ps);
- return res;
-}
-
-/*
- * encode a fs_locations structure
- */
-static __be32 nfsd4_encode_fs_locations(struct xdr_stream *xdr,
- struct svc_rqst *rqstp, struct svc_export *exp)
-{
- __be32 status;
- int i;
- __be32 *p;
- struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
-
- status = nfsd4_encode_fsloc_fsroot(xdr, rqstp, &exp->ex_path);
- if (status)
+ if (status != nfs_ok)
return status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+
+ /* locations<> */
+ if (xdr_stream_encode_u32(xdr, fslocs->locations_count) != XDR_UNIT)
return nfserr_resource;
- *p++ = cpu_to_be32(fslocs->locations_count);
- for (i=0; i<fslocs->locations_count; i++) {
+ for (i = 0; i < fslocs->locations_count; i++) {
status = nfsd4_encode_fs_location4(xdr, &fslocs->locations[i]);
- if (status)
+ if (status != nfs_ok)
return status;
}
- return 0;
-}
-static u32 nfs4_file_type(umode_t mode)
-{
- switch (mode & S_IFMT) {
- case S_IFIFO: return NF4FIFO;
- case S_IFCHR: return NF4CHR;
- case S_IFDIR: return NF4DIR;
- case S_IFBLK: return NF4BLK;
- case S_IFLNK: return NF4LNK;
- case S_IFREG: return NF4REG;
- case S_IFSOCK: return NF4SOCK;
- default: return NF4BAD;
- }
+ return nfs_ok;
}
-static inline __be32
-nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
- struct nfs4_ace *ace)
+static __be32 nfsd4_encode_nfsace4(struct xdr_stream *xdr, struct svc_rqst *rqstp,
+ struct nfs4_ace *ace)
{
+ __be32 status;
+
+ /* type */
+ status = nfsd4_encode_acetype4(xdr, ace->type);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* flag */
+ status = nfsd4_encode_aceflag4(xdr, ace->flag);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* access mask */
+ status = nfsd4_encode_acemask4(xdr, ace->access_mask & NFS4_ACE_MASK_ALL);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* who */
if (ace->whotype != NFS4_ACL_WHO_NAMED)
return nfs4_acl_write_who(xdr, ace->whotype);
- else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
+ if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
return nfsd4_encode_group(xdr, rqstp, ace->who_gid);
- else
- return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
-}
-
-static inline __be32
-nfsd4_encode_layout_types(struct xdr_stream *xdr, u32 layout_types)
-{
- __be32 *p;
- unsigned long i = hweight_long(layout_types);
-
- p = xdr_reserve_space(xdr, 4 + 4 * i);
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(i);
-
- for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
- if (layout_types & (1 << i))
- *p++ = cpu_to_be32(i);
-
- return 0;
+ return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
}
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
@@ -2906,12 +2866,12 @@ static int nfsd4_get_mounted_on_ino(struct svc_export *exp, u64 *pino)
}
static __be32
-nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
+nfsd4_encode_bitmap4(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
{
__be32 *p;
if (bmval2) {
- p = xdr_reserve_space(xdr, 16);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
@@ -2919,94 +2879,687 @@ nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
*p++ = cpu_to_be32(bmval1);
*p++ = cpu_to_be32(bmval2);
} else if (bmval1) {
- p = xdr_reserve_space(xdr, 12);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 3);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
} else {
- p = xdr_reserve_space(xdr, 8);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 2);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
*p++ = cpu_to_be32(bmval0);
}
- return 0;
+ return nfs_ok;
out_resource:
return nfserr_resource;
}
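nfsd4_encode_bitmap4() above emits a bitmap4 as a word count followed by only as many mask words as are needed (one, two, or three, depending on which words are non-zero). Here is a small user-space sketch of that trimming, assuming a caller-supplied buffer in place of xdr_reserve_space():

/* User-space sketch of the bitmap4 encoding done by nfsd4_encode_bitmap4()
 * above: a word count followed by the mask words, with empty trailing
 * words omitted. Buffer handling is illustrative.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t encode_bitmap4(unsigned char *buf, uint32_t bm0, uint32_t bm1,
			     uint32_t bm2)
{
	uint32_t words[3] = { bm0, bm1, bm2 };
	uint32_t count = bm2 ? 3 : bm1 ? 2 : 1;
	uint32_t be, i;

	be = htonl(count);
	memcpy(buf, &be, 4);
	for (i = 0; i < count; i++) {
		be = htonl(words[i]);
		memcpy(buf + 4 + 4 * i, &be, 4);
	}
	return 4 + 4 * count;	/* bytes consumed: XDR_UNIT * (count + 1) */
}

int main(void)
{
	unsigned char buf[16];
	size_t len = encode_bitmap4(buf, 0x00000018, 0, 0);
	size_t i;

	for (i = 0; i < len; i++)
		printf("%02x%s", buf[i], (i + 1) % 4 ? " " : "\n");
	return 0;
}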
+struct nfsd4_fattr_args {
+ struct svc_rqst *rqstp;
+ struct svc_fh *fhp;
+ struct svc_export *exp;
+ struct dentry *dentry;
+ struct kstat stat;
+ struct kstatfs statfs;
+ struct nfs4_acl *acl;
+ u64 size;
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ void *context;
+ int contextlen;
+#endif
+ u32 rdattr_err;
+ bool contextsupport;
+ bool ignore_crossmnt;
+};
+
+typedef __be32(*nfsd4_enc_attr)(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args);
+
+static __be32 nfsd4_encode_fattr4__noop(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4__true(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_bool(xdr, true);
+}
+
+static __be32 nfsd4_encode_fattr4__false(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_bool(xdr, false);
+}
+
+static __be32 nfsd4_encode_fattr4_supported_attrs(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfsd4_compoundres *resp = args->rqstp->rq_resp;
+ u32 minorversion = resp->cstate.minorversion;
+ u32 supp[3];
+
+ memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
+ if (!IS_POSIXACL(d_inode(args->dentry)))
+ supp[0] &= ~FATTR4_WORD0_ACL;
+ if (!args->contextsupport)
+ supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+
+ return nfsd4_encode_bitmap4(xdr, supp[0], supp[1], supp[2]);
+}
+
+static __be32 nfsd4_encode_fattr4_type(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, XDR_UNIT);
+ if (!p)
+ return nfserr_resource;
+
+ switch (args->stat.mode & S_IFMT) {
+ case S_IFIFO:
+ *p = cpu_to_be32(NF4FIFO);
+ break;
+ case S_IFCHR:
+ *p = cpu_to_be32(NF4CHR);
+ break;
+ case S_IFDIR:
+ *p = cpu_to_be32(NF4DIR);
+ break;
+ case S_IFBLK:
+ *p = cpu_to_be32(NF4BLK);
+ break;
+ case S_IFLNK:
+ *p = cpu_to_be32(NF4LNK);
+ break;
+ case S_IFREG:
+ *p = cpu_to_be32(NF4REG);
+ break;
+ case S_IFSOCK:
+ *p = cpu_to_be32(NF4SOCK);
+ break;
+ default:
+ return nfserr_serverfault;
+ }
+
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_fh_expire_type(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u32 mask;
+
+ mask = NFS4_FH_PERSISTENT;
+ if (!(args->exp->ex_flags & NFSEXP_NOSUBTREECHECK))
+ mask |= NFS4_FH_VOL_RENAME;
+ return nfsd4_encode_uint32_t(xdr, mask);
+}
+
+static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ const struct svc_export *exp = args->exp;
+ u64 c;
+
+ if (unlikely(exp->ex_flags & NFSEXP_V4ROOT)) {
+ u32 flush_time = convert_to_wallclock(exp->cd->flush_time);
+
+ if (xdr_stream_encode_u32(xdr, flush_time) != XDR_UNIT)
+ return nfserr_resource;
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+ }
+
+ c = nfsd4_change_attribute(&args->stat, d_inode(args->dentry));
+ return nfsd4_encode_changeid4(xdr, c);
+}
+
+static __be32 nfsd4_encode_fattr4_size(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->size);
+}
+
+static __be32 nfsd4_encode_fattr4_fsid(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, XDR_UNIT * 2 + XDR_UNIT * 2);
+ if (!p)
+ return nfserr_resource;
+
+ if (unlikely(args->exp->ex_fslocs.migrated)) {
+ p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
+ xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
+ return nfs_ok;
+ }
+ switch (fsid_source(args->fhp)) {
+ case FSIDSOURCE_FSID:
+ p = xdr_encode_hyper(p, (u64)args->exp->ex_fsid);
+ xdr_encode_hyper(p, (u64)0);
+ break;
+ case FSIDSOURCE_DEV:
+ *p++ = xdr_zero;
+ *p++ = cpu_to_be32(MAJOR(args->stat.dev));
+ *p++ = xdr_zero;
+ *p = cpu_to_be32(MINOR(args->stat.dev));
+ break;
+ case FSIDSOURCE_UUID:
+ xdr_encode_opaque_fixed(p, args->exp->ex_uuid, EX_UUID_LEN);
+ break;
+ }
+
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_lease_time(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfsd_net *nn = net_generic(SVC_NET(args->rqstp), nfsd_net_id);
+
+ return nfsd4_encode_nfs_lease4(xdr, nn->nfsd4_lease);
+}
+
+static __be32 nfsd4_encode_fattr4_rdattr_error(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->rdattr_err);
+}
+
+static __be32 nfsd4_encode_fattr4_aclsupport(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u32 mask;
+
+ mask = 0;
+ if (IS_POSIXACL(d_inode(args->dentry)))
+ mask = ACL4_SUPPORT_ALLOW_ACL | ACL4_SUPPORT_DENY_ACL;
+ return nfsd4_encode_uint32_t(xdr, mask);
+}
+
+static __be32 nfsd4_encode_fattr4_acl(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfs4_acl *acl = args->acl;
+ struct nfs4_ace *ace;
+ __be32 status;
+
+ /* nfsace4<> */
+ if (!acl) {
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ } else {
+ if (xdr_stream_encode_u32(xdr, acl->naces) != XDR_UNIT)
+ return nfserr_resource;
+ for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
+ status = nfsd4_encode_nfsace4(xdr, args->rqstp, ace);
+ if (status != nfs_ok)
+ return status;
+ }
+ }
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_filehandle(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfs_fh4(xdr, &args->fhp->fh_handle);
+}
+
+static __be32 nfsd4_encode_fattr4_fileid(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->stat.ino);
+}
+
+static __be32 nfsd4_encode_fattr4_files_avail(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->statfs.f_ffree);
+}
+
+static __be32 nfsd4_encode_fattr4_files_free(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->statfs.f_ffree);
+}
+
+static __be32 nfsd4_encode_fattr4_files_total(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->statfs.f_files);
+}
+
+static __be32 nfsd4_encode_fattr4_fs_locations(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_fs_locations4(xdr, args->rqstp, args->exp);
+}
+
+static __be32 nfsd4_encode_fattr4_maxfilesize(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct super_block *sb = args->exp->ex_path.mnt->mnt_sb;
+
+ return nfsd4_encode_uint64_t(xdr, sb->s_maxbytes);
+}
+
+static __be32 nfsd4_encode_fattr4_maxlink(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, 255);
+}
+
+static __be32 nfsd4_encode_fattr4_maxname(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->statfs.f_namelen);
+}
+
+static __be32 nfsd4_encode_fattr4_maxread(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, svc_max_payload(args->rqstp));
+}
+
+static __be32 nfsd4_encode_fattr4_maxwrite(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, svc_max_payload(args->rqstp));
+}
+
+static __be32 nfsd4_encode_fattr4_mode(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_mode4(xdr, args->stat.mode & S_IALLUGO);
+}
+
+static __be32 nfsd4_encode_fattr4_numlinks(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->stat.nlink);
+}
+
+static __be32 nfsd4_encode_fattr4_owner(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_user(xdr, args->rqstp, args->stat.uid);
+}
+
+static __be32 nfsd4_encode_fattr4_owner_group(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_group(xdr, args->rqstp, args->stat.gid);
+}
+
+static __be32 nfsd4_encode_fattr4_rawdev(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_specdata4(xdr, MAJOR(args->stat.rdev),
+ MINOR(args->stat.rdev));
+}
+
+static __be32 nfsd4_encode_fattr4_space_avail(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 avail = (u64)args->statfs.f_bavail * (u64)args->statfs.f_bsize;
+
+ return nfsd4_encode_uint64_t(xdr, avail);
+}
+
+static __be32 nfsd4_encode_fattr4_space_free(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 free = (u64)args->statfs.f_bfree * (u64)args->statfs.f_bsize;
+
+ return nfsd4_encode_uint64_t(xdr, free);
+}
+
+static __be32 nfsd4_encode_fattr4_space_total(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 total = (u64)args->statfs.f_blocks * (u64)args->statfs.f_bsize;
+
+ return nfsd4_encode_uint64_t(xdr, total);
+}
+
+static __be32 nfsd4_encode_fattr4_space_used(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, (u64)args->stat.blocks << 9);
+}
+
+static __be32 nfsd4_encode_fattr4_time_access(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.atime);
+}
+
+static __be32 nfsd4_encode_fattr4_time_create(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.btime);
+}
+
+/*
+ * ctime (in NFSv4, time_metadata) is not writeable, and the client
+ * doesn't really care what resolution could theoretically be stored by
+ * the filesystem.
+ *
+ * The client cares how close together changes can be while still
+ * guaranteeing ctime changes. For most filesystems (which have
+ * timestamps with nanosecond fields) that is limited by the resolution
+ * of the time returned from current_time() (which I'm assuming to be
+ * 1/HZ).
+ */
+static __be32 nfsd4_encode_fattr4_time_delta(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ const struct inode *inode = d_inode(args->dentry);
+ u32 ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
+ struct timespec64 ts = ns_to_timespec64(ns);
+
+ return nfsd4_encode_nfstime4(xdr, &ts);
+}
+
+static __be32 nfsd4_encode_fattr4_time_metadata(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.ctime);
+}
+
+static __be32 nfsd4_encode_fattr4_time_modify(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.mtime);
+}
+
+static __be32 nfsd4_encode_fattr4_mounted_on_fileid(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 ino;
+ int err;
+
+ if (!args->ignore_crossmnt &&
+ args->dentry == args->exp->ex_path.mnt->mnt_root) {
+ err = nfsd4_get_mounted_on_ino(args->exp, &ino);
+ if (err)
+ return nfserrno(err);
+ } else
+ ino = args->stat.ino;
+
+ return nfsd4_encode_uint64_t(xdr, ino);
+}
+
+#ifdef CONFIG_NFSD_PNFS
+
+static __be32 nfsd4_encode_fattr4_fs_layout_types(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ unsigned long mask = args->exp->ex_layout_types;
+ int i;
+
+ /* Hamming weight of @mask is the number of layout types to return */
+ if (xdr_stream_encode_u32(xdr, hweight_long(mask)) != XDR_UNIT)
+ return nfserr_resource;
+ for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
+ if (mask & BIT(i)) {
+ /* layouttype4 */
+ if (xdr_stream_encode_u32(xdr, i) != XDR_UNIT)
+ return nfserr_resource;
+ }
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_layout_types(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ unsigned long mask = args->exp->ex_layout_types;
+ int i;
+
+ /* Hamming weight of @mask is the number of layout types to return */
+ if (xdr_stream_encode_u32(xdr, hweight_long(mask)) != XDR_UNIT)
+ return nfserr_resource;
+ for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
+ if (mask & BIT(i)) {
+ /* layouttype4 */
+ if (xdr_stream_encode_u32(xdr, i) != XDR_UNIT)
+ return nfserr_resource;
+ }
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_layout_blksize(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->stat.blksize);
+}
+
+#endif
+
+static __be32 nfsd4_encode_fattr4_suppattr_exclcreat(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfsd4_compoundres *resp = args->rqstp->rq_resp;
+ u32 supp[3];
+
+ memcpy(supp, nfsd_suppattrs[resp->cstate.minorversion], sizeof(supp));
+ supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
+ supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
+ supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
+
+ return nfsd4_encode_bitmap4(xdr, supp[0], supp[1], supp[2]);
+}
+
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+static __be32 nfsd4_encode_fattr4_sec_label(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_security_label(xdr, args->rqstp,
+ args->context, args->contextlen);
+}
+#endif
+
+static __be32 nfsd4_encode_fattr4_xattr_support(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ int err = xattr_supports_user_prefix(d_inode(args->dentry));
+
+ return nfsd4_encode_bool(xdr, err == 0);
+}
+
+static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
+ [FATTR4_SUPPORTED_ATTRS] = nfsd4_encode_fattr4_supported_attrs,
+ [FATTR4_TYPE] = nfsd4_encode_fattr4_type,
+ [FATTR4_FH_EXPIRE_TYPE] = nfsd4_encode_fattr4_fh_expire_type,
+ [FATTR4_CHANGE] = nfsd4_encode_fattr4_change,
+ [FATTR4_SIZE] = nfsd4_encode_fattr4_size,
+ [FATTR4_LINK_SUPPORT] = nfsd4_encode_fattr4__true,
+ [FATTR4_SYMLINK_SUPPORT] = nfsd4_encode_fattr4__true,
+ [FATTR4_NAMED_ATTR] = nfsd4_encode_fattr4__false,
+ [FATTR4_FSID] = nfsd4_encode_fattr4_fsid,
+ [FATTR4_UNIQUE_HANDLES] = nfsd4_encode_fattr4__true,
+ [FATTR4_LEASE_TIME] = nfsd4_encode_fattr4_lease_time,
+ [FATTR4_RDATTR_ERROR] = nfsd4_encode_fattr4_rdattr_error,
+ [FATTR4_ACL] = nfsd4_encode_fattr4_acl,
+ [FATTR4_ACLSUPPORT] = nfsd4_encode_fattr4_aclsupport,
+ [FATTR4_ARCHIVE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CANSETTIME] = nfsd4_encode_fattr4__true,
+ [FATTR4_CASE_INSENSITIVE] = nfsd4_encode_fattr4__false,
+ [FATTR4_CASE_PRESERVING] = nfsd4_encode_fattr4__true,
+ [FATTR4_CHOWN_RESTRICTED] = nfsd4_encode_fattr4__true,
+ [FATTR4_FILEHANDLE] = nfsd4_encode_fattr4_filehandle,
+ [FATTR4_FILEID] = nfsd4_encode_fattr4_fileid,
+ [FATTR4_FILES_AVAIL] = nfsd4_encode_fattr4_files_avail,
+ [FATTR4_FILES_FREE] = nfsd4_encode_fattr4_files_free,
+ [FATTR4_FILES_TOTAL] = nfsd4_encode_fattr4_files_total,
+ [FATTR4_FS_LOCATIONS] = nfsd4_encode_fattr4_fs_locations,
+ [FATTR4_HIDDEN] = nfsd4_encode_fattr4__noop,
+ [FATTR4_HOMOGENEOUS] = nfsd4_encode_fattr4__true,
+ [FATTR4_MAXFILESIZE] = nfsd4_encode_fattr4_maxfilesize,
+ [FATTR4_MAXLINK] = nfsd4_encode_fattr4_maxlink,
+ [FATTR4_MAXNAME] = nfsd4_encode_fattr4_maxname,
+ [FATTR4_MAXREAD] = nfsd4_encode_fattr4_maxread,
+ [FATTR4_MAXWRITE] = nfsd4_encode_fattr4_maxwrite,
+ [FATTR4_MIMETYPE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MODE] = nfsd4_encode_fattr4_mode,
+ [FATTR4_NO_TRUNC] = nfsd4_encode_fattr4__true,
+ [FATTR4_NUMLINKS] = nfsd4_encode_fattr4_numlinks,
+ [FATTR4_OWNER] = nfsd4_encode_fattr4_owner,
+ [FATTR4_OWNER_GROUP] = nfsd4_encode_fattr4_owner_group,
+ [FATTR4_QUOTA_AVAIL_HARD] = nfsd4_encode_fattr4__noop,
+ [FATTR4_QUOTA_AVAIL_SOFT] = nfsd4_encode_fattr4__noop,
+ [FATTR4_QUOTA_USED] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RAWDEV] = nfsd4_encode_fattr4_rawdev,
+ [FATTR4_SPACE_AVAIL] = nfsd4_encode_fattr4_space_avail,
+ [FATTR4_SPACE_FREE] = nfsd4_encode_fattr4_space_free,
+ [FATTR4_SPACE_TOTAL] = nfsd4_encode_fattr4_space_total,
+ [FATTR4_SPACE_USED] = nfsd4_encode_fattr4_space_used,
+ [FATTR4_SYSTEM] = nfsd4_encode_fattr4__noop,
+ [FATTR4_TIME_ACCESS] = nfsd4_encode_fattr4_time_access,
+ [FATTR4_TIME_ACCESS_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_TIME_BACKUP] = nfsd4_encode_fattr4__noop,
+ [FATTR4_TIME_CREATE] = nfsd4_encode_fattr4_time_create,
+ [FATTR4_TIME_DELTA] = nfsd4_encode_fattr4_time_delta,
+ [FATTR4_TIME_METADATA] = nfsd4_encode_fattr4_time_metadata,
+ [FATTR4_TIME_MODIFY] = nfsd4_encode_fattr4_time_modify,
+ [FATTR4_TIME_MODIFY_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MOUNTED_ON_FILEID] = nfsd4_encode_fattr4_mounted_on_fileid,
+ [FATTR4_DIR_NOTIF_DELAY] = nfsd4_encode_fattr4__noop,
+ [FATTR4_DIRENT_NOTIF_DELAY] = nfsd4_encode_fattr4__noop,
+ [FATTR4_DACL] = nfsd4_encode_fattr4__noop,
+ [FATTR4_SACL] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CHANGE_POLICY] = nfsd4_encode_fattr4__noop,
+ [FATTR4_FS_STATUS] = nfsd4_encode_fattr4__noop,
+
+#ifdef CONFIG_NFSD_PNFS
+ [FATTR4_FS_LAYOUT_TYPES] = nfsd4_encode_fattr4_fs_layout_types,
+ [FATTR4_LAYOUT_HINT] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_TYPES] = nfsd4_encode_fattr4_layout_types,
+ [FATTR4_LAYOUT_BLKSIZE] = nfsd4_encode_fattr4_layout_blksize,
+ [FATTR4_LAYOUT_ALIGNMENT] = nfsd4_encode_fattr4__noop,
+#else
+ [FATTR4_FS_LAYOUT_TYPES] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_HINT] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_TYPES] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_BLKSIZE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_ALIGNMENT] = nfsd4_encode_fattr4__noop,
+#endif
+
+ [FATTR4_FS_LOCATIONS_INFO] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MDSTHRESHOLD] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTION_GET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTION_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTEVT_GET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTEVT_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTION_HOLD] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MODE_SET_MASKED] = nfsd4_encode_fattr4__noop,
+ [FATTR4_SUPPATTR_EXCLCREAT] = nfsd4_encode_fattr4_suppattr_exclcreat,
+ [FATTR4_FS_CHARSET_CAP] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CLONE_BLKSIZE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_SPACE_FREED] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CHANGE_ATTR_TYPE] = nfsd4_encode_fattr4__noop,
+
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ [FATTR4_SEC_LABEL] = nfsd4_encode_fattr4_sec_label,
+#else
+ [FATTR4_SEC_LABEL] = nfsd4_encode_fattr4__noop,
+#endif
+
+ [FATTR4_MODE_UMASK] = nfsd4_encode_fattr4__noop,
+ [FATTR4_XATTR_SUPPORT] = nfsd4_encode_fattr4_xattr_support,
+};
+
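The table above replaces the long if-ladder removed later in this hunk: each requested attribute bit indexes a per-attribute encoder, and the caller walks the set bits with for_each_set_bit(). The user-space sketch below shows the same dispatch shape; the bit numbers and handlers are invented for illustration and do not correspond to real FATTR4_* values.

/* Sketch of a bitmask-driven dispatch over a table of encoder callbacks,
 * mirroring how nfsd4_encode_fattr4() walks nfsd4_enc_fattr4_encode_ops.
 */
#include <stdint.h>
#include <stdio.h>

struct fattr_args { const char *name; uint64_t size; };
typedef int (*enc_attr)(const struct fattr_args *args);

static int enc_type(const struct fattr_args *a) { (void)a; puts("type: NF4REG"); return 0; }
static int enc_size(const struct fattr_args *a) { printf("size: %llu\n", (unsigned long long)a->size); return 0; }
static int enc_name(const struct fattr_args *a) { printf("name: %s\n", a->name); return 0; }

static const enc_attr encode_ops[] = {
	[0] = enc_type,		/* pretend bit 0 is "type" */
	[1] = enc_size,		/* pretend bit 1 is "size" */
	[2] = enc_name,		/* pretend bit 2 is "name" */
};

int main(void)
{
	const struct fattr_args args = { .name = "demo", .size = 4096 };
	uint64_t mask = (1u << 0) | (1u << 2);	/* caller asked for type+name */
	unsigned int bit;

	for (bit = 0; bit < sizeof(encode_ops) / sizeof(encode_ops[0]); bit++) {
		if (!(mask & (1ull << bit)))
			continue;		/* like for_each_set_bit() */
		if (encode_ops[bit](&args))
			return 1;
	}
	return 0;
}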
/*
* Note: @fhp can be NULL; in this case, we might have to compose the filehandle
* ourselves.
*/
static __be32
-nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
- struct svc_export *exp,
- struct dentry *dentry, u32 *bmval,
- struct svc_rqst *rqstp, int ignore_crossmnt)
+nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ struct svc_fh *fhp, struct svc_export *exp,
+ struct dentry *dentry, const u32 *bmval,
+ int ignore_crossmnt)
{
- u32 bmval0 = bmval[0];
- u32 bmval1 = bmval[1];
- u32 bmval2 = bmval[2];
- struct kstat stat;
+ struct nfsd4_fattr_args args;
struct svc_fh *tempfh = NULL;
- struct kstatfs statfs;
- __be32 *p, *attrlen_p;
int starting_len = xdr->buf->len;
+ __be32 *attrlen_p, status;
int attrlen_offset;
- u32 dummy;
- u64 dummy64;
- u32 rdattr_err = 0;
- __be32 status;
int err;
- struct nfs4_acl *acl = NULL;
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- void *context = NULL;
- int contextlen;
-#endif
- bool contextsupport = false;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
struct path path = {
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ union {
+ u32 attrmask[3];
+ unsigned long mask[2];
+ } u;
+ bool file_modified;
+ unsigned long bit;
+ u64 size = 0;
+
+ WARN_ON_ONCE(bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1);
+ WARN_ON_ONCE(!nfsd_attrs_supported(minorversion, bmval));
+
+ args.rqstp = rqstp;
+ args.exp = exp;
+ args.dentry = dentry;
+ args.ignore_crossmnt = (ignore_crossmnt != 0);
- BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
- BUG_ON(!nfsd_attrs_supported(minorversion, bmval));
+ /*
+ * Make a local copy of the attribute bitmap that can be modified.
+ */
+ memset(&u, 0, sizeof(u));
+ u.attrmask[0] = bmval[0];
+ u.attrmask[1] = bmval[1];
+ u.attrmask[2] = bmval[2];
+ args.rdattr_err = 0;
if (exp->ex_fslocs.migrated) {
- status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
+ status = fattr_handle_absent_fs(&u.attrmask[0], &u.attrmask[1],
+ &u.attrmask[2], &args.rdattr_err);
if (status)
goto out;
}
- if (bmval0 & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
- status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry));
+ args.size = 0;
+ if (u.attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+ status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry),
+ &file_modified, &size);
if (status)
goto out;
}
- err = vfs_getattr(&path, &stat,
+ err = vfs_getattr(&path, &args.stat,
STATX_BASIC_STATS | STATX_BTIME | STATX_CHANGE_COOKIE,
AT_STATX_SYNC_AS_STAT);
if (err)
goto out_nfserr;
- if (!(stat.result_mask & STATX_BTIME))
+ args.size = file_modified ? size : args.stat.size;
+
+ if (!(args.stat.result_mask & STATX_BTIME))
/* underlying FS does not offer btime so we can't share it */
- bmval1 &= ~FATTR4_WORD1_TIME_CREATE;
- if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
+ u.attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
+ if ((u.attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
- (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+ (u.attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
- err = vfs_statfs(&path, &statfs);
+ err = vfs_statfs(&path, &args.statfs);
if (err)
goto out_nfserr;
}
- if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
+ if ((u.attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
+ !fhp) {
tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
status = nfserr_jukebox;
if (!tempfh)
@@ -3015,12 +3568,15 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
status = fh_compose(tempfh, exp, dentry, NULL);
if (status)
goto out;
- fhp = tempfh;
- }
- if (bmval0 & FATTR4_WORD0_ACL) {
- err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
+ args.fhp = tempfh;
+ } else
+ args.fhp = fhp;
+
+ args.acl = NULL;
+ if (u.attrmask[0] & FATTR4_WORD0_ACL) {
+ err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
if (err == -EOPNOTSUPP)
- bmval0 &= ~FATTR4_WORD0_ACL;
+ u.attrmask[0] &= ~FATTR4_WORD0_ACL;
else if (err == -EINVAL) {
status = nfserr_attrnotsupp;
goto out;
@@ -3028,452 +3584,53 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
goto out_nfserr;
}
+ args.contextsupport = false;
+
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
- bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
+ args.context = NULL;
+ if ((u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
+ u.attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
err = security_inode_getsecctx(d_inode(dentry),
- &context, &contextlen);
+ &args.context, &args.contextlen);
else
err = -EOPNOTSUPP;
- contextsupport = (err == 0);
- if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
+ args.contextsupport = (err == 0);
+ if (u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
if (err == -EOPNOTSUPP)
- bmval2 &= ~FATTR4_WORD2_SECURITY_LABEL;
+ u.attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
else if (err)
goto out_nfserr;
}
}
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
- status = nfsd4_encode_bitmap(xdr, bmval0, bmval1, bmval2);
+ /* attrmask */
+ status = nfsd4_encode_bitmap4(xdr, u.attrmask[0],
+ u.attrmask[1], u.attrmask[2]);
if (status)
goto out;
+ /* attr_vals */
attrlen_offset = xdr->buf->len;
attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
if (!attrlen_p)
goto out_resource;
-
- if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
- u32 supp[3];
-
- memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
-
- if (!IS_POSIXACL(dentry->d_inode))
- supp[0] &= ~FATTR4_WORD0_ACL;
- if (!contextsupport)
- supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
- if (!supp[2]) {
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(2);
- *p++ = cpu_to_be32(supp[0]);
- *p++ = cpu_to_be32(supp[1]);
- } else {
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(supp[0]);
- *p++ = cpu_to_be32(supp[1]);
- *p++ = cpu_to_be32(supp[2]);
- }
- }
- if (bmval0 & FATTR4_WORD0_TYPE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- dummy = nfs4_file_type(stat.mode);
- if (dummy == NF4BAD) {
- status = nfserr_serverfault;
+ for_each_set_bit(bit, (const unsigned long *)&u.mask,
+ ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops)) {
+ status = nfsd4_enc_fattr4_encode_ops[bit](xdr, &args);
+ if (status != nfs_ok)
goto out;
- }
- *p++ = cpu_to_be32(dummy);
- }
- if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- if (exp->ex_flags & NFSEXP_NOSUBTREECHECK)
- *p++ = cpu_to_be32(NFS4_FH_PERSISTENT);
- else
- *p++ = cpu_to_be32(NFS4_FH_PERSISTENT|
- NFS4_FH_VOL_RENAME);
}
- if (bmval0 & FATTR4_WORD0_CHANGE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = encode_change(p, &stat, d_inode(dentry), exp);
- }
- if (bmval0 & FATTR4_WORD0_SIZE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, stat.size);
- }
- if (bmval0 & FATTR4_WORD0_LINK_SUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_SYMLINK_SUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_NAMED_ATTR) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(0);
- }
- if (bmval0 & FATTR4_WORD0_FSID) {
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- goto out_resource;
- if (exp->ex_fslocs.migrated) {
- p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
- p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
- } else switch(fsid_source(fhp)) {
- case FSIDSOURCE_FSID:
- p = xdr_encode_hyper(p, (u64)exp->ex_fsid);
- p = xdr_encode_hyper(p, (u64)0);
- break;
- case FSIDSOURCE_DEV:
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(MAJOR(stat.dev));
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(MINOR(stat.dev));
- break;
- case FSIDSOURCE_UUID:
- p = xdr_encode_opaque_fixed(p, exp->ex_uuid,
- EX_UUID_LEN);
- break;
- }
- }
- if (bmval0 & FATTR4_WORD0_UNIQUE_HANDLES) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(0);
- }
- if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(nn->nfsd4_lease);
- }
- if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(rdattr_err);
- }
- if (bmval0 & FATTR4_WORD0_ACL) {
- struct nfs4_ace *ace;
-
- if (acl == NULL) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
-
- *p++ = cpu_to_be32(0);
- goto out_acl;
- }
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(acl->naces);
-
- for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
- p = xdr_reserve_space(xdr, 4*3);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(ace->type);
- *p++ = cpu_to_be32(ace->flag);
- *p++ = cpu_to_be32(ace->access_mask &
- NFS4_ACE_MASK_ALL);
- status = nfsd4_encode_aclname(xdr, rqstp, ace);
- if (status)
- goto out;
- }
- }
-out_acl:
- if (bmval0 & FATTR4_WORD0_ACLSUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(IS_POSIXACL(dentry->d_inode) ?
- ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0);
- }
- if (bmval0 & FATTR4_WORD0_CANSETTIME) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(0);
- }
- if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_CHOWN_RESTRICTED) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_FILEHANDLE) {
- p = xdr_reserve_space(xdr, fhp->fh_handle.fh_size + 4);
- if (!p)
- goto out_resource;
- p = xdr_encode_opaque(p, &fhp->fh_handle.fh_raw,
- fhp->fh_handle.fh_size);
- }
- if (bmval0 & FATTR4_WORD0_FILEID) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, stat.ino);
- }
- if (bmval0 & FATTR4_WORD0_FILES_AVAIL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
- }
- if (bmval0 & FATTR4_WORD0_FILES_FREE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
- }
- if (bmval0 & FATTR4_WORD0_FILES_TOTAL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) statfs.f_files);
- }
- if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
- status = nfsd4_encode_fs_locations(xdr, rqstp, exp);
- if (status)
- goto out;
- }
- if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, exp->ex_path.mnt->mnt_sb->s_maxbytes);
- }
- if (bmval0 & FATTR4_WORD0_MAXLINK) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(255);
- }
- if (bmval0 & FATTR4_WORD0_MAXNAME) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(statfs.f_namelen);
- }
- if (bmval0 & FATTR4_WORD0_MAXREAD) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
- }
- if (bmval0 & FATTR4_WORD0_MAXWRITE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
- }
- if (bmval1 & FATTR4_WORD1_MODE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(stat.mode & S_IALLUGO);
- }
- if (bmval1 & FATTR4_WORD1_NO_TRUNC) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval1 & FATTR4_WORD1_NUMLINKS) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(stat.nlink);
- }
- if (bmval1 & FATTR4_WORD1_OWNER) {
- status = nfsd4_encode_user(xdr, rqstp, stat.uid);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
- status = nfsd4_encode_group(xdr, rqstp, stat.gid);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_RAWDEV) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32((u32) MAJOR(stat.rdev));
- *p++ = cpu_to_be32((u32) MINOR(stat.rdev));
- }
- if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)statfs.f_bavail * (u64)statfs.f_bsize;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_SPACE_FREE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)statfs.f_bfree * (u64)statfs.f_bsize;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)statfs.f_blocks * (u64)statfs.f_bsize;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_SPACE_USED) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)stat.blocks << 9;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
- status = nfsd4_encode_nfstime4(xdr, &stat.atime);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_TIME_CREATE) {
- status = nfsd4_encode_nfstime4(xdr, &stat.btime);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- goto out_resource;
- p = encode_time_delta(p, d_inode(dentry));
- }
- if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
- status = nfsd4_encode_nfstime4(xdr, &stat.ctime);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
- status = nfsd4_encode_nfstime4(xdr, &stat.mtime);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
- u64 ino = stat.ino;
-
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- /*
- * Get ino of mountpoint in parent filesystem, if not ignoring
- * crossmount and this is the root of a cross-mounted
- * filesystem.
- */
- if (ignore_crossmnt == 0 &&
- dentry == exp->ex_path.mnt->mnt_root) {
- err = nfsd4_get_mounted_on_ino(exp, &ino);
- if (err)
- goto out_nfserr;
- }
- p = xdr_encode_hyper(p, ino);
- }
-#ifdef CONFIG_NFSD_PNFS
- if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
- if (status)
- goto out;
- }
-
- if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
- if (status)
- goto out;
- }
-
- if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(stat.blksize);
- }
-#endif /* CONFIG_NFSD_PNFS */
- if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
- u32 supp[3];
-
- memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
- supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
- supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
- supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
-
- status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
- if (status)
- goto out;
- }
-
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
- status = nfsd4_encode_security_label(xdr, rqstp, context,
- contextlen);
- if (status)
- goto out;
- }
-#endif
-
- if (bmval2 & FATTR4_WORD2_XATTR_SUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- err = xattr_supports_user_prefix(d_inode(dentry));
- *p++ = cpu_to_be32(err == 0);
- }
-
*attrlen_p = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT);
status = nfs_ok;
out:
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if (context)
- security_release_secctx(context, contextlen);
+ if (args.context)
+ security_release_secctx(args.context, args.contextlen);
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
- kfree(acl);
+ kfree(args.acl);
if (tempfh) {
fh_put(tempfh);
kfree(tempfh);
@@ -3514,12 +3671,28 @@ __be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
__be32 ret;
svcxdr_init_encode_from_buffer(&xdr, &dummy, *p, words << 2);
- ret = nfsd4_encode_fattr(&xdr, fhp, exp, dentry, bmval, rqstp,
- ignore_crossmnt);
+ ret = nfsd4_encode_fattr4(rqstp, &xdr, fhp, exp, dentry, bmval,
+ ignore_crossmnt);
*p = xdr.p;
return ret;
}
+/*
+ * The buffer space for this field was reserved during a previous
+ * call to nfsd4_encode_entry4().
+ */
+static void nfsd4_encode_entry4_nfs_cookie4(const struct nfsd4_readdir *readdir,
+ u64 offset)
+{
+ __be64 cookie = cpu_to_be64(offset);
+ struct xdr_stream *xdr = readdir->xdr;
+
+ if (!readdir->cookie_offset)
+ return;
+ write_bytes_to_xdr_buf(xdr->buf, readdir->cookie_offset, &cookie,
+ sizeof(cookie));
+}
+
static inline int attributes_need_mount(u32 *bmval)
{
if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME))
@@ -3530,8 +3703,8 @@ static inline int attributes_need_mount(u32 *bmval)
}
static __be32
-nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
- const char *name, int namlen)
+nfsd4_encode_entry4_fattr(struct nfsd4_readdir *cd, const char *name,
+ int namlen)
{
struct svc_export *exp = cd->rd_fhp->fh_export;
struct dentry *dentry;
@@ -3574,33 +3747,34 @@ nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
}
out_encode:
- nfserr = nfsd4_encode_fattr(xdr, NULL, exp, dentry, cd->rd_bmval,
- cd->rd_rqstp, ignore_crossmnt);
+ nfserr = nfsd4_encode_fattr4(cd->rd_rqstp, cd->xdr, NULL, exp, dentry,
+ cd->rd_bmval, ignore_crossmnt);
out_put:
dput(dentry);
exp_put(exp);
return nfserr;
}
-static __be32 *
-nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
+static __be32
+nfsd4_encode_entry4_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
{
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return NULL;
- *p++ = htonl(2);
- *p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
- *p++ = htonl(0); /* bmval1 */
+ __be32 status;
- *p++ = htonl(4); /* attribute length */
- *p++ = nfserr; /* no htonl */
- return p;
+ /* attrmask */
+ status = nfsd4_encode_bitmap4(xdr, FATTR4_WORD0_RDATTR_ERROR, 0, 0);
+ if (status != nfs_ok)
+ return status;
+ /* attr_vals */
+ if (xdr_stream_encode_u32(xdr, XDR_UNIT) != XDR_UNIT)
+ return nfserr_resource;
+ /* rdattr_error */
+ if (xdr_stream_encode_be32(xdr, nfserr) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
}
static int
-nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
+nfsd4_encode_entry4(void *ccdv, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = ccdv;
@@ -3611,8 +3785,6 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
u32 name_and_cookie;
int entry_bytes;
__be32 nfserr = nfserr_toosmall;
- __be64 wire_offset;
- __be32 *p;
/* In nfsv4, "." and ".." never make it onto the wire.. */
if (name && isdotent(name, namlen)) {
@@ -3620,24 +3792,19 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
return 0;
}
- if (cd->cookie_offset) {
- wire_offset = cpu_to_be64(offset);
- write_bytes_to_xdr_buf(xdr->buf, cd->cookie_offset,
- &wire_offset, 8);
- }
+ /* Encode the previous entry's cookie value */
+ nfsd4_encode_entry4_nfs_cookie4(cd, offset);
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ if (xdr_stream_encode_item_present(xdr) != XDR_UNIT)
goto fail;
- *p++ = xdr_one; /* mark entry present */
+
+ /* Reserve send buffer space for this entry's cookie value. */
cookie_offset = xdr->buf->len;
- p = xdr_reserve_space(xdr, 3*4 + namlen);
- if (!p)
+ if (nfsd4_encode_nfs_cookie4(xdr, OFFSET_MAX) != nfs_ok)
goto fail;
- p = xdr_encode_hyper(p, OFFSET_MAX); /* offset of next entry */
- p = xdr_encode_array(p, name, namlen); /* name length & name */
-
- nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
+ if (nfsd4_encode_component4(xdr, name, namlen) != nfs_ok)
+ goto fail;
+ nfserr = nfsd4_encode_entry4_fattr(cd, name, namlen);
switch (nfserr) {
case nfs_ok:
break;
@@ -3668,8 +3835,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
*/
if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
goto fail;
- p = nfsd4_encode_rdattr_error(xdr, nfserr);
- if (p == NULL) {
+ if (nfsd4_encode_entry4_rdattr_error(xdr, nfserr)) {
nfserr = nfserr_toosmall;
goto fail;
}
@@ -3727,18 +3893,26 @@ nfsd4_encode_clientid4(struct xdr_stream *xdr, const clientid_t *clientid)
return nfs_ok;
}
+/* This is a frequently-encoded item; open-coded for speed */
static __be32
-nfsd4_encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
+nfsd4_encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
{
__be32 *p;
- p = xdr_reserve_space(xdr, sizeof(stateid_t));
+ p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sid->si_generation);
- p = xdr_encode_opaque_fixed(p, &sid->si_opaque,
- sizeof(stateid_opaque_t));
- return 0;
+ memcpy(p, &sid->si_opaque, sizeof(sid->si_opaque));
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_sessionid4(struct xdr_stream *xdr,
+ const struct nfs4_sessionid *sessionid)
+{
+ return nfsd4_encode_opaque_fixed(xdr, sessionid->data,
+ NFS4_MAX_SESSIONID_LEN);
}
static __be32
@@ -3747,14 +3921,14 @@ nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_access *access = &u->access;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
+ __be32 status;
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(access->ac_supported);
- *p++ = cpu_to_be32(access->ac_resp_access);
- return 0;
+ /* supported */
+ status = nfsd4_encode_uint32_t(xdr, access->ac_supported);
+ if (status != nfs_ok)
+ return status;
+ /* access */
+ return nfsd4_encode_uint32_t(xdr, access->ac_resp_access);
}
static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr,
@@ -3762,17 +3936,16 @@ static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp,
{
struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 8);
- if (!p)
+ /* bctsr_sessid */
+ nfserr = nfsd4_encode_sessionid4(xdr, &bcts->sessionid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* bctsr_dir */
+ if (xdr_stream_encode_u32(xdr, bcts->dir) != XDR_UNIT)
return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, bcts->sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
- *p++ = cpu_to_be32(bcts->dir);
- /* Upshifting from TCP to RDMA is not supported */
- *p++ = cpu_to_be32(0);
- return 0;
+ /* bctsr_use_conn_in_rdma_mode */
+ return nfsd4_encode_bool(xdr, false);
}
static __be32
@@ -3782,7 +3955,8 @@ nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_close *close = &u->close;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &close->cl_stateid);
+ /* open_stateid */
+ return nfsd4_encode_stateid4(xdr, &close->cl_stateid);
}
@@ -3802,11 +3976,13 @@ nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_create *create = &u->create;
struct xdr_stream *xdr = resp->xdr;
+ /* cinfo */
nfserr = nfsd4_encode_change_info4(xdr, &create->cr_cinfo);
if (nfserr)
return nfserr;
- return nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
- create->cr_bmval[1], create->cr_bmval[2]);
+ /* attrset */
+ return nfsd4_encode_bitmap4(xdr, create->cr_bmval[0],
+ create->cr_bmval[1], create->cr_bmval[2]);
}
static __be32
@@ -3817,65 +3993,56 @@ nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr,
struct svc_fh *fhp = getattr->ga_fhp;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_fattr(xdr, fhp, fhp->fh_export, fhp->fh_dentry,
- getattr->ga_bmval, resp->rqstp, 0);
+ /* obj_attributes */
+ return nfsd4_encode_fattr4(resp->rqstp, xdr, fhp, fhp->fh_export,
+ fhp->fh_dentry, getattr->ga_bmval, 0);
}
static __be32
nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
- struct svc_fh **fhpp = &u->getfh;
struct xdr_stream *xdr = resp->xdr;
- struct svc_fh *fhp = *fhpp;
- unsigned int len;
- __be32 *p;
+ struct svc_fh *fhp = u->getfh;
- len = fhp->fh_handle.fh_size;
- p = xdr_reserve_space(xdr, len + 4);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque(p, &fhp->fh_handle.fh_raw, len);
- return 0;
+ /* object */
+ return nfsd4_encode_nfs_fh4(xdr, &fhp->fh_handle);
}
-/*
-* Including all fields other than the name, a LOCK4denied structure requires
-* 8(clientid) + 4(namelen) + 8(offset) + 8(length) + 4(type) = 32 bytes.
-*/
static __be32
-nfsd4_encode_lock_denied(struct xdr_stream *xdr, struct nfsd4_lock_denied *ld)
+nfsd4_encode_lock_owner4(struct xdr_stream *xdr, const clientid_t *clientid,
+ const struct xdr_netobj *owner)
{
- struct xdr_netobj *conf = &ld->ld_owner;
- __be32 *p;
+ __be32 status;
-again:
- p = xdr_reserve_space(xdr, 32 + XDR_LEN(conf->len));
- if (!p) {
- /*
- * Don't fail to return the result just because we can't
- * return the conflicting open:
- */
- if (conf->len) {
- kfree(conf->data);
- conf->len = 0;
- conf->data = NULL;
- goto again;
- }
+ /* clientid */
+ status = nfsd4_encode_clientid4(xdr, clientid);
+ if (status != nfs_ok)
+ return status;
+ /* owner */
+ return nfsd4_encode_opaque(xdr, owner->data, owner->len);
+}
+
+static __be32
+nfsd4_encode_lock4denied(struct xdr_stream *xdr,
+ const struct nfsd4_lock_denied *ld)
+{
+ __be32 status;
+
+ /* offset */
+ status = nfsd4_encode_offset4(xdr, ld->ld_start);
+ if (status != nfs_ok)
+ return status;
+ /* length */
+ status = nfsd4_encode_length4(xdr, ld->ld_length);
+ if (status != nfs_ok)
+ return status;
+ /* locktype */
+ if (xdr_stream_encode_u32(xdr, ld->ld_type) != XDR_UNIT)
return nfserr_resource;
- }
- p = xdr_encode_hyper(p, ld->ld_start);
- p = xdr_encode_hyper(p, ld->ld_length);
- *p++ = cpu_to_be32(ld->ld_type);
- if (conf->len) {
- p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
- p = xdr_encode_opaque(p, conf->data, conf->len);
- kfree(conf->data);
- } else { /* non - nfsv4 lock in conflict, no clientid nor owner */
- p = xdr_encode_hyper(p, (u64)0); /* clientid */
- *p++ = cpu_to_be32(0); /* length of owner name */
- }
- return nfserr_denied;
+ /* owner */
+ return nfsd4_encode_lock_owner4(xdr, &ld->ld_clientid,
+ &ld->ld_owner);
}
static __be32
@@ -3884,13 +4051,21 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_lock *lock = &u->lock;
struct xdr_stream *xdr = resp->xdr;
+ __be32 status;
- if (!nfserr)
- nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
- else if (nfserr == nfserr_denied)
- nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
-
- return nfserr;
+ switch (nfserr) {
+ case nfs_ok:
+ /* resok4 */
+ status = nfsd4_encode_stateid4(xdr, &lock->lk_resp_stateid);
+ break;
+ case nfserr_denied:
+ /* denied */
+ status = nfsd4_encode_lock4denied(xdr, &lock->lk_denied);
+ break;
+ default:
+ return nfserr;
+ }
+ return status != nfs_ok ? status : nfserr;
}
static __be32
@@ -3899,9 +4074,14 @@ nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_lockt *lockt = &u->lockt;
struct xdr_stream *xdr = resp->xdr;
+ __be32 status;
- if (nfserr == nfserr_denied)
- nfsd4_encode_lock_denied(xdr, &lockt->lt_denied);
+ if (nfserr == nfserr_denied) {
+ /* denied */
+ status = nfsd4_encode_lock4denied(xdr, &lockt->lt_denied);
+ if (status != nfs_ok)
+ return status;
+ }
return nfserr;
}
@@ -3912,7 +4092,8 @@ nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_locku *locku = &u->locku;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &locku->lu_stateid);
+ /* lock_stateid */
+ return nfsd4_encode_stateid4(xdr, &locku->lu_stateid);
}
@@ -3926,104 +4107,159 @@ nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfsd4_encode_change_info4(xdr, &link->li_cinfo);
}
+/*
+ * This implementation does not yet support returning an ACE in an
+ * OPEN that offers a delegation.
+ */
+static __be32
+nfsd4_encode_open_nfsace4(struct xdr_stream *xdr)
+{
+ __be32 status;
+
+ /* type */
+ status = nfsd4_encode_acetype4(xdr, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* flag */
+ status = nfsd4_encode_aceflag4(xdr, 0);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* access mask */
+ status = nfsd4_encode_acemask4(xdr, 0);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* who - empty for now */
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+}
static __be32
-nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
+nfsd4_encode_open_read_delegation4(struct xdr_stream *xdr, struct nfsd4_open *open)
{
- struct nfsd4_open *open = &u->open;
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
+ __be32 status;
- nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid);
- if (nfserr)
- return nfserr;
- nfserr = nfsd4_encode_change_info4(xdr, &open->op_cinfo);
- if (nfserr)
- return nfserr;
- if (xdr_stream_encode_u32(xdr, open->op_rflags) < 0)
+ /* stateid */
+ status = nfsd4_encode_stateid4(xdr, &open->op_delegate_stateid);
+ if (status != nfs_ok)
+ return status;
+ /* recall */
+ status = nfsd4_encode_bool(xdr, open->op_recall);
+ if (status != nfs_ok)
+ return status;
+ /* permissions */
+ return nfsd4_encode_open_nfsace4(xdr);
+}
+
+static __be32
+nfsd4_encode_nfs_space_limit4(struct xdr_stream *xdr, u64 filesize)
+{
+ /* limitby */
+ if (xdr_stream_encode_u32(xdr, NFS4_LIMIT_SIZE) != XDR_UNIT)
return nfserr_resource;
+ /* filesize */
+ return nfsd4_encode_uint64_t(xdr, filesize);
+}
- nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1],
- open->op_bmval[2]);
- if (nfserr)
- return nfserr;
+static __be32
+nfsd4_encode_open_write_delegation4(struct xdr_stream *xdr,
+ struct nfsd4_open *open)
+{
+ __be32 status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ /* stateid */
+ status = nfsd4_encode_stateid4(xdr, &open->op_delegate_stateid);
+ if (status != nfs_ok)
+ return status;
+ /* recall */
+ status = nfsd4_encode_bool(xdr, open->op_recall);
+ if (status != nfs_ok)
+ return status;
+ /* space_limit */
+ status = nfsd4_encode_nfs_space_limit4(xdr, 0);
+ if (status != nfs_ok)
+ return status;
+ return nfsd4_encode_open_nfsace4(xdr);
+}
+
+static __be32
+nfsd4_encode_open_none_delegation4(struct xdr_stream *xdr,
+ struct nfsd4_open *open)
+{
+ __be32 status = nfs_ok;
+
+ /* ond_why */
+ if (xdr_stream_encode_u32(xdr, open->op_why_no_deleg) != XDR_UNIT)
return nfserr_resource;
+ switch (open->op_why_no_deleg) {
+ case WND4_CONTENTION:
+ /* ond_server_will_push_deleg */
+ status = nfsd4_encode_bool(xdr, false);
+ break;
+ case WND4_RESOURCE:
+ /* ond_server_will_signal_avail */
+ status = nfsd4_encode_bool(xdr, false);
+ }
+ return status;
+}
+
+static __be32
+nfsd4_encode_open_delegation4(struct xdr_stream *xdr, struct nfsd4_open *open)
+{
+ __be32 status;
- *p++ = cpu_to_be32(open->op_delegate_type);
+ /* delegation_type */
+ if (xdr_stream_encode_u32(xdr, open->op_delegate_type) != XDR_UNIT)
+ return nfserr_resource;
switch (open->op_delegate_type) {
case NFS4_OPEN_DELEGATE_NONE:
+ status = nfs_ok;
break;
case NFS4_OPEN_DELEGATE_READ:
- nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
- if (nfserr)
- return nfserr;
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_recall);
-
- /*
- * TODO: ACE's in delegations
- */
- *p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
+ /* read */
+ status = nfsd4_encode_open_read_delegation4(xdr, open);
break;
case NFS4_OPEN_DELEGATE_WRITE:
- nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
- if (nfserr)
- return nfserr;
-
- p = xdr_reserve_space(xdr, XDR_UNIT * 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_recall);
-
- /*
- * Always flush on close
- *
- * TODO: space_limit's in delegations
- */
- *p++ = cpu_to_be32(NFS4_LIMIT_SIZE);
- *p++ = xdr_zero;
- *p++ = xdr_zero;
-
- /*
- * TODO: ACE's in delegations
- */
- *p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
+ /* write */
+ status = nfsd4_encode_open_write_delegation4(xdr, open);
break;
- case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
- switch (open->op_why_no_deleg) {
- case WND4_CONTENTION:
- case WND4_RESOURCE:
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_why_no_deleg);
- /* deleg signaling not supported yet: */
- *p++ = cpu_to_be32(0);
- break;
- default:
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_why_no_deleg);
- }
+ case NFS4_OPEN_DELEGATE_NONE_EXT:
+ /* od_whynone */
+ status = nfsd4_encode_open_none_delegation4(xdr, open);
break;
default:
- BUG();
+ status = nfserr_serverfault;
}
- /* XXX save filehandle here */
- return 0;
+
+ return status;
+}
+
+static __be32
+nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_open *open = &u->open;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* stateid */
+ nfserr = nfsd4_encode_stateid4(xdr, &open->op_stateid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* cinfo */
+ nfserr = nfsd4_encode_change_info4(xdr, &open->op_cinfo);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* rflags */
+ nfserr = nfsd4_encode_uint32_t(xdr, open->op_rflags);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* attrset */
+ nfserr = nfsd4_encode_bitmap4(xdr, open->op_bmval[0],
+ open->op_bmval[1], open->op_bmval[2]);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* delegation */
+ return nfsd4_encode_open_delegation4(xdr, open);
}
static __be32
@@ -4033,7 +4269,8 @@ nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_open_confirm *oc = &u->open_confirm;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
+ /* open_stateid */
+ return nfsd4_encode_stateid4(xdr, &oc->oc_resp_stateid);
}
static __be32
@@ -4043,7 +4280,8 @@ nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_open_downgrade *od = &u->open_downgrade;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &od->od_stateid);
+ /* open_stateid */
+ return nfsd4_encode_stateid4(xdr, &od->od_stateid);
}
/*
@@ -4227,90 +4465,83 @@ out_err:
return nfserr;
}
-static __be32
-nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
+static __be32 nfsd4_encode_dirlist4(struct xdr_stream *xdr,
+ struct nfsd4_readdir *readdir,
+ u32 max_payload)
{
- struct nfsd4_readdir *readdir = &u->readdir;
- int maxcount;
- int bytes_left;
+ int bytes_left, maxcount, starting_len = xdr->buf->len;
loff_t offset;
- __be64 wire_offset;
- struct xdr_stream *xdr = resp->xdr;
- int starting_len = xdr->buf->len;
- __be32 *p;
-
- nfserr = nfsd4_encode_verifier4(xdr, &readdir->rd_verf);
- if (nfserr != nfs_ok)
- return nfserr;
+ __be32 status;
/*
* Number of bytes left for directory entries allowing for the
- * final 8 bytes of the readdir and a following failed op:
+ * final 8 bytes of the readdir and a following failed op.
*/
- bytes_left = xdr->buf->buflen - xdr->buf->len
- - COMPOUND_ERR_SLACK_SPACE - 8;
- if (bytes_left < 0) {
- nfserr = nfserr_resource;
- goto err_no_verf;
- }
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
+ bytes_left = xdr->buf->buflen - xdr->buf->len -
+ COMPOUND_ERR_SLACK_SPACE - XDR_UNIT * 2;
+ if (bytes_left < 0)
+ return nfserr_resource;
+ maxcount = min_t(u32, readdir->rd_maxcount, max_payload);
+
/*
- * Note the rfc defines rd_maxcount as the size of the
- * READDIR4resok structure, which includes the verifier above
- * and the 8 bytes encoded at the end of this function:
+ * The RFC defines rd_maxcount as the size of the
+ * READDIR4resok structure, which includes the verifier
+ * and the 8 bytes encoded at the end of this function.
*/
- if (maxcount < 16) {
- nfserr = nfserr_toosmall;
- goto err_no_verf;
- }
- maxcount = min_t(int, maxcount-16, bytes_left);
+ if (maxcount < XDR_UNIT * 4)
+ return nfserr_toosmall;
+ maxcount = min_t(int, maxcount - XDR_UNIT * 4, bytes_left);
- /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
+ /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0 */
if (!readdir->rd_dircount)
- readdir->rd_dircount = svc_max_payload(resp->rqstp);
+ readdir->rd_dircount = max_payload;
+ /* *entries */
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;
readdir->common.err = 0;
readdir->cookie_offset = 0;
-
offset = readdir->rd_cookie;
- nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp,
- &offset,
- &readdir->common, nfsd4_encode_dirent);
- if (nfserr == nfs_ok &&
- readdir->common.err == nfserr_toosmall &&
- xdr->buf->len == starting_len + 8) {
- /* nothing encoded; which limit did we hit?: */
- if (maxcount - 16 < bytes_left)
- /* It was the fault of rd_maxcount: */
- nfserr = nfserr_toosmall;
- else
- /* We ran out of buffer space: */
- nfserr = nfserr_resource;
+ status = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp, &offset,
+ &readdir->common, nfsd4_encode_entry4);
+ if (status)
+ return status;
+ if (readdir->common.err == nfserr_toosmall &&
+ xdr->buf->len == starting_len) {
+ /* No entries were encoded. Which limit did we hit? */
+ if (maxcount - XDR_UNIT * 4 < bytes_left)
+ /* It was the fault of rd_maxcount */
+ return nfserr_toosmall;
+ /* We ran out of buffer space */
+ return nfserr_resource;
}
- if (nfserr)
- goto err_no_verf;
+ /* Encode the final entry's cookie value */
+ nfsd4_encode_entry4_nfs_cookie4(readdir, offset);
+ /* No entries follow */
+ if (xdr_stream_encode_item_absent(xdr) != XDR_UNIT)
+ return nfserr_resource;
- if (readdir->cookie_offset) {
- wire_offset = cpu_to_be64(offset);
- write_bytes_to_xdr_buf(xdr->buf, readdir->cookie_offset,
- &wire_offset, 8);
- }
+ /* eof */
+ return nfsd4_encode_bool(xdr, readdir->common.err == nfserr_eof);
+}
- p = xdr_reserve_space(xdr, 8);
- if (!p) {
- WARN_ON_ONCE(1);
- goto err_no_verf;
- }
- *p++ = 0; /* no more entries */
- *p++ = htonl(readdir->common.err == nfserr_eof);
+static __be32
+nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_readdir *readdir = &u->readdir;
+ struct xdr_stream *xdr = resp->xdr;
+ int starting_len = xdr->buf->len;
- return 0;
-err_no_verf:
- xdr_truncate_encode(xdr, starting_len);
+ /* cookieverf */
+ nfserr = nfsd4_encode_verifier4(xdr, &readdir->rd_verf);
+ if (nfserr != nfs_ok)
+ return nfserr;
+
+ /* reply */
+ nfserr = nfsd4_encode_dirlist4(xdr, readdir, svc_max_payload(resp->rqstp));
+ if (nfserr != nfs_ok)
+ xdr_truncate_encode(xdr, starting_len);
return nfserr;
}
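
The readdir path above relies on a reserve-then-backfill trick: an entry's nfs_cookie4 is not known until the next entry (or the end of the list) is reached, so nfsd4_encode_entry4() reserves the cookie's eight bytes with a placeholder and nfsd4_encode_entry4_nfs_cookie4() later overwrites them via write_bytes_to_xdr_buf(). A condensed, hypothetical sketch of just that mechanism follows; demo_cookie_ctx and demo_encode_entry_cookie() are illustrative names, not code from the patch.

/* Hypothetical example only -- not from this patch. */
struct demo_cookie_ctx {
	struct xdr_stream *xdr;
	u32 cookie_offset;	/* 0 means no previous entry to backfill */
};

static __be32 demo_encode_entry_cookie(struct demo_cookie_ctx *ctx,
				       u64 prev_entry_offset)
{
	__be64 wire = cpu_to_be64(prev_entry_offset);
	__be32 *p;

	/* Backfill the previous entry's cookie, now that readdir has
	 * told us where that entry ended. */
	if (ctx->cookie_offset)
		write_bytes_to_xdr_buf(ctx->xdr->buf, ctx->cookie_offset,
				       &wire, sizeof(wire));

	/* Reserve this entry's cookie; it is overwritten on the next
	 * call, or by the final backfill at end-of-list. */
	ctx->cookie_offset = ctx->xdr->buf->len;
	p = xdr_reserve_space(ctx->xdr, XDR_UNIT * 2);
	if (!p)
		return nfserr_resource;
	xdr_encode_hyper(p, OFFSET_MAX);	/* placeholder value */
	return nfs_ok;
}
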
@@ -4338,13 +4569,34 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr,
}
static __be32
+nfsd4_encode_rpcsec_gss_info(struct xdr_stream *xdr,
+ struct rpcsec_gss_info *info)
+{
+ __be32 status;
+
+ /* oid */
+ if (xdr_stream_encode_opaque(xdr, info->oid.data, info->oid.len) < 0)
+ return nfserr_resource;
+ /* qop */
+ status = nfsd4_encode_qop4(xdr, info->qop);
+ if (status != nfs_ok)
+ return status;
+ /* service */
+ if (xdr_stream_encode_u32(xdr, info->service) != XDR_UNIT)
+ return nfserr_resource;
+
+ return nfs_ok;
+}
+
+static __be32
nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
{
u32 i, nflavs, supported;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
- __be32 *p, *flavorsp;
static bool report = true;
+ __be32 *flavorsp;
+ __be32 status;
if (exp->ex_nflavors) {
flavs = exp->ex_flavors;
@@ -4367,10 +4619,9 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
}
supported = 0;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ flavorsp = xdr_reserve_space(xdr, XDR_UNIT);
+ if (!flavorsp)
return nfserr_resource;
- flavorsp = p++; /* to be backfilled later */
for (i = 0; i < nflavs; i++) {
rpc_authflavor_t pf = flavs[i].pseudoflavor;
@@ -4378,20 +4629,22 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
if (rpcauth_get_gssinfo(pf, &info) == 0) {
supported++;
- p = xdr_reserve_space(xdr, 4 + 4 +
- XDR_LEN(info.oid.len) + 4 + 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(RPC_AUTH_GSS);
- p = xdr_encode_opaque(p, info.oid.data, info.oid.len);
- *p++ = cpu_to_be32(info.qop);
- *p++ = cpu_to_be32(info.service);
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, RPC_AUTH_GSS);
+ if (status != nfs_ok)
+ return status;
+ /* flavor_info */
+ status = nfsd4_encode_rpcsec_gss_info(xdr, &info);
+ if (status != nfs_ok)
+ return status;
} else if (pf < RPC_AUTH_MAXFLAVOR) {
supported++;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(pf);
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, pf);
+ if (status != nfs_ok)
+ return status;
} else {
if (report)
pr_warn("NFS: SECINFO: security flavor %u "
@@ -4401,7 +4654,7 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
if (nflavs != supported)
report = false;
- *flavorsp = htonl(supported);
+ *flavorsp = cpu_to_be32(supported);
return 0;
}
@@ -4425,34 +4678,25 @@ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfsd4_do_encode_secinfo(xdr, secinfo->sin_exp);
}
-/*
- * The SETATTR encode routine is special -- it always encodes a bitmap,
- * regardless of the error status.
- */
static __be32
nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_setattr *setattr = &u->setattr;
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
+ __be32 status;
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- return nfserr_resource;
- if (nfserr) {
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- }
- else {
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(setattr->sa_bmval[0]);
- *p++ = cpu_to_be32(setattr->sa_bmval[1]);
- *p++ = cpu_to_be32(setattr->sa_bmval[2]);
+ switch (nfserr) {
+ case nfs_ok:
+ /* attrsset */
+ status = nfsd4_encode_bitmap4(resp->xdr, setattr->sa_bmval[0],
+ setattr->sa_bmval[1],
+ setattr->sa_bmval[2]);
+ break;
+ default:
+ /* attrsset */
+ status = nfsd4_encode_bitmap4(resp->xdr, 0, 0, 0);
}
- return nfserr;
+ return status != nfs_ok ? status : nfserr;
}
static __be32
@@ -4488,86 +4732,148 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_write *write = &u->write;
+ struct xdr_stream *xdr = resp->xdr;
- if (xdr_stream_encode_u32(resp->xdr, write->wr_bytes_written) < 0)
- return nfserr_resource;
- if (xdr_stream_encode_u32(resp->xdr, write->wr_how_written) < 0)
+ /* count */
+ nfserr = nfsd4_encode_count4(xdr, write->wr_bytes_written);
+ if (nfserr)
+ return nfserr;
+ /* committed */
+ if (xdr_stream_encode_u32(xdr, write->wr_how_written) != XDR_UNIT)
return nfserr_resource;
- return nfsd4_encode_verifier4(resp->xdr, &write->wr_verifier);
+ /* writeverf */
+ return nfsd4_encode_verifier4(xdr, &write->wr_verifier);
}
static __be32
-nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
+nfsd4_encode_state_protect_ops4(struct xdr_stream *xdr,
+ struct nfsd4_exchange_id *exid)
{
- struct nfsd4_exchange_id *exid = &u->exchange_id;
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- char *major_id;
- char *server_scope;
- int major_id_sz;
- int server_scope_sz;
- uint64_t minor_id = 0;
- struct nfsd_net *nn = net_generic(SVC_NET(resp->rqstp), nfsd_net_id);
+ __be32 status;
- major_id = nn->nfsd_name;
- major_id_sz = strlen(nn->nfsd_name);
- server_scope = nn->nfsd_name;
- server_scope_sz = strlen(nn->nfsd_name);
+ /* spo_must_enforce */
+ status = nfsd4_encode_bitmap4(xdr, exid->spo_must_enforce[0],
+ exid->spo_must_enforce[1],
+ exid->spo_must_enforce[2]);
+ if (status != nfs_ok)
+ return status;
+ /* spo_must_allow */
+ return nfsd4_encode_bitmap4(xdr, exid->spo_must_allow[0],
+ exid->spo_must_allow[1],
+ exid->spo_must_allow[2]);
+}
- if (nfsd4_encode_clientid4(xdr, &exid->clientid) != nfs_ok)
- return nfserr_resource;
- if (xdr_stream_encode_u32(xdr, exid->seqid) < 0)
- return nfserr_resource;
- if (xdr_stream_encode_u32(xdr, exid->flags) < 0)
- return nfserr_resource;
+static __be32
+nfsd4_encode_state_protect4_r(struct xdr_stream *xdr, struct nfsd4_exchange_id *exid)
+{
+ __be32 status;
- if (xdr_stream_encode_u32(xdr, exid->spa_how) < 0)
+ if (xdr_stream_encode_u32(xdr, exid->spa_how) != XDR_UNIT)
return nfserr_resource;
switch (exid->spa_how) {
case SP4_NONE:
+ status = nfs_ok;
break;
case SP4_MACH_CRED:
- /* spo_must_enforce bitmap: */
- nfserr = nfsd4_encode_bitmap(xdr,
- exid->spo_must_enforce[0],
- exid->spo_must_enforce[1],
- exid->spo_must_enforce[2]);
- if (nfserr)
- return nfserr;
- /* spo_must_allow bitmap: */
- nfserr = nfsd4_encode_bitmap(xdr,
- exid->spo_must_allow[0],
- exid->spo_must_allow[1],
- exid->spo_must_allow[2]);
- if (nfserr)
- return nfserr;
+ /* spr_mach_ops */
+ status = nfsd4_encode_state_protect_ops4(xdr, exid);
break;
default:
- WARN_ON_ONCE(1);
+ status = nfserr_serverfault;
}
+ return status;
+}
- p = xdr_reserve_space(xdr,
- 8 /* so_minor_id */ +
- 4 /* so_major_id.len */ +
- (XDR_QUADLEN(major_id_sz) * 4) +
- 4 /* eir_server_scope.len */ +
- (XDR_QUADLEN(server_scope_sz) * 4) +
- 4 /* eir_server_impl_id.count (0) */);
- if (!p)
+static __be32
+nfsd4_encode_server_owner4(struct xdr_stream *xdr, struct svc_rqst *rqstp)
+{
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ __be32 status;
+
+ /* so_minor_id */
+ status = nfsd4_encode_uint64_t(xdr, 0);
+ if (status != nfs_ok)
+ return status;
+ /* so_major_id */
+ return nfsd4_encode_opaque(xdr, nn->nfsd_name, strlen(nn->nfsd_name));
+}
+
+static __be32
+nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd_net *nn = net_generic(SVC_NET(resp->rqstp), nfsd_net_id);
+ struct nfsd4_exchange_id *exid = &u->exchange_id;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* eir_clientid */
+ nfserr = nfsd4_encode_clientid4(xdr, &exid->clientid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_sequenceid */
+ nfserr = nfsd4_encode_sequenceid4(xdr, exid->seqid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_flags */
+ nfserr = nfsd4_encode_uint32_t(xdr, exid->flags);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_state_protect */
+ nfserr = nfsd4_encode_state_protect4_r(xdr, exid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_server_owner */
+ nfserr = nfsd4_encode_server_owner4(xdr, resp->rqstp);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_server_scope */
+ nfserr = nfsd4_encode_opaque(xdr, nn->nfsd_name,
+ strlen(nn->nfsd_name));
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_server_impl_id<1> */
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
return nfserr_resource;
- /* The server_owner struct */
- p = xdr_encode_hyper(p, minor_id); /* Minor id */
- /* major id */
- p = xdr_encode_opaque(p, major_id, major_id_sz);
+ return nfs_ok;
+}
- /* Server scope */
- p = xdr_encode_opaque(p, server_scope, server_scope_sz);
+static __be32
+nfsd4_encode_channel_attrs4(struct xdr_stream *xdr,
+ const struct nfsd4_channel_attrs *attrs)
+{
+ __be32 status;
- /* Implementation id */
- *p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */
- return 0;
+ /* ca_headerpadsize */
+ status = nfsd4_encode_count4(xdr, 0);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxrequestsize */
+ status = nfsd4_encode_count4(xdr, attrs->maxreq_sz);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxresponsesize */
+ status = nfsd4_encode_count4(xdr, attrs->maxresp_sz);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxresponsesize_cached */
+ status = nfsd4_encode_count4(xdr, attrs->maxresp_cached);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxoperations */
+ status = nfsd4_encode_count4(xdr, attrs->maxops);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxrequests */
+ status = nfsd4_encode_count4(xdr, attrs->maxreqs);
+ if (status != nfs_ok)
+ return status;
+ /* ca_rdma_ird<1> */
+ if (xdr_stream_encode_u32(xdr, attrs->nr_rdma_attrs) != XDR_UNIT)
+ return nfserr_resource;
+ if (attrs->nr_rdma_attrs)
+ return nfsd4_encode_uint32_t(xdr, attrs->rdma_attrs);
+ return nfs_ok;
}
static __be32
@@ -4576,52 +4882,25 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_create_session *sess = &u->create_session;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 24);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, sess->sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
- *p++ = cpu_to_be32(sess->seqid);
- *p++ = cpu_to_be32(sess->flags);
- p = xdr_reserve_space(xdr, 28);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(0); /* headerpadsz */
- *p++ = cpu_to_be32(sess->fore_channel.maxreq_sz);
- *p++ = cpu_to_be32(sess->fore_channel.maxresp_sz);
- *p++ = cpu_to_be32(sess->fore_channel.maxresp_cached);
- *p++ = cpu_to_be32(sess->fore_channel.maxops);
- *p++ = cpu_to_be32(sess->fore_channel.maxreqs);
- *p++ = cpu_to_be32(sess->fore_channel.nr_rdma_attrs);
-
- if (sess->fore_channel.nr_rdma_attrs) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(sess->fore_channel.rdma_attrs);
- }
-
- p = xdr_reserve_space(xdr, 28);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(0); /* headerpadsz */
- *p++ = cpu_to_be32(sess->back_channel.maxreq_sz);
- *p++ = cpu_to_be32(sess->back_channel.maxresp_sz);
- *p++ = cpu_to_be32(sess->back_channel.maxresp_cached);
- *p++ = cpu_to_be32(sess->back_channel.maxops);
- *p++ = cpu_to_be32(sess->back_channel.maxreqs);
- *p++ = cpu_to_be32(sess->back_channel.nr_rdma_attrs);
-
- if (sess->back_channel.nr_rdma_attrs) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(sess->back_channel.rdma_attrs);
- }
- return 0;
+ /* csr_sessionid */
+ nfserr = nfsd4_encode_sessionid4(xdr, &sess->sessionid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_sequence */
+ nfserr = nfsd4_encode_sequenceid4(xdr, sess->seqid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_flags */
+ nfserr = nfsd4_encode_uint32_t(xdr, sess->flags);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_fore_chan_attrs */
+ nfserr = nfsd4_encode_channel_attrs4(xdr, &sess->fore_channel);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_back_chan_attrs */
+ return nfsd4_encode_channel_attrs4(xdr, &sess->back_channel);
}
static __be32
@@ -4630,22 +4909,35 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_sequence *seq = &u->sequence;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 20);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, seq->sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
- *p++ = cpu_to_be32(seq->seqid);
- *p++ = cpu_to_be32(seq->slotid);
+ /* sr_sessionid */
+ nfserr = nfsd4_encode_sessionid4(xdr, &seq->sessionid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_sequenceid */
+ nfserr = nfsd4_encode_sequenceid4(xdr, seq->seqid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_slotid */
+ nfserr = nfsd4_encode_slotid4(xdr, seq->slotid);
+ if (nfserr != nfs_ok)
+ return nfserr;
/* Note slotid's are numbered from zero: */
- *p++ = cpu_to_be32(seq->maxslots - 1); /* sr_highest_slotid */
- *p++ = cpu_to_be32(seq->maxslots - 1); /* sr_target_highest_slotid */
- *p++ = cpu_to_be32(seq->status_flags);
+ /* sr_highest_slotid */
+ nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_target_highest_slotid */
+ nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_status_flags */
+ nfserr = nfsd4_encode_uint32_t(xdr, seq->status_flags);
+ if (nfserr != nfs_ok)
+ return nfserr;
resp->cstate.data_offset = xdr->buf->len; /* DRC cache data pointer */
- return 0;
+ return nfs_ok;
}
static __be32
@@ -4653,125 +4945,132 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
- struct xdr_stream *xdr = resp->xdr;
struct nfsd4_test_stateid_id *stateid, *next;
- __be32 *p;
+ struct xdr_stream *xdr = resp->xdr;
- p = xdr_reserve_space(xdr, 4 + (4 * test_stateid->ts_num_ids));
- if (!p)
+ /* tsr_status_codes<> */
+ if (xdr_stream_encode_u32(xdr, test_stateid->ts_num_ids) != XDR_UNIT)
return nfserr_resource;
- *p++ = htonl(test_stateid->ts_num_ids);
-
- list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
- *p++ = stateid->ts_id_status;
+ list_for_each_entry_safe(stateid, next,
+ &test_stateid->ts_stateid_list, ts_id_list) {
+ if (xdr_stream_encode_be32(xdr, stateid->ts_id_status) != XDR_UNIT)
+ return nfserr_resource;
}
-
- return 0;
+ return nfs_ok;
}
#ifdef CONFIG_NFSD_PNFS
static __be32
-nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
+nfsd4_encode_device_addr4(struct xdr_stream *xdr,
+ const struct nfsd4_getdeviceinfo *gdev)
{
- struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
- struct xdr_stream *xdr = resp->xdr;
+ u32 needed_len, starting_len = xdr->buf->len;
const struct nfsd4_layout_ops *ops;
- u32 starting_len = xdr->buf->len, needed_len;
- __be32 *p;
+ __be32 status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ /* da_layout_type */
+ if (xdr_stream_encode_u32(xdr, gdev->gd_layout_type) != XDR_UNIT)
return nfserr_resource;
-
- *p++ = cpu_to_be32(gdev->gd_layout_type);
-
+ /* da_addr_body */
ops = nfsd4_layout_ops[gdev->gd_layout_type];
- nfserr = ops->encode_getdeviceinfo(xdr, gdev);
- if (nfserr) {
+ status = ops->encode_getdeviceinfo(xdr, gdev);
+ if (status != nfs_ok) {
/*
- * We don't bother to burden the layout drivers with
- * enforcing gd_maxcount, just tell the client to
- * come back with a bigger buffer if it's not enough.
+ * Don't burden the layout drivers with enforcing
+ * gd_maxcount. Just tell the client to come back
+ * with a bigger buffer if it's not enough.
*/
- if (xdr->buf->len + 4 > gdev->gd_maxcount)
+ if (xdr->buf->len + XDR_UNIT > gdev->gd_maxcount)
goto toosmall;
- return nfserr;
+ return status;
}
- if (gdev->gd_notify_types) {
- p = xdr_reserve_space(xdr, 4 + 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(1); /* bitmap length */
- *p++ = cpu_to_be32(gdev->gd_notify_types);
- } else {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = 0;
- }
+ return nfs_ok;
- return 0;
toosmall:
- dprintk("%s: maxcount too small\n", __func__);
- needed_len = xdr->buf->len + 4 /* notifications */;
+ needed_len = xdr->buf->len + XDR_UNIT; /* notifications */
xdr_truncate_encode(xdr, starting_len);
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(needed_len);
+
+ status = nfsd4_encode_count4(xdr, needed_len);
+ if (status != nfs_ok)
+ return status;
return nfserr_toosmall;
}
static __be32
-nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
- struct nfsd4_layoutget *lgp = &u->layoutget;
+ struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
struct xdr_stream *xdr = resp->xdr;
- const struct nfsd4_layout_ops *ops;
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(1); /* we always set return-on-close */
- *p++ = cpu_to_be32(lgp->lg_sid.si_generation);
- p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
- sizeof(stateid_opaque_t));
+ /* gdir_device_addr */
+ nfserr = nfsd4_encode_device_addr4(xdr, gdev);
+ if (nfserr)
+ return nfserr;
+ /* gdir_notification */
+ return nfsd4_encode_bitmap4(xdr, gdev->gd_notify_types, 0, 0);
+}
- *p++ = cpu_to_be32(1); /* we always return a single layout */
- p = xdr_encode_hyper(p, lgp->lg_seg.offset);
- p = xdr_encode_hyper(p, lgp->lg_seg.length);
- *p++ = cpu_to_be32(lgp->lg_seg.iomode);
- *p++ = cpu_to_be32(lgp->lg_layout_type);
+static __be32
+nfsd4_encode_layout4(struct xdr_stream *xdr, const struct nfsd4_layoutget *lgp)
+{
+ const struct nfsd4_layout_ops *ops = nfsd4_layout_ops[lgp->lg_layout_type];
+ __be32 status;
- ops = nfsd4_layout_ops[lgp->lg_layout_type];
+ /* lo_offset */
+ status = nfsd4_encode_offset4(xdr, lgp->lg_seg.offset);
+ if (status != nfs_ok)
+ return status;
+ /* lo_length */
+ status = nfsd4_encode_length4(xdr, lgp->lg_seg.length);
+ if (status != nfs_ok)
+ return status;
+ /* lo_iomode */
+ if (xdr_stream_encode_u32(xdr, lgp->lg_seg.iomode) != XDR_UNIT)
+ return nfserr_resource;
+ /* lo_content */
+ if (xdr_stream_encode_u32(xdr, lgp->lg_layout_type) != XDR_UNIT)
+ return nfserr_resource;
return ops->encode_layoutget(xdr, lgp);
}
static __be32
+nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_layoutget *lgp = &u->layoutget;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* logr_return_on_close */
+ nfserr = nfsd4_encode_bool(xdr, true);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* logr_stateid */
+ nfserr = nfsd4_encode_stateid4(xdr, &lgp->lg_sid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* logr_layout<> */
+ if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
+ return nfserr_resource;
+ return nfsd4_encode_layout4(xdr, lgp);
+}
+
+static __be32
nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(lcp->lc_size_chg);
- if (lcp->lc_size_chg) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_hyper(p, lcp->lc_newsize);
- }
-
- return 0;
+ /* ns_sizechanged */
+ nfserr = nfsd4_encode_bool(xdr, lcp->lc_size_chg);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ if (lcp->lc_size_chg)
+ /* ns_size */
+ return nfsd4_encode_length4(xdr, lcp->lc_newsize);
+ return nfs_ok;
}
static __be32
@@ -4780,103 +5079,108 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(lrp->lrs_present);
+ /* lrs_present */
+ nfserr = nfsd4_encode_bool(xdr, lrp->lrs_present);
+ if (nfserr != nfs_ok)
+ return nfserr;
if (lrp->lrs_present)
- return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
- return 0;
+ /* lrs_stateid */
+ return nfsd4_encode_stateid4(xdr, &lrp->lr_sid);
+ return nfs_ok;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
-nfsd42_encode_write_res(struct nfsd4_compoundres *resp,
- struct nfsd42_write_res *write, bool sync)
+nfsd4_encode_write_response4(struct xdr_stream *xdr,
+ const struct nfsd4_copy *copy)
{
- __be32 *p;
- p = xdr_reserve_space(resp->xdr, 4);
- if (!p)
- return nfserr_resource;
+ const struct nfsd42_write_res *write = &copy->cp_res;
+ u32 count = nfsd4_copy_is_sync(copy) ? 0 : 1;
+ __be32 status;
- if (sync)
- *p++ = cpu_to_be32(0);
- else {
- __be32 nfserr;
- *p++ = cpu_to_be32(1);
- nfserr = nfsd4_encode_stateid(resp->xdr, &write->cb_stateid);
- if (nfserr)
- return nfserr;
+ /* wr_callback_id<1> */
+ if (xdr_stream_encode_u32(xdr, count) != XDR_UNIT)
+ return nfserr_resource;
+ if (count) {
+ status = nfsd4_encode_stateid4(xdr, &write->cb_stateid);
+ if (status != nfs_ok)
+ return status;
}
- p = xdr_reserve_space(resp->xdr, 8 + 4 + NFS4_VERIFIER_SIZE);
- if (!p)
+
+ /* wr_count */
+ status = nfsd4_encode_length4(xdr, write->wr_bytes_written);
+ if (status != nfs_ok)
+ return status;
+ /* wr_committed */
+ if (xdr_stream_encode_u32(xdr, write->wr_stable_how) != XDR_UNIT)
return nfserr_resource;
+ /* wr_writeverf */
+ return nfsd4_encode_verifier4(xdr, &write->wr_verifier);
+}
- p = xdr_encode_hyper(p, write->wr_bytes_written);
- *p++ = cpu_to_be32(write->wr_stable_how);
- p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
- NFS4_VERIFIER_SIZE);
- return nfs_ok;
+static __be32 nfsd4_encode_copy_requirements4(struct xdr_stream *xdr,
+ const struct nfsd4_copy *copy)
+{
+ __be32 status;
+
+ /* cr_consecutive */
+ status = nfsd4_encode_bool(xdr, true);
+ if (status != nfs_ok)
+ return status;
+ /* cr_synchronous */
+ return nfsd4_encode_bool(xdr, nfsd4_copy_is_sync(copy));
}
static __be32
-nfsd42_encode_nl4_server(struct nfsd4_compoundres *resp, struct nl4_server *ns)
+nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
- struct xdr_stream *xdr = resp->xdr;
- struct nfs42_netaddr *addr;
- __be32 *p;
+ struct nfsd4_copy *copy = &u->copy;
- p = xdr_reserve_space(xdr, 4);
- *p++ = cpu_to_be32(ns->nl4_type);
+ nfserr = nfsd4_encode_write_response4(resp->xdr, copy);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ return nfsd4_encode_copy_requirements4(resp->xdr, copy);
+}
+static __be32
+nfsd4_encode_netloc4(struct xdr_stream *xdr, const struct nl4_server *ns)
+{
+ __be32 status;
+
+ if (xdr_stream_encode_u32(xdr, ns->nl4_type) != XDR_UNIT)
+ return nfserr_resource;
switch (ns->nl4_type) {
case NL4_NETADDR:
- addr = &ns->u.nl4_addr;
-
- /* netid_len, netid, uaddr_len, uaddr (port included
- * in RPCBIND_MAXUADDRLEN)
- */
- p = xdr_reserve_space(xdr,
- 4 /* netid len */ +
- (XDR_QUADLEN(addr->netid_len) * 4) +
- 4 /* uaddr len */ +
- (XDR_QUADLEN(addr->addr_len) * 4));
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(addr->netid_len);
- p = xdr_encode_opaque_fixed(p, addr->netid,
- addr->netid_len);
- *p++ = cpu_to_be32(addr->addr_len);
- p = xdr_encode_opaque_fixed(p, addr->addr,
- addr->addr_len);
+ /* nl_addr */
+ status = nfsd4_encode_netaddr4(xdr, &ns->u.nl4_addr);
break;
default:
- WARN_ON_ONCE(ns->nl4_type != NL4_NETADDR);
- return nfserr_inval;
+ status = nfserr_serverfault;
}
-
- return 0;
+ return status;
}
static __be32
-nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
+nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
- struct nfsd4_copy *copy = &u->copy;
- __be32 *p;
+ struct nfsd4_copy_notify *cn = &u->copy_notify;
+ struct xdr_stream *xdr = resp->xdr;
- nfserr = nfsd42_encode_write_res(resp, &copy->cp_res,
- nfsd4_copy_is_sync(copy));
+ /* cnr_lease_time */
+ nfserr = nfsd4_encode_nfstime4(xdr, &cn->cpn_lease_time);
if (nfserr)
return nfserr;
-
- p = xdr_reserve_space(resp->xdr, 4 + 4);
- *p++ = xdr_one; /* cr_consecutive */
- *p = nfsd4_copy_is_sync(copy) ? xdr_one : xdr_zero;
- return 0;
+ /* cnr_stateid */
+ nfserr = nfsd4_encode_stateid4(xdr, &cn->cpn_cnr_stateid);
+ if (nfserr)
+ return nfserr;
+ /* cnr_source_server<> */
+ if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
+ return nfserr_resource;
+ return nfsd4_encode_netloc4(xdr, cn->cpn_src);
}
static __be32
@@ -4885,14 +5189,15 @@ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_offload_status *os = &u->offload_status;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 8 + 4);
- if (!p)
+ /* osr_count */
+ nfserr = nfsd4_encode_length4(xdr, os->count);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* osr_complete<1> */
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
return nfserr_resource;
- p = xdr_encode_hyper(p, os->count);
- *p++ = cpu_to_be32(0);
- return nfserr;
+ return nfs_ok;
}
static __be32
@@ -4970,53 +5275,18 @@ out:
}
static __be32
-nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
-{
- struct nfsd4_copy_notify *cn = &u->copy_notify;
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
-
- if (nfserr)
- return nfserr;
-
- /* 8 sec, 4 nsec */
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- return nfserr_resource;
-
- /* cnr_lease_time */
- p = xdr_encode_hyper(p, cn->cpn_sec);
- *p++ = cpu_to_be32(cn->cpn_nsec);
-
- /* cnr_stateid */
- nfserr = nfsd4_encode_stateid(xdr, &cn->cpn_cnr_stateid);
- if (nfserr)
- return nfserr;
-
- /* cnr_src.nl_nsvr */
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(1);
-
- nfserr = nfsd42_encode_nl4_server(resp, cn->cpn_src);
- return nfserr;
-}
-
-static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_seek *seek = &u->seek;
- __be32 *p;
-
- p = xdr_reserve_space(resp->xdr, 4 + 8);
- *p++ = cpu_to_be32(seek->seek_eof);
- p = xdr_encode_hyper(p, seek->seek_pos);
+ struct xdr_stream *xdr = resp->xdr;
- return 0;
+ /* sr_eof */
+ nfserr = nfsd4_encode_bool(xdr, seek->seek_eof);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_offset */
+ return nfsd4_encode_offset4(xdr, seek->seek_pos);
}
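
Stepping back from the individual operations, nearly every encoder in this file now follows the same composition rule: each helper encodes one XDR field and returns an nfsstat4 as __be32, and callers chain the helpers in declaration order with a comment naming the field. A compact, hypothetical illustration of the style is below; it reuses three helpers that do appear in the patch (nfsd4_encode_stateid4(), nfsd4_encode_uint32_t(), nfsd4_encode_bool()), while demo_encode_result4() itself is invented.

/* Hypothetical example only -- not from this patch. */
static __be32
demo_encode_result4(struct xdr_stream *xdr, const stateid_t *sid,
		    u32 flags, bool eof)
{
	__be32 status;

	/* stateid */
	status = nfsd4_encode_stateid4(xdr, sid);
	if (status != nfs_ok)
		return status;
	/* flags */
	status = nfsd4_encode_uint32_t(xdr, flags);
	if (status != nfs_ok)
		return status;
	/* eof */
	return nfsd4_encode_bool(xdr, eof);
}

Operations such as SETATTR and LOCK add one wrinkle: they must emit a body even when the operation failed, so they encode first and only then decide whether to propagate the encoding status or the operation's own nfserr (the "return status != nfs_ok ? status : nfserr;" pattern seen above).
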
static __be32
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7ed02fb88a36..3e15b72f421d 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -26,6 +26,7 @@
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"
+#include "netlink.h"
/*
* We have a single directory with several nodes in it.
@@ -1132,7 +1133,7 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
/* Following advice from simple_fill_super documentation: */
inode->i_ino = iunique(sb, NFSD_MaxReserved);
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_fop = &simple_dir_operations;
@@ -1496,6 +1497,203 @@ static int create_proc_exports_entry(void)
unsigned int nfsd_net_id;
/**
+ * nfsd_nl_rpc_status_get_start - Prepare rpc_status_get dumpit
+ * @cb: netlink metadata and command arguments
+ *
+ * Return values:
+ * %0: The rpc_status_get command may proceed
+ * %-ENODEV: There is no NFSD running in this namespace
+ */
+int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb)
+{
+ struct nfsd_net *nn = net_generic(sock_net(cb->skb->sk), nfsd_net_id);
+ int ret = -ENODEV;
+
+ mutex_lock(&nfsd_mutex);
+ if (nn->nfsd_serv) {
+ svc_get(nn->nfsd_serv);
+ ret = 0;
+ }
+ mutex_unlock(&nfsd_mutex);
+
+ return ret;
+}
+
+static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct nfsd_genl_rqstp *rqstp)
+{
+ void *hdr;
+ u32 i;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &nfsd_nl_family, 0, NFSD_CMD_RPC_STATUS_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put_be32(skb, NFSD_A_RPC_STATUS_XID, rqstp->rq_xid) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_FLAGS, rqstp->rq_flags) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_PROG, rqstp->rq_prog) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_PROC, rqstp->rq_proc) ||
+ nla_put_u8(skb, NFSD_A_RPC_STATUS_VERSION, rqstp->rq_vers) ||
+ nla_put_s64(skb, NFSD_A_RPC_STATUS_SERVICE_TIME,
+ ktime_to_us(rqstp->rq_stime),
+ NFSD_A_RPC_STATUS_PAD))
+ return -ENOBUFS;
+
+ switch (rqstp->rq_saddr.sa_family) {
+ case AF_INET: {
+ const struct sockaddr_in *s_in, *d_in;
+
+ s_in = (const struct sockaddr_in *)&rqstp->rq_saddr;
+ d_in = (const struct sockaddr_in *)&rqstp->rq_daddr;
+ if (nla_put_in_addr(skb, NFSD_A_RPC_STATUS_SADDR4,
+ s_in->sin_addr.s_addr) ||
+ nla_put_in_addr(skb, NFSD_A_RPC_STATUS_DADDR4,
+ d_in->sin_addr.s_addr) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_SPORT,
+ s_in->sin_port) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_DPORT,
+ d_in->sin_port))
+ return -ENOBUFS;
+ break;
+ }
+ case AF_INET6: {
+ const struct sockaddr_in6 *s_in, *d_in;
+
+ s_in = (const struct sockaddr_in6 *)&rqstp->rq_saddr;
+ d_in = (const struct sockaddr_in6 *)&rqstp->rq_daddr;
+ if (nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_SADDR6,
+ &s_in->sin6_addr) ||
+ nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_DADDR6,
+ &d_in->sin6_addr) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_SPORT,
+ s_in->sin6_port) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_DPORT,
+ d_in->sin6_port))
+ return -ENOBUFS;
+ break;
+ }
+ }
+
+ for (i = 0; i < rqstp->rq_opcnt; i++)
+ if (nla_put_u32(skb, NFSD_A_RPC_STATUS_COMPOUND_OPS,
+ rqstp->rq_opnum[i]))
+ return -ENOBUFS;
+
+ genlmsg_end(skb, hdr);
+ return 0;
+}
+
+/**
+ * nfsd_nl_rpc_status_get_dumpit - Handle rpc_status_get dumpit
+ * @skb: reply buffer
+ * @cb: netlink metadata and command arguments
+ *
+ * Returns the size of the reply or a negative errno.
+ */
+int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nfsd_net *nn = net_generic(sock_net(skb->sk), nfsd_net_id);
+ int i, ret, rqstp_index = 0;
+
+ rcu_read_lock();
+
+ for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) {
+ struct svc_rqst *rqstp;
+
+ if (i < cb->args[0]) /* already consumed */
+ continue;
+
+ rqstp_index = 0;
+ list_for_each_entry_rcu(rqstp,
+ &nn->nfsd_serv->sv_pools[i].sp_all_threads,
+ rq_all) {
+ struct nfsd_genl_rqstp genl_rqstp;
+ unsigned int status_counter;
+
+ if (rqstp_index++ < cb->args[1]) /* already consumed */
+ continue;
+ /*
+ * Acquire rq_status_counter before parsing the rqst
+ * fields. rq_status_counter is set to an odd value in
+			 * order to notify consumers that the rqstp fields are
+ * meaningful.
+ */
+ status_counter =
+ smp_load_acquire(&rqstp->rq_status_counter);
+ if (!(status_counter & 1))
+ continue;
+
+ genl_rqstp.rq_xid = rqstp->rq_xid;
+ genl_rqstp.rq_flags = rqstp->rq_flags;
+ genl_rqstp.rq_vers = rqstp->rq_vers;
+ genl_rqstp.rq_prog = rqstp->rq_prog;
+ genl_rqstp.rq_proc = rqstp->rq_proc;
+ genl_rqstp.rq_stime = rqstp->rq_stime;
+ genl_rqstp.rq_opcnt = 0;
+ memcpy(&genl_rqstp.rq_daddr, svc_daddr(rqstp),
+ sizeof(struct sockaddr));
+ memcpy(&genl_rqstp.rq_saddr, svc_addr(rqstp),
+ sizeof(struct sockaddr));
+
+#ifdef CONFIG_NFSD_V4
+ if (rqstp->rq_vers == NFS4_VERSION &&
+ rqstp->rq_proc == NFSPROC4_COMPOUND) {
+ /* NFSv4 compound */
+ struct nfsd4_compoundargs *args;
+ int j;
+
+ args = rqstp->rq_argp;
+ genl_rqstp.rq_opcnt = args->opcnt;
+ for (j = 0; j < genl_rqstp.rq_opcnt; j++)
+ genl_rqstp.rq_opnum[j] =
+ args->ops[j].opnum;
+ }
+#endif /* CONFIG_NFSD_V4 */
+
+ /*
+ * Acquire rq_status_counter before reporting the rqst
+ * fields to the user.
+ */
+ if (smp_load_acquire(&rqstp->rq_status_counter) !=
+ status_counter)
+ continue;
+
+ ret = nfsd_genl_rpc_status_compose_msg(skb, cb,
+ &genl_rqstp);
+ if (ret)
+ goto out;
+ }
+ }
+
+ cb->args[0] = i;
+ cb->args[1] = rqstp_index;
+ ret = skb->len;
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/**
+ * nfsd_nl_rpc_status_get_done - rpc_status_get dumpit post-processing
+ * @cb: netlink metadata and command arguments
+ *
+ * Return values:
+ * %0: Success
+ */
+int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb)
+{
+ mutex_lock(&nfsd_mutex);
+ nfsd_put(sock_net(cb->skb->sk));
+ mutex_unlock(&nfsd_mutex);
+
+ return 0;
+}
+
+/**
* nfsd_net_init - Prepare the nfsd_net portion of a new net namespace
* @net: a freshly-created network namespace
*
@@ -1589,6 +1787,10 @@ static int __init init_nfsd(void)
retval = register_filesystem(&nfsd_fs_type);
if (retval)
goto out_free_all;
+ retval = genl_register_family(&nfsd_nl_family);
+ if (retval)
+ goto out_free_all;
+
return 0;
out_free_all:
nfsd4_destroy_laundry_wq();
@@ -1613,6 +1815,7 @@ out_free_slabs:
static void __exit exit_nfsd(void)
{
+ genl_unregister_family(&nfsd_nl_family);
unregister_filesystem(&nfsd_fs_type);
nfsd4_destroy_laundry_wq();
unregister_cld_notifier();
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 11c14faa6c67..f5ff42f41ee7 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -62,6 +62,23 @@ struct readdir_cd {
__be32 err; /* 0, nfserr, or nfserr_eof */
};
+/* Maximum number of operations per session compound */
+#define NFSD_MAX_OPS_PER_COMPOUND 50
+
+struct nfsd_genl_rqstp {
+ struct sockaddr rq_daddr;
+ struct sockaddr rq_saddr;
+ unsigned long rq_flags;
+ ktime_t rq_stime;
+ __be32 rq_xid;
+ u32 rq_vers;
+ u32 rq_prog;
+ u32 rq_proc;
+
+ /* NFSv4 compound */
+ u32 rq_opcnt;
+ u32 rq_opnum[NFSD_MAX_OPS_PER_COMPOUND];
+};
extern struct svc_program nfsd_program;
extern const struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 355bf0db3235..dbfa0ac13564 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -771,7 +771,7 @@ enum fsid_source fsid_source(const struct svc_fh *fhp)
* assume that the new change attr is always logged to stable storage in some
* fashion before the results can be seen.
*/
-u64 nfsd4_change_attribute(struct kstat *stat, struct inode *inode)
+u64 nfsd4_change_attribute(const struct kstat *stat, const struct inode *inode)
{
u64 chattr;
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 40426f899e76..6ebdf7ea27bf 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -293,7 +293,8 @@ static inline void fh_clear_pre_post_attrs(struct svc_fh *fhp)
fhp->fh_pre_saved = false;
}
-u64 nfsd4_change_attribute(struct kstat *stat, struct inode *inode);
+u64 nfsd4_change_attribute(const struct kstat *stat,
+ const struct inode *inode);
__be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp);
__be32 fh_fill_post_attrs(struct svc_fh *fhp);
__be32 __must_check fh_fill_both_attrs(struct svc_fh *fhp);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index c7af1095f6b5..d6122bb2d167 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -572,7 +572,6 @@ static void nfsd_last_thread(struct net *net)
return;
nfsd_shutdown_net(net);
- pr_info("nfsd: last server has exited, flushing export cache\n");
nfsd_export_flush(net);
}
@@ -713,14 +712,13 @@ int nfsd_nrpools(struct net *net)
int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
- int i = 0;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv = nn->nfsd_serv;
+ int i;
- if (nn->nfsd_serv != NULL) {
- for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
- nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
- }
-
+ if (serv)
+ for (i = 0; i < serv->sv_nrpools && i < n; i++)
+ nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
return 0;
}
@@ -787,7 +785,6 @@ int
nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
{
int error;
- bool nfsd_up_before;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv;
@@ -807,8 +804,6 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
error = nfsd_create_serv(net);
if (error)
goto out;
-
- nfsd_up_before = nn->nfsd_net_up;
serv = nn->nfsd_serv;
error = nfsd_startup_net(net, cred);
@@ -816,17 +811,15 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
goto out_put;
error = svc_set_num_threads(serv, NULL, nrservs);
if (error)
- goto out_shutdown;
+ goto out_put;
error = serv->sv_nrthreads;
- if (error == 0)
- nfsd_last_thread(net);
-out_shutdown:
- if (error < 0 && !nfsd_up_before)
- nfsd_shutdown_net(net);
out_put:
/* Threads now hold service active */
if (xchg(&nn->keep_active, 0))
svc_put(serv);
+
+ if (serv->sv_nrthreads == 0)
+ nfsd_last_thread(net);
svc_put(serv);
out:
mutex_unlock(&nfsd_mutex);
@@ -957,7 +950,7 @@ nfsd(void *vrqstp)
/*
* The main request loop
*/
- while (!kthread_should_stop()) {
+ while (!svc_thread_should_stop(rqstp)) {
/* Update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nn->max_connections;
@@ -998,6 +991,15 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
goto out_decode_err;
+ /*
+ * Release rq_status_counter, setting it to an odd value, once the RPC
+ * request has been properly parsed. Consumers treat the rqstp fields
+ * as stable while rq_status_counter is odd and as not meaningful while
+ * it is even.
+ */
+ smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter | 1);
+
rp = NULL;
switch (nfsd_cache_lookup(rqstp, &rp)) {
case RC_DOIT:
@@ -1015,6 +1017,12 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
goto out_encode_err;
+ /*
+ * Release rq_status_counter, setting it to an even value, once the RPC
+ * request has been fully processed.
+ */
+ smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter + 1);
+
nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
out_cached_reply:
return 1;
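The two stores in nfsd_dispatch() are the writer side of the same protocol: a release store of an odd value once the arguments are parsed, and another release store returning the counter to an even (and different) value once the reply is done. A hedged userspace illustration with made-up names rather than the kernel helpers:

#include <stdatomic.h>

struct sample {
	_Atomic unsigned int status_counter;	/* odd: fields are valid */
	unsigned int xid;
};

/* Publish the fields: fill them in, then release-store an odd counter. */
static void publish_fields(struct sample *s, unsigned int xid)
{
	unsigned int c = atomic_load_explicit(&s->status_counter,
					      memory_order_relaxed);

	s->xid = xid;
	atomic_store_explicit(&s->status_counter, c | 1,
			      memory_order_release);
}

/* Retire the fields: bump the counter so it is even again. */
static void retire_fields(struct sample *s)
{
	unsigned int c = atomic_load_explicit(&s->status_counter,
					      memory_order_relaxed);

	atomic_store_explicit(&s->status_counter, c + 1,
			      memory_order_release);
}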
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
index 4f4282d4eeca..de1e0dfed06a 100644
--- a/fs/nfsd/pnfs.h
+++ b/fs/nfsd/pnfs.h
@@ -27,12 +27,12 @@ struct nfsd4_layout_ops {
struct nfs4_client *clp,
struct nfsd4_getdeviceinfo *gdevp);
__be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdevp);
+ const struct nfsd4_getdeviceinfo *gdevp);
__be32 (*proc_layoutget)(struct inode *, const struct svc_fh *fhp,
struct nfsd4_layoutget *lgp);
- __be32 (*encode_layoutget)(struct xdr_stream *,
- struct nfsd4_layoutget *lgp);
+ __be32 (*encode_layoutget)(struct xdr_stream *xdr,
+ const struct nfsd4_layoutget *lgp);
__be32 (*proc_layoutcommit)(struct inode *inode,
struct nfsd4_layoutcommit *lcp);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index cbddcf484dba..f96eaa8e9413 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -117,6 +117,24 @@ struct nfs4_cpntf_state {
time64_t cpntf_time; /* last time stateid used */
};
+struct nfs4_cb_fattr {
+ struct nfsd4_callback ncf_getattr;
+ u32 ncf_cb_status;
+ u32 ncf_cb_bmap[1];
+
+ /* from CB_GETATTR reply */
+ u64 ncf_cb_change;
+ u64 ncf_cb_fsize;
+
+ unsigned long ncf_cb_flags;
+ bool ncf_file_modified;
+ u64 ncf_initial_cinfo;
+ u64 ncf_cur_fsize;
+};
+
+/* bits for ncf_cb_flags */
+#define CB_GETATTR_BUSY 0
+
/*
* Represents a delegation stateid. The nfs4_client holds references to these
* and they are put when it is being destroyed or when the delegation is
@@ -150,6 +168,9 @@ struct nfs4_delegation {
int dl_retries;
struct nfsd4_callback dl_recall;
bool dl_recalled;
+
+ /* for CB_GETATTR */
+ struct nfs4_cb_fattr dl_cb_fattr;
};
#define cb_to_delegation(cb) \
@@ -174,8 +195,6 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
/* Maximum number of slots per session. 160 is useful for long haul TCP */
#define NFSD_MAX_SLOTS_PER_SESSION 160
-/* Maximum number of operations per session compound */
-#define NFSD_MAX_OPS_PER_COMPOUND 50
/* Maximum session per slot cache size */
#define NFSD_SLOT_CACHE_SIZE 2048
/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */
@@ -642,6 +661,7 @@ enum nfsd4_cb_op {
NFSPROC4_CLNT_CB_SEQUENCE,
NFSPROC4_CLNT_CB_NOTIFY_LOCK,
NFSPROC4_CLNT_CB_RECALL_ANY,
+ NFSPROC4_CLNT_CB_GETATTR,
};
/* Returns true iff a is later than b: */
@@ -734,5 +754,6 @@ static inline bool try_to_expire_client(struct nfs4_client *clp)
}
extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp,
- struct inode *inode);
+ struct inode *inode, bool *file_modified, u64 *size);
+extern void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf);
#endif /* NFSD4_STATE_H */
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 63797635e1c3..12d79f5d4eb1 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -60,7 +60,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
#ifdef CONFIG_NFSD_V4
/* Show count for individual nfsv4 operations */
/* Writing operation numbers 0 1 2 also for maintaining uniformity */
- seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1);
+ seq_printf(seq, "proc4ops %u", LAST_NFS4_OP + 1);
for (i = 0; i <= LAST_NFS4_OP; i++) {
seq_printf(seq, " %lld",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
@@ -76,7 +76,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
+int nfsd_percpu_counters_init(struct percpu_counter *counters, int num)
{
int i, err = 0;
diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
index cf5524e7ca06..14f50c660b61 100644
--- a/fs/nfsd/stats.h
+++ b/fs/nfsd/stats.h
@@ -37,9 +37,9 @@ extern struct nfsd_stats nfsdstats;
extern struct svc_stat nfsd_svcstats;
-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num);
-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num);
+int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
+void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
+void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
int nfsd_stat_init(void);
void nfsd_stat_shutdown(void);
@@ -61,22 +61,22 @@ static inline void nfsd_stats_rc_nocache_inc(void)
static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
{
percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
- if (exp)
- percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
+ if (exp && exp->ex_stats)
+ percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]);
}
static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
{
percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
- if (exp)
- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount);
+ if (exp && exp->ex_stats)
+ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount);
}
static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
{
percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
- if (exp)
- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount);
+ if (exp && exp->ex_stats)
+ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount);
}
static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 803904348871..fbc0ccb40424 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -1863,6 +1863,93 @@ TRACE_EVENT(nfsd_end_grace,
)
);
+DECLARE_EVENT_CLASS(nfsd_copy_class,
+ TP_PROTO(
+ const struct nfsd4_copy *copy
+ ),
+ TP_ARGS(copy),
+ TP_STRUCT__entry(
+ __field(bool, intra)
+ __field(bool, async)
+ __field(u32, src_cl_boot)
+ __field(u32, src_cl_id)
+ __field(u32, src_so_id)
+ __field(u32, src_si_generation)
+ __field(u32, dst_cl_boot)
+ __field(u32, dst_cl_id)
+ __field(u32, dst_so_id)
+ __field(u32, dst_si_generation)
+ __field(u64, src_cp_pos)
+ __field(u64, dst_cp_pos)
+ __field(u64, cp_count)
+ __sockaddr(addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ const stateid_t *src_stp = &copy->cp_src_stateid;
+ const stateid_t *dst_stp = &copy->cp_dst_stateid;
+
+ __entry->intra = test_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
+ __entry->async = !test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+ __entry->src_cl_boot = src_stp->si_opaque.so_clid.cl_boot;
+ __entry->src_cl_id = src_stp->si_opaque.so_clid.cl_id;
+ __entry->src_so_id = src_stp->si_opaque.so_id;
+ __entry->src_si_generation = src_stp->si_generation;
+ __entry->dst_cl_boot = dst_stp->si_opaque.so_clid.cl_boot;
+ __entry->dst_cl_id = dst_stp->si_opaque.so_clid.cl_id;
+ __entry->dst_so_id = dst_stp->si_opaque.so_id;
+ __entry->dst_si_generation = dst_stp->si_generation;
+ __entry->src_cp_pos = copy->cp_src_pos;
+ __entry->dst_cp_pos = copy->cp_dst_pos;
+ __entry->cp_count = copy->cp_count;
+ __assign_sockaddr(addr, &copy->cp_clp->cl_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("client=%pISpc intra=%d async=%d "
+ "src_stateid[si_generation:0x%x cl_boot:0x%x cl_id:0x%x so_id:0x%x] "
+ "dst_stateid[si_generation:0x%x cl_boot:0x%x cl_id:0x%x so_id:0x%x] "
+ "cp_src_pos=%llu cp_dst_pos=%llu cp_count=%llu",
+ __get_sockaddr(addr), __entry->intra, __entry->async,
+ __entry->src_si_generation, __entry->src_cl_boot,
+ __entry->src_cl_id, __entry->src_so_id,
+ __entry->dst_si_generation, __entry->dst_cl_boot,
+ __entry->dst_cl_id, __entry->dst_so_id,
+ __entry->src_cp_pos, __entry->dst_cp_pos, __entry->cp_count
+ )
+);
+
+#define DEFINE_COPY_EVENT(name) \
+DEFINE_EVENT(nfsd_copy_class, nfsd_copy_##name, \
+ TP_PROTO(const struct nfsd4_copy *copy), \
+ TP_ARGS(copy))
+
+DEFINE_COPY_EVENT(inter);
+DEFINE_COPY_EVENT(intra);
+DEFINE_COPY_EVENT(do_async);
+
+TRACE_EVENT(nfsd_copy_done,
+ TP_PROTO(
+ const struct nfsd4_copy *copy,
+ __be32 status
+ ),
+ TP_ARGS(copy, status),
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(bool, intra)
+ __field(bool, async)
+ __sockaddr(addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->status = be32_to_cpu(status);
+ __entry->intra = test_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
+ __entry->async = !test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+ __assign_sockaddr(addr, &copy->cp_clp->cl_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc status=%d intra=%d async=%d ",
+ __get_sockaddr(addr), __entry->status, __entry->intra, __entry->async
+ )
+);
+
#endif /* _NFSD_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 48260cf68fde..fbbea7498f02 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -337,6 +337,24 @@ out:
return err;
}
+static void
+commit_reset_write_verifier(struct nfsd_net *nn, struct svc_rqst *rqstp,
+ int err)
+{
+ switch (err) {
+ case -EAGAIN:
+ case -ESTALE:
+ /*
+ * Neither of these is the result of a problem with
+ * durable storage, so avoid a write verifier reset.
+ */
+ break;
+ default:
+ nfsd_reset_write_verifier(nn);
+ trace_nfsd_writeverf_reset(nn, rqstp, err);
+ }
+}
+
/*
* Commit metadata changes to stable storage.
*/
@@ -520,7 +538,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
nfsd_sanitize_attrs(inode, iap);
- if (check_guard && guardtime != inode_get_ctime(inode).tv_sec)
+ if (check_guard && guardtime != inode_get_ctime_sec(inode))
return nfserr_notsync;
/*
@@ -647,8 +665,7 @@ __be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
&nfsd4_get_cstate(rqstp)->current_fh,
dst_pos,
count, status);
- nfsd_reset_write_verifier(nn);
- trace_nfsd_writeverf_reset(nn, rqstp, status);
+ commit_reset_write_verifier(nn, rqstp, status);
ret = nfserrno(status);
}
}
@@ -823,7 +840,7 @@ int nfsd_open_break_lease(struct inode *inode, int access)
* and additional flags.
* N.B. After this call fhp needs an fh_put
*/
-static __be32
+static int
__nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int may_flags, struct file **filp)
{
@@ -831,14 +848,12 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
struct inode *inode;
struct file *file;
int flags = O_RDONLY|O_LARGEFILE;
- __be32 err;
- int host_err = 0;
+ int host_err = -EPERM;
path.mnt = fhp->fh_export->ex_path.mnt;
path.dentry = fhp->fh_dentry;
inode = d_inode(path.dentry);
- err = nfserr_perm;
if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
goto out;
@@ -847,7 +862,7 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
host_err = nfsd_open_break_lease(inode, may_flags);
if (host_err) /* NOMEM or WOULDBLOCK */
- goto out_nfserr;
+ goto out;
if (may_flags & NFSD_MAY_WRITE) {
if (may_flags & NFSD_MAY_READ)
@@ -859,13 +874,13 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
file = dentry_open(&path, flags, current_cred());
if (IS_ERR(file)) {
host_err = PTR_ERR(file);
- goto out_nfserr;
+ goto out;
}
host_err = ima_file_check(file, may_flags);
if (host_err) {
fput(file);
- goto out_nfserr;
+ goto out;
}
if (may_flags & NFSD_MAY_64BIT_COOKIE)
@@ -874,10 +889,8 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
file->f_mode |= FMODE_32BITHASH;
*filp = file;
-out_nfserr:
- err = nfserrno(host_err);
out:
- return err;
+ return host_err;
}
__be32
@@ -885,6 +898,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int may_flags, struct file **filp)
{
__be32 err;
+ int host_err;
bool retried = false;
validate_process_creds();
@@ -904,12 +918,13 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
retry:
err = fh_verify(rqstp, fhp, type, may_flags);
if (!err) {
- err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
- if (err == nfserr_stale && !retried) {
+ host_err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
+ if (host_err == -EOPENSTALE && !retried) {
retried = true;
fh_put(fhp);
goto retry;
}
+ err = nfserrno(host_err);
}
validate_process_creds();
return err;
@@ -922,13 +937,13 @@ retry:
* @may_flags: internal permission flags
* @filp: OUT: open "struct file *"
*
- * Returns an nfsstat value in network byte order.
+ * Returns zero on success, or a negative errno value.
*/
-__be32
+int
nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
struct file **filp)
{
- __be32 err;
+ int err;
validate_process_creds();
err = __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
@@ -1172,8 +1187,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
host_err = vfs_iter_write(file, &iter, &pos, flags);
file_end_write(file);
if (host_err < 0) {
- nfsd_reset_write_verifier(nn);
- trace_nfsd_writeverf_reset(nn, rqstp, host_err);
+ commit_reset_write_verifier(nn, rqstp, host_err);
goto out_nfserr;
}
*cnt = host_err;
@@ -1185,10 +1199,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
if (stable && use_wgather) {
host_err = wait_for_concurrent_writes(file);
- if (host_err < 0) {
- nfsd_reset_write_verifier(nn);
- trace_nfsd_writeverf_reset(nn, rqstp, host_err);
- }
+ if (host_err < 0)
+ commit_reset_write_verifier(nn, rqstp, host_err);
}
out_nfserr:
@@ -1331,8 +1343,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
err = nfserr_notsupp;
break;
default:
- nfsd_reset_write_verifier(nn);
- trace_nfsd_writeverf_reset(nn, rqstp, err2);
+ commit_reset_write_verifier(nn, rqstp, err2);
err = nfserrno(err2);
}
} else
@@ -1788,6 +1799,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
goto out;
+ err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+ if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+ goto out;
+ if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+ goto out;
+
retry:
host_err = fh_want_write(ffhp);
if (host_err) {
@@ -1823,12 +1840,6 @@ retry:
if (ndentry == trap)
goto out_dput_new;
- host_err = -EXDEV;
- if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
- goto out_dput_new;
- if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
- goto out_dput_new;
-
if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
nfsd_has_cached_files(ndentry)) {
close_cached = true;
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index a6890ea7b765..e3c29596f4df 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -104,8 +104,8 @@ __be32 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
int nfsd_open_break_lease(struct inode *, int);
__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
int, struct file **);
-__be32 nfsd_open_verified(struct svc_rqst *, struct svc_fh *,
- int, struct file **);
+int nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ int may_flags, struct file **filp);
__be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct file *file, loff_t offset,
unsigned long *count,
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 9d918a79dc16..80e859dc84d8 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -50,6 +50,134 @@
#define HAS_CSTATE_FLAG(c, f) ((c)->sid_flags & (f))
#define CLEAR_CSTATE_FLAG(c, f) ((c)->sid_flags &= ~(f))
+/**
+ * nfsd4_encode_bool - Encode an XDR bool type result
+ * @xdr: target XDR stream
+ * @val: boolean value to encode
+ *
+ * Return values:
+ * %nfs_ok: @val encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_bool(struct xdr_stream *xdr, bool val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ *p = val ? xdr_one : xdr_zero;
+ return nfs_ok;
+}
+
+/**
+ * nfsd4_encode_uint32_t - Encode an XDR uint32_t type result
+ * @xdr: target XDR stream
+ * @val: integer value to encode
+ *
+ * Return values:
+ * %nfs_ok: @val encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_uint32_t(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ *p = cpu_to_be32(val);
+ return nfs_ok;
+}
+
+#define nfsd4_encode_aceflag4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_acemask4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_acetype4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_count4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_mode4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_nfs_lease4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_qop4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_sequenceid4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_slotid4(x, v) nfsd4_encode_uint32_t(x, v)
+
+/**
+ * nfsd4_encode_uint64_t - Encode an XDR uint64_t type result
+ * @xdr: target XDR stream
+ * @val: integer value to encode
+ *
+ * Return values:
+ * %nfs_ok: @val encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_uint64_t(struct xdr_stream *xdr, u64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ put_unaligned_be64(val, p);
+ return nfs_ok;
+}
+
+#define nfsd4_encode_changeid4(x, v) nfsd4_encode_uint64_t(x, v)
+#define nfsd4_encode_nfs_cookie4(x, v) nfsd4_encode_uint64_t(x, v)
+#define nfsd4_encode_length4(x, v) nfsd4_encode_uint64_t(x, v)
+#define nfsd4_encode_offset4(x, v) nfsd4_encode_uint64_t(x, v)
+
+/**
+ * nfsd4_encode_opaque_fixed - Encode a fixed-length XDR opaque type result
+ * @xdr: target XDR stream
+ * @data: pointer to data
+ * @size: length of data in bytes
+ *
+ * Return values:
+ * %nfs_ok: @data encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_opaque_fixed(struct xdr_stream *xdr, const void *data,
+ size_t size)
+{
+ __be32 *p = xdr_reserve_space(xdr, xdr_align_size(size));
+ size_t pad = xdr_pad_size(size);
+
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ memcpy(p, data, size);
+ if (pad)
+ memset((char *)p + size, 0, pad);
+ return nfs_ok;
+}
+
+/**
+ * nfsd4_encode_opaque - Encode a variable-length XDR opaque type result
+ * @xdr: target XDR stream
+ * @data: pointer to data
+ * @size: length of data in bytes
+ *
+ * Return values:
+ * %nfs_ok: @data encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_opaque(struct xdr_stream *xdr, const void *data, size_t size)
+{
+ size_t pad = xdr_pad_size(size);
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(size));
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ *p++ = cpu_to_be32(size);
+ memcpy(p, data, size);
+ if (pad)
+ memset((char *)p + size, 0, pad);
+ return nfs_ok;
+}
+
+#define nfsd4_encode_component4(x, d, s) nfsd4_encode_opaque(x, d, s)
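Each of these helpers reserves space on the xdr_stream, encodes one XDR item, and returns nfs_ok or nfserr_resource, so result encoders can simply chain them. A hedged sketch of that usage (nfsd4_encode_foo, struct nfsd4_foo and its fields are invented for illustration):

static __be32
nfsd4_encode_foo(struct xdr_stream *xdr, const struct nfsd4_foo *foo)
{
	__be32 status;

	/* a bool flag, a 64-bit length, then a counted opaque blob */
	status = nfsd4_encode_bool(xdr, foo->f_enabled);
	if (status != nfs_ok)
		return status;
	status = nfsd4_encode_length4(xdr, foo->f_length);
	if (status != nfs_ok)
		return status;
	return nfsd4_encode_opaque(xdr, foo->f_data, foo->f_datalen);
}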
+
struct nfsd4_compound_state {
struct svc_fh current_fh;
struct svc_fh save_fh;
@@ -170,12 +298,8 @@ struct nfsd4_lock {
} v;
/* response */
- union {
- struct {
- stateid_t stateid;
- } ok;
- struct nfsd4_lock_denied denied;
- } u;
+ stateid_t lk_resp_stateid;
+ struct nfsd4_lock_denied lk_denied;
};
#define lk_new_open_seqid v.new.open_seqid
#define lk_new_open_stateid v.new.open_stateid
@@ -185,20 +309,15 @@ struct nfsd4_lock {
#define lk_old_lock_stateid v.old.lock_stateid
#define lk_old_lock_seqid v.old.lock_seqid
-#define lk_resp_stateid u.ok.stateid
-#define lk_denied u.denied
-
-
struct nfsd4_lockt {
u32 lt_type;
clientid_t lt_clientid;
struct xdr_netobj lt_owner;
u64 lt_offset;
u64 lt_length;
- struct nfsd4_lock_denied lt_denied;
+ struct nfsd4_lock_denied lt_denied;
};
-
struct nfsd4_locku {
u32 lu_type;
u32 lu_seqid;
@@ -267,9 +386,9 @@ struct nfsd4_open {
u32 op_deleg_want; /* request */
stateid_t op_stateid; /* response */
__be32 op_xdr_error; /* see nfsd4_open_omfg() */
- u32 op_recall; /* recall */
struct nfsd4_change_info op_cinfo; /* response */
u32 op_rflags; /* response */
+ bool op_recall; /* response */
bool op_truncate; /* used during processing */
bool op_created; /* used during processing */
struct nfs4_openowner *op_openowner; /* used during processing */
@@ -496,7 +615,7 @@ struct nfsd4_layoutcommit {
u32 lc_layout_type; /* request */
u32 lc_up_len; /* layout length */
void *lc_up_layout; /* decoded by callback */
- u32 lc_size_chg; /* boolean for response */
+ bool lc_size_chg; /* response */
u64 lc_newsize; /* response */
};
@@ -508,7 +627,7 @@ struct nfsd4_layoutreturn {
u32 lrf_body_len; /* request */
void *lrf_body; /* request */
stateid_t lr_sid; /* request/response */
- u32 lrs_present; /* response */
+ bool lrs_present; /* response */
};
struct nfsd4_fallocate {
@@ -626,8 +745,7 @@ struct nfsd4_copy_notify {
/* response */
stateid_t cpn_cnr_stateid;
- u64 cpn_sec;
- u32 cpn_nsec;
+ struct timespec64 cpn_lease_time;
struct nl4_server *cpn_src;
};
@@ -820,8 +938,10 @@ extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, union nfsd4_op_u *u);
extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
union nfsd4_op_u *u);
+extern void nfsd4_lock_release(union nfsd4_op_u *u);
extern __be32 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
union nfsd4_op_u *u);
+extern void nfsd4_lockt_release(union nfsd4_op_u *u);
extern __be32 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
union nfsd4_op_u *u);
extern __be32
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index 0d39af1b00a0..e8b00309c449 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -54,3 +54,21 @@
#define NFS4_dec_cb_recall_any_sz (cb_compound_dec_hdr_sz + \
cb_sequence_dec_sz + \
op_dec_sz)
+
+/*
+ * 1: CB_GETATTR opcode (32-bit)
+ * N: file_handle
+ * 1: number of entries in attribute array (32-bit)
+ * 1: entry 0 in attribute array (32-bit)
+ */
+#define NFS4_enc_cb_getattr_sz (cb_compound_enc_hdr_sz + \
+ cb_sequence_enc_sz + \
+ 1 + enc_nfs4_fh_sz + 1 + 1)
+/*
+ * 4: fattr_bitmap_maxsz
+ * 1: attribute array len
+ * 2: change attr (64-bit)
+ * 2: size (64-bit)
+ */
+#define NFS4_dec_cb_getattr_sz (cb_compound_dec_hdr_sz + \
+ cb_sequence_dec_sz + 4 + 1 + 2 + 2 + op_dec_sz)
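These *_sz constants count 4-byte XDR words, so the fixed part of the decoded CB_GETATTR reply that the comment itemizes adds up as sketched below (the compound, sequence and op overheads stay symbolic):

/*
 * CB_GETATTR reply body, in XDR words (1 word = 4 bytes):
 *   4  attribute bitmap       (fattr_bitmap_maxsz)
 *   1  attribute array length
 *   2  change attribute       (64-bit)
 *   2  file size              (64-bit)
 * = 9 words = 36 bytes, on top of cb_compound_dec_hdr_sz +
 *   cb_sequence_dec_sz + op_dec_sz.
 */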
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index bce734b68f08..de2073c47651 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -429,7 +429,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
nilfs_set_de_type(de, inode);
nilfs_commit_chunk(page, mapping, from, to);
nilfs_put_page(page);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
/*
@@ -519,7 +519,7 @@ got_it:
de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode);
nilfs_commit_chunk(page, page->mapping, from, to);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
nilfs_mark_inode_dirty(dir);
/* OFFSET_CACHE */
out_put:
@@ -567,7 +567,7 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
pde->rec_len = nilfs_rec_len_to_disk(to - from);
dir->inode = 0;
nilfs_commit_chunk(page, mapping, from, to);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
out:
nilfs_put_page(page);
return err;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 1a8bd5993476..f861f3a0bf5c 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -366,7 +366,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
atomic64_inc(&root->inodes_count);
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = ino;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
err = nilfs_bmap_read(ii->i_bmap, NULL);
@@ -449,12 +449,12 @@ int nilfs_read_inode_common(struct inode *inode,
i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
inode->i_size = le64_to_cpu(raw_inode->i_size);
- inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
+ inode_set_atime(inode, le64_to_cpu(raw_inode->i_mtime),
+ le32_to_cpu(raw_inode->i_mtime_nsec));
inode_set_ctime(inode, le64_to_cpu(raw_inode->i_ctime),
le32_to_cpu(raw_inode->i_ctime_nsec));
- inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
- inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+ inode_set_mtime(inode, le64_to_cpu(raw_inode->i_mtime),
+ le32_to_cpu(raw_inode->i_mtime_nsec));
if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
return -EIO; /* this inode is for metadata and corrupted */
if (inode->i_nlink == 0)
@@ -768,10 +768,10 @@ void nilfs_write_inode_common(struct inode *inode,
raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le64(inode->i_size);
- raw_inode->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
- raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
- raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ raw_inode->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ raw_inode->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
+ raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ raw_inode->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
raw_inode->i_flags = cpu_to_le32(ii->i_flags);
@@ -875,7 +875,7 @@ void nilfs_truncate(struct inode *inode)
nilfs_truncate_bmap(ii, blkoff);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index ebdcc25df0f7..869b016014d2 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -265,7 +265,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
struct dnotify_struct *dn;
struct inode *inode;
fl_owner_t id = current->files;
- struct file *f;
+ struct file *f = NULL;
int destroy = 0, error = 0;
__u32 mask;
@@ -345,7 +345,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
}
rcu_read_lock();
- f = lookup_fd_rcu(fd);
+ f = lookup_fdget_rcu(fd);
rcu_read_unlock();
/* if (f != filp) means that we lost a race and another task/thread
@@ -392,6 +392,8 @@ out_err:
fsnotify_put_mark(new_fsn_mark);
if (dn)
kmem_cache_free(dnotify_struct_cache, dn);
+ if (f)
+ fput(f);
return error;
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index f69c451018e3..62fe0b679e58 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1585,16 +1585,25 @@ static int fanotify_test_fsid(struct dentry *dentry, __kernel_fsid_t *fsid)
}
/* Check if filesystem can encode a unique fid */
-static int fanotify_test_fid(struct dentry *dentry)
+static int fanotify_test_fid(struct dentry *dentry, unsigned int flags)
{
+ unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
+ const struct export_operations *nop = dentry->d_sb->s_export_op;
+
+ /*
+ * We need to make sure that the filesystem supports encoding of
+ * file handles so user can use name_to_handle_at() to compare fids
+ * reported with events to the file handle of watched objects.
+ */
+ if (!nop)
+ return -EOPNOTSUPP;
+
/*
- * We need to make sure that the file system supports at least
- * encoding a file handle so user can use name_to_handle_at() to
- * compare fid returned with event to the file handle of watched
- * objects. However, even the relaxed AT_HANDLE_FID flag requires
- * at least empty export_operations for ecoding unique file ids.
+ * For sb/mount mark, we also need to make sure that the filesystem
+ * supports decoding file handles, so user has a way to map back the
+ * reported fids to filesystem objects.
*/
- if (!dentry->d_sb->s_export_op)
+ if (mark_type != FAN_MARK_INODE && !nop->fh_to_dentry)
return -EOPNOTSUPP;
return 0;
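As the comments above say, FAN_REPORT_FID consumers compare the fid in fanotify events against a handle they obtain themselves with name_to_handle_at(). A minimal userspace sketch of that lookup (MAX_FH_BYTES is a local constant chosen to match the kernel's MAX_HANDLE_SZ of 128 bytes; error handling is trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_FH_BYTES 128	/* matches the kernel's MAX_HANDLE_SZ */

/* Return a file handle for @path, to be compared with event fids. */
static struct file_handle *get_handle(const char *path)
{
	struct file_handle *fh;
	int mount_id;

	fh = malloc(sizeof(*fh) + MAX_FH_BYTES);
	if (!fh)
		return NULL;
	fh->handle_bytes = MAX_FH_BYTES;

	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) < 0) {
		perror("name_to_handle_at");
		free(fh);
		return NULL;
	}
	/* fh->handle_type and fh->f_handle can now be compared against
	 * the fanotify_event_info_fid payload for this object. */
	return fh;
}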
@@ -1812,7 +1821,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
if (ret)
goto path_put_and_out;
- ret = fanotify_test_fid(path.dentry);
+ ret = fanotify_test_fid(path.dentry, flags);
if (ret)
goto path_put_and_out;
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 647a22433bd8..9a4b228d42fa 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -84,7 +84,7 @@ slow:
return -ENOMEM;
}
inode->i_ino = ns->inum;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_flags |= S_IMMUTABLE;
inode->i_mode = S_IFREG | S_IRUGO;
inode->i_fop = &ns_file_operations;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 99ac6ea277c4..aba1e22db4e9 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -648,7 +648,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
* mtime is the last change of the data within the file. Not changed
* when only metadata is changed, e.g. a rename doesn't affect mtime.
*/
- vi->i_mtime = ntfs2utc(si->last_data_change_time);
+ inode_set_mtime_to_ts(vi, ntfs2utc(si->last_data_change_time));
/*
* ctime is the last change of the metadata of the file. This obviously
* always changes, when mtime is changed. ctime can be changed on its
@@ -659,7 +659,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
* Last access to the data within the file. Not changed during a rename
* for example but changed whenever the file is written to.
*/
- vi->i_atime = ntfs2utc(si->last_access_time);
+ inode_set_atime_to_ts(vi, ntfs2utc(si->last_access_time));
/* Find the attribute list attribute if present. */
ntfs_attr_reinit_search_ctx(ctx);
@@ -1217,9 +1217,9 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
- vi->i_mtime = base_vi->i_mtime;
+ inode_set_mtime_to_ts(vi, inode_get_mtime(base_vi));
inode_set_ctime_to_ts(vi, inode_get_ctime(base_vi));
- vi->i_atime = base_vi->i_atime;
+ inode_set_atime_to_ts(vi, inode_get_atime(base_vi));
vi->i_generation = ni->seq_no = base_ni->seq_no;
/* Set inode type to zero but preserve permissions. */
@@ -1483,9 +1483,9 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
- vi->i_mtime = base_vi->i_mtime;
+ inode_set_mtime_to_ts(vi, inode_get_mtime(base_vi));
inode_set_ctime_to_ts(vi, inode_get_ctime(base_vi));
- vi->i_atime = base_vi->i_atime;
+ inode_set_atime_to_ts(vi, inode_get_atime(base_vi));
vi->i_generation = ni->seq_no = base_ni->seq_no;
/* Set inode type to zero but preserve permissions. */
vi->i_mode = base_vi->i_mode & ~S_IFMT;
@@ -2805,13 +2805,14 @@ done:
if (!IS_NOCMTIME(VFS_I(base_ni)) && !IS_RDONLY(VFS_I(base_ni))) {
struct timespec64 now = current_time(VFS_I(base_ni));
struct timespec64 ctime = inode_get_ctime(VFS_I(base_ni));
+ struct timespec64 mtime = inode_get_mtime(VFS_I(base_ni));
int sync_it = 0;
- if (!timespec64_equal(&VFS_I(base_ni)->i_mtime, &now) ||
+ if (!timespec64_equal(&mtime, &now) ||
!timespec64_equal(&ctime, &now))
sync_it = 1;
inode_set_ctime_to_ts(VFS_I(base_ni), now);
- VFS_I(base_ni)->i_mtime = now;
+ inode_set_mtime_to_ts(VFS_I(base_ni), now);
if (sync_it)
mark_inode_dirty_sync(VFS_I(base_ni));
@@ -2925,9 +2926,9 @@ int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
}
}
if (ia_valid & ATTR_ATIME)
- vi->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(vi, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
- vi->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(vi, attr->ia_mtime);
if (ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(vi, attr->ia_ctime);
mark_inode_dirty(vi);
@@ -2996,7 +2997,7 @@ int __ntfs_write_inode(struct inode *vi, int sync)
si = (STANDARD_INFORMATION*)((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
/* Update the access times if they have changed. */
- nt = utc2ntfs(vi->i_mtime);
+ nt = utc2ntfs(inode_get_mtime(vi));
if (si->last_data_change_time != nt) {
ntfs_debug("Updating mtime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino, (long long)
@@ -3014,7 +3015,7 @@ int __ntfs_write_inode(struct inode *vi, int sync)
si->last_mft_change_time = nt;
modified = true;
}
- nt = utc2ntfs(vi->i_atime);
+ nt = utc2ntfs(inode_get_atime(vi));
if (si->last_access_time != nt) {
ntfs_debug("Updating atime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino,
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index ad1a8f72da22..6fd1dc4b08c8 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2682,7 +2682,7 @@ mft_rec_already_initialized:
vi->i_mode &= ~S_IWUGO;
/* Set the inode times to the current time. */
- vi->i_atime = vi->i_mtime = inode_set_ctime_current(vi);
+ simple_inode_init_ts(vi);
/*
* Set the file size to 0, the ntfs inode sizes are set to 0 by
* the call to ntfs_init_big_inode() below.
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index a9d82bbb4729..63f70259edc0 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -1106,10 +1106,10 @@ repack:
}
}
- /*
+ /*
* The code below may require additional cluster (to extend attribute list)
- * and / or one MFT record
- * It is too complex to undo operations if -ENOSPC occurs deep inside
+ * and / or one MFT record
+ * It is too complex to undo operations if -ENOSPC occurs deep inside
* in 'ni_insert_nonresident'.
* Return in advance -ENOSPC here if there are no free cluster and no free MFT.
*/
@@ -1736,10 +1736,8 @@ repack:
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
0, NULL, &mi_b);
- if (!attr_b) {
- err = -ENOENT;
- goto out;
- }
+ if (!attr_b)
+ return -ENOENT;
attr = attr_b;
le = le_b;
diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
index 42631b31adf1..7c01735d1219 100644
--- a/fs/ntfs3/attrlist.c
+++ b/fs/ntfs3/attrlist.c
@@ -52,7 +52,8 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (!attr->non_res) {
lsize = le32_to_cpu(attr->res.data_size);
- le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+ /* attr is resident: lsize < record_size (1K or 4K) */
+ le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
if (!le) {
err = -ENOMEM;
goto out;
@@ -80,7 +81,17 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (err < 0)
goto out;
- le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+ /* attr is nonresident.
+ * The worst case:
+ * a 1T (2^40) extremely fragmented file,
+ * cluster = 4K (2^12) => 2^28 fragments,
+ * 2^9 fragments per record => 2^19 records,
+ * 2^5 bytes of ATTR_LIST_ENTRY per record => 2^24 bytes.
+ *
+ * The result is 16M bytes per attribute list, so use kvmalloc,
+ * which can allocate anything from a few kilobytes up to dozens of megabytes.
+ */
+ le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
if (!le) {
err = -ENOMEM;
goto out;
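The 16M figure in that comment follows directly from its own assumptions; spelling the arithmetic out as a self-contained helper (purely illustrative, not part of the driver):

/* Worst case from the comment above: 1 TiB file, 4 KiB clusters. */
static inline unsigned long long attr_list_worst_case_bytes(void)
{
	unsigned long long fragments = (1ULL << 40) >> 12; /* 2^28 runs */
	unsigned long long records = fragments >> 9;       /* 2^19 MFT records */

	return records << 5;	/* 32-byte ATTR_LIST_ENTRY each => 2^24 = 16 MiB */
}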
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index 107e808e06ea..63f14a0232f6 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -125,6 +125,7 @@ void wnd_close(struct wnd_bitmap *wnd)
struct rb_node *node, *next;
kfree(wnd->free_bits);
+ wnd->free_bits = NULL;
run_close(&wnd->run);
node = rb_first(&wnd->start_tree);
@@ -659,7 +660,8 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
wnd->bits_last = wbits;
wnd->free_bits =
- kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
+ kvmalloc_array(wnd->nwnd, sizeof(u16), GFP_KERNEL | __GFP_ZERO);
+
if (!wnd->free_bits)
return -ENOMEM;
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index 063a6654199b..ec0566b322d5 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -309,7 +309,11 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
return 0;
}
- dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+ /* NTFS: symlinks are "dir + reparse" or "file + reparse" */
+ if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
+ dt_type = DT_LNK;
+ else
+ dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
}
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 962f12ce6c0a..ad4a70b5d432 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -342,7 +342,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
err = 0;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
@@ -400,7 +400,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
ni_unlock(ni);
ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (!IS_DIRSYNC(inode)) {
dirty = 1;
} else {
@@ -642,7 +642,7 @@ out:
filemap_invalidate_unlock(mapping);
if (!err) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
}
@@ -745,8 +745,8 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
}
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len, unsigned int flags)
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
{
struct inode *inode = in->f_mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 2b85cb10f0be..3df2d9e34b91 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -2148,7 +2148,7 @@ out1:
for (i = 0; i < pages_per_frame; i++) {
pg = pages[i];
- if (i == idx)
+ if (i == idx || !pg)
continue;
unlock_page(pg);
put_page(pg);
@@ -3208,6 +3208,12 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
if (!fname || !memcmp(&fname->dup, dup, sizeof(fname->dup)))
continue;
+ /* Check simple case when parent inode equals current inode. */
+ if (ino_get(&fname->home) == ni->vfs_inode.i_ino) {
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ continue;
+ }
+
/* ntfs_iget5 may sleep. */
dir = ntfs_iget5(sb, &fname->home, NULL);
if (IS_ERR(dir)) {
@@ -3265,7 +3271,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
if (is_rec_inuse(ni->mi.mrec) &&
!(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
bool modified = false;
- struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 ts;
/* Update times in standard attribute. */
std = ni_std(ni);
@@ -3275,19 +3281,22 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
}
/* Update the access times if they have changed. */
- dup.m_time = kernel2nt(&inode->i_mtime);
+ ts = inode_get_mtime(inode);
+ dup.m_time = kernel2nt(&ts);
if (std->m_time != dup.m_time) {
std->m_time = dup.m_time;
modified = true;
}
- dup.c_time = kernel2nt(&ctime);
+ ts = inode_get_mtime(inode);
+ dup.c_time = kernel2nt(&ts);
if (std->c_time != dup.c_time) {
std->c_time = dup.c_time;
modified = true;
}
- dup.a_time = kernel2nt(&inode->i_atime);
+ ts = inode_get_atime(inode);
+ dup.a_time = kernel2nt(&ts);
if (std->a_time != dup.a_time) {
std->a_time = dup.a_time;
modified = true;
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 12f28cdf5c83..98ccb6650858 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -2168,8 +2168,10 @@ file_is_valid:
if (!page) {
page = kmalloc(log->page_size, GFP_NOFS);
- if (!page)
- return -ENOMEM;
+ if (!page) {
+ err = -ENOMEM;
+ goto out;
+ }
}
/*
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 33afee0f5559..fbfe21dbb425 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -983,18 +983,11 @@ out:
if (err)
return err;
- mark_inode_dirty(&ni->vfs_inode);
+ mark_inode_dirty_sync(&ni->vfs_inode);
/* verify(!ntfs_update_mftmirr()); */
- /*
- * If we used wait=1, sync_inode_metadata waits for the io for the
- * inode to finish. It hangs when media is removed.
- * So wait=0 is sent down to sync_inode_metadata
- * and filemap_fdatawrite is used for the data blocks.
- */
- err = sync_inode_metadata(&ni->vfs_inode, 0);
- if (!err)
- err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
+ /* write mft record on disk. */
+ err = _ni_write_inode(&ni->vfs_inode, 1);
return err;
}
@@ -2461,10 +2454,12 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
CLST end, i, zone_len, zlen;
struct wnd_bitmap *wnd = &sbi->used.bitmap;
+ bool dirty = false;
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
if (!wnd_is_used(wnd, lcn, len)) {
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ /* Mark the volume as dirty outside of wnd->rw_lock. */
+ dirty = true;
end = lcn + len;
len = 0;
@@ -2518,6 +2513,8 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
out:
up_write(&wnd->rw_lock);
+ if (dirty)
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}
/*
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 124c6e822623..cf92b2433f7a 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -729,6 +729,9 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
u32 total = le32_to_cpu(hdr->total);
u16 offs[128];
+ if (unlikely(!cmp))
+ return NULL;
+
fill_table:
if (end > total)
return NULL;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index eb2ed0701495..5e3d71374918 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -44,7 +44,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
u64 t64;
struct MFT_REC *rec;
struct runs_tree *run;
- struct timespec64 ctime;
+ struct timespec64 ts;
inode->i_op = NULL;
/* Setup 'uid' and 'gid' */
@@ -169,10 +169,12 @@ next_attr:
#ifdef STATX_BTIME
nt2kernel(std5->cr_time, &ni->i_crtime);
#endif
- nt2kernel(std5->a_time, &inode->i_atime);
- ctime = inode_get_ctime(inode);
- nt2kernel(std5->c_time, &ctime);
- nt2kernel(std5->m_time, &inode->i_mtime);
+ nt2kernel(std5->a_time, &ts);
+ inode_set_atime_to_ts(inode, ts);
+ nt2kernel(std5->c_time, &ts);
+ inode_set_ctime_to_ts(inode, ts);
+ nt2kernel(std5->m_time, &ts);
+ inode_set_mtime_to_ts(inode, ts);
ni->std_fa = std5->fa;
@@ -960,7 +962,8 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
if (err >= 0) {
if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
dirty = true;
}
@@ -1660,8 +1663,11 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
/* Set original time. inode times (i_ctime) may be changed in ntfs_init_acl. */
- inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode, ni->i_crtime);
- dir->i_mtime = inode_set_ctime_to_ts(dir, ni->i_crtime);
+ inode_set_atime_to_ts(inode, ni->i_crtime);
+ inode_set_ctime_to_ts(inode, ni->i_crtime);
+ inode_set_mtime_to_ts(inode, ni->i_crtime);
+ inode_set_mtime_to_ts(dir, ni->i_crtime);
+ inode_set_ctime_to_ts(dir, ni->i_crtime);
mark_inode_dirty(dir);
mark_inode_dirty(inode);
@@ -1767,7 +1773,7 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
if (!err) {
drop_nlink(inode);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
if (inode->i_nlink)
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index ad430d50bd79..ee3093be5170 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -156,8 +156,8 @@ static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
err = ntfs_link_inode(inode, de);
if (!err) {
- dir->i_mtime = inode_set_ctime_to_ts(inode,
- inode_set_ctime_current(dir));
+ inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(inode);
mark_inode_dirty(dir);
d_instantiate(de, inode);
@@ -373,7 +373,7 @@ static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
if (IS_POSIXACL(dir)) {
- /*
+ /*
* Load in cache current acl to avoid ni_lock(dir):
* ntfs_create_inode -> ntfs_init_acl -> posix_acl_create ->
* ntfs_get_acl -> ntfs_get_acl_ex -> ni_lock
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 98b76d1b09e7..86aecbb01a92 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -847,7 +847,7 @@ struct OBJECT_ID {
// Birth Volume Id is the Object Id of the Volume on.
// which the Object Id was allocated. It never changes.
struct GUID BirthVolumeId; //0x10:
-
+
// Birth Object Id is the first Object Id that was
// ever assigned to this MFT Record. I.e. If the Object Id
// is changed for some reason, this field will reflect the
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 629403ede6e5..f6706143d14b 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -42,9 +42,11 @@ enum utf16_endian;
#define MINUS_ONE_T ((size_t)(-1))
/* Biggest MFT / smallest cluster */
#define MAXIMUM_BYTES_PER_MFT 4096
+#define MAXIMUM_SHIFT_BYTES_PER_MFT 12
#define NTFS_BLOCKS_PER_MFT_RECORD (MAXIMUM_BYTES_PER_MFT / 512)
#define MAXIMUM_BYTES_PER_INDEX 4096
+#define MAXIMUM_SHIFT_BYTES_PER_INDEX 12
#define NTFS_BLOCKS_PER_INODE (MAXIMUM_BYTES_PER_INDEX / 512)
/* NTFS specific error code when fixup failed. */
@@ -495,8 +497,6 @@ int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, u32 flags);
int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr);
-void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
- CLST len);
int ntfs_file_open(struct inode *inode, struct file *file);
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
@@ -872,7 +872,7 @@ int ntfs_init_acl(struct mnt_idmap *idmap, struct inode *inode,
int ntfs_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry);
ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
-extern const struct xattr_handler *ntfs_xattr_handlers[];
+extern const struct xattr_handler * const ntfs_xattr_handlers[];
int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size);
void ntfs_get_wsl_perm(struct inode *inode);
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index c12ebffc94da..53629b1f65e9 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -189,12 +189,19 @@ out:
return err;
}
+/*
+ * mi_enum_attr - start/continue attributes enumeration in record.
+ *
+ * NOTE: mi->mrec - memory of size sbi->record_size
+ * here we are sure that mi->mrec->total == sbi->record_size (see mi_read)
+ */
struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
{
const struct MFT_REC *rec = mi->mrec;
u32 used = le32_to_cpu(rec->used);
- u32 t32, off, asize;
+ u32 t32, off, asize, prev_type;
u16 t16;
+ u64 data_size, alloc_size, tot_size;
if (!attr) {
u32 total = le32_to_cpu(rec->total);
@@ -213,6 +220,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
if (!is_rec_inuse(rec))
return NULL;
+ prev_type = 0;
attr = Add2Ptr(rec, off);
} else {
/* Check if input attr inside record. */
@@ -226,11 +234,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
return NULL;
}
- if (off + asize < off) {
- /* Overflow check. */
+ /* Overflow check. */
+ if (off + asize < off)
return NULL;
- }
+ prev_type = le32_to_cpu(attr->type);
attr = Add2Ptr(attr, asize);
off += asize;
}
@@ -250,7 +258,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
/* 0x100 is last known attribute for now. */
t32 = le32_to_cpu(attr->type);
- if ((t32 & 0xf) || (t32 > 0x100))
+ if (!t32 || (t32 & 0xf) || (t32 > 0x100))
+ return NULL;
+
+ /* attributes in record must be ordered by type */
+ if (t32 < prev_type)
return NULL;
/* Check overflow and boundary. */
@@ -259,16 +271,15 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
/* Check size of attribute. */
if (!attr->non_res) {
+ /* Check resident fields. */
if (asize < SIZEOF_RESIDENT)
return NULL;
t16 = le16_to_cpu(attr->res.data_off);
-
if (t16 > asize)
return NULL;
- t32 = le32_to_cpu(attr->res.data_size);
- if (t16 + t32 > asize)
+ if (t16 + le32_to_cpu(attr->res.data_size) > asize)
return NULL;
t32 = sizeof(short) * attr->name_len;
@@ -278,21 +289,52 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
return attr;
}
- /* Check some nonresident fields. */
- if (attr->name_len &&
- le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
- le16_to_cpu(attr->nres.run_off)) {
+ /* Check nonresident fields. */
+ if (attr->non_res != 1)
+ return NULL;
+
+ t16 = le16_to_cpu(attr->nres.run_off);
+ if (t16 > asize)
+ return NULL;
+
+ t32 = sizeof(short) * attr->name_len;
+ if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
+ return NULL;
+
+ /* Check start/end vcn. */
+ if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
return NULL;
- }
- if (attr->nres.svcn || !is_attr_ext(attr)) {
+ data_size = le64_to_cpu(attr->nres.data_size);
+ if (le64_to_cpu(attr->nres.valid_size) > data_size)
+ return NULL;
+
+ alloc_size = le64_to_cpu(attr->nres.alloc_size);
+ if (data_size > alloc_size)
+ return NULL;
+
+ t32 = mi->sbi->cluster_mask;
+ if (alloc_size & t32)
+ return NULL;
+
+ if (!attr->nres.svcn && is_attr_ext(attr)) {
+ /* First segment of sparse/compressed attribute */
+ if (asize + 8 < SIZEOF_NONRESIDENT_EX)
+ return NULL;
+
+ tot_size = le64_to_cpu(attr->nres.total_size);
+ if (tot_size & t32)
+ return NULL;
+
+ if (tot_size > alloc_size)
+ return NULL;
+ } else {
if (asize + 8 < SIZEOF_NONRESIDENT)
return NULL;
if (attr->nres.c_unit)
return NULL;
- } else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
- return NULL;
+ }
return attr;
}
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 5661a363005e..f763e3256ccc 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -453,15 +453,23 @@ static struct proc_dir_entry *proc_info_root;
* ntfs3.1
* cluster size
* number of clusters
+ * total number of mft records
+ * number of used mft records ~= number of files + folders
+ * real state of ntfs "dirty"/"clean"
+ * current state of ntfs "dirty"/"clean"
*/
static int ntfs3_volinfo(struct seq_file *m, void *o)
{
struct super_block *sb = m->private;
struct ntfs_sb_info *sbi = sb->s_fs_info;
- seq_printf(m, "ntfs%d.%d\n%u\n%zu\n", sbi->volume.major_ver,
- sbi->volume.minor_ver, sbi->cluster_size,
- sbi->used.bitmap.nbits);
+ seq_printf(m, "ntfs%d.%d\n%u\n%zu\n%zu\n%zu\n%s\n%s\n",
+ sbi->volume.major_ver, sbi->volume.minor_ver,
+ sbi->cluster_size, sbi->used.bitmap.nbits,
+ sbi->mft.bitmap.nbits,
+ sbi->mft.bitmap.nbits - wnd_zeroes(&sbi->mft.bitmap),
+ sbi->volume.real_dirty ? "dirty" : "clean",
+ (sbi->volume.flags & VOLUME_FLAG_DIRTY) ? "dirty" : "clean");
return 0;
}
@@ -488,9 +496,13 @@ static ssize_t ntfs3_label_write(struct file *file, const char __user *buffer,
{
int err;
struct super_block *sb = pde_data(file_inode(file));
- struct ntfs_sb_info *sbi = sb->s_fs_info;
ssize_t ret = count;
- u8 *label = kmalloc(count, GFP_NOFS);
+ u8 *label;
+
+ if (sb_rdonly(sb))
+ return -EROFS;
+
+ label = kmalloc(count, GFP_NOFS);
if (!label)
return -ENOMEM;
@@ -502,7 +514,7 @@ static ssize_t ntfs3_label_write(struct file *file, const char __user *buffer,
while (ret > 0 && label[ret - 1] == '\n')
ret -= 1;
- err = ntfs_set_label(sbi, label, ret);
+ err = ntfs_set_label(sb->s_fs_info, label, ret);
if (err < 0) {
ntfs_err(sb, "failed (%d) to write label", err);
@@ -576,20 +588,30 @@ static noinline void ntfs3_put_sbi(struct ntfs_sb_info *sbi)
wnd_close(&sbi->mft.bitmap);
wnd_close(&sbi->used.bitmap);
- if (sbi->mft.ni)
+ if (sbi->mft.ni) {
iput(&sbi->mft.ni->vfs_inode);
+ sbi->mft.ni = NULL;
+ }
- if (sbi->security.ni)
+ if (sbi->security.ni) {
iput(&sbi->security.ni->vfs_inode);
+ sbi->security.ni = NULL;
+ }
- if (sbi->reparse.ni)
+ if (sbi->reparse.ni) {
iput(&sbi->reparse.ni->vfs_inode);
+ sbi->reparse.ni = NULL;
+ }
- if (sbi->objid.ni)
+ if (sbi->objid.ni) {
iput(&sbi->objid.ni->vfs_inode);
+ sbi->objid.ni = NULL;
+ }
- if (sbi->volume.ni)
+ if (sbi->volume.ni) {
iput(&sbi->volume.ni->vfs_inode);
+ sbi->volume.ni = NULL;
+ }
ntfs_update_mftmirr(sbi, 0);
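
Clearing each ->ni pointer immediately after its iput() makes ntfs3_put_sbi() idempotent, which is why the ntfs_fill_super() error path later in this diff can reach it more than once without a double put. A tiny sketch of the same release-and-clear idiom with a hypothetical resource type:

#include <stdlib.h>

struct res { void *buf; };

/* Safe to call repeatedly: the pointer is cleared after the first release. */
static void res_put(struct res *r)
{
	if (r->buf) {
		free(r->buf);
		r->buf = NULL;
	}
}

Calling res_put() twice on the same object is then a harmless no-op the second time.
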
@@ -836,7 +858,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
struct ntfs_sb_info *sbi = sb->s_fs_info;
int err;
u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
- u64 sectors, clusters, mlcn, mlcn2;
+ u64 sectors, clusters, mlcn, mlcn2, dev_size0;
struct NTFS_BOOT *boot;
struct buffer_head *bh;
struct MFT_REC *rec;
@@ -845,6 +867,9 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
u32 boot_off = 0;
const char *hint = "Primary boot";
+ /* Save original dev_size. Used with alternative boot. */
+ dev_size0 = dev_size;
+
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
bh = ntfs_bread(sb, 0);
@@ -853,6 +878,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
check_boot:
err = -EINVAL;
+
+ /* Corrupted image; do not read OOB */
+ if (bh->b_size - sizeof(*boot) < boot_off)
+ goto out;
+
boot = (struct NTFS_BOOT *)Add2Ptr(bh->b_data, boot_off);
if (memcmp(boot->system_id, "NTFS ", sizeof("NTFS ") - 1)) {
@@ -899,9 +929,17 @@ check_boot:
goto out;
}
- sbi->record_size = record_size =
- boot->record_size < 0 ? 1 << (-boot->record_size) :
- (u32)boot->record_size << cluster_bits;
+ if (boot->record_size >= 0) {
+ record_size = (u32)boot->record_size << cluster_bits;
+ } else if (-boot->record_size <= MAXIMUM_SHIFT_BYTES_PER_MFT) {
+ record_size = 1u << (-boot->record_size);
+ } else {
+ ntfs_err(sb, "%s: invalid record size %d.", hint,
+ boot->record_size);
+ goto out;
+ }
+
+ sbi->record_size = record_size;
sbi->record_bits = blksize_bits(record_size);
sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
@@ -918,9 +956,15 @@ check_boot:
goto out;
}
- sbi->index_size = boot->index_size < 0 ?
- 1u << (-boot->index_size) :
- (u32)boot->index_size << cluster_bits;
+ if (boot->index_size >= 0) {
+ sbi->index_size = (u32)boot->index_size << cluster_bits;
+ } else if (-boot->index_size <= MAXIMUM_SHIFT_BYTES_PER_INDEX) {
+ sbi->index_size = 1u << (-boot->index_size);
+ } else {
+ ntfs_err(sb, "%s: invalid index size %d.", hint,
+ boot->index_size);
+ goto out;
+ }
/* Check index record size. */
if (sbi->index_size < SECTOR_SIZE || !is_power_of_2(sbi->index_size)) {
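
Both size fields from the boot sector are decoded the same way: a non-negative value counts clusters, a negative value encodes a power-of-two shift, and the shift is now bounded (MAXIMUM_SHIFT_BYTES_PER_MFT / MAXIMUM_SHIFT_BYTES_PER_INDEX) before it is used. A hedged sketch of that decoding; MAX_SHIFT below is an illustrative bound, not the kernel's constant:

#include <stdint.h>

#define MAX_SHIFT 30	/* illustrative bound only */

/* Return the size in bytes, or 0 if the encoded value is unusable. */
static uint32_t decode_boot_size(int8_t raw, uint32_t cluster_bits)
{
	if (raw >= 0)
		return (uint32_t)raw << cluster_bits;	/* counted in clusters */
	if (-raw <= MAX_SHIFT)
		return 1u << -raw;			/* encoded as a shift */
	return 0;					/* reject oversized shift */
}
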
@@ -1055,17 +1099,17 @@ check_boot:
if (bh->b_blocknr && !sb_rdonly(sb)) {
/*
- * Alternative boot is ok but primary is not ok.
- * Do not update primary boot here 'cause it may be faked boot.
- * Let ntfs to be mounted and update boot later.
- */
+ * Alternative boot is ok but primary is not ok.
+ * Do not update primary boot here 'cause it may be faked boot.
+ * Let ntfs to be mounted and update boot later.
+ */
*boot2 = kmemdup(boot, sizeof(*boot), GFP_NOFS | __GFP_NOWARN);
}
out:
- if (err == -EINVAL && !bh->b_blocknr && dev_size > PAGE_SHIFT) {
+ if (err == -EINVAL && !bh->b_blocknr && dev_size0 > PAGE_SHIFT) {
u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
- u64 lbo = dev_size - sizeof(*boot);
+ u64 lbo = dev_size0 - sizeof(*boot);
/*
* Try alternative boot (last sector)
@@ -1079,6 +1123,7 @@ out:
boot_off = lbo & (block_size - 1);
hint = "Alternative boot";
+ dev_size = dev_size0; /* restore original size. */
goto check_boot;
}
brelse(bh);
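
Keeping dev_size0 around matters because the primary-boot path may shrink dev_size, while the fallback still needs the real device size to address the last sector. A small sketch of how the alternative boot location is derived from the saved size, assuming the boot record is 512 bytes (sizeof(*boot) in the code above) and that block_size is a power of two:

#include <stdint.h>

struct alt_boot_pos {
	uint64_t block;		/* block that holds the record */
	uint32_t offset;	/* byte offset of the record inside that block */
};

static struct alt_boot_pos locate_alt_boot(uint64_t dev_size0, uint32_t block_size)
{
	uint64_t lbo = dev_size0 - 512;	/* record sits in the last sector */
	struct alt_boot_pos pos = {
		.block  = lbo / block_size,
		.offset = (uint32_t)(lbo & (block_size - 1)),
	};
	return pos;
}
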
@@ -1367,7 +1412,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
}
bytes = inode->i_size;
- sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN);
+ sbi->def_table = t = kvmalloc(bytes, GFP_KERNEL);
if (!t) {
err = -ENOMEM;
goto put_inode_out;
@@ -1521,9 +1566,9 @@ load_root:
if (boot2) {
/*
- * Alternative boot is ok but primary is not ok.
- * Volume is recognized as NTFS. Update primary boot.
- */
+ * Alternative boot is ok but primary is not ok.
+ * Volume is recognized as NTFS. Update primary boot.
+ */
struct buffer_head *bh0 = sb_getblk(sb, 0);
if (bh0) {
if (buffer_locked(bh0))
@@ -1564,6 +1609,7 @@ put_inode_out:
out:
ntfs3_put_sbi(sbi);
kfree(boot2);
+ ntfs3_put_sbi(sbi);
return err;
}
@@ -1757,7 +1803,6 @@ static int __init init_ntfs_fs(void)
if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
-
#ifdef CONFIG_PROC_FS
/* Create "/proc/fs/ntfs3" */
proc_info_root = proc_mkdir("fs/ntfs3", NULL);
@@ -1799,7 +1844,6 @@ static void __exit exit_ntfs_fs(void)
if (proc_info_root)
remove_proc_entry("fs/ntfs3", NULL);
#endif
-
}
MODULE_LICENSE("GPL");
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index 29fd391899e5..4274b6f31cfa 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -211,7 +211,8 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
size = le32_to_cpu(info->size);
/* Enumerate all xattrs. */
- for (ret = 0, off = 0; off < size; off += ea_size) {
+ ret = 0;
+ for (off = 0; off + sizeof(struct EA_FULL) < size; off += ea_size) {
ea = Add2Ptr(ea_all, off);
ea_size = unpacked_ea_size(ea);
@@ -219,6 +220,10 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
break;
if (buffer) {
+ /* Check if we can use field ea->name */
+ if (off + ea_size > size)
+ break;
+
if (ret + ea->name_len + 1 > bytes_per_buffer) {
err = -ERANGE;
goto out;
@@ -1016,7 +1021,7 @@ static const struct xattr_handler ntfs_other_xattr_handler = {
.list = ntfs_xattr_user_list,
};
-const struct xattr_handler *ntfs_xattr_handlers[] = {
+const struct xattr_handler * const ntfs_xattr_handlers[] = {
&ntfs_other_xattr_handler,
NULL,
};
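
The two added checks in ntfs_list_ea() ensure that both the fixed EA header and the full variable-length record fit inside the loaded buffer before ea->name is read. A simplified sketch of the same loop discipline over a flat buffer (the struct is an illustrative stand-in, not struct EA_FULL):

#include <stddef.h>
#include <stdint.h>

struct ea_sketch {
	uint32_t size;		/* total size of this record, header included */
	uint8_t  name_len;
	char     name[];	/* name_len bytes follow */
};

static size_t count_eas(const uint8_t *buf, size_t size)
{
	size_t off = 0, n = 0;

	/* Stop as soon as even the fixed header would not fit. */
	while (off + sizeof(struct ea_sketch) < size) {
		const struct ea_sketch *ea = (const void *)(buf + off);

		/* Record (and therefore ea->name) must lie fully inside buf. */
		if (!ea->size || off + ea->size > size)
			break;
		n++;
		off += ea->size;
	}
	return n;
}
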
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index e75137a8e7cb..62464d194da3 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -193,8 +193,8 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
inode->i_mode = new_mode;
inode_set_ctime_current(inode);
di->i_mode = cpu_to_le16(inode->i_mode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, di_bh);
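
Every ocfs2 hunk in this series applies the same substitution: direct tv_sec/tv_nsec reads become the inode_get_*_sec()/_nsec() accessors and direct stores go through inode_set_*_to_ts(). A hedged kernel-style sketch of the stamping pattern, using only accessors that already appear in this diff:

#include <linux/fs.h>

/* Sketch only: stamp a little-endian on-disk ctime from the in-core inode. */
static void stamp_disk_ctime(struct inode *inode, __le64 *sec, __le32 *nsec)
{
	inode_set_ctime_current(inode);
	*sec  = cpu_to_le64(inode_get_ctime_sec(inode));
	*nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
}
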
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index aef58f1395c8..f0937902f7b4 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7436,10 +7436,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0fdba30740ab..6ab03494fc6e 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2048,9 +2048,9 @@ out_write_size:
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
di->i_size = cpu_to_le64((u64)i_size_read(inode));
- inode->i_mtime = inode_set_ctime_current(inode);
- di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
- di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
+ di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
if (handle)
ocfs2_update_inode_fsync_trans(handle, inode, 1);
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 21472e3ed182..4d7efefa98c5 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -213,7 +213,7 @@ struct o2hb_region {
unsigned int hr_num_pages;
struct page **hr_slot_data;
- struct block_device *hr_bdev;
+ struct bdev_handle *hr_bdev_handle;
struct o2hb_disk_slot *hr_slots;
/* live node map of this region */
@@ -261,6 +261,11 @@ struct o2hb_region {
int hr_last_hb_status;
};
+static inline struct block_device *reg_bdev(struct o2hb_region *reg)
+{
+ return reg->hr_bdev_handle ? reg->hr_bdev_handle->bdev : NULL;
+}
+
struct o2hb_bio_wait_ctxt {
atomic_t wc_num_reqs;
struct completion wc_io_complete;
@@ -286,7 +291,7 @@ static void o2hb_write_timeout(struct work_struct *work)
hr_write_timeout_work.work);
mlog(ML_ERROR, "Heartbeat write timeout to device %pg after %u "
- "milliseconds\n", reg->hr_bdev,
+ "milliseconds\n", reg_bdev(reg),
jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
if (o2hb_global_heartbeat_active()) {
@@ -383,7 +388,7 @@ static void o2hb_nego_timeout(struct work_struct *work)
if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {
printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%pg).\n",
o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000,
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item), reg_bdev(reg));
set_bit(master_node, reg->hr_nego_node_bitmap);
}
if (!bitmap_equal(reg->hr_nego_node_bitmap, live_node_bitmap,
@@ -398,7 +403,8 @@ static void o2hb_nego_timeout(struct work_struct *work)
}
printk(KERN_NOTICE "o2hb: all nodes hb write hung, maybe region %s (%pg) is down.\n",
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item),
+ reg_bdev(reg));
/* approve negotiate timeout request. */
o2hb_arm_timeout(reg);
@@ -419,7 +425,7 @@ static void o2hb_nego_timeout(struct work_struct *work)
/* negotiate timeout with master node. */
printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%pg), negotiate timeout with node %d.\n",
o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, config_item_name(&reg->hr_item),
- reg->hr_bdev, master_node);
+ reg_bdev(reg), master_node);
ret = o2hb_send_nego_msg(reg->hr_key, O2HB_NEGO_TIMEOUT_MSG,
master_node);
if (ret)
@@ -436,7 +442,8 @@ static int o2hb_nego_timeout_handler(struct o2net_msg *msg, u32 len, void *data,
nego_msg = (struct o2hb_nego_msg *)msg->buf;
printk(KERN_NOTICE "o2hb: receive negotiate timeout message from node %d on region %s (%pg).\n",
- nego_msg->node_num, config_item_name(&reg->hr_item), reg->hr_bdev);
+ nego_msg->node_num, config_item_name(&reg->hr_item),
+ reg_bdev(reg));
if (nego_msg->node_num < O2NM_MAX_NODES)
set_bit(nego_msg->node_num, reg->hr_nego_node_bitmap);
else
@@ -451,7 +458,7 @@ static int o2hb_nego_approve_handler(struct o2net_msg *msg, u32 len, void *data,
struct o2hb_region *reg = data;
printk(KERN_NOTICE "o2hb: negotiate timeout approved by master node on region %s (%pg).\n",
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item), reg_bdev(reg));
o2hb_arm_timeout(reg);
return 0;
}
@@ -515,7 +522,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
* GFP_KERNEL that the local node can get fenced. It would be
* nicest if we could pre-allocate these bios and avoid this
* all together. */
- bio = bio_alloc(reg->hr_bdev, 16, opf, GFP_ATOMIC);
+ bio = bio_alloc(reg_bdev(reg), 16, opf, GFP_ATOMIC);
if (!bio) {
mlog(ML_ERROR, "Could not alloc slots BIO!\n");
bio = ERR_PTR(-ENOMEM);
@@ -687,7 +694,7 @@ static int o2hb_check_own_slot(struct o2hb_region *reg)
errstr = ERRSTR3;
mlog(ML_ERROR, "%s (%pg): expected(%u:0x%llx, 0x%llx), "
- "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_bdev,
+ "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg_bdev(reg),
slot->ds_node_num, (unsigned long long)slot->ds_last_generation,
(unsigned long long)slot->ds_last_time, hb_block->hb_node,
(unsigned long long)le64_to_cpu(hb_block->hb_generation),
@@ -861,7 +868,7 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg)
goto unlock;
printk(KERN_NOTICE "o2hb: Region %s (%pg) is now a quorum device\n",
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item), reg_bdev(reg));
set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
@@ -920,7 +927,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
* consider it a transient miss but don't populate any
* other values as they may be junk. */
mlog(ML_ERROR, "Node %d has written a bad crc to %pg\n",
- slot->ds_node_num, reg->hr_bdev);
+ slot->ds_node_num, reg_bdev(reg));
o2hb_dump_slot(hb_block);
slot->ds_equal_samples++;
@@ -1003,8 +1010,8 @@ fire_callbacks:
"of %u ms, but our count is %u ms.\n"
"Please double check your configuration values "
"for 'O2CB_HEARTBEAT_THRESHOLD'\n",
- slot->ds_node_num, reg->hr_bdev, slot_dead_ms,
- dead_ms);
+ slot->ds_node_num, reg_bdev(reg),
+ slot_dead_ms, dead_ms);
}
goto out;
}
@@ -1143,7 +1150,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
* can't be sure that the new block ever made it to
* disk */
mlog(ML_ERROR, "Write error %d on device \"%pg\"\n",
- write_wc.wc_error, reg->hr_bdev);
+ write_wc.wc_error, reg_bdev(reg));
ret = write_wc.wc_error;
goto bail;
}
@@ -1169,7 +1176,7 @@ bail:
printk(KERN_NOTICE "o2hb: Unable to stabilize "
"heartbeat on region %s (%pg)\n",
config_item_name(&reg->hr_item),
- reg->hr_bdev);
+ reg_bdev(reg));
atomic_set(&reg->hr_steady_iterations, 0);
reg->hr_aborted_start = 1;
wake_up(&o2hb_steady_queue);
@@ -1489,7 +1496,7 @@ static void o2hb_region_release(struct config_item *item)
struct page *page;
struct o2hb_region *reg = to_o2hb_region(item);
- mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg->hr_bdev);
+ mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg_bdev(reg));
kfree(reg->hr_tmp_block);
@@ -1502,8 +1509,8 @@ static void o2hb_region_release(struct config_item *item)
kfree(reg->hr_slot_data);
}
- if (reg->hr_bdev)
- blkdev_put(reg->hr_bdev, NULL);
+ if (reg->hr_bdev_handle)
+ bdev_release(reg->hr_bdev_handle);
kfree(reg->hr_slots);
@@ -1562,7 +1569,7 @@ static ssize_t o2hb_region_block_bytes_store(struct config_item *item,
unsigned long block_bytes;
unsigned int block_bits;
- if (reg->hr_bdev)
+ if (reg->hr_bdev_handle)
return -EINVAL;
status = o2hb_read_block_input(reg, page, &block_bytes,
@@ -1591,7 +1598,7 @@ static ssize_t o2hb_region_start_block_store(struct config_item *item,
char *p = (char *)page;
ssize_t ret;
- if (reg->hr_bdev)
+ if (reg->hr_bdev_handle)
return -EINVAL;
ret = kstrtoull(p, 0, &tmp);
@@ -1616,7 +1623,7 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item,
unsigned long tmp;
char *p = (char *)page;
- if (reg->hr_bdev)
+ if (reg->hr_bdev_handle)
return -EINVAL;
tmp = simple_strtoul(p, &p, 0);
@@ -1635,8 +1642,8 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
{
unsigned int ret = 0;
- if (to_o2hb_region(item)->hr_bdev)
- ret = sprintf(page, "%pg\n", to_o2hb_region(item)->hr_bdev);
+ if (to_o2hb_region(item)->hr_bdev_handle)
+ ret = sprintf(page, "%pg\n", reg_bdev(to_o2hb_region(item)));
return ret;
}
@@ -1745,7 +1752,10 @@ out:
return ret;
}
-/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
+/*
+ * this is acting as commit; we set up all of hr_bdev_handle and hr_task or
+ * nothing
+ */
static ssize_t o2hb_region_dev_store(struct config_item *item,
const char *page,
size_t count)
@@ -1759,7 +1769,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
ssize_t ret = -EINVAL;
int live_threshold;
- if (reg->hr_bdev)
+ if (reg->hr_bdev_handle)
goto out;
/* We can't heartbeat without having had our node number
@@ -1785,16 +1795,15 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
if (!S_ISBLK(f.file->f_mapping->host->i_mode))
goto out2;
- reg->hr_bdev = blkdev_get_by_dev(f.file->f_mapping->host->i_rdev,
- BLK_OPEN_WRITE | BLK_OPEN_READ, NULL,
- NULL);
- if (IS_ERR(reg->hr_bdev)) {
- ret = PTR_ERR(reg->hr_bdev);
- reg->hr_bdev = NULL;
+ reg->hr_bdev_handle = bdev_open_by_dev(f.file->f_mapping->host->i_rdev,
+ BLK_OPEN_WRITE | BLK_OPEN_READ, NULL, NULL);
+ if (IS_ERR(reg->hr_bdev_handle)) {
+ ret = PTR_ERR(reg->hr_bdev_handle);
+ reg->hr_bdev_handle = NULL;
goto out2;
}
- sectsize = bdev_logical_block_size(reg->hr_bdev);
+ sectsize = bdev_logical_block_size(reg_bdev(reg));
if (sectsize != reg->hr_block_bytes) {
mlog(ML_ERROR,
"blocksize %u incorrect for device, expected %d",
@@ -1890,12 +1899,12 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
if (hb_task && o2hb_global_heartbeat_active())
printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%pg)\n",
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item), reg_bdev(reg));
out3:
if (ret < 0) {
- blkdev_put(reg->hr_bdev, NULL);
- reg->hr_bdev = NULL;
+ bdev_release(reg->hr_bdev_handle);
+ reg->hr_bdev_handle = NULL;
}
out2:
fdput(f);
@@ -2085,7 +2094,7 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%pg)\n",
((atomic_read(&reg->hr_steady_iterations) == 0) ?
"stopped" : "start aborted"), config_item_name(item),
- reg->hr_bdev);
+ reg_bdev(reg));
}
/*
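
The heartbeat conversion replaces the raw block_device pointer with a struct bdev_handle and funnels every reader through reg_bdev(), so open and close pair up as bdev_open_by_dev()/bdev_release() and "not attached" is always a NULL handle. A condensed kernel-style sketch of that lifecycle, using only the calls seen in the hunks above (error logging omitted):

#include <linux/blkdev.h>
#include <linux/err.h>

struct hb_region_sketch {
	struct bdev_handle *bdev_handle;
};

static int hb_region_attach(struct hb_region_sketch *reg, dev_t dev)
{
	reg->bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
					    NULL, NULL);
	if (IS_ERR(reg->bdev_handle)) {
		int err = PTR_ERR(reg->bdev_handle);

		reg->bdev_handle = NULL;	/* keep "not attached" == NULL */
		return err;
	}
	return 0;
}

static void hb_region_detach(struct hb_region_sketch *reg)
{
	if (reg->bdev_handle) {
		bdev_release(reg->bdev_handle);
		reg->bdev_handle = NULL;
	}
}
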
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 8b123d543e6e..a14c8fee6ee5 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1658,7 +1658,8 @@ int __ocfs2_add_entry(handle_t *handle,
offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
if (ocfs2_dirent_would_fit(de, rec_len)) {
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_current(dir));
retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
if (retval < 0) {
mlog_errno(retval);
@@ -2962,11 +2963,11 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
ocfs2_dinode_new_extent_list(dir, di);
i_size_write(dir, sb->s_blocksize);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
di->i_size = cpu_to_le64(sb->s_blocksize);
- di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(dir).tv_sec);
- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(dir).tv_nsec);
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(dir));
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(dir));
ocfs2_update_inode_fsync_trans(handle, dir, 1);
/*
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 81265123ce6c..9b57d012fd5c 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -337,7 +337,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inc_nlink(inode);
inode->i_fop = &simple_dir_operations;
@@ -360,7 +360,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
inode->i_ino = get_next_ino();
inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
ip = DLMFS_I(inode);
ip->ip_conn = DLMFS_I(parent)->ip_conn;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c3e2961ee5db..64a6ef638495 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2162,7 +2162,7 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
- struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 ts;
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
@@ -2183,12 +2183,12 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
lvb->lvb_imode = cpu_to_be16(inode->i_mode);
lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
- lvb->lvb_iatime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
- lvb->lvb_ictime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&ctime));
- lvb->lvb_imtime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
+ ts = inode_get_atime(inode);
+ lvb->lvb_iatime_packed = cpu_to_be64(ocfs2_pack_timespec(&ts));
+ ts = inode_get_ctime(inode);
+ lvb->lvb_ictime_packed = cpu_to_be64(ocfs2_pack_timespec(&ts));
+ ts = inode_get_mtime(inode);
+ lvb->lvb_imtime_packed = cpu_to_be64(ocfs2_pack_timespec(&ts));
lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
@@ -2209,7 +2209,7 @@ static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
- struct timespec64 ctime;
+ struct timespec64 ts;
mlog_meta_lvb(0, lockres);
@@ -2236,13 +2236,12 @@ static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
inode->i_mode = be16_to_cpu(lvb->lvb_imode);
set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
- ocfs2_unpack_timespec(&inode->i_atime,
- be64_to_cpu(lvb->lvb_iatime_packed));
- ocfs2_unpack_timespec(&inode->i_mtime,
- be64_to_cpu(lvb->lvb_imtime_packed));
- ocfs2_unpack_timespec(&ctime,
- be64_to_cpu(lvb->lvb_ictime_packed));
- inode_set_ctime_to_ts(inode, ctime);
+ ocfs2_unpack_timespec(&ts, be64_to_cpu(lvb->lvb_iatime_packed));
+ inode_set_atime_to_ts(inode, ts);
+ ocfs2_unpack_timespec(&ts, be64_to_cpu(lvb->lvb_imtime_packed));
+ inode_set_mtime_to_ts(inode, ts);
+ ocfs2_unpack_timespec(&ts, be64_to_cpu(lvb->lvb_ictime_packed));
+ inode_set_ctime_to_ts(inode, ts);
spin_unlock(&oi->ip_lock);
return 0;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index c45596c25c66..94e2a1244442 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -233,16 +233,18 @@ int ocfs2_should_update_atime(struct inode *inode,
if (vfsmnt->mnt_flags & MNT_RELATIME) {
struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 atime = inode_get_atime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
- if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
- (timespec64_compare(&inode->i_atime, &ctime) <= 0))
+ if ((timespec64_compare(&atime, &mtime) <= 0) ||
+ (timespec64_compare(&atime, &ctime) <= 0))
return 1;
return 0;
}
now = current_time(inode);
- if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
+ if ((now.tv_sec - inode_get_atime_sec(inode) <= osb->s_atime_quantum))
return 0;
else
return 1;
@@ -275,9 +277,9 @@ int ocfs2_update_inode_atime(struct inode *inode,
* have i_rwsem to guard against concurrent changes to other
* inode fields.
*/
- inode->i_atime = current_time(inode);
- di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
- di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
+ inode_set_atime_to_ts(inode, current_time(inode));
+ di->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
+ di->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, bh);
@@ -296,7 +298,7 @@ int ocfs2_set_inode_size(handle_t *handle,
i_size_write(inode, new_i_size);
inode->i_blocks = ocfs2_inode_sector_count(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
if (status < 0) {
@@ -417,12 +419,12 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
}
i_size_write(inode, new_i_size);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
di = (struct ocfs2_dinode *) fe_bh->b_data;
di->i_size = cpu_to_le64(new_i_size);
- di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, fe_bh);
@@ -821,9 +823,9 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
i_size_write(inode, abs_to);
inode->i_blocks = ocfs2_inode_sector_count(inode);
di->i_size = cpu_to_le64((u64)i_size_read(inode));
- inode->i_mtime = inode_set_ctime_current(inode);
- di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
di->i_mtime_nsec = di->i_ctime_nsec;
if (handle) {
ocfs2_journal_dirty(handle, di_bh);
@@ -2040,7 +2042,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
if (ret < 0)
mlog_errno(ret);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index e8771600b930..999111bfc271 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -302,10 +302,10 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_mapping->a_ops = &ocfs2_aops;
}
- inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
- inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
- inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
- inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
+ inode_set_atime(inode, le64_to_cpu(fe->i_atime),
+ le32_to_cpu(fe->i_atime_nsec));
+ inode_set_mtime(inode, le64_to_cpu(fe->i_mtime),
+ le32_to_cpu(fe->i_mtime_nsec));
inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
le32_to_cpu(fe->i_ctime_nsec));
@@ -1312,12 +1312,12 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
fe->i_uid = cpu_to_le32(i_uid_read(inode));
fe->i_gid = cpu_to_le32(i_gid_read(inode));
fe->i_mode = cpu_to_le16(inode->i_mode);
- fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
- fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
- fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
- fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ fe->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
+ fe->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
+ fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ fe->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
+ fe->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
ocfs2_journal_dirty(handle, bh);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
@@ -1348,10 +1348,10 @@ void ocfs2_refresh_inode(struct inode *inode,
inode->i_blocks = 0;
else
inode->i_blocks = ocfs2_inode_sector_count(inode);
- inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
- inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
- inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
- inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
+ inode_set_atime(inode, le64_to_cpu(fe->i_atime),
+ le32_to_cpu(fe->i_atime_nsec));
+ inode_set_mtime(inode, le64_to_cpu(fe->i_mtime),
+ le32_to_cpu(fe->i_mtime_nsec));
inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
le32_to_cpu(fe->i_ctime_nsec));
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 05d67968a3a9..1f9ed117e78b 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -951,8 +951,8 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
di = (struct ocfs2_dinode *)di_bh->b_data;
inode_set_ctime_current(inode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 5cd6d7771cea..681e9501cdd3 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -795,8 +795,8 @@ static int ocfs2_link(struct dentry *old_dentry,
inc_nlink(inode);
inode_set_ctime_current(inode);
ocfs2_set_links_count(fe, inode->i_nlink);
- fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_journal_dirty(handle, fe_bh);
err = ocfs2_add_entry(handle, dentry, inode,
@@ -995,7 +995,7 @@ static int ocfs2_unlink(struct inode *dir,
ocfs2_set_links_count(fe, inode->i_nlink);
ocfs2_journal_dirty(handle, fe_bh);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
if (S_ISDIR(inode->i_mode))
drop_nlink(dir);
@@ -1550,8 +1550,8 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
if (status >= 0) {
old_di = (struct ocfs2_dinode *) old_inode_bh->b_data;
- old_di->i_ctime = cpu_to_le64(inode_get_ctime(old_inode).tv_sec);
- old_di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(old_inode).tv_nsec);
+ old_di->i_ctime = cpu_to_le64(inode_get_ctime_sec(old_inode));
+ old_di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(old_inode));
ocfs2_journal_dirty(handle, old_inode_bh);
} else
mlog_errno(status);
@@ -1592,7 +1592,7 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
drop_nlink(new_inode);
inode_set_ctime_current(new_inode);
}
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
if (update_dot_dot) {
status = ocfs2_update_entry(old_inode, handle,
@@ -1614,8 +1614,8 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
if (old_dir != new_dir) {
/* Keep the same times on both directories.*/
- new_dir->i_mtime = inode_set_ctime_to_ts(new_dir,
- inode_get_ctime(old_dir));
+ inode_set_mtime_to_ts(new_dir,
+ inode_set_ctime_to_ts(new_dir, inode_get_ctime(old_dir)));
/*
* This will also pick up the i_nlink change from the
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 25c8ec3c8c3a..3f80a56d0d60 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3751,8 +3751,8 @@ static int ocfs2_change_ctime(struct inode *inode,
}
inode_set_ctime_current(inode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_journal_dirty(handle, di_bh);
@@ -4075,10 +4075,10 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
*/
inode_set_ctime_current(t_inode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(t_inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(t_inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(t_inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(t_inode));
- t_inode->i_mtime = s_inode->i_mtime;
+ inode_set_mtime_to_ts(t_inode, inode_get_mtime(s_inode));
di->i_mtime = s_di->i_mtime;
di->i_mtime_nsec = s_di->i_mtime_nsec;
}
@@ -4456,7 +4456,7 @@ int ocfs2_reflink_update_dest(struct inode *dest,
if (newlen > i_size_read(dest))
i_size_write(dest, newlen);
spin_unlock(&OCFS2_I(dest)->ip_lock);
- dest->i_mtime = inode_set_ctime_current(dest);
+ inode_set_mtime_to_ts(dest, inode_set_ctime_current(dest));
ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
if (ret) {
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 6510ad783c91..3b81213ed7b8 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -87,14 +87,14 @@ static struct ocfs2_xattr_def_value_root def_xv = {
.xv.xr_list.l_count = cpu_to_le16(1),
};
-const struct xattr_handler *ocfs2_xattr_handlers[] = {
+const struct xattr_handler * const ocfs2_xattr_handlers[] = {
&ocfs2_xattr_user_handler,
&ocfs2_xattr_trusted_handler,
&ocfs2_xattr_security_handler,
NULL
};
-static const struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
+static const struct xattr_handler * const ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
[OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler,
[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default,
@@ -3422,8 +3422,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
}
inode_set_ctime_current(inode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
}
out:
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index 00308b57f64f..65e9aa743919 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -30,7 +30,7 @@ struct ocfs2_security_xattr_info {
extern const struct xattr_handler ocfs2_xattr_user_handler;
extern const struct xattr_handler ocfs2_xattr_trusted_handler;
extern const struct xattr_handler ocfs2_xattr_security_handler;
-extern const struct xattr_handler *ocfs2_xattr_handlers[];
+extern const struct xattr_handler * const ocfs2_xattr_handlers[];
ssize_t ocfs2_listxattr(struct dentry *, char *, size_t);
int ocfs2_xattr_get_nolock(struct inode *, struct buffer_head *, int,
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index 2f8c1882f45c..d6cd81163030 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -51,7 +51,7 @@ struct inode *omfs_new_inode(struct inode *dir, umode_t mode)
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
inode->i_mapping->a_ops = &omfs_aops;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_op = &omfs_dir_inops;
@@ -134,8 +134,8 @@ static int __omfs_write_inode(struct inode *inode, int wait)
oi->i_head.h_magic = OMFS_IMAGIC;
oi->i_size = cpu_to_be64(inode->i_size);
- ctime = inode_get_ctime(inode).tv_sec * 1000LL +
- ((inode_get_ctime(inode).tv_nsec + 999)/1000);
+ ctime = inode_get_ctime_sec(inode) * 1000LL +
+ ((inode_get_ctime_nsec(inode) + 999)/1000);
oi->i_ctime = cpu_to_be64(ctime);
omfs_update_checksums(oi);
@@ -230,11 +230,9 @@ struct inode *omfs_iget(struct super_block *sb, ino_t ino)
ctime = be64_to_cpu(oi->i_ctime);
nsecs = do_div(ctime, 1000) * 1000L;
- inode->i_atime.tv_sec = ctime;
- inode->i_mtime.tv_sec = ctime;
+ inode_set_atime(inode, ctime, nsecs);
+ inode_set_mtime(inode, ctime, nsecs);
inode_set_ctime(inode, ctime, nsecs);
- inode->i_atime.tv_nsec = nsecs;
- inode->i_mtime.tv_nsec = nsecs;
inode->i_mapping->a_ops = &omfs_aops;
diff --git a/fs/open.c b/fs/open.c
index 98f6601fbac6..02dc608d40d8 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -870,6 +870,30 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
return ksys_fchown(fd, user, group);
}
+static inline int file_get_write_access(struct file *f)
+{
+ int error;
+
+ error = get_write_access(f->f_inode);
+ if (unlikely(error))
+ return error;
+ error = mnt_get_write_access(f->f_path.mnt);
+ if (unlikely(error))
+ goto cleanup_inode;
+ if (unlikely(f->f_mode & FMODE_BACKING)) {
+ error = mnt_get_write_access(backing_file_user_path(f)->mnt);
+ if (unlikely(error))
+ goto cleanup_mnt;
+ }
+ return 0;
+
+cleanup_mnt:
+ mnt_put_write_access(f->f_path.mnt);
+cleanup_inode:
+ put_write_access(f->f_inode);
+ return error;
+}
+
static int do_dentry_open(struct file *f,
struct inode *inode,
int (*open)(struct inode *, struct file *))
@@ -892,14 +916,9 @@ static int do_dentry_open(struct file *f,
if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
i_readcount_inc(inode);
} else if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
- error = get_write_access(inode);
+ error = file_get_write_access(f);
if (unlikely(error))
goto cleanup_file;
- error = __mnt_want_write(f->f_path.mnt);
- if (unlikely(error)) {
- put_write_access(inode);
- goto cleanup_file;
- }
f->f_mode |= FMODE_WRITER;
}
@@ -1163,20 +1182,19 @@ EXPORT_SYMBOL_GPL(kernel_file_open);
/**
* backing_file_open - open a backing file for kernel internal use
- * @path: path of the file to open
+ * @user_path: path that the user requested to open
* @flags: open flags
* @real_path: path of the backing file
* @cred: credentials for open
*
* Open a backing file for a stackable filesystem (e.g., overlayfs).
- * @path may be on the stackable filesystem and backing inode on the
- * underlying filesystem. In this case, we want to be able to return
- * the @real_path of the backing inode. This is done by embedding the
- * returned file into a container structure that also stores the path of
- * the backing inode on the underlying filesystem, which can be
- * retrieved using backing_file_real_path().
+ * @user_path may be on the stackable filesystem and @real_path on the
+ * underlying filesystem. In this case, we want to be able to return the
+ * @user_path of the stackable filesystem. This is done by embedding the
+ * returned file into a container structure that also stores the stacked
+ * file's path, which can be retrieved using backing_file_user_path().
*/
-struct file *backing_file_open(const struct path *path, int flags,
+struct file *backing_file_open(const struct path *user_path, int flags,
const struct path *real_path,
const struct cred *cred)
{
@@ -1187,9 +1205,9 @@ struct file *backing_file_open(const struct path *path, int flags,
if (IS_ERR(f))
return f;
- f->f_path = *path;
- path_get(real_path);
- *backing_file_real_path(f) = *real_path;
+ path_get(user_path);
+ *backing_file_user_path(f) = *user_path;
+ f->f_path = *real_path;
error = do_dentry_open(f, d_inode(real_path->dentry), NULL);
if (error) {
fput(f);
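
file_get_write_access() stacks three grants: write access on the inode, write access on the mount that was opened, and, only for FMODE_BACKING files, write access on the mount of the user-visible path, unwinding in reverse order on failure. A generic sketch of that acquire-in-order / release-in-reverse idiom, with hypothetical grab_*/drop_* stubs standing in for the real access helpers:

/* Hypothetical stand-ins for get_write_access()/mnt_get_write_access(). */
static int  grab_inode(void)       { return 0; }
static int  grab_mnt(void)         { return 0; }
static int  grab_backing_mnt(void) { return 0; }
static void drop_inode(void)       { }
static void drop_mnt(void)         { }

static int acquire_write_access(int is_backing)
{
	int err = grab_inode();

	if (err)
		return err;
	err = grab_mnt();
	if (err)
		goto cleanup_inode;
	if (is_backing) {
		err = grab_backing_mnt();
		if (err)
			goto cleanup_mnt;
	}
	return 0;

cleanup_mnt:
	drop_mnt();
cleanup_inode:
	drop_inode();
	return err;
}

backing_file_open() relies on the same FMODE_BACKING marker: f->f_path now carries the real (lower) path, while the user-visible path is stored in the container and retrieved with backing_file_user_path().
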
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index b2457cb97fa0..c4b65a6d41cc 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -237,7 +237,7 @@ found:
if (IS_ERR(inode))
return ERR_CAST(inode);
if (inode->i_state & I_NEW) {
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
ent_oi = OP_I(inode);
ent_oi->type = ent_type;
ent_oi->u = ent_data;
@@ -387,7 +387,7 @@ static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
goto out_no_root;
}
- root_inode->i_mtime = root_inode->i_atime = inode_set_ctime_current(root_inode);
+ simple_inode_init_ts(root_inode);
root_inode->i_op = &openprom_inode_operations;
root_inode->i_fop = &openprom_operations;
root_inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
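
simple_inode_init_ts() replaces the open-coded "atime = mtime = ctime = now" pattern seen throughout these hunks. Roughly, and expressed only with accessors that already appear in this diff (a sketch of the behaviour, not the kernel's definition), it amounts to:

#include <linux/fs.h>

static struct timespec64 init_ts_sketch(struct inode *inode)
{
	struct timespec64 now = inode_set_ctime_current(inode);

	inode_set_atime_to_ts(inode, now);
	inode_set_mtime_to_ts(inode, now);
	return now;
}
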
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index b711654ca18a..926d9c0a428a 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -103,7 +103,7 @@ enum orangefs_vfs_op_states {
#define ORANGEFS_CACHE_CREATE_FLAGS 0
#endif
-extern const struct xattr_handler *orangefs_xattr_handlers[];
+extern const struct xattr_handler * const orangefs_xattr_handlers[];
extern struct posix_acl *orangefs_get_acl(struct inode *inode, int type, bool rcu);
extern int orangefs_set_acl(struct mnt_idmap *idmap,
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 0a9fcfdf552f..0fdceb00ca07 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -155,14 +155,14 @@ static inline void copy_attributes_from_inode(struct inode *inode,
if (orangefs_inode->attr_valid & ATTR_ATIME) {
attrs->mask |= ORANGEFS_ATTR_SYS_ATIME;
if (orangefs_inode->attr_valid & ATTR_ATIME_SET) {
- attrs->atime = (time64_t)inode->i_atime.tv_sec;
+ attrs->atime = (time64_t) inode_get_atime_sec(inode);
attrs->mask |= ORANGEFS_ATTR_SYS_ATIME_SET;
}
}
if (orangefs_inode->attr_valid & ATTR_MTIME) {
attrs->mask |= ORANGEFS_ATTR_SYS_MTIME;
if (orangefs_inode->attr_valid & ATTR_MTIME_SET) {
- attrs->mtime = (time64_t)inode->i_mtime.tv_sec;
+ attrs->mtime = (time64_t) inode_get_mtime_sec(inode);
attrs->mask |= ORANGEFS_ATTR_SYS_MTIME_SET;
}
}
@@ -357,15 +357,15 @@ again2:
downcall.resp.getattr.attributes.owner);
inode->i_gid = make_kgid(&init_user_ns, new_op->
downcall.resp.getattr.attributes.group);
- inode->i_atime.tv_sec = (time64_t)new_op->
- downcall.resp.getattr.attributes.atime;
- inode->i_mtime.tv_sec = (time64_t)new_op->
- downcall.resp.getattr.attributes.mtime;
+ inode_set_atime(inode,
+ (time64_t)new_op->downcall.resp.getattr.attributes.atime,
+ 0);
+ inode_set_mtime(inode,
+ (time64_t)new_op->downcall.resp.getattr.attributes.mtime,
+ 0);
inode_set_ctime(inode,
(time64_t)new_op->downcall.resp.getattr.attributes.ctime,
0);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
/* special case: mark the root inode as sticky */
inode->i_mode = type | (is_root_handle(inode) ? S_ISVTX : 0) |
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 68b62689a63e..74ef75586f38 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -554,7 +554,7 @@ static const struct xattr_handler orangefs_xattr_default_handler = {
.set = orangefs_xattr_set_default,
};
-const struct xattr_handler *orangefs_xattr_handlers[] = {
+const struct xattr_handler * const orangefs_xattr_handlers[] = {
&orangefs_xattr_default_handler,
NULL
};
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 8be4dc050d1e..ec3671ca140c 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -239,6 +239,7 @@ static void ovl_file_accessed(struct file *file)
{
struct inode *inode, *upperinode;
struct timespec64 ctime, uctime;
+ struct timespec64 mtime, umtime;
if (file->f_flags & O_NOATIME)
return;
@@ -251,9 +252,11 @@ static void ovl_file_accessed(struct file *file)
ctime = inode_get_ctime(inode);
uctime = inode_get_ctime(upperinode);
- if ((!timespec64_equal(&inode->i_mtime, &upperinode->i_mtime) ||
- !timespec64_equal(&ctime, &uctime))) {
- inode->i_mtime = upperinode->i_mtime;
+ mtime = inode_get_mtime(inode);
+ umtime = inode_get_mtime(upperinode);
+ if ((!timespec64_equal(&mtime, &umtime)) ||
+ !timespec64_equal(&ctime, &uctime)) {
+ inode_set_mtime_to_ts(inode, inode_get_mtime(upperinode));
inode_set_ctime_to_ts(inode, uctime);
}
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 83ef66644c21..b6e98a7d36ce 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -704,7 +704,8 @@ int ovl_update_time(struct inode *inode, int flags)
if (upperpath.dentry) {
touch_atime(&upperpath);
- inode->i_atime = d_inode(upperpath.dentry)->i_atime;
+ inode_set_atime_to_ts(inode,
+ inode_get_atime(d_inode(upperpath.dentry)));
}
}
return 0;
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index 95b751507ac8..f6ff23fd101c 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -157,6 +157,34 @@ const struct fs_parameter_spec ovl_parameter_spec[] = {
{}
};
+static char *ovl_next_opt(char **s)
+{
+ char *sbegin = *s;
+ char *p;
+
+ if (sbegin == NULL)
+ return NULL;
+
+ for (p = sbegin; *p; p++) {
+ if (*p == '\\') {
+ p++;
+ if (!*p)
+ break;
+ } else if (*p == ',') {
+ *p = '\0';
+ *s = p + 1;
+ return sbegin;
+ }
+ }
+ *s = NULL;
+ return sbegin;
+}
+
+static int ovl_parse_monolithic(struct fs_context *fc, void *data)
+{
+ return vfs_parse_monolithic_sep(fc, data, ovl_next_opt);
+}
+
static ssize_t ovl_parse_param_split_lowerdirs(char *str)
{
ssize_t nr_layers = 1, nr_colons = 0;
@@ -164,7 +192,8 @@ static ssize_t ovl_parse_param_split_lowerdirs(char *str)
for (s = d = str;; s++, d++) {
if (*s == '\\') {
- s++;
+ /* keep esc chars in split lowerdir */
+ *d++ = *s++;
} else if (*s == ':') {
bool next_colon = (*(s + 1) == ':');
@@ -239,7 +268,7 @@ static void ovl_unescape(char *s)
}
}
-static int ovl_mount_dir(const char *name, struct path *path)
+static int ovl_mount_dir(const char *name, struct path *path, bool upper)
{
int err = -ENOMEM;
char *tmp = kstrdup(name, GFP_KERNEL);
@@ -248,7 +277,7 @@ static int ovl_mount_dir(const char *name, struct path *path)
ovl_unescape(tmp);
err = ovl_mount_dir_noesc(tmp, path);
- if (!err && path->dentry->d_flags & DCACHE_OP_REAL) {
+ if (!err && upper && path->dentry->d_flags & DCACHE_OP_REAL) {
pr_err("filesystem on '%s' not supported as upperdir\n",
tmp);
path_put_init(path);
@@ -269,7 +298,7 @@ static int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
struct path path;
char *dup;
- err = ovl_mount_dir(name, &path);
+ err = ovl_mount_dir(name, &path, true);
if (err)
return err;
@@ -321,12 +350,6 @@ static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
* Set "/lower1", "/lower2", and "/lower3" as lower layers and
* "/data1" and "/data2" as data lower layers. Any existing lower
* layers are replaced.
- * (2) lowerdir=:/lower4
- * Append "/lower4" to current stack of lower layers. This requires
- * that there already is at least one lower layer configured.
- * (3) lowerdir=::/lower5
- * Append data "/lower5" as data lower layer. This requires that
- * there's at least one regular lower layer present.
*/
static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
{
@@ -348,49 +371,9 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
return 0;
}
- if (strncmp(name, "::", 2) == 0) {
- /*
- * This is a data layer.
- * There must be at least one regular lower layer
- * specified.
- */
- if (ctx->nr == 0) {
- pr_err("data lower layers without regular lower layers not allowed");
- return -EINVAL;
- }
-
- /* Skip the leading "::". */
- name += 2;
- data_layer = true;
- /*
- * A data layer is automatically an append as there
- * must've been at least one regular lower layer.
- */
- append = true;
- } else if (*name == ':') {
- /*
- * This is a regular lower layer.
- * If users want to append a layer enforce that they
- * have already specified a first layer before. It's
- * better to be strict.
- */
- if (ctx->nr == 0) {
- pr_err("cannot append layer if no previous layer has been specified");
- return -EINVAL;
- }
-
- /*
- * Once a sequence of data layers has started regular
- * lower layers are forbidden.
- */
- if (ctx->nr_data > 0) {
- pr_err("regular lower layers cannot follow data lower layers");
- return -EINVAL;
- }
-
- /* Skip the leading ":". */
- name++;
- append = true;
+ if (*name == ':') {
+ pr_err("cannot append lower layer");
+ return -EINVAL;
}
dup = kstrdup(name, GFP_KERNEL);
@@ -472,7 +455,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
l = &ctx->lower[nr];
memset(l, 0, sizeof(*l));
- err = ovl_mount_dir_noesc(dup_iter, &l->path);
+ err = ovl_mount_dir(dup_iter, &l->path, false);
if (err)
goto out_put;
@@ -682,6 +665,7 @@ static int ovl_reconfigure(struct fs_context *fc)
}
static const struct fs_context_operations ovl_context_ops = {
+ .parse_monolithic = ovl_parse_monolithic,
.parse_param = ovl_parse_param,
.get_tree = ovl_get_tree,
.reconfigure = ovl_reconfigure,
@@ -950,16 +934,23 @@ int ovl_show_options(struct seq_file *m, struct dentry *dentry)
struct super_block *sb = dentry->d_sb;
struct ovl_fs *ofs = OVL_FS(sb);
size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
- char **lowerdatadirs = &ofs->config.lowerdirs[nr_merged_lower];
-
- /* lowerdirs[] starts from offset 1 */
- seq_printf(m, ",lowerdir=%s", ofs->config.lowerdirs[1]);
- /* dump regular lower layers */
- for (nr = 2; nr < nr_merged_lower; nr++)
- seq_printf(m, ":%s", ofs->config.lowerdirs[nr]);
- /* dump data lower layers */
- for (nr = 0; nr < ofs->numdatalayer; nr++)
- seq_printf(m, "::%s", lowerdatadirs[nr]);
+
+ /*
+ * lowerdirs[] starts from offset 1, then
+ * >= 0 regular lower layers prefixed with : and
+ * >= 0 data-only lower layers prefixed with ::
+ *
+	 * we need to escape comma and space like seq_show_option() does and
+ * we also need to escape the colon separator from lowerdir paths.
+ */
+ seq_puts(m, ",lowerdir=");
+ for (nr = 1; nr < ofs->numlayer; nr++) {
+ if (nr > 1)
+ seq_putc(m, ':');
+ if (nr >= nr_merged_lower)
+ seq_putc(m, ':');
+ seq_escape(m, ofs->config.lowerdirs[nr], ":, \t\n\\");
+ }
if (ofs->config.upperdir) {
seq_show_option(m, "upperdir", ofs->config.upperdir);
seq_show_option(m, "workdir", ofs->config.workdir);
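
With backslash escapes now allowed inside lowerdir paths, the mount-option string can no longer be split blindly on ','; ovl_next_opt() skips one character after every backslash, and ovl_show_options() escapes the same characters on the way back out with seq_escape(). A standalone C sketch of the splitting rule, mirroring ovl_next_opt() on a writable copy of the option string:

#include <stdio.h>

/* Return the next option, honouring "\," escapes; advances *s past it. */
static char *next_opt(char **s)
{
	char *begin = *s, *p;

	if (!begin)
		return NULL;

	for (p = begin; *p; p++) {
		if (*p == '\\' && p[1])
			p++;			/* skip the escaped character */
		else if (*p == ',') {
			*p = '\0';
			*s = p + 1;
			return begin;
		}
	}
	*s = NULL;
	return begin;
}

int main(void)
{
	char opts[] = "lowerdir=/a\\,b:/c,upperdir=/up,workdir=/work";
	char *walk = opts, *opt;

	while ((opt = next_opt(&walk)))
		puts(opt);	/* lowerdir=/a\,b:/c  upperdir=/up  workdir=/work */
	return 0;
}
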
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 3fa2416264a4..6cd949c59fed 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -34,14 +34,22 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
struct dentry *real = NULL, *lower;
int err;
- /* It's an overlay file */
+ /*
+ * vfs is only expected to call d_real() with NULL from d_real_inode()
+ * and with overlay inode from file_dentry() on an overlay file.
+ *
+ * TODO: remove @inode argument from d_real() API, remove code in this
+ * function that deals with non-NULL @inode and remove d_real() call
+ * from file_dentry().
+ */
if (inode && d_inode(dentry) == inode)
return dentry;
+ else if (inode)
+ goto bug;
if (!d_is_reg(dentry)) {
- if (!inode || inode == d_inode(dentry))
- return dentry;
- goto bug;
+ /* d_real_inode() is only relevant for regular files */
+ return dentry;
}
real = ovl_dentry_upper(dentry);
@@ -487,13 +495,13 @@ static const struct xattr_handler ovl_other_xattr_handler = {
.set = ovl_other_xattr_set,
};
-static const struct xattr_handler *ovl_trusted_xattr_handlers[] = {
+static const struct xattr_handler * const ovl_trusted_xattr_handlers[] = {
&ovl_own_trusted_xattr_handler,
&ovl_other_xattr_handler,
NULL
};
-static const struct xattr_handler *ovl_user_xattr_handlers[] = {
+static const struct xattr_handler * const ovl_user_xattr_handlers[] = {
&ovl_own_user_xattr_handler,
&ovl_other_xattr_handler,
NULL
@@ -1488,8 +1496,16 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_xattr = ofs->config.userxattr ? ovl_user_xattr_handlers :
ovl_trusted_xattr_handlers;
sb->s_fs_info = ofs;
+#ifdef CONFIG_FS_POSIX_ACL
sb->s_flags |= SB_POSIXACL;
+#endif
sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
+ /*
+ * Ensure that umask handling is done by the filesystems used
+	 * for the upper layer instead of overlayfs as that would
+ * lead to unexpected results.
+ */
+ sb->s_iflags |= SB_I_NOUMASK;
err = -ENOMEM;
root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 89e0d60d35b6..868afd8834c3 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -1409,8 +1409,8 @@ void ovl_copyattr(struct inode *inode)
inode->i_uid = vfsuid_into_kuid(vfsuid);
inode->i_gid = vfsgid_into_kgid(vfsgid);
inode->i_mode = realinode->i_mode;
- inode->i_atime = realinode->i_atime;
- inode->i_mtime = realinode->i_mtime;
+ inode_set_atime_to_ts(inode, inode_get_atime(realinode));
+ inode_set_mtime_to_ts(inode, inode_get_mtime(realinode));
inode_set_ctime_to_ts(inode, inode_get_ctime(realinode));
i_size_write(inode, i_size_read(realinode));
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 139190165a1c..8916c455a469 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -227,6 +227,36 @@ static inline bool pipe_readable(const struct pipe_inode_info *pipe)
return !pipe_empty(head, tail) || !writers;
}
+static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf,
+ unsigned int tail)
+{
+ pipe_buf_release(pipe, buf);
+
+ /*
+ * If the pipe has a watch_queue, we need additional protection
+ * by the spinlock because notifications get posted with only
+ * this spinlock, no mutex
+ */
+ if (pipe_has_watch_queue(pipe)) {
+ spin_lock_irq(&pipe->rd_wait.lock);
+#ifdef CONFIG_WATCH_QUEUE
+ if (buf->flags & PIPE_BUF_FLAG_LOSS)
+ pipe->note_loss = true;
+#endif
+ pipe->tail = ++tail;
+ spin_unlock_irq(&pipe->rd_wait.lock);
+ return tail;
+ }
+
+ /*
+ * Without a watch_queue, we can simply increment the tail
+ * without the spinlock - the mutex is enough.
+ */
+ pipe->tail = ++tail;
+ return tail;
+}
+
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
@@ -320,17 +350,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
buf->len = 0;
}
- if (!buf->len) {
- pipe_buf_release(pipe, buf);
- spin_lock_irq(&pipe->rd_wait.lock);
-#ifdef CONFIG_WATCH_QUEUE
- if (buf->flags & PIPE_BUF_FLAG_LOSS)
- pipe->note_loss = true;
-#endif
- tail++;
- pipe->tail = tail;
- spin_unlock_irq(&pipe->rd_wait.lock);
- }
+ if (!buf->len)
+ tail = pipe_update_tail(pipe, buf, tail);
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
@@ -437,12 +458,10 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
-#ifdef CONFIG_WATCH_QUEUE
- if (pipe->watch_queue) {
+ if (pipe_has_watch_queue(pipe)) {
ret = -EXDEV;
goto out;
}
-#endif
/*
* If it wasn't empty we try to merge new data into
@@ -507,16 +526,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
* it, either the reader will consume it or it'll still
* be there for the next write.
*/
- spin_lock_irq(&pipe->rd_wait.lock);
-
- head = pipe->head;
- if (pipe_full(head, pipe->tail, pipe->max_usage)) {
- spin_unlock_irq(&pipe->rd_wait.lock);
- continue;
- }
-
pipe->head = head + 1;
- spin_unlock_irq(&pipe->rd_wait.lock);
/* Insert it into the buffer array */
buf = &pipe->bufs[head & mask];
@@ -898,7 +908,7 @@ static struct inode * get_pipe_inode(void)
inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
return inode;
@@ -1324,10 +1334,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
unsigned int nr_slots, size;
long ret = 0;
-#ifdef CONFIG_WATCH_QUEUE
- if (pipe->watch_queue)
+ if (pipe_has_watch_queue(pipe))
return -EBUSY;
-#endif
size = round_pipe_size(arg);
nr_slots = size >> PAGE_SHIFT;
@@ -1379,10 +1387,8 @@ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
if (file->f_op != &pipefifo_fops || !pipe)
return NULL;
-#ifdef CONFIG_WATCH_QUEUE
- if (for_splice && pipe->watch_queue)
+ if (for_splice && pipe_has_watch_queue(pipe))
return NULL;
-#endif
return pipe;
}
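
pipe_has_watch_queue() lets pipe.c drop most of its CONFIG_WATCH_QUEUE ifdefs: with the option disabled the predicate can collapse to false, so branches like the -EXDEV write check and the spinlocked tail update in pipe_update_tail() fall away at compile time. A hedged sketch of that helper shape (the real helper lives in the pipe headers; only the field name matches the code above):

#include <linux/pipe_fs_i.h>

static inline bool has_watch_queue_sketch(const struct pipe_inode_info *pipe)
{
#ifdef CONFIG_WATCH_QUEUE
	return pipe->watch_queue != NULL;
#else
	return false;
#endif
}
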
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ffd54617c354..83396ab14998 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1902,7 +1902,7 @@ struct inode *proc_pid_make_inode(struct super_block *sb,
ei = PROC_I(inode);
inode->i_mode = mode;
inode->i_ino = get_next_ino();
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &proc_def_inode_operations;
/*
@@ -2218,7 +2218,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
rc = -ENOENT;
vma = find_exact_vma(mm, vm_start, vm_end);
if (vma && vma->vm_file) {
- *path = vma->vm_file->f_path;
+ *path = *file_user_path(vma->vm_file);
path_get(path);
rc = 0;
}
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 6276b3938842..6e72e5ad42bc 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -113,10 +113,12 @@ static bool tid_fd_mode(struct task_struct *task, unsigned fd, fmode_t *mode)
struct file *file;
rcu_read_lock();
- file = task_lookup_fd_rcu(task, fd);
- if (file)
- *mode = file->f_mode;
+ file = task_lookup_fdget_rcu(task, fd);
rcu_read_unlock();
+ if (file) {
+ *mode = file->f_mode;
+ fput(file);
+ }
return !!file;
}
@@ -259,12 +261,13 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
char name[10 + 1];
unsigned int len;
- f = task_lookup_next_fd_rcu(p, &fd);
+ f = task_lookup_next_fdget_rcu(p, &fd);
ctx->pos = fd + 2LL;
if (!f)
break;
data.mode = f->f_mode;
rcu_read_unlock();
+ fput(f);
data.fd = fd;
len = snprintf(name, sizeof(name), "%u", fd);
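
Switching to task_lookup_fdget_rcu() means the returned struct file carries its own reference, so f_mode can be read after rcu_read_unlock() and the reference dropped with fput() instead of relying on the RCU critical section. A kernel-style sketch of the resulting pattern, using only calls that appear in the hunks above:

#include <linux/fdtable.h>
#include <linux/file.h>

static bool fd_mode_sketch(struct task_struct *task, unsigned int fd,
			   fmode_t *mode)
{
	struct file *file;

	rcu_read_lock();
	file = task_lookup_fdget_rcu(task, fd);	/* counted reference or NULL */
	rcu_read_unlock();

	if (!file)
		return false;
	*mode = file->f_mode;
	fput(file);
	return true;
}
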
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 532dc9d240f7..592ed2516f47 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -660,7 +660,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
inode->i_private = de->data;
inode->i_ino = de->low_ino;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
PROC_I(inode)->pde = de;
if (is_empty_pde(de)) {
make_empty_dir_inode(inode);
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 4d3493579458..c6e7ebc63756 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -58,7 +58,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
if (file) {
seq_pad(m, ' ');
- seq_file_path(m, file, "");
+ seq_path(m, file_user_path(file), "");
}
seq_putc(m, '\n');
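
For display purposes /proc now resolves paths through file_user_path(), so a backing file (FMODE_BACKING, e.g. one opened by overlayfs on a lower filesystem) is reported with the path the user actually opened rather than the internal one. A short kernel-style sketch of the substitution, using only helpers seen in these hunks:

#include <linux/fs.h>
#include <linux/seq_file.h>

static void show_mapped_file(struct seq_file *m, struct file *file)
{
	seq_pad(m, ' ');
	seq_path(m, file_user_path(file), "");	/* was seq_file_path(m, file, "") */
}
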
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index c88854df0b62..bc9a2db89cfa 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -465,7 +465,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
head->count++;
spin_unlock(&sysctl_lock);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mode = table->mode;
if (!S_ISDIR(table->mode)) {
inode->i_mode |= S_IFREG;
diff --git a/fs/proc/self.c b/fs/proc/self.c
index ecc4da8d265e..b46fbfd22681 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -46,7 +46,7 @@ int proc_setup_self(struct super_block *s)
struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = self_inum;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 3dd5be96691b..1593940ca01e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -296,7 +296,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
if (anon_name)
seq_printf(m, "[anon_shmem:%s]", anon_name->name);
else
- seq_file_path(m, file, "\n");
+ seq_path(m, file_user_path(file), "\n");
goto done;
}
@@ -1967,7 +1967,7 @@ static int show_numa_map(struct seq_file *m, void *v)
if (file) {
seq_puts(m, " file=");
- seq_file_path(m, file, "\n\t= ");
+ seq_path(m, file_user_path(file), "\n\t= ");
} else if (vma_is_initial_heap(vma)) {
seq_puts(m, " heap");
} else if (vma_is_initial_stack(vma)) {
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 7cebd397cc26..bce674533000 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -157,7 +157,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
if (file) {
seq_pad(m, ' ');
- seq_file_path(m, file, "");
+ seq_path(m, file_user_path(file), "");
} else if (mm && vma_is_initial_stack(vma)) {
seq_pad(m, ' ');
seq_puts(m, "[stack]");
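Several /proc show paths above (base.c, nommu.c, task_mmu.c, task_nommu.c) now print the path returned by file_user_path() instead of calling seq_file_path() on file->f_path. The assumption behind this sketch is that file_user_path() yields the path the opener actually used (for example through a stacking filesystem), while f_path may reference an underlying backing file; the caller below is illustrative only and mirrors the seq_path() conversion above:

/* Hypothetical caller; shows the replacement pattern, nothing more. */
static void example_show_vma_path(struct seq_file *m, struct file *file)
{
        if (!file)
                return;
        seq_pad(m, ' ');
        seq_path(m, file_user_path(file), "");
}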
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index 63ac1f93289f..0e5050d6ab64 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -46,7 +46,7 @@ int proc_setup_thread_self(struct super_block *s)
struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = thread_self_inum;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 585360706b33..d41c20d1b5e8 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -223,7 +223,7 @@ static struct inode *pstore_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
@@ -390,7 +390,8 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
inode->i_private = private;
if (record->time.tv_sec)
- inode->i_mtime = inode_set_ctime_to_ts(inode, record->time);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, record->time));
d_add(dentry, inode);
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index a7171f5532a1..6eb9bb369b57 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -301,10 +301,8 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
i_gid_write(inode, (gid_t)le16_to_cpu(raw_inode->di_gid));
set_nlink(inode, le16_to_cpu(raw_inode->di_nlink));
inode->i_size = le32_to_cpu(raw_inode->di_size);
- inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->di_mtime);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_sec = le32_to_cpu(raw_inode->di_atime);
- inode->i_atime.tv_nsec = 0;
+ inode_set_mtime(inode, le32_to_cpu(raw_inode->di_mtime), 0);
+ inode_set_atime(inode, le32_to_cpu(raw_inode->di_atime), 0);
inode_set_ctime(inode, le32_to_cpu(raw_inode->di_ctime), 0);
inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 21f90d519f1a..a286c545717f 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -558,10 +558,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid));
i_gid_write(inode, (gid_t)fs32_to_cpu(sbi, raw_inode->di_gid));
inode->i_size = fs64_to_cpu(sbi, raw_inode->di_size);
- inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_mtime);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_atime);
- inode->i_atime.tv_nsec = 0;
+ inode_set_mtime(inode, fs32_to_cpu(sbi, raw_inode->di_mtime), 0);
+ inode_set_atime(inode, fs32_to_cpu(sbi, raw_inode->di_atime), 0);
inode_set_ctime(inode, fs32_to_cpu(sbi, raw_inode->di_ctime), 0);
/* calc blocks based on 512 byte blocksize */
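The qnx4 and qnx6 hunks above show the general shape of the timestamp conversion running through this series: direct stores to inode->i_mtime/i_atime become inode_set_mtime()/inode_set_atime() calls taking seconds plus nanoseconds, and the old "dir->i_mtime = inode_set_ctime_current(dir)" idiom becomes inode_set_mtime_to_ts() wrapped around the ctime setter, which returns the timespec64 it stored. A short sketch of both forms, using only helpers that appear in these hunks (the example_ functions are made up):

/* Fill timestamps from 32-bit on-disk seconds, nanoseconds zeroed. */
static void example_fill_times(struct inode *inode, u32 mtime, u32 atime,
                               u32 ctime)
{
        inode_set_mtime(inode, mtime, 0);
        inode_set_atime(inode, atime, 0);
        inode_set_ctime(inode, ctime, 0);
}

/* Keep mtime and ctime identical, as the old assignment chain did. */
static void example_touch_dir(struct inode *dir)
{
        inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}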
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 18e8387cab41..4ac05a9e25bc 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -65,7 +65,7 @@ struct inode *ramfs_get_inode(struct super_block *sb,
inode->i_mapping->a_ops = &ram_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
mapping_set_unevictable(inode->i_mapping);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
@@ -105,7 +105,7 @@ ramfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
error = 0;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
return error;
}
@@ -138,7 +138,8 @@ static int ramfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
if (!error) {
d_instantiate(dentry, inode);
dget(dentry);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_current(dir));
} else
iput(inode);
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 86e55d4bb10d..c8572346556f 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1257,11 +1257,9 @@ static void init_inode(struct inode *inode, struct treepath *path)
i_uid_write(inode, sd_v1_uid(sd));
i_gid_write(inode, sd_v1_gid(sd));
inode->i_size = sd_v1_size(sd);
- inode->i_atime.tv_sec = sd_v1_atime(sd);
- inode->i_mtime.tv_sec = sd_v1_mtime(sd);
+ inode_set_atime(inode, sd_v1_atime(sd), 0);
+ inode_set_mtime(inode, sd_v1_mtime(sd), 0);
inode_set_ctime(inode, sd_v1_ctime(sd), 0);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
inode->i_blocks = sd_v1_blocks(sd);
inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
@@ -1311,11 +1309,9 @@ static void init_inode(struct inode *inode, struct treepath *path)
i_uid_write(inode, sd_v2_uid(sd));
inode->i_size = sd_v2_size(sd);
i_gid_write(inode, sd_v2_gid(sd));
- inode->i_mtime.tv_sec = sd_v2_mtime(sd);
- inode->i_atime.tv_sec = sd_v2_atime(sd);
+ inode_set_mtime(inode, sd_v2_mtime(sd), 0);
+ inode_set_atime(inode, sd_v2_atime(sd), 0);
inode_set_ctime(inode, sd_v2_ctime(sd), 0);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
inode->i_blocks = sd_v2_blocks(sd);
rdev = sd_v2_rdev(sd);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
@@ -1370,9 +1366,9 @@ static void inode2sd(void *sd, struct inode *inode, loff_t size)
set_sd_v2_uid(sd_v2, i_uid_read(inode));
set_sd_v2_size(sd_v2, size);
set_sd_v2_gid(sd_v2, i_gid_read(inode));
- set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
- set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
- set_sd_v2_ctime(sd_v2, inode_get_ctime(inode).tv_sec);
+ set_sd_v2_mtime(sd_v2, inode_get_mtime_sec(inode));
+ set_sd_v2_atime(sd_v2, inode_get_atime_sec(inode));
+ set_sd_v2_ctime(sd_v2, inode_get_ctime_sec(inode));
set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
@@ -1391,9 +1387,9 @@ static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
set_sd_v1_gid(sd_v1, i_gid_read(inode));
set_sd_v1_nlink(sd_v1, inode->i_nlink);
set_sd_v1_size(sd_v1, size);
- set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
- set_sd_v1_ctime(sd_v1, inode_get_ctime(inode).tv_sec);
- set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
+ set_sd_v1_atime(sd_v1, inode_get_atime_sec(inode));
+ set_sd_v1_ctime(sd_v1, inode_get_ctime_sec(inode));
+ set_sd_v1_mtime(sd_v1, inode_get_mtime_sec(inode));
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
@@ -1984,7 +1980,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
/* uid and gid must already be set by the caller for quota init */
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_size = i_size;
inode->i_blocks = 0;
inode->i_bytes = 0;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 015bfe4e4524..171c912af50f 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -90,8 +90,7 @@ static int flush_commit_list(struct super_block *s,
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
struct super_block *sb);
-static void release_journal_dev(struct super_block *super,
- struct reiserfs_journal *journal);
+static void release_journal_dev(struct reiserfs_journal *journal);
static void dirty_one_transaction(struct super_block *s,
struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
@@ -1893,7 +1892,7 @@ static void free_journal_ram(struct super_block *sb)
* j_header_bh is on the journal dev, make sure
* not to release the journal dev until we brelse j_header_bh
*/
- release_journal_dev(sb, journal);
+ release_journal_dev(journal);
vfree(journal);
}
@@ -2387,7 +2386,7 @@ static int journal_read(struct super_block *sb)
cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
reiserfs_info(sb, "checking transaction log (%pg)\n",
- journal->j_dev_bd);
+ journal->j_bdev_handle->bdev);
start = ktime_get_seconds();
/*
@@ -2448,7 +2447,7 @@ static int journal_read(struct super_block *sb)
* device and journal device to be the same
*/
d_bh =
- reiserfs_breada(journal->j_dev_bd, cur_dblock,
+ reiserfs_breada(journal->j_bdev_handle->bdev, cur_dblock,
sb->s_blocksize,
SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
SB_ONDISK_JOURNAL_SIZE(sb));
@@ -2587,17 +2586,11 @@ static void journal_list_init(struct super_block *sb)
SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
}
-static void release_journal_dev(struct super_block *super,
- struct reiserfs_journal *journal)
+static void release_journal_dev(struct reiserfs_journal *journal)
{
- if (journal->j_dev_bd != NULL) {
- void *holder = NULL;
-
- if (journal->j_dev_bd->bd_dev != super->s_dev)
- holder = journal;
-
- blkdev_put(journal->j_dev_bd, holder);
- journal->j_dev_bd = NULL;
+ if (journal->j_bdev_handle) {
+ bdev_release(journal->j_bdev_handle);
+ journal->j_bdev_handle = NULL;
}
}
@@ -2612,7 +2605,7 @@ static int journal_init_dev(struct super_block *super,
result = 0;
- journal->j_dev_bd = NULL;
+ journal->j_bdev_handle = NULL;
jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
@@ -2623,36 +2616,37 @@ static int journal_init_dev(struct super_block *super,
if ((!jdev_name || !jdev_name[0])) {
if (jdev == super->s_dev)
holder = NULL;
- journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, holder,
- NULL);
- if (IS_ERR(journal->j_dev_bd)) {
- result = PTR_ERR(journal->j_dev_bd);
- journal->j_dev_bd = NULL;
+ journal->j_bdev_handle = bdev_open_by_dev(jdev, blkdev_mode,
+ holder, NULL);
+ if (IS_ERR(journal->j_bdev_handle)) {
+ result = PTR_ERR(journal->j_bdev_handle);
+ journal->j_bdev_handle = NULL;
reiserfs_warning(super, "sh-458",
"cannot init journal device unknown-block(%u,%u): %i",
MAJOR(jdev), MINOR(jdev), result);
return result;
} else if (jdev != super->s_dev)
- set_blocksize(journal->j_dev_bd, super->s_blocksize);
+ set_blocksize(journal->j_bdev_handle->bdev,
+ super->s_blocksize);
return 0;
}
- journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, holder,
- NULL);
- if (IS_ERR(journal->j_dev_bd)) {
- result = PTR_ERR(journal->j_dev_bd);
- journal->j_dev_bd = NULL;
+ journal->j_bdev_handle = bdev_open_by_path(jdev_name, blkdev_mode,
+ holder, NULL);
+ if (IS_ERR(journal->j_bdev_handle)) {
+ result = PTR_ERR(journal->j_bdev_handle);
+ journal->j_bdev_handle = NULL;
reiserfs_warning(super, "sh-457",
"journal_init_dev: Cannot open '%s': %i",
jdev_name, result);
return result;
}
- set_blocksize(journal->j_dev_bd, super->s_blocksize);
+ set_blocksize(journal->j_bdev_handle->bdev, super->s_blocksize);
reiserfs_info(super,
"journal_init_dev: journal device: %pg\n",
- journal->j_dev_bd);
+ journal->j_bdev_handle->bdev);
return 0;
}
@@ -2810,7 +2804,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
"journal header magic %x (device %pg) does "
"not match to magic found in super block %x",
jh->jh_journal.jp_journal_magic,
- journal->j_dev_bd,
+ journal->j_bdev_handle->bdev,
sb_jp_journal_magic(rs));
brelse(bhjh);
goto free_and_return;
@@ -2834,7 +2828,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
reiserfs_info(sb, "journal params: device %pg, size %u, "
"journal first block %u, max trans len %u, max batch %u, "
"max commit age %u, max trans age %u\n",
- journal->j_dev_bd,
+ journal->j_bdev_handle->bdev,
SB_ONDISK_JOURNAL_SIZE(sb),
SB_ONDISK_JOURNAL_1st_BLOCK(sb),
journal->j_trans_max,
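The reiserfs journal conversion above replaces the raw struct block_device pointer (j_dev_bd) with a struct bdev_handle obtained from bdev_open_by_dev()/bdev_open_by_path() and released with bdev_release(); the device itself is reached through handle->bdev. A condensed sketch of that open/use/release lifecycle, mirroring the calls in the hunks above (the example_ helpers are made up):

static struct bdev_handle *example_open_journal(dev_t jdev, blk_mode_t mode,
                                                void *holder, int blocksize)
{
        struct bdev_handle *handle;

        handle = bdev_open_by_dev(jdev, mode, holder, NULL);
        if (IS_ERR(handle))
                return handle;                  /* caller checks IS_ERR() */

        set_blocksize(handle->bdev, blocksize); /* device via handle->bdev */
        return handle;
}

static void example_close_journal(struct bdev_handle *handle)
{
        if (handle)
                bdev_release(handle);           /* replaces blkdev_put() */
}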
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 9c5704be2435..994d6e6995ab 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -572,7 +572,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
}
dir->i_size += paste_size;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
if (!S_ISDIR(inode->i_mode) && visible)
/* reiserfs_mkdir or reiserfs_rename will do that by itself */
reiserfs_update_sd(th, dir);
@@ -966,8 +966,8 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_nlink);
clear_nlink(inode);
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
reiserfs_update_sd(&th, inode);
DEC_DIR_INODE_NLINK(dir)
@@ -1075,7 +1075,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
reiserfs_update_sd(&th, inode);
dir->i_size -= (de.de_entrylen + DEH_SIZE);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
reiserfs_update_sd(&th, dir);
if (!savelink)
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 3dba8acf4e83..83cb9402e0f9 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -354,7 +354,7 @@ static int show_journal(struct seq_file *m, void *unused)
"prepare: \t%12lu\n"
"prepare_retry: \t%12lu\n",
DJP(jp_journal_1st_block),
- SB_JOURNAL(sb)->j_dev_bd,
+ SB_JOURNAL(sb)->j_bdev_handle->bdev,
DJP(jp_journal_dev),
DJP(jp_journal_size),
DJP(jp_journal_trans_max),
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 7d12b8c5b2fa..725667880e62 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -299,7 +299,7 @@ struct reiserfs_journal {
/* oldest journal block. start here for traverse */
struct reiserfs_journal_cnode *j_first;
- struct block_device *j_dev_bd;
+ struct bdev_handle *j_bdev_handle;
/* first block on s_dev of reserved area journal */
int j_1st_reserved_block;
@@ -1165,7 +1165,7 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
return bmap_nr > ((1LL << 16) - 1);
}
-extern const struct xattr_handler *reiserfs_xattr_handlers[];
+extern const struct xattr_handler * const reiserfs_xattr_handlers[];
/*
* this says about version of key of all items (but stat data) the
@@ -2809,9 +2809,12 @@ struct reiserfs_journal_header {
#define journal_hash(t,sb,block) ((t)[_jhashfn((sb),(block)) & JBH_HASH_MASK])
/* We need these to make journal.c code more readable */
-#define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
-#define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
-#define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
+#define journal_find_get_block(s, block) __find_get_block(\
+ SB_JOURNAL(s)->j_bdev_handle->bdev, block, s->s_blocksize)
+#define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_bdev_handle->bdev,\
+ block, s->s_blocksize)
+#define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_bdev_handle->bdev,\
+ block, s->s_blocksize)
enum reiserfs_bh_state_bits {
BH_JDirty = BH_PrivateStart, /* buffer is in current transaction */
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 3676e02a0232..2138ee7d271d 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -2003,7 +2003,8 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
pathrelse(&s_search_path);
if (update_timestamps) {
- inode->i_mtime = current_time(inode);
+ inode_set_mtime_to_ts(inode,
+ current_time(inode));
inode_set_ctime_current(inode);
}
reiserfs_update_sd(th, inode);
@@ -2028,7 +2029,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
update_and_out:
if (update_timestamps) {
/* this is truncate, not file closing */
- inode->i_mtime = current_time(inode);
+ inode_set_mtime_to_ts(inode, current_time(inode));
inode_set_ctime_current(inode);
}
reiserfs_update_sd(th, inode);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 7eaf36b3de12..67b5510beded 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2587,7 +2587,7 @@ out:
return err;
if (inode->i_size < off + len - towrite)
i_size_write(inode, off + len - towrite);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return len - towrite;
}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 6000964c2b80..998035a6388e 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -780,7 +780,7 @@ static inline bool reiserfs_posix_acl_list(const char *name,
}
/* This is the implementation for the xattr plugin infrastructure */
-static inline bool reiserfs_xattr_list(const struct xattr_handler **handlers,
+static inline bool reiserfs_xattr_list(const struct xattr_handler * const *handlers,
const char *name, struct dentry *dentry)
{
if (handlers) {
@@ -911,7 +911,7 @@ static int create_privroot(struct dentry *dentry) { return 0; }
#endif
/* Actual operations that are exported to VFS-land */
-const struct xattr_handler *reiserfs_xattr_handlers[] = {
+const struct xattr_handler * const reiserfs_xattr_handlers[] = {
#ifdef CONFIG_REISERFS_FS_XATTR
&reiserfs_xattr_user_handler,
&reiserfs_xattr_trusted_handler,
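The xattr handler tables in this series (reiserfs here, and cifs, squashfs and ubifs below) gain an extra const so that the array slots themselves, not just the handlers they point to, are read-only. A minimal sketch of the resulting declaration, with hypothetical handler names:

/* Both the handlers and the table of pointers are const, so the whole
 * table can be placed in read-only data. */
static const struct xattr_handler example_user_handler = {
        .prefix = "user.",
        /* .get and .set callbacks omitted in this sketch */
};

const struct xattr_handler * const example_xattr_handlers[] = {
        &example_user_handler,
        NULL,
};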
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 5c35f6c76037..545ad44f96b8 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -322,7 +322,8 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos)
set_nlink(i, 1); /* Hard to decide.. */
i->i_size = be32_to_cpu(ri.size);
- i->i_mtime = i->i_atime = inode_set_ctime(i, 0, 0);
+ inode_set_mtime_to_ts(i,
+ inode_set_atime_to_ts(i, inode_set_ctime(i, 0, 0)));
/* set up mode and ops */
mode = romfs_modemap[nextfh & ROMFH_TYPE];
@@ -593,7 +594,7 @@ static void romfs_kill_sb(struct super_block *sb)
#ifdef CONFIG_ROMFS_ON_BLOCK
if (sb->s_bdev) {
sync_blockdev(sb->s_bdev);
- blkdev_put(sb->s_bdev, sb);
+ bdev_release(sb->s_bdev_handle);
}
#endif
}
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index e2be8aedb26e..fe1bf5b6e0cb 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -15,6 +15,7 @@
static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
+static void cfids_laundromat_worker(struct work_struct *work);
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
const char *path,
@@ -169,15 +170,18 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
return -ENOENT;
}
/*
- * At this point we either have a lease already and we can just
- * return it. If not we are guaranteed to be the only thread accessing
- * this cfid.
+ * Return cached fid if it has a lease. Otherwise, it is either a new
+ * entry or laundromat worker removed it from @cfids->entries. Caller
+ * will put last reference if the latter.
*/
+ spin_lock(&cfids->cfid_list_lock);
if (cfid->has_lease) {
+ spin_unlock(&cfids->cfid_list_lock);
*ret_cfid = cfid;
kfree(utf16_path);
return 0;
}
+ spin_unlock(&cfids->cfid_list_lock);
/*
* Skip any prefix paths in @path as lookup_positive_unlocked() ends up
@@ -294,9 +298,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
goto oshr_free;
}
}
+ spin_lock(&cfids->cfid_list_lock);
cfid->dentry = dentry;
cfid->time = jiffies;
cfid->has_lease = true;
+ spin_unlock(&cfids->cfid_list_lock);
oshr_free:
kfree(utf16_path);
@@ -305,24 +311,28 @@ oshr_free:
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
spin_lock(&cfids->cfid_list_lock);
- if (rc && !cfid->has_lease) {
- if (cfid->on_list) {
- list_del(&cfid->entry);
- cfid->on_list = false;
- cfids->num_entries--;
+ if (!cfid->has_lease) {
+ if (rc) {
+ if (cfid->on_list) {
+ list_del(&cfid->entry);
+ cfid->on_list = false;
+ cfids->num_entries--;
+ }
+ rc = -ENOENT;
+ } else {
+ /*
+ * We are guaranteed to have two references at this
+ * point. One for the caller and one for a potential
+ * lease. Release the Lease-ref so that the directory
+ * will be closed when the caller closes the cached
+ * handle.
+ */
+ spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ goto out;
}
- rc = -ENOENT;
}
spin_unlock(&cfids->cfid_list_lock);
- if (!rc && !cfid->has_lease) {
- /*
- * We are guaranteed to have two references at this point.
- * One for the caller and one for a potential lease.
- * Release the Lease-ref so that the directory will be closed
- * when the caller closes the cached handle.
- */
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- }
if (rc) {
if (cfid->is_open)
SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
@@ -330,7 +340,7 @@ oshr_free:
free_cached_dir(cfid);
cfid = NULL;
}
-
+out:
if (rc == 0) {
*ret_cfid = cfid;
atomic_inc(&tcon->num_remote_opens);
@@ -572,53 +582,51 @@ static void free_cached_dir(struct cached_fid *cfid)
kfree(cfid);
}
-static int
-cifs_cfids_laundromat_thread(void *p)
+static void cfids_laundromat_worker(struct work_struct *work)
{
- struct cached_fids *cfids = p;
+ struct cached_fids *cfids;
struct cached_fid *cfid, *q;
- struct list_head entry;
+ LIST_HEAD(entry);
- while (!kthread_should_stop()) {
- ssleep(1);
- INIT_LIST_HEAD(&entry);
- if (kthread_should_stop())
- return 0;
- spin_lock(&cfids->cfid_list_lock);
- list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
- if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
- list_del(&cfid->entry);
- list_add(&cfid->entry, &entry);
- cfids->num_entries--;
- }
- }
- spin_unlock(&cfids->cfid_list_lock);
+ cfids = container_of(work, struct cached_fids, laundromat_work.work);
- list_for_each_entry_safe(cfid, q, &entry, entry) {
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+ if (cfid->time &&
+ time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
cfid->on_list = false;
- list_del(&cfid->entry);
+ list_move(&cfid->entry, &entry);
+ cfids->num_entries--;
+ /* To prevent race with smb2_cached_lease_break() */
+ kref_get(&cfid->refcount);
+ }
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+ list_del(&cfid->entry);
+ /*
+ * Cancel and wait for the work to finish in case we are racing
+ * with it.
+ */
+ cancel_work_sync(&cfid->lease_break);
+ if (cfid->has_lease) {
/*
- * Cancel, and wait for the work to finish in
- * case we are racing with it.
+ * Our lease has not yet been cancelled from the server
+ * so we need to drop the reference.
*/
- cancel_work_sync(&cfid->lease_break);
- if (cfid->has_lease) {
- /*
- * We lease has not yet been cancelled from
- * the server so we need to drop the reference.
- */
- spin_lock(&cfids->cfid_list_lock);
- cfid->has_lease = false;
- spin_unlock(&cfids->cfid_list_lock);
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- }
+ spin_lock(&cfids->cfid_list_lock);
+ cfid->has_lease = false;
+ spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
}
+ /* Drop the extra reference opened above */
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
}
-
- return 0;
+ queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
}
-
struct cached_fids *init_cached_dirs(void)
{
struct cached_fids *cfids;
@@ -629,19 +637,10 @@ struct cached_fids *init_cached_dirs(void)
spin_lock_init(&cfids->cfid_list_lock);
INIT_LIST_HEAD(&cfids->entries);
- /*
- * since we're in a cifs function already, we know that
- * this will succeed. No need for try_module_get().
- */
- __module_get(THIS_MODULE);
- cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread,
- cfids, "cifsd-cfid-laundromat");
- if (IS_ERR(cfids->laundromat)) {
- cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n");
- kfree(cfids);
- module_put(THIS_MODULE);
- return NULL;
- }
+ INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
+ queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
+
return cfids;
}
@@ -657,11 +656,7 @@ void free_cached_dirs(struct cached_fids *cfids)
if (cfids == NULL)
return;
- if (cfids->laundromat) {
- kthread_stop(cfids->laundromat);
- cfids->laundromat = NULL;
- module_put(THIS_MODULE);
- }
+ cancel_delayed_work_sync(&cfids->laundromat_work);
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
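The cached_dir.c rework above retires the dedicated cifsd-cfid-laundromat kthread in favour of a self-rearming delayed work on cifsiod_wq. A compact sketch of that pattern with the expiry logic elided (the queue, field name and interval are taken from the hunks above; the example_ function bodies are illustrative):

static void example_laundromat_worker(struct work_struct *work)
{
        struct cached_fids *cfids =
                container_of(work, struct cached_fids, laundromat_work.work);

        /* ... expire stale entries under cfids->cfid_list_lock ... */

        /* re-arm instead of looping in a kthread */
        queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
                           dir_cache_timeout * HZ);
}

static void example_start(struct cached_fids *cfids)
{
        INIT_DELAYED_WORK(&cfids->laundromat_work, example_laundromat_worker);
        queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
                           dir_cache_timeout * HZ);
}

static void example_stop(struct cached_fids *cfids)
{
        /* waits for a running instance and keeps it from re-arming */
        cancel_delayed_work_sync(&cfids->laundromat_work);
}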
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index a82ff2cea789..81ba0fd5cc16 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -57,7 +57,7 @@ struct cached_fids {
spinlock_t cfid_list_lock;
int num_entries;
struct list_head entries;
- struct task_struct *laundromat;
+ struct delayed_work laundromat_work;
};
extern struct cached_fids *init_cached_dirs(void);
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 41daebd220ff..8ca3d7606bb4 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -127,7 +127,7 @@ extern int cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
struct dentry *direntry, const char *symname);
#ifdef CONFIG_CIFS_XATTR
-extern const struct xattr_handler *cifs_xattr_handlers[];
+extern const struct xattr_handler * const cifs_xattr_handlers[];
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
#else
# define cifs_xattr_handlers NULL
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 2108b3b40ce9..cf17e3dd703e 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -1085,7 +1085,8 @@ int cifs_close(struct inode *inode, struct file *file)
!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
dclose) {
if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
}
spin_lock(&cinode->deferred_lock);
cifs_add_deferred_close(cfile, dclose);
@@ -2596,7 +2597,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
write_data, to - from, &offset);
cifsFileInfo_put(open_file);
/* Does mm or vfs already set times? */
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
if ((bytes_written > 0) && (offset))
rc = 0;
else if (bytes_written < 0)
@@ -4647,11 +4648,13 @@ static void cifs_readahead(struct readahead_control *ractl)
static int cifs_readpage_worker(struct file *file, struct page *page,
loff_t *poffset)
{
+ struct inode *inode = file_inode(file);
+ struct timespec64 atime, mtime;
char *read_data;
int rc;
/* Is the page cached? */
- rc = cifs_readpage_from_fscache(file_inode(file), page);
+ rc = cifs_readpage_from_fscache(inode, page);
if (rc == 0)
goto read_complete;
@@ -4666,11 +4669,10 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
cifs_dbg(FYI, "Bytes read %d\n", rc);
/* we do not want atime to be less than mtime, it broke some apps */
- file_inode(file)->i_atime = current_time(file_inode(file));
- if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
- file_inode(file)->i_atime = file_inode(file)->i_mtime;
- else
- file_inode(file)->i_atime = current_time(file_inode(file));
+ atime = inode_set_atime_to_ts(inode, current_time(inode));
+ mtime = inode_get_mtime(inode);
+ if (timespec64_compare(&atime, &mtime))
+ inode_set_atime_to_ts(inode, inode_get_mtime(inode));
if (PAGE_SIZE > rc)
memset(read_data + rc, 0, PAGE_SIZE - rc);
diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h
index 84f3b09367d2..a3d73720914f 100644
--- a/fs/smb/client/fscache.h
+++ b/fs/smb/client/fscache.h
@@ -49,12 +49,12 @@ static inline
void cifs_fscache_fill_coherency(struct inode *inode,
struct cifs_fscache_inode_coherency_data *cd)
{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
memset(cd, 0, sizeof(*cd));
- cd->last_write_time_sec = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec);
- cd->last_write_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec);
+ cd->last_write_time_sec = cpu_to_le64(mtime.tv_sec);
+ cd->last_write_time_nsec = cpu_to_le32(mtime.tv_nsec);
cd->last_change_time_sec = cpu_to_le64(ctime.tv_sec);
cd->last_change_time_nsec = cpu_to_le32(ctime.tv_nsec);
}
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index d7c302442c1e..3abfe77bfa46 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -82,6 +82,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifs_fscache_inode_coherency_data cd;
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+ struct timespec64 mtime;
cifs_dbg(FYI, "%s: revalidating inode %llu\n",
__func__, cifs_i->uniqueid);
@@ -101,7 +102,8 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
/* revalidate if mtime or size have changed */
fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
- if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
+ mtime = inode_get_mtime(inode);
+ if (timespec64_equal(&mtime, &fattr->cf_mtime) &&
cifs_i->server_eof == fattr->cf_eof) {
cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
__func__, cifs_i->uniqueid);
@@ -164,10 +166,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
fattr->cf_ctime = timestamp_truncate(fattr->cf_ctime, inode);
/* we do not want atime to be less than mtime, it broke some apps */
if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
- inode->i_atime = fattr->cf_mtime;
+ inode_set_atime_to_ts(inode, fattr->cf_mtime);
else
- inode->i_atime = fattr->cf_atime;
- inode->i_mtime = fattr->cf_mtime;
+ inode_set_atime_to_ts(inode, fattr->cf_atime);
+ inode_set_mtime_to_ts(inode, fattr->cf_mtime);
inode_set_ctime_to_ts(inode, fattr->cf_ctime);
inode->i_rdev = fattr->cf_rdev;
cifs_nlink_fattr_to_inode(inode, fattr);
@@ -1816,7 +1818,7 @@ out_reval:
when needed */
inode_set_ctime_current(inode);
}
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
cifs_inode = CIFS_I(dir);
CIFS_I(dir)->time = 0; /* force revalidate of dir as well */
unlink_out:
@@ -2131,7 +2133,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
cifsInode->time = 0;
inode_set_ctime_current(d_inode(direntry));
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
rmdir_exit:
free_dentry_path(page);
@@ -2337,9 +2339,6 @@ unlink_target:
/* force revalidate to go get info when needed */
CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
- source_dir->i_mtime = target_dir->i_mtime = inode_set_ctime_to_ts(source_dir,
- inode_set_ctime_current(target_dir));
-
cifs_rename_exit:
kfree(info_buf_source);
free_dentry_path(page2);
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 9aeecee6b91b..f4849a8ad40b 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -1403,12 +1403,14 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
/* Creation time should not need to be updated on close */
if (file_inf.LastWriteTime)
- inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
+ inode_set_mtime_to_ts(inode,
+ cifs_NTtimeToUnix(file_inf.LastWriteTime));
if (file_inf.ChangeTime)
inode_set_ctime_to_ts(inode,
cifs_NTtimeToUnix(file_inf.ChangeTime));
if (file_inf.LastAccessTime)
- inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);
+ inode_set_atime_to_ts(inode,
+ cifs_NTtimeToUnix(file_inf.LastAccessTime));
/*
* i_blocks is not related to (i_size / i_blksize),
diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
index 4ad5531686d8..ac199160bce6 100644
--- a/fs/smb/client/xattr.c
+++ b/fs/smb/client/xattr.c
@@ -478,7 +478,7 @@ static const struct xattr_handler smb3_ntsd_full_xattr_handler = {
.set = cifs_xattr_set,
};
-const struct xattr_handler *cifs_xattr_handlers[] = {
+const struct xattr_handler * const cifs_xattr_handlers[] = {
&cifs_user_xattr_handler,
&cifs_os2_xattr_handler,
&cifs_cifs_acl_xattr_handler,
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 898860adf929..658209839729 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -231,11 +231,12 @@ void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
{
struct smb2_hdr *rsp_hdr;
- if (work->next_smb2_rcv_hdr_off)
- rsp_hdr = ksmbd_resp_buf_next(work);
- else
- rsp_hdr = smb2_get_msg(work->response_buf);
+ rsp_hdr = smb2_get_msg(work->response_buf);
rsp_hdr->Status = err;
+
+ work->iov_idx = 0;
+ work->iov_cnt = 0;
+ work->next_smb2_rcv_hdr_off = 0;
smb2_set_err_rsp(work);
}
@@ -4833,9 +4834,9 @@ static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
file_info = (struct smb311_posix_qinfo *)rsp->Buffer;
file_info->CreationTime = cpu_to_le64(fp->create_time);
- time = ksmbd_UnixTimeToNT(inode->i_atime);
+ time = ksmbd_UnixTimeToNT(inode_get_atime(inode));
file_info->LastAccessTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode->i_mtime);
+ time = ksmbd_UnixTimeToNT(inode_get_mtime(inode));
file_info->LastWriteTime = cpu_to_le64(time);
time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
file_info->ChangeTime = cpu_to_le64(time);
@@ -5442,9 +5443,9 @@ int smb2_close(struct ksmbd_work *work)
rsp->EndOfFile = cpu_to_le64(inode->i_size);
rsp->Attributes = fp->f_ci->m_fattr;
rsp->CreationTime = cpu_to_le64(fp->create_time);
- time = ksmbd_UnixTimeToNT(inode->i_atime);
+ time = ksmbd_UnixTimeToNT(inode_get_atime(inode));
rsp->LastAccessTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode->i_mtime);
+ time = ksmbd_UnixTimeToNT(inode_get_mtime(inode));
rsp->LastWriteTime = cpu_to_le64(time);
time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
rsp->ChangeTime = cpu_to_le64(time);
@@ -6151,12 +6152,12 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
memcpy(aux_payload_buf, rpc_resp->payload, rpc_resp->payload_sz);
nbytes = rpc_resp->payload_sz;
- kvfree(rpc_resp);
err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
offsetof(struct smb2_read_rsp, Buffer),
aux_payload_buf, nbytes);
if (err)
goto out;
+ kvfree(rpc_resp);
} else {
err = ksmbd_iov_pin_rsp(work, (void *)rsp,
offsetof(struct smb2_read_rsp, Buffer));
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index c4b80ab7df74..c91eac6514dd 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -106,7 +106,7 @@ int ksmbd_query_inode_status(struct inode *inode)
ci = __ksmbd_inode_lookup(inode);
if (ci) {
ret = KSMBD_INODE_STATUS_OK;
- if (ci->m_flags & S_DEL_PENDING)
+ if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
ret = KSMBD_INODE_STATUS_PENDING_DELETE;
atomic_dec(&ci->m_count);
}
@@ -116,7 +116,7 @@ int ksmbd_query_inode_status(struct inode *inode)
bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
- return (fp->f_ci->m_flags & S_DEL_PENDING);
+ return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
}
void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
@@ -603,6 +603,9 @@ err_out:
void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
unsigned int state)
{
+ if (!fp)
+ return;
+
write_lock(&ft->lock);
fp->f_state = state;
write_unlock(&ft->lock);
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index c6e626b00546..aa3411354e66 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -59,9 +59,9 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
i_uid_write(inode, i_uid);
i_gid_write(inode, i_gid);
inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
- inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
- inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
- inode_set_ctime(inode, inode->i_mtime.tv_sec, 0);
+ inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0);
+ inode_set_atime(inode, inode_get_mtime_sec(inode), 0);
+ inode_set_ctime(inode, inode_get_mtime_sec(inode), 0);
inode->i_mode = le16_to_cpu(sqsh_ino->mode);
inode->i_size = 0;
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index a6164fdf9435..5a756e6790b5 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -111,4 +111,4 @@ extern const struct address_space_operations squashfs_symlink_aops;
extern const struct inode_operations squashfs_symlink_inode_ops;
/* xattr.c */
-extern const struct xattr_handler *squashfs_xattr_handlers[];
+extern const struct xattr_handler * const squashfs_xattr_handlers[];
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
index e1e3f3dd5a06..ce6608cabd49 100644
--- a/fs/squashfs/xattr.c
+++ b/fs/squashfs/xattr.c
@@ -262,7 +262,7 @@ static const struct xattr_handler *squashfs_xattr_handler(int type)
}
}
-const struct xattr_handler *squashfs_xattr_handlers[] = {
+const struct xattr_handler * const squashfs_xattr_handlers[] = {
&squashfs_xattr_user_handler,
&squashfs_xattr_trusted_handler,
&squashfs_xattr_security_handler,
diff --git a/fs/stack.c b/fs/stack.c
index b5e01bdb5f5f..f18920119944 100644
--- a/fs/stack.c
+++ b/fs/stack.c
@@ -66,8 +66,8 @@ void fsstack_copy_attr_all(struct inode *dest, const struct inode *src)
dest->i_uid = src->i_uid;
dest->i_gid = src->i_gid;
dest->i_rdev = src->i_rdev;
- dest->i_atime = src->i_atime;
- dest->i_mtime = src->i_mtime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
+ inode_set_mtime_to_ts(dest, inode_get_mtime(src));
inode_set_ctime_to_ts(dest, inode_get_ctime(src));
dest->i_blkbits = src->i_blkbits;
dest->i_flags = src->i_flags;
diff --git a/fs/stat.c b/fs/stat.c
index d43a5cc1bfa4..24bb0209e459 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -57,8 +57,8 @@ void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
stat->gid = vfsgid_into_kgid(vfsgid);
stat->rdev = inode->i_rdev;
stat->size = i_size_read(inode);
- stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
+ stat->atime = inode_get_atime(inode);
+ stat->mtime = inode_get_mtime(inode);
stat->ctime = inode_get_ctime(inode);
stat->blksize = i_blocksize(inode);
stat->blocks = inode->i_blocks;
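The fs/stack.c and fs/stat.c hunks above use the getter side of the same accessor family: inode_get_atime() and inode_get_mtime() return a struct timespec64 by value, matching the existing inode_get_ctime(). A one-function sketch of that usage (function name is made up; the calls are the ones shown above):

static void example_fill_stat_times(struct inode *inode, struct kstat *stat)
{
        stat->atime = inode_get_atime(inode);
        stat->mtime = inode_get_mtime(inode);
        stat->ctime = inode_get_ctime(inode);
}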
diff --git a/fs/super.c b/fs/super.c
index 2d762ce67f6e..c7b452e12e4c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1419,32 +1419,48 @@ EXPORT_SYMBOL(sget_dev);
#ifdef CONFIG_BLOCK
/*
- * Lock a super block that the callers holds a reference to.
+ * Lock the superblock that is holder of the bdev. Returns the superblock
+ * pointer if we successfully locked the superblock and it is alive. Otherwise
+ * we return NULL and just unlock bdev->bd_holder_lock.
*
- * The caller needs to ensure that the super_block isn't being freed while
- * calling this function, e.g. by holding a lock over the call to this function
- * and the place that clears the pointer to the superblock used by this function
- * before freeing the superblock.
+ * The function must be called with bdev->bd_holder_lock and releases it.
*/
-static bool super_lock_shared_active(struct super_block *sb)
+static struct super_block *bdev_super_lock_shared(struct block_device *bdev)
+ __releases(&bdev->bd_holder_lock)
{
- bool born = super_lock_shared(sb);
+ struct super_block *sb = bdev->bd_holder;
+ bool born;
+
+ lockdep_assert_held(&bdev->bd_holder_lock);
+ lockdep_assert_not_held(&sb->s_umount);
+ lockdep_assert_not_held(&bdev->bd_disk->open_mutex);
+
+ /* Make sure sb doesn't go away from under us */
+ spin_lock(&sb_lock);
+ sb->s_count++;
+ spin_unlock(&sb_lock);
+ mutex_unlock(&bdev->bd_holder_lock);
+ born = super_lock_shared(sb);
if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
super_unlock_shared(sb);
- return false;
+ put_super(sb);
+ return NULL;
}
- return true;
+ /*
+ * The superblock is active and we hold s_umount, we can drop our
+ * temporary reference now.
+ */
+ put_super(sb);
+ return sb;
}
static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
- struct super_block *sb = bdev->bd_holder;
-
- /* bd_holder_lock ensures that the sb isn't freed */
- lockdep_assert_held(&bdev->bd_holder_lock);
+ struct super_block *sb;
- if (!super_lock_shared_active(sb))
+ sb = bdev_super_lock_shared(bdev);
+ if (!sb)
return;
if (!surprise)
@@ -1459,11 +1475,10 @@ static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
static void fs_bdev_sync(struct block_device *bdev)
{
- struct super_block *sb = bdev->bd_holder;
-
- lockdep_assert_held(&bdev->bd_holder_lock);
+ struct super_block *sb;
- if (!super_lock_shared_active(sb))
+ sb = bdev_super_lock_shared(bdev);
+ if (!sb)
return;
sync_filesystem(sb);
super_unlock_shared(sb);
@@ -1479,14 +1494,16 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
struct fs_context *fc)
{
blk_mode_t mode = sb_open_mode(sb_flags);
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
- bdev = blkdev_get_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
+ if (IS_ERR(bdev_handle)) {
if (fc)
errorf(fc, "%s: Can't open blockdev", fc->source);
- return PTR_ERR(bdev);
+ return PTR_ERR(bdev_handle);
}
+ bdev = bdev_handle->bdev;
/*
* This really should be in blkdev_get_by_dev, but right now can't due
@@ -1494,7 +1511,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
* writable from userspace even for a read-only block device.
*/
if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
- blkdev_put(bdev, sb);
+ bdev_release(bdev_handle);
return -EACCES;
}
@@ -1510,10 +1527,11 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
mutex_unlock(&bdev->bd_fsfreeze_mutex);
if (fc)
warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
- blkdev_put(bdev, sb);
+ bdev_release(bdev_handle);
return -EBUSY;
}
spin_lock(&sb_lock);
+ sb->s_bdev_handle = bdev_handle;
sb->s_bdev = bdev;
sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
if (bdev_stable_writes(bdev))
@@ -1646,7 +1664,7 @@ void kill_block_super(struct super_block *sb)
generic_shutdown_super(sb);
if (bdev) {
sync_blockdev(bdev);
- blkdev_put(bdev, sb);
+ bdev_release(sb->s_bdev_handle);
}
}
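The fs/super.c changes above fold the old super_lock_shared_active() check into bdev_super_lock_shared(), which is entered with bdev->bd_holder_lock held, pins the superblock with a temporary s_count reference, and returns either a live, shared-locked superblock or NULL, dropping the holder lock in both cases. A sketch of the caller pattern this enables, mirroring fs_bdev_sync() above (the example_ function is made up):

static void example_bdev_event(struct block_device *bdev)
{
        struct super_block *sb;

        sb = bdev_super_lock_shared(bdev);      /* releases bd_holder_lock */
        if (!sb)
                return;                         /* no live filesystem */

        /* ... act on the filesystem, e.g. sync_filesystem(sb) ... */

        super_unlock_shared(sb);
}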
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 2f5ead88d00b..2e126d72d619 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -224,7 +224,7 @@ got_it:
memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
dir_commit_chunk(page, pos, SYSV_DIRSIZE);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = sysv_handle_dirsync(dir);
out_page:
@@ -249,7 +249,7 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
}
de->inode = 0;
dir_commit_chunk(page, pos, SYSV_DIRSIZE);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return sysv_handle_dirsync(inode);
}
@@ -346,7 +346,7 @@ int sysv_set_link(struct sysv_dir_entry *de, struct page *page,
}
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
dir_commit_chunk(page, pos, SYSV_DIRSIZE);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
return sysv_handle_dirsync(inode);
}
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index 6719da5889d9..269df6d49815 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -165,7 +165,7 @@ struct inode * sysv_new_inode(const struct inode * dir, umode_t mode)
dirty_sb(sb);
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = fs16_to_cpu(sbi, ino);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data));
SYSV_I(inode)->i_dir_start_lookup = 0;
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 0aa3827d8178..5a915b2e68f5 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -200,11 +200,9 @@ struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
i_gid_write(inode, (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid));
set_nlink(inode, fs16_to_cpu(sbi, raw_inode->i_nlink));
inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
- inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
- inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_mtime);
+ inode_set_atime(inode, fs32_to_cpu(sbi, raw_inode->i_atime), 0);
+ inode_set_mtime(inode, fs32_to_cpu(sbi, raw_inode->i_mtime), 0);
inode_set_ctime(inode, fs32_to_cpu(sbi, raw_inode->i_ctime), 0);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
inode->i_blocks = 0;
si = SYSV_I(inode);
@@ -253,9 +251,9 @@ static int __sysv_write_inode(struct inode *inode, int wait)
raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(i_gid_read(inode)));
raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
- raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
- raw_inode->i_mtime = cpu_to_fs32(sbi, inode->i_mtime.tv_sec);
- raw_inode->i_ctime = cpu_to_fs32(sbi, inode_get_ctime(inode).tv_sec);
+ raw_inode->i_atime = cpu_to_fs32(sbi, inode_get_atime_sec(inode));
+ raw_inode->i_mtime = cpu_to_fs32(sbi, inode_get_mtime_sec(inode));
+ raw_inode->i_ctime = cpu_to_fs32(sbi, inode_get_ctime_sec(inode));
si = SYSV_I(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index edb94e55de8e..725981474e5f 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -423,7 +423,7 @@ do_indirects:
}
n++;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (IS_SYNC(inode))
sysv_sync_inode (inode);
else
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 891653ba9cf3..429603d865a9 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -152,7 +152,7 @@ struct inode *tracefs_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c
index 3125e76376ee..921f9033d0d2 100644
--- a/fs/ubifs/crypto.c
+++ b/fs/ubifs/crypto.c
@@ -88,8 +88,7 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
}
const struct fscrypt_operations ubifs_crypt_operations = {
- .flags = FS_CFLG_OWN_PAGES,
- .key_prefix = "ubifs:",
+ .legacy_key_prefix = "ubifs:",
.get_context = ubifs_crypt_get_context,
.set_context = ubifs_crypt_set_context,
.empty_dir = ubifs_crypt_empty_dir,
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index eef9e527d9ff..d013c5b3f1ed 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -237,14 +237,14 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
pr_err("\tuid %u\n", (unsigned int)i_uid_read(inode));
pr_err("\tgid %u\n", (unsigned int)i_gid_read(inode));
pr_err("\tatime %u.%u\n",
- (unsigned int)inode->i_atime.tv_sec,
- (unsigned int)inode->i_atime.tv_nsec);
+ (unsigned int) inode_get_atime_sec(inode),
+ (unsigned int) inode_get_atime_nsec(inode));
pr_err("\tmtime %u.%u\n",
- (unsigned int)inode->i_mtime.tv_sec,
- (unsigned int)inode->i_mtime.tv_nsec);
+ (unsigned int) inode_get_mtime_sec(inode),
+ (unsigned int) inode_get_mtime_nsec(inode));
pr_err("\tctime %u.%u\n",
- (unsigned int) inode_get_ctime(inode).tv_sec,
- (unsigned int) inode_get_ctime(inode).tv_nsec);
+ (unsigned int) inode_get_ctime_sec(inode),
+ (unsigned int) inode_get_ctime_nsec(inode));
pr_err("\tcreat_sqnum %llu\n", ui->creat_sqnum);
pr_err("\txattr_size %u\n", ui->xattr_size);
pr_err("\txattr_cnt %u\n", ui->xattr_cnt);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 2f48c58d47cd..7af442de44c3 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -96,7 +96,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
inode->i_flags |= S_NOCMTIME;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mapping->nrpages = 0;
if (!is_xattr) {
@@ -324,7 +324,8 @@ static int ubifs_create(struct mnt_idmap *idmap, struct inode *dir,
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -767,7 +768,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
inode_set_ctime_current(inode);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -841,7 +843,8 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
drop_nlink(inode);
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
if (err)
goto out_cancel;
@@ -944,7 +947,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
if (err)
goto out_cancel;
@@ -1018,7 +1022,8 @@ static int ubifs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
inc_nlink(dir);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err) {
ubifs_err(c, "cannot create directory, error %d", err);
@@ -1109,7 +1114,8 @@ static int ubifs_mknod(struct mnt_idmap *idmap, struct inode *dir,
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -1209,7 +1215,8 @@ static int ubifs_symlink(struct mnt_idmap *idmap, struct inode *dir,
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index e5382f0b2587..2e65fd2dbdc3 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1088,9 +1088,9 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
if (attr->ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (attr->ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (attr->ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (attr->ia_valid & ATTR_MODE) {
@@ -1192,7 +1192,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
mutex_lock(&ui->ui_mutex);
ui->ui_size = inode->i_size;
/* Truncation changes inode [mc]time */
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
/* Other attributes may be changed at the same time as well */
do_attr_changes(inode, attr);
err = ubifs_jnl_truncate(c, inode, old_size, new_size);
@@ -1239,7 +1239,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
mutex_lock(&ui->ui_mutex);
if (attr->ia_valid & ATTR_SIZE) {
/* Truncation changes inode [mc]time */
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
/* 'truncate_setsize()' changed @i_size, update @ui_size */
ui->ui_size = inode->i_size;
}
@@ -1365,9 +1365,9 @@ static inline int mctime_update_needed(const struct inode *inode,
const struct timespec64 *now)
{
struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
- if (!timespec64_equal(&inode->i_mtime, now) ||
- !timespec64_equal(&ctime, now))
+ if (!timespec64_equal(&mtime, now) || !timespec64_equal(&ctime, now))
return 1;
return 0;
}
@@ -1429,7 +1429,7 @@ static int update_mctime(struct inode *inode)
return err;
mutex_lock(&ui->ui_mutex);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
@@ -1567,7 +1567,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
struct ubifs_inode *ui = ubifs_inode(inode);
mutex_lock(&ui->ui_mutex);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index ffc9beee7be6..d69d2154645b 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -452,12 +452,12 @@ static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
ino->ch.node_type = UBIFS_INO_NODE;
ino_key_init_flash(c, &ino->key, inode->i_ino);
ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
- ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
- ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- ino->ctime_sec = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- ino->ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
- ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
- ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ ino->atime_sec = cpu_to_le64(inode_get_atime_sec(inode));
+ ino->atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
+ ino->ctime_sec = cpu_to_le64(inode_get_ctime_sec(inode));
+ ino->ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ino->mtime_sec = cpu_to_le64(inode_get_mtime_sec(inode));
+ ino->mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
ino->uid = cpu_to_le32(i_uid_read(inode));
ino->gid = cpu_to_le32(i_gid_read(inode));
ino->mode = cpu_to_le32(inode->i_mode);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index b08fb28d16b5..366941d4a18a 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -142,10 +142,10 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
set_nlink(inode, le32_to_cpu(ino->nlink));
i_uid_write(inode, le32_to_cpu(ino->uid));
i_gid_write(inode, le32_to_cpu(ino->gid));
- inode->i_atime.tv_sec = (int64_t)le64_to_cpu(ino->atime_sec);
- inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec);
- inode->i_mtime.tv_sec = (int64_t)le64_to_cpu(ino->mtime_sec);
- inode->i_mtime.tv_nsec = le32_to_cpu(ino->mtime_nsec);
+ inode_set_atime(inode, (int64_t)le64_to_cpu(ino->atime_sec),
+ le32_to_cpu(ino->atime_nsec));
+ inode_set_mtime(inode, (int64_t)le64_to_cpu(ino->mtime_sec),
+ le32_to_cpu(ino->mtime_nsec));
inode_set_ctime(inode, (int64_t)le64_to_cpu(ino->ctime_sec),
le32_to_cpu(ino->ctime_nsec));
inode->i_mode = le32_to_cpu(ino->mode);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index ebb3ad6b5e7e..62633816d7d0 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -2043,7 +2043,7 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
size_t size);
#ifdef CONFIG_UBIFS_FS_XATTR
-extern const struct xattr_handler *ubifs_xattr_handlers[];
+extern const struct xattr_handler * const ubifs_xattr_handlers[];
ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum);
int ubifs_purge_xattrs(struct inode *host);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 406c82eab513..0847db521984 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -735,7 +735,7 @@ static const struct xattr_handler ubifs_security_xattr_handler = {
};
#endif
-const struct xattr_handler *ubifs_xattr_handlers[] = {
+const struct xattr_handler * const ubifs_xattr_handlers[] = {
&ubifs_user_xattr_handler,
&ubifs_trusted_xattr_handler,
#ifdef CONFIG_UBIFS_FS_SECURITY
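The ubifs hunks above all apply the same conversion: direct stores to inode->i_mtime are replaced by the accessor helpers this series adds to include/linux/fs.h (see the fs.h hunk further down). As a minimal sketch of the in-memory update pattern for a hypothetical filesystem — foofs_update_mctime() is an invented name, only the inode_* helpers are the real API:

static void foofs_update_mctime(struct inode *inode)
{
	/* inode_set_ctime_current() returns the new ctime, so mtime can copy it */
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty_sync(inode);
}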
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 6b558cbbeb6b..5f1f969f4134 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -100,8 +100,8 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- iinfo->i_crtime = inode->i_mtime;
+ simple_inode_init_ts(inode);
+ iinfo->i_crtime = inode_get_mtime(inode);
if (unlikely(insert_inode_locked(inode) < 0)) {
make_bad_inode(inode);
iput(inode);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index a17a6184cc39..d8493449d4c5 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1296,7 +1296,7 @@ set_size:
goto out_unlock;
}
update_time:
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (IS_SYNC(inode))
udf_sync_inode(inode);
else
@@ -1327,7 +1327,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
uint32_t uid, gid;
- struct timespec64 ctime;
+ struct timespec64 ts;
reread:
if (iloc->partitionReferenceNum >= sbi->s_partitions) {
@@ -1504,10 +1504,12 @@ reread:
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
- udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
- udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
- udf_disk_stamp_to_time(&ctime, fe->attrTime);
- inode_set_ctime_to_ts(inode, ctime);
+ udf_disk_stamp_to_time(&ts, fe->accessTime);
+ inode_set_atime_to_ts(inode, ts);
+ udf_disk_stamp_to_time(&ts, fe->modificationTime);
+ inode_set_mtime_to_ts(inode, ts);
+ udf_disk_stamp_to_time(&ts, fe->attrTime);
+ inode_set_ctime_to_ts(inode, ts);
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
@@ -1519,11 +1521,13 @@ reread:
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
- udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
- udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
+ udf_disk_stamp_to_time(&ts, efe->accessTime);
+ inode_set_atime_to_ts(inode, ts);
+ udf_disk_stamp_to_time(&ts, efe->modificationTime);
+ inode_set_mtime_to_ts(inode, ts);
+ udf_disk_stamp_to_time(&ts, efe->attrTime);
+ inode_set_ctime_to_ts(inode, ts);
udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
- udf_disk_stamp_to_time(&ctime, efe->attrTime);
- inode_set_ctime_to_ts(inode, ctime);
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
@@ -1798,8 +1802,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
- udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
- udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
+ udf_time_to_disk_stamp(&fe->accessTime, inode_get_atime(inode));
+ udf_time_to_disk_stamp(&fe->modificationTime, inode_get_mtime(inode));
udf_time_to_disk_stamp(&fe->attrTime, inode_get_ctime(inode));
memset(&(fe->impIdent), 0, sizeof(struct regid));
strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
@@ -1829,12 +1833,14 @@ static int udf_update_inode(struct inode *inode, int do_sync)
cpu_to_le32(inode->i_sb->s_blocksize);
}
- udf_adjust_time(iinfo, inode->i_atime);
- udf_adjust_time(iinfo, inode->i_mtime);
+ udf_adjust_time(iinfo, inode_get_atime(inode));
+ udf_adjust_time(iinfo, inode_get_mtime(inode));
udf_adjust_time(iinfo, inode_get_ctime(inode));
- udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
- udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
+ udf_time_to_disk_stamp(&efe->accessTime,
+ inode_get_atime(inode));
+ udf_time_to_disk_stamp(&efe->modificationTime,
+ inode_get_mtime(inode));
udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
udf_time_to_disk_stamp(&efe->attrTime, inode_get_ctime(inode));
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index ae55ab8859b6..3508ac484da3 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -365,7 +365,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
udf_fiiter_write_fi(&iter, NULL);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
udf_fiiter_release(&iter);
udf_add_fid_counter(dir->i_sb, false, 1);
@@ -471,7 +471,7 @@ static int udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
udf_fiiter_release(&iter);
udf_add_fid_counter(dir->i_sb, true, 1);
inc_nlink(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
d_instantiate_new(dentry, inode);
@@ -523,8 +523,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_size = 0;
inode_dec_link_count(dir);
udf_add_fid_counter(dir->i_sb, true, -1);
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
mark_inode_dirty(dir);
ret = 0;
end_rmdir:
@@ -555,7 +555,7 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
set_nlink(inode, 1);
}
udf_fiiter_delete_entry(&iter);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
inode_dec_link_count(inode);
udf_add_fid_counter(dir->i_sb, false, -1);
@@ -748,7 +748,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
udf_add_fid_counter(dir->i_sb, false, 1);
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
ihold(inode);
d_instantiate(dentry, inode);
@@ -866,8 +866,8 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
udf_add_fid_counter(old_dir->i_sb, S_ISDIR(new_inode->i_mode),
-1);
}
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
- new_dir->i_mtime = inode_set_ctime_current(new_dir);
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
+ inode_set_mtime_to_ts(new_dir, inode_set_ctime_current(new_dir));
mark_inode_dirty(old_dir);
mark_inode_dirty(new_dir);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index fd57f03b6c93..27c85d92d1dc 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -107,7 +107,7 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
ufs_commit_chunk(page, pos, len);
ufs_put_page(page);
if (update_times)
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
ufs_handle_dirsync(dir);
}
@@ -397,7 +397,7 @@ got_it:
ufs_set_de_type(sb, de, inode->i_mode);
ufs_commit_chunk(page, pos, rec_len);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = ufs_handle_dirsync(dir);
@@ -539,7 +539,7 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
pde->d_reclen = cpu_to_fs16(sb, to - from);
dir->d_ino = 0;
ufs_commit_chunk(page, pos, to - from);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
err = ufs_handle_dirsync(inode);
out:
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index a1e7bd9d1f98..73531827ecee 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -292,7 +292,7 @@ cg_found:
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_blocks = 0;
inode->i_generation = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
ufsi->i_flags = UFS_I(dir)->i_flags;
ufsi->i_lastfrag = 0;
ufsi->i_shadow = 0;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 21a4779a2de5..338e4b97312f 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -579,13 +579,15 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
- inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
+ inode_set_atime(inode,
+ (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec),
+ 0);
inode_set_ctime(inode,
(signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
0);
- inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
+ inode_set_mtime(inode,
+ (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec),
+ 0);
inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
@@ -626,12 +628,12 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
- inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
+ inode_set_atime(inode, fs64_to_cpu(sb, ufs2_inode->ui_atime),
+ fs32_to_cpu(sb, ufs2_inode->ui_atimensec));
inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
- inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
- inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
- inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
+ inode_set_mtime(inode, fs64_to_cpu(sb, ufs2_inode->ui_mtime),
+ fs32_to_cpu(sb, ufs2_inode->ui_mtimensec));
inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
@@ -725,12 +727,14 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
- ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
+ ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb,
+ inode_get_atime_sec(inode));
ufs_inode->ui_atime.tv_usec = 0;
ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
- inode_get_ctime(inode).tv_sec);
+ inode_get_ctime_sec(inode));
ufs_inode->ui_ctime.tv_usec = 0;
- ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
+ ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb,
+ inode_get_mtime_sec(inode));
ufs_inode->ui_mtime.tv_usec = 0;
ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
@@ -770,13 +774,15 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
- ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
- ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
- ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime(inode).tv_sec);
+ ufs_inode->ui_atime = cpu_to_fs64(sb, inode_get_atime_sec(inode));
+ ufs_inode->ui_atimensec = cpu_to_fs32(sb,
+ inode_get_atime_nsec(inode));
+ ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime_sec(inode));
ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
- inode_get_ctime(inode).tv_nsec);
- ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
- ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
+ inode_get_ctime_nsec(inode));
+ ufs_inode->ui_mtime = cpu_to_fs64(sb, inode_get_mtime_sec(inode));
+ ufs_inode->ui_mtimensec = cpu_to_fs32(sb,
+ inode_get_mtime_nsec(inode));
ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
@@ -1208,7 +1214,7 @@ static int ufs_truncate(struct inode *inode, loff_t size)
truncate_setsize(inode, size);
ufs_truncate_blocks(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
out:
UFSD("EXIT: err %d\n", err);
diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c
index 83f20dd15522..72ac9320e6a3 100644
--- a/fs/vboxsf/utils.c
+++ b/fs/vboxsf/utils.c
@@ -126,12 +126,12 @@ int vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
do_div(allocated, 512);
inode->i_blocks = allocated;
- inode->i_atime = ns_to_timespec64(
- info->access_time.ns_relative_to_unix_epoch);
+ inode_set_atime_to_ts(inode,
+ ns_to_timespec64(info->access_time.ns_relative_to_unix_epoch));
inode_set_ctime_to_ts(inode,
ns_to_timespec64(info->change_time.ns_relative_to_unix_epoch));
- inode->i_mtime = ns_to_timespec64(
- info->modification_time.ns_relative_to_unix_epoch);
+ inode_set_mtime_to_ts(inode,
+ ns_to_timespec64(info->modification_time.ns_relative_to_unix_epoch));
return 0;
}
@@ -194,7 +194,7 @@ int vboxsf_inode_revalidate(struct dentry *dentry)
struct vboxsf_sbi *sbi;
struct vboxsf_inode *sf_i;
struct shfl_fsobjinfo info;
- struct timespec64 prev_mtime;
+ struct timespec64 mtime, prev_mtime;
struct inode *inode;
int err;
@@ -202,7 +202,7 @@ int vboxsf_inode_revalidate(struct dentry *dentry)
return -EINVAL;
inode = d_inode(dentry);
- prev_mtime = inode->i_mtime;
+ prev_mtime = inode_get_mtime(inode);
sf_i = VBOXSF_I(inode);
sbi = VBOXSF_SBI(dentry->d_sb);
if (!sf_i->force_restat) {
@@ -225,7 +225,8 @@ int vboxsf_inode_revalidate(struct dentry *dentry)
* page-cache for it. Note this also gets triggered by our own writes,
* this is unavoidable.
*/
- if (timespec64_compare(&inode->i_mtime, &prev_mtime) > 0)
+ mtime = inode_get_mtime(inode);
+ if (timespec64_compare(&mtime, &prev_mtime) > 0)
invalidate_inode_pages2(inode->i_mapping);
return 0;
diff --git a/fs/xattr.c b/fs/xattr.c
index efd4736bc94b..09d927603433 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -56,7 +56,7 @@ strcmp_prefix(const char *a, const char *a_prefix)
static const struct xattr_handler *
xattr_resolve_name(struct inode *inode, const char **name)
{
- const struct xattr_handler **handlers = inode->i_sb->s_xattr;
+ const struct xattr_handler * const *handlers = inode->i_sb->s_xattr;
const struct xattr_handler *handler;
if (!(inode->i_opflags & IOP_XATTR)) {
@@ -162,7 +162,7 @@ xattr_permission(struct mnt_idmap *idmap, struct inode *inode,
int
xattr_supports_user_prefix(struct inode *inode)
{
- const struct xattr_handler **handlers = inode->i_sb->s_xattr;
+ const struct xattr_handler * const *handlers = inode->i_sb->s_xattr;
const struct xattr_handler *handler;
if (!(inode->i_opflags & IOP_XATTR)) {
@@ -999,7 +999,7 @@ int xattr_list_one(char **buffer, ssize_t *remaining_size, const char *name)
ssize_t
generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- const struct xattr_handler *handler, **handlers = dentry->d_sb->s_xattr;
+ const struct xattr_handler *handler, * const *handlers = dentry->d_sb->s_xattr;
ssize_t remaining_size = buffer_size;
int err = 0;
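The xattr changes in this section constify both the per-filesystem handler arrays (ubifs and xfs above) and the VFS-side walk over sb->s_xattr. After the change a filesystem-side declaration looks roughly like the sketch below — the foofs_* names are invented, the types match the hunks:

static const struct xattr_handler foofs_user_xattr_handler = {
	.prefix	= XATTR_USER_PREFIX,
	/* .get and .set callbacks omitted in this sketch */
};

/* The array of pointers is now const too, matching super_block::s_xattr. */
const struct xattr_handler * const foofs_xattr_handlers[] = {
	&foofs_user_xattr_handler,
	NULL,
};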
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index e9cc481b4ddf..f9f4d694640d 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -1001,6 +1001,12 @@ xfs_ag_shrink_space(
error = -ENOSPC;
goto resv_init_out;
}
+
+ /* Update perag geometry */
+ pag->block_count -= delta;
+ __xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
+ &pag->agino_max);
+
xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
return 0;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index a35781577cad..543f3748c2a3 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -220,8 +220,10 @@ xfs_inode_from_disk(
* a time before epoch is converted to a time long after epoch
* on 64 bit systems.
*/
- inode->i_atime = xfs_inode_from_disk_ts(from, from->di_atime);
- inode->i_mtime = xfs_inode_from_disk_ts(from, from->di_mtime);
+ inode_set_atime_to_ts(inode,
+ xfs_inode_from_disk_ts(from, from->di_atime));
+ inode_set_mtime_to_ts(inode,
+ xfs_inode_from_disk_ts(from, from->di_mtime));
inode_set_ctime_to_ts(inode,
xfs_inode_from_disk_ts(from, from->di_ctime));
@@ -315,8 +317,8 @@ xfs_inode_to_disk(
to->di_projid_lo = cpu_to_be16(ip->i_projid & 0xffff);
to->di_projid_hi = cpu_to_be16(ip->i_projid >> 16);
- to->di_atime = xfs_inode_to_disk_ts(ip, inode->i_atime);
- to->di_mtime = xfs_inode_to_disk_ts(ip, inode->i_mtime);
+ to->di_atime = xfs_inode_to_disk_ts(ip, inode_get_atime(inode));
+ to->di_mtime = xfs_inode_to_disk_ts(ip, inode_get_mtime(inode));
to->di_ctime = xfs_inode_to_disk_ts(ip, inode_get_ctime(inode));
to->di_nlink = cpu_to_be32(inode->i_nlink);
to->di_gen = cpu_to_be32(inode->i_generation);
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index fa180ab66b73..396648acb5be 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -970,6 +970,7 @@ xfs_rtfree_extent(
xfs_mount_t *mp; /* file system mount structure */
xfs_fsblock_t sb; /* summary file block number */
struct xfs_buf *sumbp = NULL; /* summary file block buffer */
+ struct timespec64 atime;
mp = tp->t_mountp;
@@ -999,7 +1000,10 @@ xfs_rtfree_extent(
mp->m_sb.sb_rextents) {
if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM))
mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
- *(uint64_t *)&VFS_I(mp->m_rbmip)->i_atime = 0;
+
+ atime = inode_get_atime(VFS_I(mp->m_rbmip));
+ *((uint64_t *)&atime) = 0;
+ inode_set_atime_to_ts(VFS_I(mp->m_rbmip), atime);
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
}
return 0;
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 6b2296ff248a..70e97ea6eee7 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -65,7 +65,7 @@ xfs_trans_ichgtime(
tv = current_time(inode);
if (flags & XFS_ICHGTIME_MOD)
- inode->i_mtime = tv;
+ inode_set_mtime_to_ts(inode, tv);
if (flags & XFS_ICHGTIME_CHG)
inode_set_ctime_to_ts(inode, tv);
if (flags & XFS_ICHGTIME_CREATE)
diff --git a/fs/xfs/scrub/xfile.c b/fs/xfs/scrub/xfile.c
index d98e8e77c684..090c3ead43fd 100644
--- a/fs/xfs/scrub/xfile.c
+++ b/fs/xfs/scrub/xfile.c
@@ -10,7 +10,6 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
-#include "xfs_format.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/scrub.h"
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index fcefab687285..40e0a1f1f753 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1644,7 +1644,7 @@ xfs_swap_extents(
uint64_t f;
int resblks = 0;
unsigned int flags = 0;
- struct timespec64 ctime;
+ struct timespec64 ctime, mtime;
/*
* Lock the inodes against other IO, page faults and truncate to
@@ -1758,10 +1758,11 @@ xfs_swap_extents(
* under it.
*/
ctime = inode_get_ctime(VFS_I(ip));
+ mtime = inode_get_mtime(VFS_I(ip));
if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
(sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
- (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
- (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
+ (sbp->bs_mtime.tv_sec != mtime.tv_sec) ||
+ (sbp->bs_mtime.tv_nsec != mtime.tv_nsec)) {
error = -EBUSY;
goto out_trans_cancel;
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c1ece4a08ff4..003e157241da 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1945,8 +1945,6 @@ void
xfs_free_buftarg(
struct xfs_buftarg *btp)
{
- struct block_device *bdev = btp->bt_bdev;
-
unregister_shrinker(&btp->bt_shrinker);
ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
percpu_counter_destroy(&btp->bt_io_count);
@@ -1954,8 +1952,8 @@ xfs_free_buftarg(
fs_put_dax(btp->bt_daxdev, btp->bt_mount);
/* the main block device is closed by kill_block_super */
- if (bdev != btp->bt_mount->m_super->s_bdev)
- blkdev_put(bdev, btp->bt_mount->m_super);
+ if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
+ bdev_release(btp->bt_bdev_handle);
kmem_free(btp);
}
@@ -1990,16 +1988,15 @@ xfs_setsize_buftarg(
*/
STATIC int
xfs_setsize_buftarg_early(
- xfs_buftarg_t *btp,
- struct block_device *bdev)
+ xfs_buftarg_t *btp)
{
- return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
+ return xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev));
}
struct xfs_buftarg *
xfs_alloc_buftarg(
struct xfs_mount *mp,
- struct block_device *bdev)
+ struct bdev_handle *bdev_handle)
{
xfs_buftarg_t *btp;
const struct dax_holder_operations *ops = NULL;
@@ -2010,9 +2007,10 @@ xfs_alloc_buftarg(
btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
btp->bt_mount = mp;
- btp->bt_dev = bdev->bd_dev;
- btp->bt_bdev = bdev;
- btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off,
+ btp->bt_bdev_handle = bdev_handle;
+ btp->bt_dev = bdev_handle->bdev->bd_dev;
+ btp->bt_bdev = bdev_handle->bdev;
+ btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
mp, ops);
/*
@@ -2022,7 +2020,7 @@ xfs_alloc_buftarg(
ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
DEFAULT_RATELIMIT_BURST);
- if (xfs_setsize_buftarg_early(btp, bdev))
+ if (xfs_setsize_buftarg_early(btp))
goto error_free;
if (list_lru_init(&btp->bt_lru))
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index df8f47953bb4..ada9d310b7d3 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -98,6 +98,7 @@ typedef unsigned int xfs_buf_flags_t;
*/
typedef struct xfs_buftarg {
dev_t bt_dev;
+ struct bdev_handle *bt_bdev_handle;
struct block_device *bt_bdev;
struct dax_device *bt_daxdev;
u64 bt_dax_part_off;
@@ -364,7 +365,7 @@ xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
* Handling of buftargs.
*/
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
- struct block_device *bdev);
+ struct bdev_handle *bdev_handle);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 746814815b1d..9ecfdcdc752f 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -62,7 +62,8 @@ xfs_extent_busy_insert_list(
rb_link_node(&new->rb_node, parent, rbp);
rb_insert_color(&new->rb_node, &pag->pagb_tree);
- list_add(&new->list, busy_list);
+ /* always process discard lists in fifo order */
+ list_add_tail(&new->list, busy_list);
spin_unlock(&pag->pagb_lock);
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 4d55f58d99b7..36f5cf802c07 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -844,8 +844,8 @@ xfs_init_new_inode(
ASSERT(ip->i_nblocks == 0);
tv = inode_set_ctime_current(inode);
- inode->i_mtime = tv;
- inode->i_atime = tv;
+ inode_set_mtime_to_ts(inode, tv);
+ inode_set_atime_to_ts(inode, tv);
ip->i_extsize = 0;
ip->i_diflags = 0;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 127b2410eb20..17c51804f9c6 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -526,8 +526,8 @@ xfs_inode_to_log_dinode(
to->di_projid_hi = ip->i_projid >> 16;
memset(to->di_pad3, 0, sizeof(to->di_pad3));
- to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode->i_atime);
- to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode->i_mtime);
+ to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode_get_atime(inode));
+ to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode_get_mtime(inode));
to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode_get_ctime(inode));
to->di_nlink = inode->i_nlink;
to->di_gen = inode->i_generation;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 1c1e6171209d..fdfda4fba12b 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -572,8 +572,8 @@ xfs_vn_getattr(
stat->uid = vfsuid_into_kuid(vfsuid);
stat->gid = vfsgid_into_kgid(vfsgid);
stat->ino = ip->i_ino;
- stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
+ stat->atime = inode_get_atime(inode);
+ stat->mtime = inode_get_mtime(inode);
stat->ctime = inode_get_ctime(inode);
stat->blocks = XFS_FSB_TO_BB(mp, ip->i_nblocks + ip->i_delayed_blks);
@@ -584,6 +584,11 @@ xfs_vn_getattr(
}
}
+ if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
+ stat->change_cookie = inode_query_iversion(inode);
+ stat->result_mask |= STATX_CHANGE_COOKIE;
+ }
+
/*
* Note: If you add another clause to set an attribute flag, please
* update attributes_mask below.
@@ -1062,9 +1067,9 @@ xfs_vn_update_time(
now = current_time(inode);
if (flags & S_MTIME)
- inode->i_mtime = now;
+ inode_set_mtime_to_ts(inode, now);
if (flags & S_ATIME)
- inode->i_atime = now;
+ inode_set_atime_to_ts(inode, now);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, log_flags);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f5377ba5967a..14462614fcc8 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -107,12 +107,12 @@ xfs_bulkstat_one_int(
buf->bs_size = ip->i_disk_size;
buf->bs_nlink = inode->i_nlink;
- buf->bs_atime = inode->i_atime.tv_sec;
- buf->bs_atime_nsec = inode->i_atime.tv_nsec;
- buf->bs_mtime = inode->i_mtime.tv_sec;
- buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
- buf->bs_ctime = inode_get_ctime(inode).tv_sec;
- buf->bs_ctime_nsec = inode_get_ctime(inode).tv_nsec;
+ buf->bs_atime = inode_get_atime_sec(inode);
+ buf->bs_atime_nsec = inode_get_atime_nsec(inode);
+ buf->bs_mtime = inode_get_mtime_sec(inode);
+ buf->bs_mtime_nsec = inode_get_mtime_nsec(inode);
+ buf->bs_ctime = inode_get_ctime_sec(inode);
+ buf->bs_ctime_nsec = inode_get_ctime_nsec(inode);
buf->bs_gen = inode->i_generation;
buf->bs_mode = inode->i_mode;
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
index 4a9bbd3fe120..a7daa522e00f 100644
--- a/fs/xfs/xfs_notify_failure.c
+++ b/fs/xfs/xfs_notify_failure.c
@@ -126,8 +126,8 @@ xfs_dax_notify_ddev_failure(
struct xfs_rmap_irec ri_low = { };
struct xfs_rmap_irec ri_high;
struct xfs_agf *agf;
- xfs_agblock_t agend;
struct xfs_perag *pag;
+ xfs_agblock_t range_agend;
pag = xfs_perag_get(mp, agno);
error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
@@ -148,10 +148,10 @@ xfs_dax_notify_ddev_failure(
ri_high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsbno);
agf = agf_bp->b_addr;
- agend = min(be32_to_cpu(agf->agf_length),
+ range_agend = min(be32_to_cpu(agf->agf_length) - 1,
ri_high.rm_startblock);
notify.startblock = ri_low.rm_startblock;
- notify.blockcount = agend - ri_low.rm_startblock;
+ notify.blockcount = range_agend + 1 - ri_low.rm_startblock;
error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
xfs_dax_failure_fn, &notify);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 16534e9873f6..2e1a4e5cd03d 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1420,25 +1420,26 @@ xfs_rtunmount_inodes(
*/
int /* error */
xfs_rtpick_extent(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_extlen_t len, /* allocation length (rtextents) */
- xfs_rtblock_t *pick) /* result rt extent */
-{
- xfs_rtblock_t b; /* result block */
- int log2; /* log of sequence number */
- uint64_t resid; /* residual after log removed */
- uint64_t seq; /* sequence number of file creation */
- uint64_t *seqp; /* pointer to seqno in inode */
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_extlen_t len, /* allocation length (rtextents) */
+ xfs_rtblock_t *pick) /* result rt extent */
+ {
+ xfs_rtblock_t b; /* result block */
+ int log2; /* log of sequence number */
+ uint64_t resid; /* residual after log removed */
+ uint64_t seq; /* sequence number of file creation */
+ struct timespec64 ts; /* temporary timespec64 storage */
ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
- seqp = (uint64_t *)&VFS_I(mp->m_rbmip)->i_atime;
if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
- *seqp = 0;
+ seq = 0;
+ } else {
+ ts = inode_get_atime(VFS_I(mp->m_rbmip));
+ seq = (uint64_t)ts.tv_sec;
}
- seq = *seqp;
if ((log2 = xfs_highbit64(seq)) == -1)
b = 0;
else {
@@ -1450,7 +1451,8 @@ xfs_rtpick_extent(
if (b + len > mp->m_sb.sb_rextents)
b = mp->m_sb.sb_rextents - len;
}
- *seqp = seq + 1;
+ ts.tv_sec = (time64_t)seq + 1;
+ inode_set_atime_to_ts(VFS_I(mp->m_rbmip), ts);
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
*pick = b;
return 0;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 819a3568b28f..f0ae07828153 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -361,14 +361,15 @@ STATIC int
xfs_blkdev_get(
xfs_mount_t *mp,
const char *name,
- struct block_device **bdevp)
+ struct bdev_handle **handlep)
{
int error = 0;
- *bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
- mp->m_super, &fs_holder_ops);
- if (IS_ERR(*bdevp)) {
- error = PTR_ERR(*bdevp);
+ *handlep = bdev_open_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ mp->m_super, &fs_holder_ops);
+ if (IS_ERR(*handlep)) {
+ error = PTR_ERR(*handlep);
+ *handlep = NULL;
xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
}
@@ -433,7 +434,7 @@ xfs_open_devices(
{
struct super_block *sb = mp->m_super;
struct block_device *ddev = sb->s_bdev;
- struct block_device *logdev = NULL, *rtdev = NULL;
+ struct bdev_handle *logdev_handle = NULL, *rtdev_handle = NULL;
int error;
/*
@@ -446,17 +447,19 @@ xfs_open_devices(
* Open real time and log devices - order is important.
*/
if (mp->m_logname) {
- error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
+ error = xfs_blkdev_get(mp, mp->m_logname, &logdev_handle);
if (error)
goto out_relock;
}
if (mp->m_rtname) {
- error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
+ error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_handle);
if (error)
goto out_close_logdev;
- if (rtdev == ddev || rtdev == logdev) {
+ if (rtdev_handle->bdev == ddev ||
+ (logdev_handle &&
+ rtdev_handle->bdev == logdev_handle->bdev)) {
xfs_warn(mp,
"Cannot mount filesystem with identical rtdev and ddev/logdev.");
error = -EINVAL;
@@ -468,22 +471,25 @@ xfs_open_devices(
* Setup xfs_mount buffer target pointers
*/
error = -ENOMEM;
- mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
+ mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_handle);
if (!mp->m_ddev_targp)
goto out_close_rtdev;
- if (rtdev) {
- mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
+ if (rtdev_handle) {
+ mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_handle);
if (!mp->m_rtdev_targp)
goto out_free_ddev_targ;
}
- if (logdev && logdev != ddev) {
- mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
+ if (logdev_handle && logdev_handle->bdev != ddev) {
+ mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_handle);
if (!mp->m_logdev_targp)
goto out_free_rtdev_targ;
} else {
mp->m_logdev_targp = mp->m_ddev_targp;
+ /* Handle won't be used, drop it */
+ if (logdev_handle)
+ bdev_release(logdev_handle);
}
error = 0;
@@ -497,11 +503,11 @@ out_relock:
out_free_ddev_targ:
xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
- if (rtdev)
- blkdev_put(rtdev, sb);
+ if (rtdev_handle)
+ bdev_release(rtdev_handle);
out_close_logdev:
- if (logdev && logdev != ddev)
- blkdev_put(logdev, sb);
+ if (logdev_handle)
+ bdev_release(logdev_handle);
goto out_relock;
}
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index a3975f325f4e..987843f84d03 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -186,7 +186,7 @@ static const struct xattr_handler xfs_xattr_security_handler = {
.set = xfs_xattr_set,
};
-const struct xattr_handler *xfs_xattr_handlers[] = {
+const struct xattr_handler * const xfs_xattr_handlers[] = {
&xfs_xattr_user_handler,
&xfs_xattr_trusted_handler,
&xfs_xattr_security_handler,
diff --git a/fs/xfs/xfs_xattr.h b/fs/xfs/xfs_xattr.h
index 2b09133b1b9b..cec766cad26c 100644
--- a/fs/xfs/xfs_xattr.h
+++ b/fs/xfs/xfs_xattr.h
@@ -8,6 +8,6 @@
int xfs_attr_change(struct xfs_da_args *args);
-extern const struct xattr_handler *xfs_xattr_handlers[];
+extern const struct xattr_handler * const xfs_xattr_handlers[];
#endif /* __XFS_XATTR_H__ */
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 9d1a9808fbbb..e6a75401677d 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -658,8 +658,8 @@ static struct inode *zonefs_get_file_inode(struct inode *dir,
inode->i_ino = ino;
inode->i_mode = z->z_mode;
- inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
- inode_get_ctime(dir));
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, inode_get_ctime(dir))));
inode->i_uid = z->z_uid;
inode->i_gid = z->z_gid;
inode->i_size = z->z_wpoffset;
@@ -695,8 +695,8 @@ static struct inode *zonefs_get_zgroup_inode(struct super_block *sb,
inode->i_ino = ino;
inode_init_owner(&nop_mnt_idmap, inode, root, S_IFDIR | 0555);
inode->i_size = sbi->s_zgroup[ztype].g_nr_zones;
- inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
- inode_get_ctime(root));
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, inode_get_ctime(root))));
inode->i_private = &sbi->s_zgroup[ztype];
set_nlink(inode, 2);
@@ -1319,7 +1319,7 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
inode->i_ino = bdev_nr_zones(sb->s_bdev);
inode->i_mode = S_IFDIR | 0555;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &zonefs_dir_inode_operations;
inode->i_fop = &zonefs_dir_operations;
inode->i_size = 2;
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 94181fe9780a..3f34ebb27525 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -465,9 +465,4 @@ extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
#endif
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-extern int arch_register_cpu(int cpu);
-extern void arch_unregister_cpu(int cpu);
-#endif
-
#endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index f9544d9b670d..ac65f0626cfc 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -68,8 +68,7 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
- DRM_SCHED_PRIORITY_COUNT,
- DRM_SCHED_PRIORITY_UNSET = -2
+ DRM_SCHED_PRIORITY_COUNT
};
/* Used to chose between FIFO and RR jobs scheduling */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index bb3cb005873e..e748bc957d83 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -82,6 +82,8 @@ struct timer_map {
struct arch_timer_context *emul_ptimer;
};
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
+
struct arch_timer_cpu {
struct arch_timer_context timers[NR_KVM_TIMERS];
@@ -145,4 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt);
void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void);
+static inline bool has_cntpoff(void)
+{
+ return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index eef450f25982..51fa7ffdee83 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1479,14 +1479,25 @@ extern const struct blk_holder_ops fs_holder_ops;
#define sb_open_mode(flags) \
(BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+struct bdev_handle {
+ struct block_device *bdev;
+ void *holder;
+ blk_mode_t mode;
+};
+
struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops);
struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
void *holder, const struct blk_holder_ops *hops);
+struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops);
+struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, void *holder);
+void bdev_release(struct bdev_handle *handle);
/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
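The blkdev.h hunk above adds struct bdev_handle plus bdev_open_by_path()/bdev_open_by_dev() and bdev_release(), which the xfs buftarg and xfs_open_devices() changes earlier in this section switch over to. A minimal sketch of the open/release pairing (error handling trimmed; "holder" stands for whatever object the caller claims the device with — xfs passes mp->m_super above):

struct bdev_handle *handle;

handle = bdev_open_by_path("/dev/sdb1", BLK_OPEN_READ | BLK_OPEN_WRITE,
			   holder, &fs_holder_ops);
if (IS_ERR(handle))
	return PTR_ERR(handle);

/* the underlying device stays reachable via handle->bdev */
pr_info("opened %pg\n", handle->bdev);

bdev_release(handle);	/* replaces blkdev_put(bdev, holder) */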
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index f1b3151ac30b..265da00a1a8b 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -238,7 +238,7 @@ struct css_set {
* Lists running through all tasks using this cgroup group.
* mg_tasks lists tasks which belong to this cset but are in the
* process of being migrated out or in. Protected by
- * css_set_rwsem, but, during migration, once tasks are moved to
+ * css_set_lock, but, during migration, once tasks are moved to
* mg_tasks, it can be read safely while holding cgroup_mutex.
*/
struct list_head tasks;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 0abd60a7987b..eb768a866fe3 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -80,6 +80,8 @@ extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...);
+extern int arch_register_cpu(int cpu);
+extern void arch_unregister_cpu(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 69d0435c7ebb..772ab4d74d94 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -165,6 +165,7 @@ void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct dax_device *dax_dev;
blk_mode_t mode;
char name[16];
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 0d678e9a7b24..ebe78bd3d121 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -568,6 +568,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
fence->error = error;
}
+/**
+ * dma_fence_timestamp - helper to get the completion timestamp of a fence
+ * @fence: fence to get the timestamp from.
+ *
+ * After a fence is signaled the timestamp is updated with the signaling time,
+ * but setting the timestamp can race with tasks waiting for the signaling. This
+ * helper busy waits for the correct timestamp to appear.
+ */
+static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
+{
+ if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+ return ktime_get();
+
+ while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+ cpu_relax();
+
+ return fence->timestamp;
+}
+
signed long dma_fence_wait_timeout(struct dma_fence *,
bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
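dma_fence_timestamp() above exists because a waiter can observe DMA_FENCE_FLAG_SIGNALED_BIT before the signaling path has finished storing the timestamp; the helper spins on DMA_FENCE_FLAG_TIMESTAMP_BIT until the value is valid. A hedged usage sketch (the surrounding driver code is invented):

/* after the fence has signaled, fetch a timestamp that is safe to report */
if (dma_fence_is_signaled(fence)) {
	ktime_t t = dma_fence_timestamp(fence);

	pr_debug("job completed at %lld ns\n", ktime_to_ns(t));
}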
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 11fbd0ee1370..6dd993240fcc 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -224,9 +224,23 @@ struct export_operations {
atomic attribute updates
*/
#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
+#define EXPORT_OP_ASYNC_LOCK (0x40) /* fs can do async lock request */
unsigned long flags;
};
+/**
+ * exportfs_lock_op_is_async() - export op supports async lock operation
+ * @export_ops: the nfs export operations to check
+ *
+ * Returns true if the nfs export_operations structure has
+ * EXPORT_OP_ASYNC_LOCK set in its flags
+ */
+static inline bool
+exportfs_lock_op_is_async(const struct export_operations *export_ops)
+{
+ return export_ops->flags & EXPORT_OP_ASYNC_LOCK;
+}
+
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
int *max_len, struct inode *parent,
int flags);
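EXPORT_OP_ASYNC_LOCK lets an exported filesystem advertise that it can take lock requests asynchronously, and exportfs_lock_op_is_async() is the query helper for the server side. A sketch of both ends under invented foofs names:

static const struct export_operations foofs_export_ops = {
	/* fh_to_dentry and friends omitted in this sketch */
	.flags	= EXPORT_OP_ASYNC_LOCK,
};

/* server-side caller, e.g. in a lock path: */
if (exportfs_lock_op_is_async(inode->i_sb->s_export_op))
	;	/* queue the lock request instead of blocking */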
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index e066816f3519..bc4c3287a65e 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -98,20 +98,9 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un
return files_lookup_fd_raw(files, fd);
}
-static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd)
-{
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "suspicious rcu_dereference_check() usage");
- return files_lookup_fd_raw(files, fd);
-}
-
-static inline struct file *lookup_fd_rcu(unsigned int fd)
-{
- return files_lookup_fd_rcu(current->files, fd);
-}
-
-struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd);
-struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd);
+struct file *lookup_fdget_rcu(unsigned int fd);
+struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd);
+struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd);
struct task_struct;
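The fdtable.h change drops the *_fd_rcu() helpers, which handed back a borrowed file pointer only valid under rcu_read_lock(), in favour of *_fdget_rcu() variants whose naming suggests the returned file carries its own reference. A sketch of the new calling convention under that assumption (do_something_with() is a placeholder):

struct file *f;

rcu_read_lock();
f = lookup_fdget_rcu(fd);
rcu_read_unlock();

if (!f)
	return -EBADF;
/* unlike the old lookup_fd_rcu(), f remains usable outside the RCU section */
do_something_with(f);
fput(f);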
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b528f063e8ff..bd522c0ac267 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -67,7 +67,7 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
-struct fscrypt_info;
+struct fscrypt_inode_info;
struct fscrypt_operations;
struct fsverity_info;
struct fsverity_operations;
@@ -671,8 +671,8 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec64 i_atime;
- struct timespec64 i_mtime;
+ struct timespec64 __i_atime;
+ struct timespec64 __i_mtime;
struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
@@ -738,7 +738,7 @@ struct inode {
#endif
#ifdef CONFIG_FS_ENCRYPTION
- struct fscrypt_info *i_crypt_info;
+ struct fscrypt_inode_info *i_crypt_info;
#endif
#ifdef CONFIG_FS_VERITY
@@ -1042,7 +1042,10 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
-#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
+
+struct file *get_file_rcu(struct file __rcu **f);
+struct file *get_file_active(struct file **f);
+
#define file_count(x) atomic_long_read(&(x)->f_count)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -1119,7 +1122,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_NOATIME BIT(10) /* Do not update access times. */
#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
#define SB_SILENT BIT(15)
-#define SB_POSIXACL BIT(16) /* VFS does not apply the umask */
+#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
#define SB_I_VERSION BIT(23) /* Update inode I_version field */
@@ -1166,6 +1169,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
+#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
/* Possible states of 'frozen' field */
enum {
@@ -1206,7 +1210,7 @@ struct super_block {
#ifdef CONFIG_SECURITY
void *s_security;
#endif
- const struct xattr_handler **s_xattr;
+ const struct xattr_handler * const *s_xattr;
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
@@ -1221,6 +1225,7 @@ struct super_block {
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
+ struct bdev_handle *s_bdev_handle;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct hlist_node s_instances;
@@ -1511,24 +1516,81 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
struct timespec64 current_time(struct inode *inode);
struct timespec64 inode_set_ctime_current(struct inode *inode);
-/**
- * inode_get_ctime - fetch the current ctime from the inode
- * @inode: inode from which to fetch ctime
- *
- * Grab the current ctime from the inode and return it.
- */
+static inline time64_t inode_get_atime_sec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_sec;
+}
+
+static inline long inode_get_atime_nsec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_atime(const struct inode *inode)
+{
+ return inode->__i_atime;
+}
+
+static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_atime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_atime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_atime_to_ts(inode, ts);
+}
+
+static inline time64_t inode_get_mtime_sec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_sec;
+}
+
+static inline long inode_get_mtime_nsec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_mtime(const struct inode *inode)
+{
+ return inode->__i_mtime;
+}
+
+static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_mtime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_mtime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_mtime_to_ts(inode, ts);
+}
+
+static inline time64_t inode_get_ctime_sec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_sec;
+}
+
+static inline long inode_get_ctime_nsec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_nsec;
+}
+
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
return inode->__i_ctime;
}
-/**
- * inode_set_ctime_to_ts - set the ctime in the inode
- * @inode: inode in which to set the ctime
- * @ts: value to set in the ctime field
- *
- * Set the ctime in @inode to @ts
- */
static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
struct timespec64 ts)
{
@@ -1553,6 +1615,8 @@ static inline struct timespec64 inode_set_ctime(struct inode *inode,
return inode_set_ctime_to_ts(inode, ts);
}
+struct timespec64 simple_inode_init_ts(struct inode *inode);
+
/*
* Snapshotting support.
*/
@@ -2081,7 +2145,12 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+
+#ifdef CONFIG_FS_POSIX_ACL
#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
+#else
+#define IS_POSIXACL(inode) 0
+#endif
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
@@ -2403,7 +2472,7 @@ struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
- int refcnt;
+ atomic_t refcnt;
struct audit_names *aname;
const char iname[];
};
@@ -2448,24 +2517,24 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
-struct file *backing_file_open(const struct path *path, int flags,
+struct file *backing_file_open(const struct path *user_path, int flags,
const struct path *real_path,
const struct cred *cred);
-struct path *backing_file_real_path(struct file *f);
+struct path *backing_file_user_path(struct file *f);
/*
- * file_real_path - get the path corresponding to f_inode
+ * file_user_path - get the path to display for memory mapped file
*
- * When opening a backing file for a stackable filesystem (e.g.,
- * overlayfs) f_path may be on the stackable filesystem and f_inode on
- * the underlying filesystem. When the path associated with f_inode is
- * needed, this helper should be used instead of accessing f_path
- * directly.
-*/
-static inline const struct path *file_real_path(struct file *f)
+ * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
+ * stored in ->vm_file is a backing file whose f_inode is on the underlying
+ * filesystem. When the mapped file path is displayed to the user (e.g. via
+ * /proc/<pid>/maps), this helper should be used to get the path to display
+ * to the user, which is the path of the fd that user has requested to map.
+ */
+static inline const struct path *file_user_path(struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
- return backing_file_real_path(f);
+ return backing_file_user_path(f);
return &f->f_path;
}
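file_user_path() inverts the orientation of the helper it replaces: file_real_path() resolved to the backing (real) path of a stacked file, while the new helper returns the path the user actually opened, which is what /proc-style output wants. A hedged sketch of printing it (buffer handling simplified):

const struct path *path = file_user_path(file);
char *buf, *name;

buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
	return -ENOMEM;
name = d_path(path, buf, PAGE_SIZE);
if (!IS_ERR(name))
	pr_info("mapped file: %s\n", name);
free_page((unsigned long)buf);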
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 96332db693d5..c13e99cbbf81 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -136,6 +136,8 @@ extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
const char *value, size_t v_size);
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+ char *(*sep)(char **));
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
extern int vfs_get_tree(struct fs_context *fc);
extern void put_fs_context(struct fs_context *fc);
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index 010d39d0dc1c..2b1f74b24070 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -16,14 +16,14 @@ extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
static inline void fsstack_copy_attr_atime(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
}
static inline void fsstack_copy_attr_times(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
- dest->i_mtime = src->i_mtime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
+ inode_set_mtime_to_ts(dest, inode_get_mtime(src));
inode_set_ctime_to_ts(dest, inode_get_ctime(src));
}
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index c895b12737a1..12f9e455d569 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -31,7 +31,7 @@
#define FSCRYPT_CONTENTS_ALIGNMENT 16
union fscrypt_policy;
-struct fscrypt_info;
+struct fscrypt_inode_info;
struct fs_parameter;
struct seq_file;
@@ -59,26 +59,55 @@ struct fscrypt_name {
#ifdef CONFIG_FS_ENCRYPTION
-/*
- * If set, the fscrypt bounce page pool won't be allocated (unless another
- * filesystem needs it). Set this if the filesystem always uses its own bounce
- * pages for writes and therefore won't need the fscrypt bounce page pool.
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
/* Crypto operations for filesystems */
struct fscrypt_operations {
- /* Set of optional flags; see above for allowed flags */
- unsigned int flags;
+ /*
+ * If set, then fs/crypto/ will allocate a global bounce page pool the
+ * first time an encryption key is set up for a file. The bounce page
+ * pool is required by the following functions:
+ *
+ * - fscrypt_encrypt_pagecache_blocks()
+ * - fscrypt_zeroout_range() for files not using inline crypto
+ *
+ * If the filesystem doesn't use those, it doesn't need to set this.
+ */
+ unsigned int needs_bounce_pages : 1;
/*
- * If set, this is a filesystem-specific key description prefix that
- * will be accepted for "logon" keys for v1 fscrypt policies, in
- * addition to the generic prefix "fscrypt:". This functionality is
- * deprecated, so new filesystems shouldn't set this field.
+ * If set, then fs/crypto/ will allow the use of encryption settings
+ * that assume inode numbers fit in 32 bits (i.e.
+ * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other
+ * prerequisites for these settings are also met. This is only useful
+ * if the filesystem wants to support inline encryption hardware that is
+ * limited to 32-bit or 64-bit data unit numbers and where programming
+ * keyslots is very slow.
*/
- const char *key_prefix;
+ unsigned int has_32bit_inodes : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow users to select a crypto data unit
+ * size that is less than the filesystem block size. This is done via
+ * the log2_data_unit_size field of the fscrypt policy. This flag is
+ * not compatible with filesystems that encrypt variable-length blocks
+ * (i.e. blocks that aren't all equal to the filesystem's block size), for
+ * example as a result of compression. It's also not compatible with
+ * the fscrypt_encrypt_block_inplace() and
+ * fscrypt_decrypt_block_inplace() functions.
+ */
+ unsigned int supports_subblock_data_units : 1;
+
+ /*
+ * This field exists only for backwards compatibility reasons and should
+ * only be set by the filesystems that are setting it already. It
+ * contains the filesystem-specific key description prefix that is
+ * accepted for "logon" keys for v1 fscrypt policies. This
+ * functionality is deprecated in favor of the generic prefix
+ * "fscrypt:", which itself is deprecated in favor of the filesystem
+ * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. Filesystems that
+ * are newly adding fscrypt support should not set this field.
+ */
+ const char *legacy_key_prefix;
/*
* Get the fscrypt context of the given inode.
@@ -146,21 +175,6 @@ struct fscrypt_operations {
bool (*has_stable_inodes)(struct super_block *sb);
/*
- * Get the number of bits that the filesystem uses to represent inode
- * numbers and file logical block numbers.
- *
- * By default, both of these are assumed to be 64-bit. This function
- * can be implemented to declare that either or both of these numbers is
- * shorter, which may allow the use of the
- * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of
- * inline crypto hardware whose maximum DUN length is less than 64 bits
- * (e.g., eMMC v5.2 spec compliant hardware). This function only needs
- * to be implemented if support for one of these features is needed.
- */
- void (*get_ino_and_lblk_bits)(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret);
-
- /*
* Return an array of pointers to the block devices to which the
* filesystem may write encrypted file contents, NULL if the filesystem
* only has a single such block device, or an ERR_PTR() on error.
@@ -178,7 +192,8 @@ struct fscrypt_operations {
unsigned int *num_devs);
};
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
/*
* Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
@@ -390,7 +405,8 @@ static inline void fscrypt_set_ops(struct super_block *sb,
}
#else /* !CONFIG_FS_ENCRYPTION */
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
return NULL;
}
@@ -868,7 +884,7 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
*/
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
- return fscrypt_get_info(inode) != NULL;
+ return fscrypt_get_inode_info(inode) != NULL;
}
/**
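The fscrypt_operations rework above replaces the old flags word (FS_CFLG_OWN_PAGES) and the get_ino_and_lblk_bits() hook with self-describing bitfields. A sketch of what a filesystem's static ops might look like afterwards — the foofs_* callbacks are invented, the field names come from the hunk above:

static const struct fscrypt_operations foofs_cryptops = {
	.needs_bounce_pages	= 1,	/* uses fscrypt_encrypt_pagecache_blocks() */
	.has_32bit_inodes	= 1,	/* inode numbers fit in 32 bits */
	/* .legacy_key_prefix deliberately left unset for a new filesystem */
	.get_context		= foofs_get_context,
	.set_context		= foofs_set_context,
};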
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index ed48e4f1e755..bcb6609b54b3 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -96,8 +96,7 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
if (file->f_mode & FMODE_NONOTIFY)
return 0;
- /* Overlayfs internal files have fake f_path */
- path = file_real_path(file);
+ path = &file->f_path;
return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a30686e649f7..47d25a5e1933 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -60,6 +60,7 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -138,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page, zap_flags_t zap_flags);
@@ -245,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_end(vma, details);
+}
+
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -296,6 +316,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
+static inline void hugetlb_zap_begin(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+ struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+}
+
static inline struct page *hugetlb_follow_page_mask(
struct vm_area_struct *vma, unsigned long address, unsigned int flags,
unsigned int *page_mask)
@@ -441,7 +473,7 @@ static inline long hugetlb_change_protection(
return 0;
}
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page,
zap_flags_t zap_flags)
@@ -1233,6 +1265,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}
+static inline bool __vma_private_lock(struct vm_area_struct *vma)
+{
+ return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+}
+
/*
* Safe version of huge_pte_offset() to check the locks. See comments
* above huge_pte_offset().
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index bd2f6e19c357..b24fb80782c5 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -4356,6 +4356,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
}
/**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+ u8 action;
+
+ if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+ skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+ return false;
+
+ action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+ return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+ action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+ action != WLAN_PUB_ACTION_MSMT_PILOT &&
+ action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+ action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+ action != WLAN_PUB_ACTION_FTM_REQUEST &&
+ action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY;
+}
+
+/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
* privacy action frame
* @hdr: the frame
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
new file mode 100644
index 000000000000..270454a6703d
--- /dev/null
+++ b/include/linux/iov_iter.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* I/O iterator iteration building functions.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells ([email protected])
+ */
+
+#ifndef _LINUX_IOV_ITER_H
+#define _LINUX_IOV_ITER_H
+
+#include <linux/uio.h>
+#include <linux/bvec.h>
+
+typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+
+/*
+ * Handle ITER_UBUF.
+ */
+static __always_inline
+size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ void __user *base = iter->ubuf;
+ size_t progress = 0, remain;
+
+ remain = step(base + iter->iov_offset, 0, len, priv, priv2);
+ progress = len - remain;
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_IOVEC.
+ */
+static __always_inline
+size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ const struct iovec *p = iter->__iov;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->__iov;
+ iter->__iov = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_KVEC.
+ */
+static __always_inline
+size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct kvec *p = iter->kvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->kvec;
+ iter->kvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_BVEC.
+ */
+static __always_inline
+size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct bio_vec *p = iter->bvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t offset = p->bv_offset + skip, part;
+ void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE);
+
+ part = min3(len,
+ (size_t)(p->bv_len - skip),
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE));
+ remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2);
+ kunmap_local(kaddr);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ if (skip >= p->bv_len) {
+ skip = 0;
+ p++;
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->nr_segs -= p - iter->bvec;
+ iter->bvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_XARRAY.
+ */
+static __always_inline
+size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ struct folio *folio;
+ size_t progress = 0;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ XA_STATE(xas, iter->xarray, index);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ size_t remain, consumed, offset, part, flen;
+
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start + progress);
+ flen = min(folio_size(folio) - offset, len);
+
+ while (flen) {
+ void *base = kmap_local_folio(folio, offset);
+
+ part = min_t(size_t, flen,
+ PAGE_SIZE - offset_in_page(offset));
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+
+ consumed = part - remain;
+ progress += consumed;
+ len -= consumed;
+
+ if (remain || len == 0)
+ goto out;
+ flen -= consumed;
+ offset += consumed;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_DISCARD.
+ */
+static __always_inline
+size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ size_t progress = len;
+
+ iter->count -= progress;
+ return progress;
+}
+
+/**
+ * iterate_and_advance2 - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * Two step functions, @step and @ustep, must be provided: one for handling
+ * mapped kernel addresses and one that is given user addresses, which have the
+ * potential to fault since no pinning is performed.
+ *
+ * The step functions are passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * functions should return the amount of the segment they didn't process (ie. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (ie. 0 means nothing was
+ * processed and the value of @len means processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_ustep_f ustep, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+
+ if (likely(iter_is_ubuf(iter)))
+ return iterate_ubuf(iter, len, priv, priv2, ustep);
+ if (likely(iter_is_iovec(iter)))
+ return iterate_iovec(iter, len, priv, priv2, ustep);
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
+/**
+ * iterate_and_advance - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * As iterate_and_advance2(), but priv2 is always NULL.
+ */
+static __always_inline
+size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
+ iov_ustep_f ustep, iov_step_f step)
+{
+ return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
+}
+
+#endif /* _LINUX_IOV_ITER_H */
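A minimal usage sketch for the new iteration helpers: the step callbacks below consume each segment in full, so the wrapper advances @iter by up to @len bytes and returns how far it got. The wrapper and callback names are invented for illustration; only iterate_and_advance() and the iov_ustep_f/iov_step_f signatures come from the header added above.

/* Illustrative only: advance @iter by up to @len bytes using the new helpers. */
#include <linux/iov_iter.h>

static size_t skip_ustep(void __user *base, size_t progress, size_t len,
                         void *priv, void *priv2)
{
        return 0;       /* 0 bytes remaining => whole segment "processed" */
}

static size_t skip_step(void *base, size_t progress, size_t len,
                        void *priv, void *priv2)
{
        return 0;
}

static size_t skip_bytes(struct iov_iter *iter, size_t len)
{
        return iterate_and_advance(iter, len, NULL, skip_ustep, skip_step);
}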
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
index f174ff1b59ee..8f972eaca2ed 100644
--- a/include/linux/iversion.h
+++ b/include/linux/iversion.h
@@ -256,7 +256,7 @@ inode_peek_iversion(const struct inode *inode)
* For filesystems without any sort of change attribute, the best we can
* do is fake one up from the ctime:
*/
-static inline u64 time_to_chattr(struct timespec64 *t)
+static inline u64 time_to_chattr(const struct timespec64 *t)
{
u64 chattr = t->tv_sec;
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 842623d708c2..71fa9a40fb5a 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -466,10 +466,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#endif /* LINUX_KASAN_H */
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 85bda2d02d65..2c982ff7475a 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -74,6 +74,33 @@ static inline void init_llist_head(struct llist_head *list)
}
/**
+ * init_llist_node - initialize lock-less list node
+ * @node: the node to be initialised
+ *
+ * In cases where there is a need to test if a node is on
+ * a list or not, this initialises the node to clearly
+ * not be on any list.
+ */
+static inline void init_llist_node(struct llist_node *node)
+{
+ node->next = node;
+}
+
+/**
+ * llist_on_list - test if a lock-less list node is on a list
+ * @node: the node to test
+ *
+ * When a node is on a list the ->next pointer will be NULL or
+ * some other node. It can never point to itself. We use that
+ * in init_llist_node() to record that a node is not on any list,
+ * and here to test whether it is on any list.
+ */
+static inline bool llist_on_list(const struct llist_node *node)
+{
+ return node->next != node;
+}
+
+/**
* llist_entry - get the struct of this entry
* @ptr: the &struct llist_node pointer.
* @type: the type of the struct this is embedded in.
@@ -249,6 +276,25 @@ static inline struct llist_node *__llist_del_all(struct llist_head *head)
extern struct llist_node *llist_del_first(struct llist_head *head);
+/**
+ * llist_del_first_init - delete first entry from lock-less list and mark it as being off-list
+ * @head: the head of lock-less list to delete from.
+ *
+ * This behaves the same as llist_del_first() except that init_llist_node() is called
+ * on the returned node so that llist_on_list() will report false for the node.
+ */
+static inline struct llist_node *llist_del_first_init(struct llist_head *head)
+{
+ struct llist_node *n = llist_del_first(head);
+
+ if (n)
+ init_llist_node(n);
+ return n;
+}
+
+extern bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this);
+
struct llist_node *llist_reverse_order(struct llist_node *head);
#endif /* LLIST_H */
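The init_llist_node()/llist_on_list()/llist_del_first_init() trio supports the "is this item already queued?" pattern. A rough sketch with invented names, assuming a single context queues a given item at a time:

/* Illustrative only: track whether a work item is currently queued. */
#include <linux/llist.h>
#include <linux/container_of.h>

struct my_item {
        struct llist_node node;
        int payload;
};

static void my_item_init(struct my_item *it)
{
        init_llist_node(&it->node);     /* mark the node as off-list */
}

/* Assumes only one context queues a given item at a time. */
static void my_item_queue(struct llist_head *q, struct my_item *it)
{
        if (!llist_on_list(&it->node))
                llist_add(&it->node, q);
}

static struct my_item *my_item_pop(struct llist_head *q)
{
        /* The returned node is re-marked off-list so it may be queued again. */
        struct llist_node *n = llist_del_first_init(q);

        return n ? container_of(n, struct my_item, node) : NULL;
}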
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 0f016d69c996..9f565416d186 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -282,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *,
struct nlm_lock *, struct nlm_cookie *);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
-void nlmsvc_retry_blocked(void);
+void nlmsvc_retry_blocked(struct svc_rqst *rqstp);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
diff --git a/include/linux/lwq.h b/include/linux/lwq.h
new file mode 100644
index 000000000000..d081d5cf8e33
--- /dev/null
+++ b/include/linux/lwq.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LWQ_H
+#define LWQ_H
+/*
+ * Light-weight single-linked queue built from llist
+ *
+ * Entries can be enqueued from any context with no locking.
+ * Entries can be dequeued from process context with integrated locking.
+ *
+ * This is particularly suitable when work items are queued in
+ * BH or IRQ context, and where work items are handled one at a time
+ * by dedicated threads.
+ */
+#include <linux/container_of.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+
+struct lwq_node {
+ struct llist_node node;
+};
+
+struct lwq {
+ spinlock_t lock;
+ struct llist_node *ready; /* entries to be dequeued */
+ struct llist_head new; /* entries being enqueued */
+};
+
+/**
+ * lwq_init - initialise a lwq
+ * @q: the lwq object
+ */
+static inline void lwq_init(struct lwq *q)
+{
+ spin_lock_init(&q->lock);
+ q->ready = NULL;
+ init_llist_head(&q->new);
+}
+
+/**
+ * lwq_empty - test if lwq contains any entry
+ * @q: the lwq object
+ *
+ * This empty test contains an acquire barrier so that if a wakeup
+ * is sent when lwq_enqueue() returns true, it is safe to go to sleep after
+ * a test on lwq_empty().
+ */
+static inline bool lwq_empty(struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_enqueue() */
+ return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
+}
+
+struct llist_node *__lwq_dequeue(struct lwq *q);
+/**
+ * lwq_dequeue - dequeue first (oldest) entry from lwq
+ * @q: the queue to dequeue from
+ * @type: the type of object to return
+ * @member: the member in the returned object which is an lwq_node.
+ *
+ * Remove a single object from the lwq and return it. This will take
+ * a spinlock and so must always be called in the same context, typically
+ * process context.
+ */
+#define lwq_dequeue(q, type, member) \
+ ({ struct llist_node *_n = __lwq_dequeue(q); \
+ _n ? container_of(_n, type, member.node) : NULL; })
+
+struct llist_node *lwq_dequeue_all(struct lwq *q);
+
+/**
+ * lwq_for_each_safe - iterate over detached queue allowing deletion
+ * @_n: iterator variable
+ * @_t1: temporary struct llist_node **
+ * @_t2: temporary struct llist_node *
+ * @_l: address of llist_node pointer from lwq_dequeue_all()
+ * @_member: member in _n where lwq_node is found.
+ *
+ * Iterate over members in a dequeued list. If the iterator variable
+ * is set to NULL, the iterator removes that entry from the queue.
+ */
+#define lwq_for_each_safe(_n, _t1, _t2, _l, _member) \
+ for (_t1 = (_l); \
+ *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
+ _t2 = ((*_t1)->next), \
+ true) \
+ : false; \
+ (_n) ? (_t1 = &(_n)->_member.node.next, 0) \
+ : ((*(_t1) = (_t2)), 0))
+
+/**
+ * lwq_enqueue - add a new item to the end of the queue
+ * @n: the lwq_node embedded in the item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may have previously been empty.
+ */
+static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add(&n->node, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+
+/**
+ * lwq_enqueue_batch - add a list of new items to the end of the queue
+ * @n: the lwq_node embedded in the first item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may have previously been empty.
+ */
+static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
+{
+ struct llist_node *e = n;
+
+ /* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+#endif /* LWQ_H */
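A rough producer/consumer sketch of the intended lwq usage (names invented; the worker wake-up mechanism is elided): items embed an lwq_node, are enqueued from any context, and are drained by a dedicated thread in process context.

/* Illustrative only: a job queue built on the new lwq primitive. */
#include <linux/lwq.h>

struct my_job {
        struct lwq_node link;
        int arg;
};

static struct lwq my_jobs;      /* lwq_init(&my_jobs) is called once at setup */

/* Safe from BH/IRQ context; no locking needed on the enqueue side. */
static void my_job_submit(struct my_job *job)
{
        if (lwq_enqueue(&job->link, &my_jobs)) {
                /* queue may have been empty: wake the worker thread here */
        }
}

/* Runs in the dedicated worker thread (process context). */
static void my_jobs_drain(void)
{
        struct my_job *job;

        while ((job = lwq_dequeue(&my_jobs, struct my_job, link)) != NULL) {
                /* handle job->arg ... */
        }
        /* before sleeping, the worker would re-check lwq_empty(&my_jobs) */
}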
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 1e5893138afe..0b971b24a804 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
struct mcb_device {
struct device dev;
struct mcb_bus *bus;
- bool is_added;
struct mcb_driver *driver;
u16 id;
int inst;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 4f40b40306d0..ac3dd2876197 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -92,8 +92,8 @@ extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
extern struct vfsmount *clone_private_mount(const struct path *path);
-extern int __mnt_want_write(struct vfsmount *);
-extern void __mnt_drop_write(struct vfsmount *);
+int mnt_get_write_access(struct vfsmount *mnt);
+void mnt_put_write_access(struct vfsmount *mnt);
extern struct vfsmount *fc_mount(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
index 0b6b59f7cfbd..56047a4e54c9 100644
--- a/include/linux/mtd/jedec.h
+++ b/include/linux/mtd/jedec.h
@@ -21,6 +21,9 @@ struct jedec_ecc_info {
/* JEDEC features */
#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+/* JEDEC Optional Commands */
+#define JEDEC_OPT_CMD_READ_CACHE BIT(1)
+
struct nand_jedec_params {
/* rev info and features block */
/* 'J' 'E' 'S' 'D' */
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index a7376f9beddf..55ab2e4d62f9 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -55,6 +55,7 @@
#define ONFI_SUBFEATURE_PARAM_LEN 4
/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_READ_CACHE BIT(1)
#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
struct nand_onfi_params {
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 90a141ba2a5a..c29ace15a053 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -225,6 +225,7 @@ struct gpio_desc;
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
* @supports_set_get_features: The NAND chip supports setting/getting features
+ * @supports_read_cache: The NAND chip supports read cache operations
* @set_feature_list: Bitmap of features that can be set
* @get_feature_list: Bitmap of features that can be get
* @onfi: ONFI specific parameters
@@ -233,6 +234,7 @@ struct nand_parameters {
/* Generic parameters */
const char *model;
bool supports_set_get_features;
+ bool supports_read_cache;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 1463cbda4888..3100371b5e32 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -92,6 +92,30 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
+/**
+ * mode_strip_umask - handle vfs umask stripping
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode to be created in @dir
+ *
+ * In most filesystems, umask stripping depends on whether or not the
+ * filesystem supports POSIX ACLs. If the filesystem doesn't support it umask
+ * stripping is done directly in here. If the filesystem does support POSIX
+ * ACLs umask stripping is deferred until the filesystem calls
+ * posix_acl_create().
+ *
+ * Some filesystems (like NFSv4) also want to avoid umask stripping by the
+ * VFS, but don't support POSIX ACLs. Those filesystems can set SB_I_NOUMASK
+ * to get this effect without declaring that they support POSIX ACLs.
+ *
+ * Returns: mode
+ */
+static inline umode_t __must_check mode_strip_umask(const struct inode *dir, umode_t mode)
+{
+ if (!IS_POSIXACL(dir) && !(dir->i_sb->s_iflags & SB_I_NOUMASK))
+ mode &= ~current_umask();
+ return mode;
+}
+
extern int __must_check nd_jump_link(const struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
@@ -112,7 +136,7 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
static inline bool
retry_estale(const long error, const unsigned int flags)
{
- return error == -ESTALE && !(flags & LOOKUP_REVAL);
+ return unlikely(error == -ESTALE && !(flags & LOOKUP_REVAL));
}
#endif /* _LINUX_NAMEI_H */
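In practice the helper is applied while preparing the mode of a new inode; a hedged sketch (the surrounding function is hypothetical, not from any in-tree caller) looks like:

/* Illustrative only: apply the VFS umask policy before creating an inode. */
#include <linux/namei.h>

static umode_t my_prepare_mode(const struct inode *dir, umode_t mode)
{
        /* Strips the umask here unless POSIX ACLs or SB_I_NOUMASK defer it. */
        return mode_strip_umask(dir, mode);
}

/* e.g. my_prepare_mode(dir, S_IFREG | 0666) typically yields 0644 under a 022 umask. */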
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 730003c4f4af..c11c4db34639 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -150,7 +150,7 @@ enum nfs_opnum4 {
OP_WRITE_SAME = 70,
OP_CLONE = 71,
- /* xattr support (RFC8726) */
+ /* xattr support (RFC8276) */
OP_GETXATTR = 72,
OP_SETXATTR = 73,
OP_LISTXATTRS = 74,
@@ -389,79 +389,203 @@ enum lock_type4 {
NFS4_WRITEW_LT = 4
};
+/*
+ * Symbol names and values are from RFC 7531 Section 2.
+ * "XDR Description of NFSv4.0"
+ */
+enum {
+ FATTR4_SUPPORTED_ATTRS = 0,
+ FATTR4_TYPE = 1,
+ FATTR4_FH_EXPIRE_TYPE = 2,
+ FATTR4_CHANGE = 3,
+ FATTR4_SIZE = 4,
+ FATTR4_LINK_SUPPORT = 5,
+ FATTR4_SYMLINK_SUPPORT = 6,
+ FATTR4_NAMED_ATTR = 7,
+ FATTR4_FSID = 8,
+ FATTR4_UNIQUE_HANDLES = 9,
+ FATTR4_LEASE_TIME = 10,
+ FATTR4_RDATTR_ERROR = 11,
+ FATTR4_ACL = 12,
+ FATTR4_ACLSUPPORT = 13,
+ FATTR4_ARCHIVE = 14,
+ FATTR4_CANSETTIME = 15,
+ FATTR4_CASE_INSENSITIVE = 16,
+ FATTR4_CASE_PRESERVING = 17,
+ FATTR4_CHOWN_RESTRICTED = 18,
+ FATTR4_FILEHANDLE = 19,
+ FATTR4_FILEID = 20,
+ FATTR4_FILES_AVAIL = 21,
+ FATTR4_FILES_FREE = 22,
+ FATTR4_FILES_TOTAL = 23,
+ FATTR4_FS_LOCATIONS = 24,
+ FATTR4_HIDDEN = 25,
+ FATTR4_HOMOGENEOUS = 26,
+ FATTR4_MAXFILESIZE = 27,
+ FATTR4_MAXLINK = 28,
+ FATTR4_MAXNAME = 29,
+ FATTR4_MAXREAD = 30,
+ FATTR4_MAXWRITE = 31,
+ FATTR4_MIMETYPE = 32,
+ FATTR4_MODE = 33,
+ FATTR4_NO_TRUNC = 34,
+ FATTR4_NUMLINKS = 35,
+ FATTR4_OWNER = 36,
+ FATTR4_OWNER_GROUP = 37,
+ FATTR4_QUOTA_AVAIL_HARD = 38,
+ FATTR4_QUOTA_AVAIL_SOFT = 39,
+ FATTR4_QUOTA_USED = 40,
+ FATTR4_RAWDEV = 41,
+ FATTR4_SPACE_AVAIL = 42,
+ FATTR4_SPACE_FREE = 43,
+ FATTR4_SPACE_TOTAL = 44,
+ FATTR4_SPACE_USED = 45,
+ FATTR4_SYSTEM = 46,
+ FATTR4_TIME_ACCESS = 47,
+ FATTR4_TIME_ACCESS_SET = 48,
+ FATTR4_TIME_BACKUP = 49,
+ FATTR4_TIME_CREATE = 50,
+ FATTR4_TIME_DELTA = 51,
+ FATTR4_TIME_METADATA = 52,
+ FATTR4_TIME_MODIFY = 53,
+ FATTR4_TIME_MODIFY_SET = 54,
+ FATTR4_MOUNTED_ON_FILEID = 55,
+};
+
+/*
+ * Symbol names and values are from RFC 5662 Section 2.
+ * "XDR Description of NFSv4.1"
+ */
+enum {
+ FATTR4_DIR_NOTIF_DELAY = 56,
+ FATTR4_DIRENT_NOTIF_DELAY = 57,
+ FATTR4_DACL = 58,
+ FATTR4_SACL = 59,
+ FATTR4_CHANGE_POLICY = 60,
+ FATTR4_FS_STATUS = 61,
+ FATTR4_FS_LAYOUT_TYPES = 62,
+ FATTR4_LAYOUT_HINT = 63,
+ FATTR4_LAYOUT_TYPES = 64,
+ FATTR4_LAYOUT_BLKSIZE = 65,
+ FATTR4_LAYOUT_ALIGNMENT = 66,
+ FATTR4_FS_LOCATIONS_INFO = 67,
+ FATTR4_MDSTHRESHOLD = 68,
+ FATTR4_RETENTION_GET = 69,
+ FATTR4_RETENTION_SET = 70,
+ FATTR4_RETENTEVT_GET = 71,
+ FATTR4_RETENTEVT_SET = 72,
+ FATTR4_RETENTION_HOLD = 73,
+ FATTR4_MODE_SET_MASKED = 74,
+ FATTR4_SUPPATTR_EXCLCREAT = 75,
+ FATTR4_FS_CHARSET_CAP = 76,
+};
+
+/*
+ * Symbol names and values are from RFC 7863 Section 2.
+ * "XDR Description of NFSv4.2"
+ */
+enum {
+ FATTR4_CLONE_BLKSIZE = 77,
+ FATTR4_SPACE_FREED = 78,
+ FATTR4_CHANGE_ATTR_TYPE = 79,
+ FATTR4_SEC_LABEL = 80,
+};
+
+/*
+ * Symbol names and values are from RFC 8275 Section 5.
+ * "The mode_umask Attribute"
+ */
+enum {
+ FATTR4_MODE_UMASK = 81,
+};
+
+/*
+ * Symbol names and values are from RFC 8276 Section 8.6.
+ * "Numeric Values Assigned to Protocol Extensions"
+ */
+enum {
+ FATTR4_XATTR_SUPPORT = 82,
+};
+
+/*
+ * The following internal definitions enable processing the above
+ * attribute bits within 32-bit word boundaries.
+ */
/* Mandatory Attributes */
-#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
-#define FATTR4_WORD0_TYPE (1UL << 1)
-#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2)
-#define FATTR4_WORD0_CHANGE (1UL << 3)
-#define FATTR4_WORD0_SIZE (1UL << 4)
-#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5)
-#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6)
-#define FATTR4_WORD0_NAMED_ATTR (1UL << 7)
-#define FATTR4_WORD0_FSID (1UL << 8)
-#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
-#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
-#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
+#define FATTR4_WORD0_SUPPORTED_ATTRS BIT(FATTR4_SUPPORTED_ATTRS)
+#define FATTR4_WORD0_TYPE BIT(FATTR4_TYPE)
+#define FATTR4_WORD0_FH_EXPIRE_TYPE BIT(FATTR4_FH_EXPIRE_TYPE)
+#define FATTR4_WORD0_CHANGE BIT(FATTR4_CHANGE)
+#define FATTR4_WORD0_SIZE BIT(FATTR4_SIZE)
+#define FATTR4_WORD0_LINK_SUPPORT BIT(FATTR4_LINK_SUPPORT)
+#define FATTR4_WORD0_SYMLINK_SUPPORT BIT(FATTR4_SYMLINK_SUPPORT)
+#define FATTR4_WORD0_NAMED_ATTR BIT(FATTR4_NAMED_ATTR)
+#define FATTR4_WORD0_FSID BIT(FATTR4_FSID)
+#define FATTR4_WORD0_UNIQUE_HANDLES BIT(FATTR4_UNIQUE_HANDLES)
+#define FATTR4_WORD0_LEASE_TIME BIT(FATTR4_LEASE_TIME)
+#define FATTR4_WORD0_RDATTR_ERROR BIT(FATTR4_RDATTR_ERROR)
/* Mandatory in NFSv4.1 */
-#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
+#define FATTR4_WORD2_SUPPATTR_EXCLCREAT BIT(FATTR4_SUPPATTR_EXCLCREAT - 64)
/* Recommended Attributes */
-#define FATTR4_WORD0_ACL (1UL << 12)
-#define FATTR4_WORD0_ACLSUPPORT (1UL << 13)
-#define FATTR4_WORD0_ARCHIVE (1UL << 14)
-#define FATTR4_WORD0_CANSETTIME (1UL << 15)
-#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16)
-#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17)
-#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18)
-#define FATTR4_WORD0_FILEHANDLE (1UL << 19)
-#define FATTR4_WORD0_FILEID (1UL << 20)
-#define FATTR4_WORD0_FILES_AVAIL (1UL << 21)
-#define FATTR4_WORD0_FILES_FREE (1UL << 22)
-#define FATTR4_WORD0_FILES_TOTAL (1UL << 23)
-#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24)
-#define FATTR4_WORD0_HIDDEN (1UL << 25)
-#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26)
-#define FATTR4_WORD0_MAXFILESIZE (1UL << 27)
-#define FATTR4_WORD0_MAXLINK (1UL << 28)
-#define FATTR4_WORD0_MAXNAME (1UL << 29)
-#define FATTR4_WORD0_MAXREAD (1UL << 30)
-#define FATTR4_WORD0_MAXWRITE (1UL << 31)
-#define FATTR4_WORD1_MIMETYPE (1UL << 0)
-#define FATTR4_WORD1_MODE (1UL << 1)
-#define FATTR4_WORD1_NO_TRUNC (1UL << 2)
-#define FATTR4_WORD1_NUMLINKS (1UL << 3)
-#define FATTR4_WORD1_OWNER (1UL << 4)
-#define FATTR4_WORD1_OWNER_GROUP (1UL << 5)
-#define FATTR4_WORD1_QUOTA_HARD (1UL << 6)
-#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7)
-#define FATTR4_WORD1_QUOTA_USED (1UL << 8)
-#define FATTR4_WORD1_RAWDEV (1UL << 9)
-#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10)
-#define FATTR4_WORD1_SPACE_FREE (1UL << 11)
-#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12)
-#define FATTR4_WORD1_SPACE_USED (1UL << 13)
-#define FATTR4_WORD1_SYSTEM (1UL << 14)
-#define FATTR4_WORD1_TIME_ACCESS (1UL << 15)
-#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16)
-#define FATTR4_WORD1_TIME_BACKUP (1UL << 17)
-#define FATTR4_WORD1_TIME_CREATE (1UL << 18)
-#define FATTR4_WORD1_TIME_DELTA (1UL << 19)
-#define FATTR4_WORD1_TIME_METADATA (1UL << 20)
-#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
-#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
-#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
-#define FATTR4_WORD1_DACL (1UL << 26)
-#define FATTR4_WORD1_SACL (1UL << 27)
-#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
-#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
-#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
-#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
-#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
-#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
-#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
-#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
-#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18)
+#define FATTR4_WORD0_ACL BIT(FATTR4_ACL)
+#define FATTR4_WORD0_ACLSUPPORT BIT(FATTR4_ACLSUPPORT)
+#define FATTR4_WORD0_ARCHIVE BIT(FATTR4_ARCHIVE)
+#define FATTR4_WORD0_CANSETTIME BIT(FATTR4_CANSETTIME)
+#define FATTR4_WORD0_CASE_INSENSITIVE BIT(FATTR4_CASE_INSENSITIVE)
+#define FATTR4_WORD0_CASE_PRESERVING BIT(FATTR4_CASE_PRESERVING)
+#define FATTR4_WORD0_CHOWN_RESTRICTED BIT(FATTR4_CHOWN_RESTRICTED)
+#define FATTR4_WORD0_FILEHANDLE BIT(FATTR4_FILEHANDLE)
+#define FATTR4_WORD0_FILEID BIT(FATTR4_FILEID)
+#define FATTR4_WORD0_FILES_AVAIL BIT(FATTR4_FILES_AVAIL)
+#define FATTR4_WORD0_FILES_FREE BIT(FATTR4_FILES_FREE)
+#define FATTR4_WORD0_FILES_TOTAL BIT(FATTR4_FILES_TOTAL)
+#define FATTR4_WORD0_FS_LOCATIONS BIT(FATTR4_FS_LOCATIONS)
+#define FATTR4_WORD0_HIDDEN BIT(FATTR4_HIDDEN)
+#define FATTR4_WORD0_HOMOGENEOUS BIT(FATTR4_HOMOGENEOUS)
+#define FATTR4_WORD0_MAXFILESIZE BIT(FATTR4_MAXFILESIZE)
+#define FATTR4_WORD0_MAXLINK BIT(FATTR4_MAXLINK)
+#define FATTR4_WORD0_MAXNAME BIT(FATTR4_MAXNAME)
+#define FATTR4_WORD0_MAXREAD BIT(FATTR4_MAXREAD)
+#define FATTR4_WORD0_MAXWRITE BIT(FATTR4_MAXWRITE)
+
+#define FATTR4_WORD1_MIMETYPE BIT(FATTR4_MIMETYPE - 32)
+#define FATTR4_WORD1_MODE BIT(FATTR4_MODE - 32)
+#define FATTR4_WORD1_NO_TRUNC BIT(FATTR4_NO_TRUNC - 32)
+#define FATTR4_WORD1_NUMLINKS BIT(FATTR4_NUMLINKS - 32)
+#define FATTR4_WORD1_OWNER BIT(FATTR4_OWNER - 32)
+#define FATTR4_WORD1_OWNER_GROUP BIT(FATTR4_OWNER_GROUP - 32)
+#define FATTR4_WORD1_QUOTA_HARD BIT(FATTR4_QUOTA_AVAIL_HARD - 32)
+#define FATTR4_WORD1_QUOTA_SOFT BIT(FATTR4_QUOTA_AVAIL_SOFT - 32)
+#define FATTR4_WORD1_QUOTA_USED BIT(FATTR4_QUOTA_USED - 32)
+#define FATTR4_WORD1_RAWDEV BIT(FATTR4_RAWDEV - 32)
+#define FATTR4_WORD1_SPACE_AVAIL BIT(FATTR4_SPACE_AVAIL - 32)
+#define FATTR4_WORD1_SPACE_FREE BIT(FATTR4_SPACE_FREE - 32)
+#define FATTR4_WORD1_SPACE_TOTAL BIT(FATTR4_SPACE_TOTAL - 32)
+#define FATTR4_WORD1_SPACE_USED BIT(FATTR4_SPACE_USED - 32)
+#define FATTR4_WORD1_SYSTEM BIT(FATTR4_SYSTEM - 32)
+#define FATTR4_WORD1_TIME_ACCESS BIT(FATTR4_TIME_ACCESS - 32)
+#define FATTR4_WORD1_TIME_ACCESS_SET BIT(FATTR4_TIME_ACCESS_SET - 32)
+#define FATTR4_WORD1_TIME_BACKUP BIT(FATTR4_TIME_BACKUP - 32)
+#define FATTR4_WORD1_TIME_CREATE BIT(FATTR4_TIME_CREATE - 32)
+#define FATTR4_WORD1_TIME_DELTA BIT(FATTR4_TIME_DELTA - 32)
+#define FATTR4_WORD1_TIME_METADATA BIT(FATTR4_TIME_METADATA - 32)
+#define FATTR4_WORD1_TIME_MODIFY BIT(FATTR4_TIME_MODIFY - 32)
+#define FATTR4_WORD1_TIME_MODIFY_SET BIT(FATTR4_TIME_MODIFY_SET - 32)
+#define FATTR4_WORD1_MOUNTED_ON_FILEID BIT(FATTR4_MOUNTED_ON_FILEID - 32)
+#define FATTR4_WORD1_DACL BIT(FATTR4_DACL - 32)
+#define FATTR4_WORD1_SACL BIT(FATTR4_SACL - 32)
+#define FATTR4_WORD1_FS_LAYOUT_TYPES BIT(FATTR4_FS_LAYOUT_TYPES - 32)
+
+#define FATTR4_WORD2_LAYOUT_TYPES BIT(FATTR4_LAYOUT_TYPES - 64)
+#define FATTR4_WORD2_LAYOUT_BLKSIZE BIT(FATTR4_LAYOUT_BLKSIZE - 64)
+#define FATTR4_WORD2_MDSTHRESHOLD BIT(FATTR4_MDSTHRESHOLD - 64)
+#define FATTR4_WORD2_CLONE_BLKSIZE BIT(FATTR4_CLONE_BLKSIZE - 64)
+#define FATTR4_WORD2_CHANGE_ATTR_TYPE BIT(FATTR4_CHANGE_ATTR_TYPE - 64)
+#define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64)
+#define FATTR4_WORD2_MODE_UMASK BIT(FATTR4_MODE_UMASK - 64)
+#define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
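The FATTR4_WORD{0,1,2}_* definitions simply place each attribute number from the enums above into consecutive 32-bit words. A short sketch (not part of the patch) of the same mapping done generically:

/* Illustrative only: set attribute @attr in a three-word NFSv4 bitmap. */
#include <linux/types.h>
#include <linux/bits.h>

static void fattr4_bitmap_set(u32 bitmap[3], unsigned int attr)
{
        bitmap[attr / 32] |= BIT(attr % 32);
}

/*
 * For example, fattr4_bitmap_set(bm, FATTR4_XATTR_SUPPORT) sets bit 18 of
 * word 2, the same bit as FATTR4_WORD2_XATTR_SUPPORT above
 * (BIT(FATTR4_XATTR_SUPPORT - 64) with FATTR4_XATTR_SUPPORT == 82).
 */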
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e85cd1c0eaf3..7b5406e3288d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -704,6 +704,7 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
+ unsigned int group_generation;
struct perf_event *group_leader;
/*
* event->pmu will always point to pmu in which this event belongs.
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 608a9eb86bff..8ff23bf5a819 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -62,9 +62,6 @@ struct pipe_inode_info {
unsigned int tail;
unsigned int max_usage;
unsigned int ring_size;
-#ifdef CONFIG_WATCH_QUEUE
- bool note_loss;
-#endif
unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
@@ -72,6 +69,9 @@ struct pipe_inode_info {
unsigned int r_counter;
unsigned int w_counter;
bool poll_usage;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
@@ -125,6 +125,22 @@ struct pipe_buf_operations {
};
/**
+ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
+ * i.e. it was created with O_NOTIFICATION_PIPE
+ * @pipe: The pipe to check
+ *
+ * Return: true if pipe is a watch queue, false otherwise.
+ */
+static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
+{
+#ifdef CONFIG_WATCH_QUEUE
+ return pipe->watch_queue != NULL;
+#else
+ return false;
+#endif
+}
+
+/**
* pipe_empty - Return true if the pipe is empty
* @head: The pipe ring head pointer
* @tail: The pipe ring tail pointer
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 80cb00db42a4..79594aeb160d 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -154,7 +154,9 @@ struct packet_stacked_data
struct pktcdvd_device
{
- struct block_device *bdev; /* dev attached */
+ struct bdev_handle *bdev_handle; /* dev attached */
+ /* handle acquired for bdev during pkt_open_dev() */
+ struct bdev_handle *open_bdev_handle;
dev_t pkt_dev; /* our dev */
struct packet_settings settings;
struct packet_stats stats;
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
index eceda1d1407a..730f77381d55 100644
--- a/include/linux/pseudo_fs.h
+++ b/include/linux/pseudo_fs.h
@@ -5,7 +5,7 @@
struct pseudo_fs_context {
const struct super_operations *ops;
- const struct xattr_handler **xattr;
+ const struct xattr_handler * const *xattr;
const struct dentry_operations *dops;
unsigned long magic;
};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4174c4b82d13..27998f73183e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1309,7 +1309,7 @@ struct sk_buff_fclones {
*
* Returns true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
- * so we also check that this didnt happen.
+ * so we also check that this didn't happen.
*/
static inline bool skb_fclone_busy(const struct sock *sk,
const struct sk_buff *skb)
@@ -2016,7 +2016,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
* Copy shared buffers into a new sk_buff. We effectively do COW on
* packets to handle cases where we have a local reader and forward
* and a couple of other messy ones. The normal one is tcpdumping
- * a packet thats being forwarded.
+ * a packet that's being forwarded.
*/
/**
@@ -3679,6 +3679,9 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
return __skb_put_padto(skb, len, true);
}
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
+ __must_check;
+
static inline int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index dbf5b21feafe..b10f987509cc 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -17,6 +17,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
+#include <linux/lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
@@ -33,10 +34,10 @@
*/
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
- spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_sockets; /* pending sockets */
- unsigned int sp_nrthreads; /* # of threads in pool */
+ struct lwq sp_xprts; /* pending transports */
+ atomic_t sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
+ struct llist_head sp_idle_threads; /* idle server threads */
/* statistics on pool operation */
struct percpu_counter sp_messages_arrived;
@@ -49,7 +50,8 @@ struct svc_pool {
/* bits for sp_flags */
enum {
SP_TASK_PENDING, /* still work to do even if no xprt is queued */
- SP_CONGESTED, /* all threads are busy, none idle */
+ SP_NEED_VICTIM, /* One thread needs to agree to exit */
+ SP_VICTIM_REMAINS, /* One thread needs to actually exit */
};
@@ -88,12 +90,9 @@ struct svc_serv {
int (*sv_threadfn)(void *data);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head sv_cb_list; /* queue for callback requests
+ struct lwq sv_cb_list; /* queue for callback requests
* that arrive over the same
* connection */
- spinlock_t sv_cb_lock; /* protects the svc_cb_list */
- wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
- * entries in the svc_cb_list */
bool sv_bc_enabled; /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
@@ -186,6 +185,7 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
*/
struct svc_rqst {
struct list_head rq_all; /* all threads list */
+ struct llist_node rq_idle; /* On the idle list */
struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
@@ -251,6 +251,7 @@ struct svc_rqst {
* net namespace
*/
void ** rq_lease_breaker; /* The v4 client breaking a lease */
+ unsigned int rq_status_counter; /* RPC processing counter */
};
/* bits for rq_flags */
@@ -261,8 +262,7 @@ enum {
RQ_DROPME, /* drop current reply */
RQ_SPLICE_OK, /* turned off in gss privacy to prevent
* encrypting page cache pages */
- RQ_VICTIM, /* about to be shut down */
- RQ_BUSY, /* request is busy */
+ RQ_VICTIM, /* Have agreed to shut down */
RQ_DATA, /* request has data */
};
@@ -301,6 +301,28 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr;
}
+/**
+ * svc_thread_should_stop - check if this thread should stop
+ * @rqstp: the thread that might need to stop
+ *
+ * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS
+ * are set. The first thread which sees SP_NEED_VICTIM clears it, becoming
+ * the victim using this function. It should then promptly call
+ * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS
+ * so the task waiting for a thread to exit can wake and continue.
+ *
+ * Return values:
+ * %true: caller should invoke svc_exit_thread()
+ * %false: caller should do nothing
+ */
+static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
+{
+ if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags))
+ set_bit(RQ_VICTIM, &rqstp->rq_flags);
+
+ return test_bit(RQ_VICTIM, &rqstp->rq_flags);
+}
+
struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt;
@@ -413,8 +435,7 @@ struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_process(struct svc_rqst *rqstp);
-int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
- struct svc_rqst *);
+void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int svc_register(const struct svc_serv *, struct net *, const int,
const unsigned short, const unsigned short);
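Given svc_thread_should_stop(), a service thread's main loop is expected to take roughly the following shape. This is a sketch with an invented thread function; the real nfsd/lockd loops also wait for and process transports.

/* Illustrative only: skeleton of an svc thread using the new stop protocol. */
#include <linux/sunrpc/svc.h>

static int my_svc_thread(void *data)
{
        struct svc_rqst *rqstp = data;

        while (!svc_thread_should_stop(rqstp)) {
                /* wait for work on rqstp->rq_pool and process one request */
        }

        /*
         * This thread agreed to be the victim (RQ_VICTIM is set);
         * svc_exit_thread() clears SP_VICTIM_REMAINS and wakes the
         * task waiting for the thread count to drop.
         */
        svc_exit_thread(rqstp);
        return 0;
}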
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index fa55d12dc765..8e20cd60e2e7 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -54,7 +54,7 @@ struct svc_xprt {
const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
struct list_head xpt_list;
- struct list_head xpt_ready;
+ struct lwq_node xpt_ready;
unsigned long xpt_flags;
struct svc_serv *xpt_server; /* service for transport */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 4ecc89301eb7..f85d3a0daca2 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -57,6 +57,7 @@ struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
+#include <linux/lwq.h>
/*
* This describes a complete RPC request
@@ -121,7 +122,7 @@ struct rpc_rqst {
int rq_ntrans;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head rq_bc_list; /* Callback service list */
+ struct lwq_node rq_bc_list; /* Callback service list */
unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANEL */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 493487ed7c38..f6dd6575b905 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -298,6 +298,7 @@ struct swap_info_struct {
unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
+ struct bdev_handle *bdev_handle;/* open handle of the bdev */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 42bce38a8e87..b6214cbf2a43 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -21,12 +21,12 @@ struct kvec {
enum iter_type {
/* iter types */
+ ITER_UBUF,
ITER_IOVEC,
- ITER_KVEC,
ITER_BVEC,
+ ITER_KVEC,
ITER_XARRAY,
ITER_DISCARD,
- ITER_UBUF,
};
#define ITER_SOURCE 1 // == WRITE
@@ -43,11 +43,7 @@ struct iov_iter {
bool copy_mc;
bool nofault;
bool data_source;
- bool user_backed;
- union {
- size_t iov_offset;
- int last_offset;
- };
+ size_t iov_offset;
/*
* Hack alert: overlay ubuf_iovec with iovec + count, so
* that the members resolve correctly regardless of the type
@@ -143,7 +139,7 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
static inline bool user_backed_iter(const struct iov_iter *i)
{
- return i->user_backed;
+ return iter_is_ubuf(i) || iter_is_iovec(i);
}
/*
@@ -342,27 +338,6 @@ iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
return npages;
}
-struct csum_state {
- __wsum csum;
- size_t off;
-};
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-
-static __always_inline __must_check
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
- __wsum *csum, struct iov_iter *i)
-{
- size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
- if (likely(copied == bytes))
- return true;
- iov_iter_revert(i, copied);
- return false;
-}
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i);
-
struct iovec *iovec_from_user(const struct iovec __user *uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_iov, bool compat);
@@ -383,7 +358,6 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
*i = (struct iov_iter) {
.iter_type = ITER_UBUF,
.copy_mc = false,
- .user_backed = true,
.data_source = direction,
.ubuf = buf,
.count = count,
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 7b4dd69555e4..27cc1d464321 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -3,8 +3,8 @@
#define _LINUX_VIRTIO_NET_H
#include <linux/if_vlan.h>
+#include <linux/udp.h>
#include <uapi/linux/tcp.h>
-#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>
static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
@@ -151,9 +151,22 @@ retry:
unsigned int nh_off = p_off;
struct skb_shared_info *shinfo = skb_shinfo(skb);
- /* UFO may not include transport header in gso_size. */
- if (gso_type & SKB_GSO_UDP)
+ switch (gso_type & ~SKB_GSO_TCP_ECN) {
+ case SKB_GSO_UDP:
+ /* UFO may not include transport header in gso_size. */
nh_off -= thlen;
+ break;
+ case SKB_GSO_UDP_L4:
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return -EINVAL;
+ if (skb->csum_offset != offsetof(struct udphdr, check))
+ return -EINVAL;
+ if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
+ return -EINVAL;
+ if (gso_type != SKB_GSO_UDP_L4)
+ return -EINVAL;
+ break;
+ }
/* Kernel has a special handling for GSO_BY_FRAGS. */
if (gso_size == GSO_BY_FRAGS)
diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h
index 45cd42f55d49..429c7b6afead 100644
--- a/include/linux/watch_queue.h
+++ b/include/linux/watch_queue.h
@@ -32,7 +32,7 @@ struct watch_filter {
DECLARE_BITMAP(type_filter, WATCH_TYPE__NR);
};
u32 nr_filters; /* Number of filters */
- struct watch_type_filter filters[];
+ struct watch_type_filter filters[] __counted_by(nr_filters);
};
struct watch_queue {
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 2d5fcda1bcd0..082f89531b88 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -56,7 +56,7 @@ struct hci_mon_new_index {
__u8 type;
__u8 bus;
bdaddr_t bdaddr;
- char name[8];
+ char name[8] __nonstring;
} __packed;
#define HCI_MON_NEW_INDEX_SIZE 16
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 75a6f4863c83..ebf9bc54036a 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -258,6 +258,7 @@ struct macsec_context {
struct macsec_secy *secy;
struct macsec_rx_sc *rx_sc;
struct {
+ bool update_pn;
unsigned char assoc_num;
u8 key[MACSEC_MAX_KEY_LEN];
union {
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index d466e1a3b0b1..fe1507c1db82 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -53,6 +53,7 @@ struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
+ bool (*gc)(const struct flow_offload *flow);
int (*setup)(struct nf_flowtable *ft,
struct net_device *dev,
enum flow_block_command cmd);
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index bd7c3be4af5d..423b52eca908 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -50,6 +50,7 @@ struct netns_xfrm {
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
+ unsigned int idx_generator;
struct hlist_head policy_inexact[XFRM_POLICY_MAX];
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
diff --git a/include/net/sock.h b/include/net/sock.h
index b770261fbdaf..92f7ea62a915 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -336,7 +336,7 @@ struct sk_filter;
* @sk_cgrp_data: cgroup data for this cgroup
* @sk_memcg: this socket's memory cgroup association
* @sk_write_pending: a write to stream socket waits to start
- * @sk_wait_pending: number of threads blocked on this socket
+ * @sk_disconnects: number of disconnect operations performed on this sock
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed
* @sk_write_space: callback to indicate there is bf sending space available
@@ -429,7 +429,7 @@ struct sock {
unsigned int sk_napi_id;
#endif
int sk_rcvbuf;
- int sk_wait_pending;
+ int sk_disconnects;
struct sk_filter __rcu *sk_filter;
union {
@@ -1189,8 +1189,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
}
#define sk_wait_event(__sk, __timeo, __condition, __wait) \
- ({ int __rc; \
- __sk->sk_wait_pending++; \
+ ({ int __rc, __dis = __sk->sk_disconnects; \
release_sock(__sk); \
__rc = __condition; \
if (!__rc) { \
@@ -1200,8 +1199,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
} \
sched_annotate_sleep(); \
lock_sock(__sk); \
- __sk->sk_wait_pending--; \
- __rc = __condition; \
+ __rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
__rc; \
})
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7b1a720691ae..4b03ca7cb8a5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -141,6 +141,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
+
+#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
+
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
* used as a fallback RTO for the
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index fd41fdac0a8e..65e49fae8da7 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -162,8 +162,24 @@ struct scsi_device {
* core. */
unsigned int eh_timeout; /* Error handling timeout */
- bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */
- bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system suspend/resume (suspend to RAM and
+ * hibernation) operations.
+ */
+ bool manage_system_start_stop;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for runtime device suspend and resume operations.
+ */
+ bool manage_runtime_start_stop;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system shutdown (power off) operations.
+ */
+ bool manage_shutdown;
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index d2faec9a323e..433543eb82b9 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -469,6 +469,7 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
index 5eaa1fa99171..833143d0992e 100644
--- a/include/trace/events/neigh.h
+++ b/include/trace/events/neigh.h
@@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create,
),
TP_fast_assign(
- struct in6_addr *pin6;
__be32 *p32;
__entry->family = tbl->family;
@@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create,
__entry->entries = atomic_read(&tbl->gc_entries);
__entry->created = n != NULL;
__entry->gc_exempt = exempt_from_gc;
- pin6 = (struct in6_addr *)__entry->primary_key6;
p32 = (__be32 *)__entry->primary_key4;
if (tbl->family == AF_INET)
@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create,
#if IS_ENABLED(CONFIG_IPV6)
if (tbl->family == AF_INET6) {
+ struct in6_addr *pin6;
+
pin6 = (struct in6_addr *)__entry->primary_key6;
*pin6 = *(struct in6_addr *)pkey;
}
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index f8069ef2ee0f..718df1d9b834 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1667,7 +1667,7 @@ TRACE_EVENT(svcrdma_encode_wseg,
__entry->offset = offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1703,7 +1703,7 @@ TRACE_EVENT(svcrdma_decode_rseg,
__entry->offset = segment->rs_offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->position, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1740,7 +1740,7 @@ TRACE_EVENT(svcrdma_decode_wseg,
__entry->offset = segment->rs_offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1959,7 +1959,7 @@ TRACE_EVENT(svcrdma_send_pullup,
__entry->msglen = msglen;
),
- TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
+ TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)",
__entry->cq_id, __entry->completion_id,
__entry->hdrlen, __entry->msglen,
__entry->hdrlen + __entry->msglen)
@@ -2014,7 +2014,7 @@ TRACE_EVENT(svcrdma_post_send,
wr->ex.invalidate_rkey : 0;
),
- TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
+ TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->num_sge, __entry->inv_rkey
)
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 6beb38c1dcb5..337c90787fb1 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1677,7 +1677,6 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
svc_rqst_flag(DROPME) \
svc_rqst_flag(SPLICE_OK) \
svc_rqst_flag(VICTIM) \
- svc_rqst_flag(BUSY) \
svc_rqst_flag_end(DATA)
#undef svc_rqst_flag
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index eaf9f248619f..0bade1592f34 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -45,8 +45,8 @@ extern "C" {
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
-/**
- * @NOUVEAU_GETPARAM_EXEC_PUSH_MAX
+/*
+ * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
*
* Query the maximum amount of IBs that can be pushed through a single
* &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index fd1fb0d5389d..7a8f4c290187 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -71,7 +71,8 @@ struct fscrypt_policy_v2 {
__u8 contents_encryption_mode;
__u8 filenames_encryption_mode;
__u8 flags;
- __u8 __reserved[4];
+ __u8 log2_data_unit_size;
+ __u8 __reserved[3];
__u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
};
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index 2f61298a7b77..3dcdb9e33cba 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -33,6 +33,6 @@ enum gtp_attrs {
GTPA_PAD,
__GTPA_MAX,
};
-#define GTPA_MAX (__GTPA_MAX + 1)
+#define GTPA_MAX (__GTPA_MAX - 1)
#endif /* _UAPI_LINUX_GTP_H_ */
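
The GTPA_MAX fix restores the usual netlink enum convention: __GTPA_MAX is a sentinel one past the last real attribute, so the highest valid attribute is __GTPA_MAX - 1; the old "+ 1" definition overstated the bound that policy and parse tables are sized from. A standalone sketch of the convention with invented DEMO_* names:

#include <stdio.h>

enum {
	DEMO_UNSPEC,
	DEMO_LINK,
	DEMO_VERSION,
	DEMO_PAD,
	__DEMO_MAX,			/* sentinel: one past the last attribute */
};
#define DEMO_MAX (__DEMO_MAX - 1)	/* largest valid attribute index */

int main(void)
{
	/* attribute tables are conventionally sized MAX + 1 so that the
	 * highest index still fits; a "+ 1" sentinel definition would
	 * leave two unused slots and a misleading upper bound */
	int seen[DEMO_MAX + 1] = { 0 };

	seen[DEMO_PAD]++;
	printf("DEMO_MAX=%d, table has %zu slots\n",
	       DEMO_MAX, sizeof(seen) / sizeof(seen[0]));
	return 0;
}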
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 4d0ad22f83b5..9efc42382fdb 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -18,11 +18,7 @@ struct sockaddr_ll {
unsigned short sll_hatype;
unsigned char sll_pkttype;
unsigned char sll_halen;
- union {
- unsigned char sll_addr[8];
- /* Actual length is in sll_halen. */
- __DECLARE_FLEX_ARRAY(unsigned char, sll_addr_flex);
- };
+ unsigned char sll_addr[8];
};
/* Packet types */
diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h
new file mode 100644
index 000000000000..c8ae72466ee6
--- /dev/null
+++ b/include/uapi/linux/nfsd_netlink.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NFSD_H
+#define _UAPI_LINUX_NFSD_H
+
+#define NFSD_FAMILY_NAME "nfsd"
+#define NFSD_FAMILY_VERSION 1
+
+enum {
+ NFSD_A_RPC_STATUS_XID = 1,
+ NFSD_A_RPC_STATUS_FLAGS,
+ NFSD_A_RPC_STATUS_PROG,
+ NFSD_A_RPC_STATUS_VERSION,
+ NFSD_A_RPC_STATUS_PROC,
+ NFSD_A_RPC_STATUS_SERVICE_TIME,
+ NFSD_A_RPC_STATUS_PAD,
+ NFSD_A_RPC_STATUS_SADDR4,
+ NFSD_A_RPC_STATUS_DADDR4,
+ NFSD_A_RPC_STATUS_SADDR6,
+ NFSD_A_RPC_STATUS_DADDR6,
+ NFSD_A_RPC_STATUS_SPORT,
+ NFSD_A_RPC_STATUS_DPORT,
+ NFSD_A_RPC_STATUS_COMPOUND_OPS,
+
+ __NFSD_A_RPC_STATUS_MAX,
+ NFSD_A_RPC_STATUS_MAX = (__NFSD_A_RPC_STATUS_MAX - 1)
+};
+
+enum {
+ NFSD_CMD_RPC_STATUS_GET = 1,
+
+ __NFSD_CMD_MAX,
+ NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1)
+};
+
+#endif /* _UAPI_LINUX_NFSD_H */
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index 77252cb46361..a722dcbf5073 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -231,7 +231,7 @@ struct mmp_path {
/* layers */
int overlay_num;
- struct mmp_overlay overlays[];
+ struct mmp_overlay overlays[] __counted_by(overlay_num);
};
extern struct mmp_path *mmp_get_path(const char *name);
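
__counted_by(overlay_num) records which member holds the element count of the flexible array, so bounds-checking instrumentation (FORTIFY_SOURCE, UBSAN) can police accesses to overlays[]. A standalone sketch of the annotation; the fallback macro exists only so the example builds on compilers without the attribute, and the struct is illustrative rather than the mmp code:

#include <stdio.h>
#include <stdlib.h>

/* Use the counted_by attribute where available, otherwise compile it out. */
#if defined(__has_attribute)
# if __has_attribute(counted_by)
#  define counted_by(m) __attribute__((counted_by(m)))
# endif
#endif
#ifndef counted_by
# define counted_by(m)
#endif

struct path_demo {
	int overlay_num;			/* number of valid elements below */
	int overlays[] counted_by(overlay_num);
};

int main(void)
{
	int n = 4;
	struct path_demo *p = malloc(sizeof(*p) + n * sizeof(p->overlays[0]));

	if (!p)
		return 1;
	p->overlay_num = n;			/* set the count before touching the array */
	for (int i = 0; i < n; i++)
		p->overlays[i] = i * i;
	printf("overlays[%d] = %d\n", n - 1, p->overlays[n - 1]);
	free(p);
	return 0;
}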
diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
index 8d2a3bfc8dac..47d96e75e8ef 100644
--- a/include/video/uvesafb.h
+++ b/include/video/uvesafb.h
@@ -109,8 +109,6 @@ struct uvesafb_ktask {
u32 ack;
};
-static int uvesafb_exec(struct uvesafb_ktask *tsk);
-
#define UVESAFB_EXACT_RES 1
#define UVESAFB_EXACT_DEPTH 2
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 5dfd30b13f48..5fdef94f0864 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -244,7 +244,7 @@ retry:
for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1)
printk(" %s", p);
printk("\n");
- panic("VFS: Unable to mount root fs on %s", b);
+ panic("VFS: Unable to mount root fs on \"%s\" or %s", pretty_name, b);
out:
put_page(page);
}
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index c53678875416..f04a43044d91 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -53,7 +53,6 @@ static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
struct io_ring_ctx *ctx = f->private_data;
- struct io_sq_data *sq = NULL;
struct io_overflow_cqe *ocqe;
struct io_rings *r = ctx->rings;
unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
@@ -64,6 +63,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
unsigned int cq_shift = 0;
unsigned int sq_shift = 0;
unsigned int sq_entries, cq_entries;
+ int sq_pid = -1, sq_cpu = -1;
bool has_lock;
unsigned int i;
@@ -143,13 +143,19 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
has_lock = mutex_trylock(&ctx->uring_lock);
if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
- sq = ctx->sq_data;
- if (!sq->thread)
- sq = NULL;
+ struct io_sq_data *sq = ctx->sq_data;
+
+ if (mutex_trylock(&sq->lock)) {
+ if (sq->thread) {
+ sq_pid = task_pid_nr(sq->thread);
+ sq_cpu = task_cpu(sq->thread);
+ }
+ mutex_unlock(&sq->lock);
+ }
}
- seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
- seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
+ seq_printf(m, "SqThread:\t%d\n", sq_pid);
+ seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
struct file *f = io_file_from_index(&ctx->file_table, i);
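
The fdinfo change copies the SQPOLL thread's pid and cpu into locals while sq->lock is held (taken with a trylock so the diagnostics path never blocks), and prints -1 when the lock is contended or no thread exists, instead of dereferencing sq->thread after the fact. A small pthread sketch of that snapshot-under-trylock shape; all names here are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int worker_tid = 1234;		/* stands in for the SQPOLL thread's pid */

static void show_status(void)
{
	int tid = -1;			/* reported when we cannot look safely */

	/* diagnostics must not block: take the lock only if it is free,
	 * copy what we need, drop it, then format the output */
	if (pthread_mutex_trylock(&lock) == 0) {
		tid = worker_tid;
		pthread_mutex_unlock(&lock);
	}
	printf("SqThread:\t%d\n", tid);
}

int main(void)
{
	show_status();
	return 0;
}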
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d839a80a6751..8d1bc6cdfe71 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2674,7 +2674,11 @@ static void io_pages_free(struct page ***pages, int npages)
if (!pages)
return;
+
page_array = *pages;
+ if (!page_array)
+ return;
+
for (i = 0; i < npages; i++)
unpin_user_page(page_array[i]);
kvfree(page_array);
@@ -2758,7 +2762,9 @@ static void io_rings_free(struct io_ring_ctx *ctx)
ctx->sq_sqes = NULL;
} else {
io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
+ ctx->n_ring_pages = 0;
io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
+ ctx->n_sqe_pages = 0;
}
}
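
io_pages_free() is now safe to call more than once: it returns early when the page array is already gone, and the caller zeroes n_ring_pages/n_sqe_pages after freeing so a later cleanup path cannot unpin the same pages again. A standalone sketch of the same idempotent-teardown shape with invented names:

#include <stdio.h>
#include <stdlib.h>

struct ring {
	void **pages;
	int npages;
};

static void ring_pages_free(struct ring *r)
{
	if (!r->pages)			/* already torn down: nothing to do */
		return;

	for (int i = 0; i < r->npages; i++)
		free(r->pages[i]);
	free(r->pages);
	r->pages = NULL;		/* make a repeat call a no-op ... */
	r->npages = 0;			/* ... and keep the count consistent */
}

int main(void)
{
	struct ring r = { .pages = calloc(2, sizeof(void *)), .npages = 2 };

	if (!r.pages)
		return 1;
	r.pages[0] = malloc(16);
	r.pages[1] = malloc(16);
	ring_pages_free(&r);
	ring_pages_free(&r);		/* second call is harmless */
	printf("npages=%d\n", r.npages);
	return 0;
}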
diff --git a/io_uring/openclose.c b/io_uring/openclose.c
index e3fae26e025d..fb73adb89067 100644
--- a/io_uring/openclose.c
+++ b/io_uring/openclose.c
@@ -220,7 +220,6 @@ int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
struct files_struct *files = current->files;
struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
- struct fdtable *fdt;
struct file *file;
int ret = -EBADF;
@@ -230,13 +229,7 @@ int io_close(struct io_kiocb *req, unsigned int issue_flags)
}
spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- if (close->fd >= fdt->max_fds) {
- spin_unlock(&files->file_lock);
- goto err;
- }
- file = rcu_dereference_protected(fdt->fd[close->fd],
- lockdep_is_held(&files->file_lock));
+ file = files_lookup_fd_locked(files, close->fd);
if (!file || io_is_uring_fops(file)) {
spin_unlock(&files->file_lock);
goto err;
diff --git a/io_uring/rw.c b/io_uring/rw.c
index c8c822fa7980..8f68d5ad4564 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -339,7 +339,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
unsigned final_ret = io_fixup_rw_res(req, ret);
- if (req->flags & REQ_F_CUR_POS)
+ if (ret >= 0 && req->flags & REQ_F_CUR_POS)
req->file->f_pos = rw->kiocb.ki_pos;
if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
if (!__io_complete_rw_common(req, ret)) {
@@ -913,15 +913,6 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
kiocb_start_write(kiocb);
kiocb->ki_flags |= IOCB_WRITE;
- /*
- * For non-polled IO, set IOCB_DIO_CALLER_COMP, stating that our handler
- * groks deferring the completion to task context. This isn't
- * necessary and useful for polled IO as that can always complete
- * directly.
- */
- if (!(kiocb->ki_flags & IOCB_HIPRI))
- kiocb->ki_flags |= IOCB_DIO_CALLER_COMP;
-
if (likely(req->file->f_op->write_iter))
ret2 = call_write_iter(req->file, kiocb, &s->iter);
else if (req->file->f_op->write)
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index ba8215ed663a..5eea4dc0509e 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -302,7 +302,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
if (S_ISREG(mode)) {
struct mqueue_inode_info *info;
@@ -596,7 +596,7 @@ static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
put_ipc_ns(ipc_ns);
dir->i_size += DIRENT_SIZE;
- dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
+ simple_inode_init_ts(dir);
d_instantiate(dentry, inode);
dget(dentry);
@@ -618,7 +618,7 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
- dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
+ simple_inode_init_ts(dir);
dir->i_size -= DIRENT_SIZE;
drop_nlink(inode);
dput(dentry);
@@ -657,7 +657,7 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
if (ret <= 0)
return ret;
- inode->i_atime = inode_set_ctime_current(inode);
+ inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
return ret;
}
@@ -1163,7 +1163,7 @@ static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
goto out_unlock;
__do_notify(info);
}
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
out_unlock:
spin_unlock(&info->lock);
@@ -1257,7 +1257,7 @@ static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
msg_ptr = msg_get(info);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
/* There is now free space in queue. */
pipelined_receive(&wake_q, info);
@@ -1395,7 +1395,8 @@ retry:
if (notification == NULL) {
if (info->notify_owner == task_tgid(current)) {
remove_notification(info);
- inode->i_atime = inode_set_ctime_current(inode);
+ inode_set_atime_to_ts(inode,
+ inode_set_ctime_current(inode));
}
} else if (info->notify_owner != NULL) {
ret = -EBUSY;
@@ -1421,7 +1422,7 @@ retry:
info->notify_owner = get_pid(task_tgid(current));
info->notify_user_ns = get_user_ns(current_user_ns());
- inode->i_atime = inode_set_ctime_current(inode);
+ inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
}
spin_unlock(&info->lock);
out_fput:
@@ -1484,7 +1485,7 @@ static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
f.file->f_flags &= ~O_NONBLOCK;
spin_unlock(&f.file->f_lock);
- inode->i_atime = inode_set_ctime_current(inode);
+ inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
}
spin_unlock(&info->lock);
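
The mqueue hunks replace open-coded "atime = mtime = ctime = now" assignments with helpers; judging by what each call replaces, simple_inode_init_ts() stamps all three timestamps from a single current-time read, while the atime-only sites use inode_set_atime_to_ts() around inode_set_ctime_current(). A standalone sketch of the all-three helper's likely shape (a model, not the fs/libfs.c body):

#include <stdio.h>
#include <time.h>

struct demo_inode {
	struct timespec atime, mtime, ctime;
};

/* One helper stamps all three timestamps from one clock read, so callers
 * stop open-coding "atime = mtime = ctime = now". */
static struct timespec demo_inode_init_ts(struct demo_inode *inode)
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);
	inode->ctime = now;
	inode->atime = now;
	inode->mtime = now;
	return now;
}

int main(void)
{
	struct demo_inode inode;
	struct timespec ts = demo_inode_init_ts(&inode);

	printf("stamped at %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}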
diff --git a/kernel/acct.c b/kernel/acct.c
index 1a9f929fe629..986c8214dabf 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -246,7 +246,7 @@ static int acct_on(struct filename *pathname)
filp_close(file, NULL);
return PTR_ERR(internal);
}
- err = __mnt_want_write(internal);
+ err = mnt_get_write_access(internal);
if (err) {
mntput(internal);
kfree(acct);
@@ -271,7 +271,7 @@ static int acct_on(struct filename *pathname)
old = xchg(&ns->bacct, &acct->pin);
mutex_unlock(&acct->lock);
pin_kill(old);
- __mnt_drop_write(mnt);
+ mnt_put_write_access(mnt);
mntput(mnt);
return 0;
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 21d2fa815e78..6f0d6fb6523f 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2212,7 +2212,7 @@ __audit_reusename(const __user char *uptr)
if (!n->name)
continue;
if (n->name->uptr == uptr) {
- n->name->refcnt++;
+ atomic_inc(&n->name->refcnt);
return n->name;
}
}
@@ -2241,7 +2241,7 @@ void __audit_getname(struct filename *name)
n->name = name;
n->name_len = AUDIT_NAME_FULL;
name->aname = n;
- name->refcnt++;
+ atomic_inc(&name->refcnt);
}
static inline int audit_copy_fcaps(struct audit_names *name,
@@ -2373,7 +2373,7 @@ out_alloc:
return;
if (name) {
n->name = name;
- name->refcnt++;
+ atomic_inc(&name->refcnt);
}
out:
@@ -2500,7 +2500,7 @@ void __audit_inode_child(struct inode *parent,
if (found_parent) {
found_child->name = found_parent->name;
found_child->name_len = AUDIT_NAME_FULL;
- found_child->name->refcnt++;
+ atomic_inc(&found_child->name->refcnt);
}
}
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 99d0625b6c82..1aafb2ff2e95 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -118,8 +118,7 @@ static struct inode *bpf_get_inode(struct super_block *sb,
return ERR_PTR(-ENOSPC);
inode->i_ino = get_next_ino();
- inode->i_atime = inode_set_ctime_current(inode);
- inode->i_mtime = inode->i_atime;
+ simple_inode_init_ts(inode);
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
@@ -147,7 +146,7 @@ static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
d_instantiate(dentry, inode);
dget(dentry);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
diff --git a/kernel/bpf/mprog.c b/kernel/bpf/mprog.c
index 007d98c799e2..1394168062e8 100644
--- a/kernel/bpf/mprog.c
+++ b/kernel/bpf/mprog.c
@@ -401,14 +401,16 @@ int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
struct bpf_mprog_cp *cp;
struct bpf_prog *prog;
const u32 flags = 0;
+ u32 id, count = 0;
+ u64 revision = 1;
int i, ret = 0;
- u32 id, count;
- u64 revision;
if (attr->query.query_flags || attr->query.attach_flags)
return -EINVAL;
- revision = bpf_mprog_revision(entry);
- count = bpf_mprog_total(entry);
+ if (entry) {
+ revision = bpf_mprog_revision(entry);
+ count = bpf_mprog_total(entry);
+ }
if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
return -EFAULT;
if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index eb01c31ed591..d77b2f8b9364 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3796,7 +3796,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
{
enum bpf_prog_type ptype;
struct bpf_prog *prog;
- u32 mask;
int ret;
if (CHECK_ATTR(BPF_PROG_ATTACH))
@@ -3805,10 +3804,16 @@ static int bpf_prog_attach(const union bpf_attr *attr)
ptype = attach_type_to_prog_type(attr->attach_type);
if (ptype == BPF_PROG_TYPE_UNSPEC)
return -EINVAL;
- mask = bpf_mprog_supported(ptype) ?
- BPF_F_ATTACH_MASK_MPROG : BPF_F_ATTACH_MASK_BASE;
- if (attr->attach_flags & ~mask)
- return -EINVAL;
+ if (bpf_mprog_supported(ptype)) {
+ if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
+ return -EINVAL;
+ } else {
+ if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
+ return -EINVAL;
+ if (attr->relative_fd ||
+ attr->expected_revision)
+ return -EINVAL;
+ }
prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
if (IS_ERR(prog))
@@ -3878,6 +3883,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
if (IS_ERR(prog))
return PTR_ERR(prog);
}
+ } else if (attr->attach_flags ||
+ attr->relative_fd ||
+ attr->expected_revision) {
+ return -EINVAL;
}
switch (ptype) {
@@ -3913,7 +3922,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
return ret;
}
-#define BPF_PROG_QUERY_LAST_FIELD query.link_attach_flags
+#define BPF_PROG_QUERY_LAST_FIELD query.revision
static int bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index c4ab9d6cdbe9..82ad23b1d257 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -308,11 +308,9 @@ again:
rcu_read_lock();
for (;; curr_fd++) {
struct file *f;
- f = task_lookup_next_fd_rcu(curr_task, &curr_fd);
+ f = task_lookup_next_fdget_rcu(curr_task, &curr_fd);
if (!f)
break;
- if (!get_file_rcu(f))
- continue;
/* set info->fd */
info->fd = curr_fd;
diff --git a/kernel/bpf/tcx.c b/kernel/bpf/tcx.c
index 13f0b5dc8262..1338a13a8b64 100644
--- a/kernel/bpf/tcx.c
+++ b/kernel/bpf/tcx.c
@@ -123,7 +123,6 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
bool ingress = attr->query.attach_type == BPF_TCX_INGRESS;
struct net *net = current->nsproxy->net_ns;
- struct bpf_mprog_entry *entry;
struct net_device *dev;
int ret;
@@ -133,12 +132,7 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
ret = -ENODEV;
goto out;
}
- entry = tcx_entry_fetch(dev, ingress);
- if (!entry) {
- ret = -ENOENT;
- goto out;
- }
- ret = bpf_mprog_query(attr, uattr, entry);
+ ret = bpf_mprog_query(attr, uattr, tcx_entry_fetch(dev, ingress));
out:
rtnl_unlock();
return ret;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c0c7d137066a..873ade146f3d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14479,7 +14479,7 @@ static int check_return_code(struct bpf_verifier_env *env)
struct tnum enforce_attach_type_range = tnum_unknown;
const struct bpf_prog *prog = env->prog;
struct bpf_reg_state *reg;
- struct tnum range = tnum_range(0, 1);
+ struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
int err;
struct bpf_func_state *frame = env->cur_state->frame[0];
@@ -14527,8 +14527,8 @@ static int check_return_code(struct bpf_verifier_env *env)
return -EINVAL;
}
- if (!tnum_in(tnum_const(0), reg->var_off)) {
- verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
+ if (!tnum_in(const_0, reg->var_off)) {
+ verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
return -EINVAL;
}
return 0;
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index c487ffef6652..76db6c67e39a 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -360,10 +360,9 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
}
css_task_iter_end(&it);
length = n;
- /* now sort & (if procs) strip out duplicates */
+ /* now sort & strip out duplicates (tgids or recycled thread PIDs) */
sort(array, length, sizeof(pid_t), cmppid, NULL);
- if (type == CGROUP_FILE_PROCS)
- length = pidlist_uniq(array, length);
+ length = pidlist_uniq(array, length);
l = cgroup_pidlist_find_create(cgrp, type);
if (!l) {
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 01637677736f..dff067bd56b1 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -678,6 +678,11 @@ static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
size_t pool_size;
size_t tlb_size;
+ if (nslabs > SLABS_PER_PAGE << MAX_ORDER) {
+ nslabs = SLABS_PER_PAGE << MAX_ORDER;
+ nareas = limit_nareas(nareas, nslabs);
+ }
+
pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
pool = kzalloc(pool_size, gfp);
if (!pool)
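
swiotlb_alloc_pool() now clamps an oversized slab request to what a MAX_ORDER page allocation can actually back, and re-derives the area count from the clamped value so the two stay consistent, rather than letting the allocation fail outright. A standalone sketch of that clamp-then-rederive step; the constants and the limit_nareas() body here are stand-ins, not the kernel's:

#include <stdio.h>

#define SLABS_PER_PAGE	64U		/* illustrative values only */
#define MAX_ORDER	10

static unsigned int limit_nareas(unsigned int nareas, unsigned long nslabs)
{
	/* never configure more areas than there are slabs to divide up */
	return nareas > nslabs ? (unsigned int)nslabs : nareas;
}

int main(void)
{
	unsigned long nslabs = 1UL << 20;	/* caller asked for too much */
	unsigned int nareas = 4096;

	if (nslabs > (unsigned long)SLABS_PER_PAGE << MAX_ORDER) {
		nslabs = (unsigned long)SLABS_PER_PAGE << MAX_ORDER;
		nareas = limit_nareas(nareas, nslabs);	/* keep both consistent */
	}
	printf("nslabs=%lu nareas=%u\n", nslabs, nareas);
	return 0;
}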
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4c72a41f11af..a2f2a9525d72 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1954,6 +1954,7 @@ static void perf_group_attach(struct perf_event *event)
list_add_tail(&event->sibling_list, &group_leader->sibling_list);
group_leader->nr_siblings++;
+ group_leader->group_generation++;
perf_event__header_size(group_leader);
@@ -2144,6 +2145,7 @@ static void perf_group_detach(struct perf_event *event)
if (leader != event) {
list_del_init(&event->sibling_list);
event->group_leader->nr_siblings--;
+ event->group_leader->group_generation++;
goto out;
}
@@ -5440,7 +5442,7 @@ static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
- struct perf_event *sub;
+ struct perf_event *sub, *parent;
unsigned long flags;
int n = 1; /* skip @nr */
int ret;
@@ -5450,6 +5452,33 @@ static int __perf_read_group_add(struct perf_event *leader,
return ret;
raw_spin_lock_irqsave(&ctx->lock, flags);
+ /*
+ * Verify the grouping between the parent and child (inherited)
+ * events is still intact.
+ *
+ * Specifically:
+ * - leader->ctx->lock pins leader->sibling_list
+ * - parent->child_mutex pins parent->child_list
+ * - parent->ctx->mutex pins parent->sibling_list
+ *
+ * Because parent->ctx != leader->ctx (and child_list nests inside
+ * ctx->mutex), group destruction is not atomic between children, also
+ * see perf_event_release_kernel(). Additionally, parent can grow the
+ * group.
+ *
+ * Therefore it is possible to have parent and child groups in a
+ * different configuration and summing over such a beast makes no sense
+ * whatsoever.
+ *
+ * Reject this.
+ */
+ parent = leader->parent;
+ if (parent &&
+ (parent->group_generation != leader->group_generation ||
+ parent->nr_siblings != leader->nr_siblings)) {
+ ret = -ECHILD;
+ goto unlock;
+ }
/*
* Since we co-schedule groups, {enabled,running} times of siblings
@@ -5483,8 +5512,9 @@ static int __perf_read_group_add(struct perf_event *leader,
values[n++] = atomic64_read(&sub->lost_samples);
}
+unlock:
raw_spin_unlock_irqrestore(&ctx->lock, flags);
- return 0;
+ return ret;
}
static int perf_read_group(struct perf_event *event,
@@ -5503,10 +5533,6 @@ static int perf_read_group(struct perf_event *event,
values[0] = 1 + leader->nr_siblings;
- /*
- * By locking the child_mutex of the leader we effectively
- * lock the child list of all siblings.. XXX explain how.
- */
mutex_lock(&leader->child_mutex);
ret = __perf_read_group_add(leader, read_format, values);
@@ -13346,6 +13372,8 @@ static int inherit_group(struct perf_event *parent_event,
!perf_get_aux_event(child_ctr, leader))
return -EINVAL;
}
+ if (leader)
+ leader->group_generation = parent_event->group_generation;
return 0;
}
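
The perf fix introduces a per-group generation counter: attach and detach both bump it, inherited children copy the parent's value, and the group-read path refuses to sum a child whose generation or sibling count no longer matches the parent, returning -ECHILD. A standalone sketch of the generation-check idea with invented names; note the count alone is not enough, since a detach followed by an attach restores the count but not the membership:

#include <stdio.h>

struct group {
	int nr_siblings;
	unsigned long generation;
};

static void group_attach(struct group *g) { g->nr_siblings++; g->generation++; }
static void group_detach(struct group *g) { g->nr_siblings--; g->generation++; }

/* Refuse to aggregate when the child group has drifted from its parent. */
static int read_group(const struct group *parent, const struct group *child)
{
	if (parent &&
	    (parent->generation != child->generation ||
	     parent->nr_siblings != child->nr_siblings))
		return -1;			/* stands in for -ECHILD */
	return 0;
}

int main(void)
{
	struct group parent = { .nr_siblings = 2, .generation = 2 };
	struct group child = parent;		/* inherit count and generation */

	printf("in sync:  %d\n", read_group(&parent, &child));
	group_attach(&parent);			/* parent grows, child does not */
	printf("drifted:  %d\n", read_group(&parent, &child));
	group_detach(&parent);			/* count matches again ... */
	printf("recycled: %d\n", read_group(&parent, &child));	/* ... generation does not */
	return 0;
}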
diff --git a/kernel/fork.c b/kernel/fork.c
index 3b6d20dfb9a8..640123767726 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1492,9 +1492,7 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
struct file *exe_file;
rcu_read_lock();
- exe_file = rcu_dereference(mm->exe_file);
- if (exe_file && !get_file_rcu(exe_file))
- exe_file = NULL;
+ exe_file = get_file_rcu(&mm->exe_file);
rcu_read_unlock();
return exe_file;
}
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index 5353edfad8e1..b0639f21041f 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -64,8 +64,10 @@ get_file_raw_ptr(struct task_struct *task, unsigned int idx)
struct file *file;
rcu_read_lock();
- file = task_lookup_fd_rcu(task, idx);
+ file = task_lookup_fdget_rcu(task, idx);
rcu_read_unlock();
+ if (file)
+ fput(file);
return file;
}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 8d35b9f9aaa3..dee341ae4ace 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -684,7 +684,7 @@ static void power_down(void)
cpu_relax();
}
-static int load_image_and_restore(bool snapshot_test)
+static int load_image_and_restore(void)
{
int error;
unsigned int flags;
@@ -694,12 +694,12 @@ static int load_image_and_restore(bool snapshot_test)
lock_device_hotplug();
error = create_basic_memory_bitmaps();
if (error) {
- swsusp_close(snapshot_test);
+ swsusp_close();
goto Unlock;
}
error = swsusp_read(&flags);
- swsusp_close(snapshot_test);
+ swsusp_close();
if (!error)
error = hibernation_restore(flags & SF_PLATFORM_MODE);
@@ -788,7 +788,7 @@ int hibernate(void)
pm_pr_dbg("Checking hibernation image\n");
error = swsusp_check(false);
if (!error)
- error = load_image_and_restore(false);
+ error = load_image_and_restore();
}
thaw_processes();
@@ -952,7 +952,7 @@ static int software_resume(void)
/* The snapshot device should not be opened while we're running */
if (!hibernate_acquire()) {
error = -EBUSY;
- swsusp_close(true);
+ swsusp_close();
goto Unlock;
}
@@ -973,7 +973,7 @@ static int software_resume(void)
goto Close_Finish;
}
- error = load_image_and_restore(true);
+ error = load_image_and_restore();
thaw_processes();
Finish:
pm_notifier_call_chain(PM_POST_RESTORE);
@@ -987,7 +987,7 @@ static int software_resume(void)
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
- swsusp_close(true);
+ swsusp_close();
goto Finish;
}
diff --git a/kernel/power/power.h b/kernel/power/power.h
index a98f95e309a3..17fd9aaaf084 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -172,7 +172,7 @@ int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
-void swsusp_close(bool exclusive);
+void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 74edbce2320b..68a5c2f06957 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -222,7 +222,7 @@ int swsusp_swap_in_use(void)
*/
static unsigned short root_swap = 0xffff;
-static struct block_device *hib_resume_bdev;
+static struct bdev_handle *hib_resume_bdev_handle;
struct hib_bio_batch {
atomic_t count;
@@ -276,7 +276,8 @@ static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
struct bio *bio;
int error = 0;
- bio = bio_alloc(hib_resume_bdev, 1, opf, GFP_NOIO | __GFP_HIGH);
+ bio = bio_alloc(hib_resume_bdev_handle->bdev, 1, opf,
+ GFP_NOIO | __GFP_HIGH);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
@@ -356,14 +357,14 @@ static int swsusp_swap_check(void)
return res;
root_swap = res;
- hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
+ hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(hib_resume_bdev))
- return PTR_ERR(hib_resume_bdev);
+ if (IS_ERR(hib_resume_bdev_handle))
+ return PTR_ERR(hib_resume_bdev_handle);
- res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
+ res = set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
if (res < 0)
- blkdev_put(hib_resume_bdev, NULL);
+ bdev_release(hib_resume_bdev_handle);
return res;
}
@@ -443,7 +444,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
err_rel:
release_swap_writer(handle);
err_close:
- swsusp_close(false);
+ swsusp_close();
return ret;
}
@@ -508,7 +509,7 @@ static int swap_writer_finish(struct swap_map_handle *handle,
if (error)
free_all_swap_pages(root_swap);
release_swap_writer(handle);
- swsusp_close(false);
+ swsusp_close();
return error;
}
@@ -1522,10 +1523,10 @@ int swsusp_check(bool exclusive)
void *holder = exclusive ? &swsusp_holder : NULL;
int error;
- hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, BLK_OPEN_READ,
- holder, NULL);
- if (!IS_ERR(hib_resume_bdev)) {
- set_blocksize(hib_resume_bdev, PAGE_SIZE);
+ hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
+ BLK_OPEN_READ, holder, NULL);
+ if (!IS_ERR(hib_resume_bdev_handle)) {
+ set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
clear_page(swsusp_header);
error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
swsusp_header, NULL);
@@ -1550,11 +1551,11 @@ int swsusp_check(bool exclusive)
put:
if (error)
- blkdev_put(hib_resume_bdev, holder);
+ bdev_release(hib_resume_bdev_handle);
else
pr_debug("Image signature found, resuming\n");
} else {
- error = PTR_ERR(hib_resume_bdev);
+ error = PTR_ERR(hib_resume_bdev_handle);
}
if (error)
@@ -1568,14 +1569,14 @@ put:
* @exclusive: Close the resume device which is exclusively opened.
*/
-void swsusp_close(bool exclusive)
+void swsusp_close(void)
{
- if (IS_ERR(hib_resume_bdev)) {
+ if (IS_ERR(hib_resume_bdev_handle)) {
pr_debug("Image device not initialised\n");
return;
}
- blkdev_put(hib_resume_bdev, exclusive ? &swsusp_holder : NULL);
+ bdev_release(hib_resume_bdev_handle);
}
/**
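
The swap.c conversion swaps the bare struct block_device for the bdev_handle API: bdev_open_by_dev() returns a handle whose ->bdev member is used for I/O, and a single bdev_release() tears it down without the caller having to remember the holder it was opened with. A hedged kernel-style sketch limited to the calls that appear in this hunk; the function names and the resume_handle variable are illustrative, and error handling is trimmed:

/* sketch only: mirrors the open/use/release calls shown above */
#include <linux/blkdev.h>
#include <linux/err.h>

static struct bdev_handle *resume_handle;

static int resume_dev_open(dev_t dev)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_dev(dev, BLK_OPEN_READ, NULL, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* all block I/O goes through the block_device inside the handle */
	set_blocksize(handle->bdev, PAGE_SIZE);
	resume_handle = handle;
	return 0;
}

static void resume_dev_close(void)
{
	if (!resume_handle)
		return;
	bdev_release(resume_handle);	/* one call, however it was opened */
	resume_handle = NULL;
}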
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ef7490c4b8b4..df348aa55d3c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -872,14 +872,16 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
*
* Which allows an EDF like search on (sub)trees.
*/
-static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq)
{
struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
struct sched_entity *curr = cfs_rq->curr;
struct sched_entity *best = NULL;
+ struct sched_entity *best_left = NULL;
if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
curr = NULL;
+ best = curr;
/*
* Once selected, run a task until it either becomes non-eligible or
@@ -900,33 +902,75 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
}
/*
- * If this entity has an earlier deadline than the previous
- * best, take this one. If it also has the earliest deadline
- * of its subtree, we're done.
+ * Now we heap search eligible trees for the best (min_)deadline
*/
- if (!best || deadline_gt(deadline, best, se)) {
+ if (!best || deadline_gt(deadline, best, se))
best = se;
- if (best->deadline == best->min_deadline)
- break;
- }
/*
- * If the earlest deadline in this subtree is in the fully
- * eligible left half of our space, go there.
+ * Every se in a left branch is eligible, keep track of the
+ * branch with the best min_deadline
*/
+ if (node->rb_left) {
+ struct sched_entity *left = __node_2_se(node->rb_left);
+
+ if (!best_left || deadline_gt(min_deadline, best_left, left))
+ best_left = left;
+
+ /*
+ * min_deadline is in the left branch. rb_left and all
+ * descendants are eligible, so immediately switch to the second
+ * loop.
+ */
+ if (left->min_deadline == se->min_deadline)
+ break;
+ }
+
+ /* min_deadline is at this node, no need to look right */
+ if (se->deadline == se->min_deadline)
+ break;
+
+ /* else min_deadline is in the right branch. */
+ node = node->rb_right;
+ }
+
+ /*
+ * We ran into an eligible node which is itself the best.
+ * (Or nr_running == 0 and both are NULL)
+ */
+ if (!best_left || (s64)(best_left->min_deadline - best->deadline) > 0)
+ return best;
+
+ /*
+ * Now best_left and all of its children are eligible, and we are just
+ * looking for deadline == min_deadline
+ */
+ node = &best_left->run_node;
+ while (node) {
+ struct sched_entity *se = __node_2_se(node);
+
+ /* min_deadline is the current node */
+ if (se->deadline == se->min_deadline)
+ return se;
+
+ /* min_deadline is in the left branch */
if (node->rb_left &&
__node_2_se(node->rb_left)->min_deadline == se->min_deadline) {
node = node->rb_left;
continue;
}
+ /* else min_deadline is in the right branch */
node = node->rb_right;
}
+ return NULL;
+}
- if (!best || (curr && deadline_gt(deadline, best, curr)))
- best = curr;
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+{
+ struct sched_entity *se = __pick_eevdf(cfs_rq);
- if (unlikely(!best)) {
+ if (!se) {
struct sched_entity *left = __pick_first_entity(cfs_rq);
if (left) {
pr_err("EEVDF scheduling fail, picking leftmost\n");
@@ -934,7 +978,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
}
}
- return best;
+ return se;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -3613,6 +3657,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
*/
deadline = div_s64(deadline * old_weight, weight);
se->deadline = se->vruntime + deadline;
+ if (se != cfs_rq->curr)
+ min_deadline_cb_propagate(&se->run_node, NULL);
}
#ifdef CONFIG_SMP
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 3b21f4063258..881f90f0cbcf 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -189,7 +189,7 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
{
int i, size;
- if (num < 0)
+ if (num <= 0)
return -EINVAL;
if (!fp->exit_handler) {
@@ -202,8 +202,8 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
size = fp->nr_maxactive;
else
size = num * num_possible_cpus() * 2;
- if (size < 0)
- return -E2BIG;
+ if (size <= 0)
+ return -EINVAL;
fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
if (!fp->rethook)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3d7a180a8427..e834f149695b 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -705,6 +705,41 @@ static struct notifier_block trace_kprobe_module_nb = {
.priority = 1 /* Invoked after kprobe module callback */
};
+static int count_symbols(void *data, unsigned long unused)
+{
+ unsigned int *count = data;
+
+ (*count)++;
+
+ return 0;
+}
+
+struct sym_count_ctx {
+ unsigned int count;
+ const char *name;
+};
+
+static int count_mod_symbols(void *data, const char *name, unsigned long unused)
+{
+ struct sym_count_ctx *ctx = data;
+
+ if (strcmp(name, ctx->name) == 0)
+ ctx->count++;
+
+ return 0;
+}
+
+static unsigned int number_of_same_symbols(char *func_name)
+{
+ struct sym_count_ctx ctx = { .count = 0, .name = func_name };
+
+ kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
+
+ module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx);
+
+ return ctx.count;
+}
+
static int __trace_kprobe_create(int argc, const char *argv[])
{
/*
@@ -836,6 +871,31 @@ static int __trace_kprobe_create(int argc, const char *argv[])
}
}
+ if (symbol && !strchr(symbol, ':')) {
+ unsigned int count;
+
+ count = number_of_same_symbols(symbol);
+ if (count > 1) {
+ /*
+ * Users should specify ADDR to resolve the ambiguity;
+ * KSYM alone matches more than one symbol.
+ */
+ trace_probe_log_err(0, NON_UNIQ_SYMBOL);
+ ret = -EADDRNOTAVAIL;
+
+ goto error;
+ } else if (count == 0) {
+ /*
+ * We can return ENOENT earlier than when registering the
+ * kprobe.
+ */
+ trace_probe_log_err(0, BAD_PROBE_ADDR);
+ ret = -ENOENT;
+
+ goto error;
+ }
+ }
+
trace_probe_log_set_index(0);
if (event) {
ret = traceprobe_parse_event_name(&event, &group, gbuf,
@@ -963,7 +1023,7 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
* @name: The name of the kprobe event
* @loc: The location of the kprobe event
* @kretprobe: Is this a return probe?
- * @args: Variable number of arg (pairs), one pair for each field
+ * @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
@@ -1036,7 +1096,7 @@ EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
/**
* __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
* @cmd: A pointer to the dynevent_cmd struct representing the new event
- * @args: Variable number of arg (pairs), one pair for each field
+ * @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_add_fields() wrapper, which
@@ -1695,6 +1755,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
}
#ifdef CONFIG_PERF_EVENTS
+
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
@@ -1705,6 +1766,24 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
int ret;
char *event;
+ if (func) {
+ unsigned int count;
+
+ count = number_of_same_symbols(func);
+ if (count > 1)
+ /*
+ * Users should specify addr to resolve the ambiguity;
+ * func alone matches more than one symbol.
+ */
+ return ERR_PTR(-EADDRNOTAVAIL);
+ else if (count == 0)
+ /*
+ * We can return ENOENT earlier than when registering the
+ * kprobe.
+ */
+ return ERR_PTR(-ENOENT);
+ }
+
/*
* local trace_kprobes are not added to dyn_event, so they are never
* searched in find_trace_kprobe(). Therefore, there is no concern of
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index db575094c498..d8b302d01083 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -404,7 +404,7 @@ static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
vmstart = vma->vm_start;
}
if (file) {
- ret = trace_seq_path(s, &file->f_path);
+ ret = trace_seq_path(s, file_user_path(file));
if (ret)
trace_seq_printf(s, "[+0x%lx]",
ip - vmstart);
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 02b432ae7513..850d9ecb6765 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -450,6 +450,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(BAD_MAXACT, "Invalid maxactive number"), \
C(MAXACT_TOO_BIG, "Maxactive is too big"), \
C(BAD_PROBE_ADDR, "Invalid probed address or symbol"), \
+ C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \
C(BAD_RETPROBE, "Retprobe address must be an function entry"), \
C(NO_TRACEPOINT, "Tracepoint is not found"), \
C(BAD_ADDR_SUFFIX, "Invalid probed address suffix"), \
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b9f053a5a5f0..a3522b70218d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2166,7 +2166,7 @@ static struct worker *create_worker(struct worker_pool *pool)
{
struct worker *worker;
int id;
- char id_buf[16];
+ char id_buf[23];
/* ID is needed to determine kthread name */
id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
@@ -4600,12 +4600,22 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
}
cpus_read_unlock();
+ /* For unbound pwqs, flushing pwq_release_worker ensures that the
+ * pwq_release_workfn() completes before calling kfree(wq).
+ */
+ if (ret)
+ kthread_flush_worker(pwq_release_worker);
+
return ret;
enomem:
if (wq->cpu_pwq) {
- for_each_possible_cpu(cpu)
- kfree(*per_cpu_ptr(wq->cpu_pwq, cpu));
+ for_each_possible_cpu(cpu) {
+ struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
+
+ if (pwq)
+ kmem_cache_free(pwq_cache, pwq);
+ }
free_percpu(wq->cpu_pwq);
wq->cpu_pwq = NULL;
}
@@ -5782,9 +5792,13 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
list_for_each_entry(wq, &workqueues, list) {
if (!(wq->flags & WQ_UNBOUND))
continue;
+
/* creating multiple pwqs breaks ordering guarantee */
- if (wq->flags & __WQ_ORDERED)
- continue;
+ if (!list_empty(&wq->pwqs)) {
+ if (wq->flags & __WQ_ORDERED_EXPLICIT)
+ continue;
+ wq->flags &= ~__WQ_ORDERED;
+ }
ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
if (IS_ERR(ctx)) {
diff --git a/lib/Kconfig b/lib/Kconfig
index c686f4adc124..76fe64f933fc 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -729,6 +729,11 @@ config PARMAN
config OBJAGG
tristate "objagg" if COMPILE_TEST
+config LWQ_TEST
+ bool "Boot-time test for lwq queuing"
+ help
+ Run boot-time test of light-weight queuing.
+
endmenu
config GENERIC_IOREMAP
diff --git a/lib/Makefile b/lib/Makefile
index 740109b6e2c8..d0c116b706e6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,7 +45,7 @@ obj-y += lockref.o
obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
list_sort.o uuid.o iov_iter.o clz_ctz.o \
- bsearch.o find_bit.o llist.o memweight.o kfifo.o \
+ bsearch.o find_bit.o llist.o lwq.o memweight.o kfifo.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 27234a820eeb..de7d11cf4c63 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
@@ -10,192 +9,71 @@
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
-#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
+#include <linux/iov_iter.h>
-/* covers ubuf and kbuf alike */
-#define iterate_buf(i, n, base, len, off, __p, STEP) { \
- size_t __maybe_unused off = 0; \
- len = n; \
- base = __p + i->iov_offset; \
- len -= (STEP); \
- i->iov_offset += len; \
- n = len; \
-}
-
-/* covers iovec and kvec alike */
-#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
- size_t off = 0; \
- size_t skip = i->iov_offset; \
- do { \
- len = min(n, __p->iov_len - skip); \
- if (likely(len)) { \
- base = __p->iov_base + skip; \
- len -= (STEP); \
- off += len; \
- skip += len; \
- n -= len; \
- if (skip < __p->iov_len) \
- break; \
- } \
- __p++; \
- skip = 0; \
- } while (n); \
- i->iov_offset = skip; \
- n = off; \
-}
-
-#define iterate_bvec(i, n, base, len, off, p, STEP) { \
- size_t off = 0; \
- unsigned skip = i->iov_offset; \
- while (n) { \
- unsigned offset = p->bv_offset + skip; \
- unsigned left; \
- void *kaddr = kmap_local_page(p->bv_page + \
- offset / PAGE_SIZE); \
- base = kaddr + offset % PAGE_SIZE; \
- len = min(min(n, (size_t)(p->bv_len - skip)), \
- (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
- left = (STEP); \
- kunmap_local(kaddr); \
- len -= left; \
- off += len; \
- skip += len; \
- if (skip == p->bv_len) { \
- skip = 0; \
- p++; \
- } \
- n -= len; \
- if (left) \
- break; \
- } \
- i->iov_offset = skip; \
- n = off; \
-}
-
-#define iterate_xarray(i, n, base, len, __off, STEP) { \
- __label__ __out; \
- size_t __off = 0; \
- struct folio *folio; \
- loff_t start = i->xarray_start + i->iov_offset; \
- pgoff_t index = start / PAGE_SIZE; \
- XA_STATE(xas, i->xarray, index); \
- \
- len = PAGE_SIZE - offset_in_page(start); \
- rcu_read_lock(); \
- xas_for_each(&xas, folio, ULONG_MAX) { \
- unsigned left; \
- size_t offset; \
- if (xas_retry(&xas, folio)) \
- continue; \
- if (WARN_ON(xa_is_value(folio))) \
- break; \
- if (WARN_ON(folio_test_hugetlb(folio))) \
- break; \
- offset = offset_in_folio(folio, start + __off); \
- while (offset < folio_size(folio)) { \
- base = kmap_local_folio(folio, offset); \
- len = min(n, len); \
- left = (STEP); \
- kunmap_local(base); \
- len -= left; \
- __off += len; \
- n -= len; \
- if (left || n == 0) \
- goto __out; \
- offset += len; \
- len = PAGE_SIZE; \
- } \
- } \
-__out: \
- rcu_read_unlock(); \
- i->iov_offset += __off; \
- n = __off; \
-}
-
-#define __iterate_and_advance(i, n, base, len, off, I, K) { \
- if (unlikely(i->count < n)) \
- n = i->count; \
- if (likely(n)) { \
- if (likely(iter_is_ubuf(i))) { \
- void __user *base; \
- size_t len; \
- iterate_buf(i, n, base, len, off, \
- i->ubuf, (I)) \
- } else if (likely(iter_is_iovec(i))) { \
- const struct iovec *iov = iter_iov(i); \
- void __user *base; \
- size_t len; \
- iterate_iovec(i, n, base, len, off, \
- iov, (I)) \
- i->nr_segs -= iov - iter_iov(i); \
- i->__iov = iov; \
- } else if (iov_iter_is_bvec(i)) { \
- const struct bio_vec *bvec = i->bvec; \
- void *base; \
- size_t len; \
- iterate_bvec(i, n, base, len, off, \
- bvec, (K)) \
- i->nr_segs -= bvec - i->bvec; \
- i->bvec = bvec; \
- } else if (iov_iter_is_kvec(i)) { \
- const struct kvec *kvec = i->kvec; \
- void *base; \
- size_t len; \
- iterate_iovec(i, n, base, len, off, \
- kvec, (K)) \
- i->nr_segs -= kvec - i->kvec; \
- i->kvec = kvec; \
- } else if (iov_iter_is_xarray(i)) { \
- void *base; \
- size_t len; \
- iterate_xarray(i, n, base, len, off, \
- (K)) \
- } \
- i->count -= n; \
- } \
-}
-#define iterate_and_advance(i, n, base, len, off, I, K) \
- __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
-
-static int copyout(void __user *to, const void *from, size_t n)
+static __always_inline
+size_t copy_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
if (should_fail_usercopy())
- return n;
- if (access_ok(to, n)) {
- instrument_copy_to_user(to, from, n);
- n = raw_copy_to_user(to, from, n);
+ return len;
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = raw_copy_to_user(iter_to, from, len);
}
- return n;
+ return len;
}
-static int copyout_nofault(void __user *to, const void *from, size_t n)
+static __always_inline
+size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
- long res;
+ ssize_t res;
if (should_fail_usercopy())
- return n;
-
- res = copy_to_user_nofault(to, from, n);
+ return len;
- return res < 0 ? n : res;
+ from += progress;
+ res = copy_to_user_nofault(iter_to, from, len);
+ return res < 0 ? len : res;
}
-static int copyin(void *to, const void __user *from, size_t n)
+static __always_inline
+size_t copy_from_user_iter(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
{
- size_t res = n;
+ size_t res = len;
if (should_fail_usercopy())
- return n;
- if (access_ok(from, n)) {
- instrument_copy_from_user_before(to, from, n);
- res = raw_copy_from_user(to, from, n);
- instrument_copy_from_user_after(to, from, n, res);
+ return len;
+ if (access_ok(iter_from, len)) {
+ to += progress;
+ instrument_copy_from_user_before(to, iter_from, len);
+ res = raw_copy_from_user(to, iter_from, len);
+ instrument_copy_from_user_after(to, iter_from, len, res);
}
return res;
}
+static __always_inline
+size_t memcpy_to_iter(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ memcpy(iter_to, from + progress, len);
+ return 0;
+}
+
+static __always_inline
+size_t memcpy_from_iter(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ memcpy(to + progress, iter_from, len);
+ return 0;
+}
+
/*
* fault_in_iov_iter_readable - fault in iov iterator for reading
* @i: iterator
@@ -290,7 +168,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
.iter_type = ITER_IOVEC,
.copy_mc = false,
.nofault = false,
- .user_backed = true,
.data_source = direction,
.__iov = iov,
.nr_segs = nr_segs,
@@ -300,36 +177,35 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_init);
-static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
- __wsum sum, size_t off)
-{
- __wsum next = csum_partial_copy_nocheck(from, to, len);
- return csum_block_add(sum, next, off);
-}
-
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, base, len, off,
- copyout(base, addr + off, len),
- memcpy(base, addr + off, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter, memcpy_to_iter);
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_COPY_MC
-static int copyout_mc(void __user *to, const void *from, size_t n)
-{
- if (access_ok(to, n)) {
- instrument_copy_to_user(to, from, n);
- n = copy_mc_to_user((__force void *) to, from, n);
+static __always_inline
+size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = copy_mc_to_user(iter_to, from, len);
}
- return n;
+ return len;
+}
+
+static __always_inline
+size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ return copy_mc_to_kernel(iter_to, from + progress, len);
}
/**
@@ -362,22 +238,35 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
return 0;
if (user_backed_iter(i))
might_fault();
- __iterate_and_advance(i, bytes, base, len, off,
- copyout_mc(base, addr + off, len),
- copy_mc_to_kernel(base, addr + off, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter_mc, memcpy_to_iter_mc);
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
-static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
- size_t size)
+static __always_inline
+size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return copy_mc_to_kernel(to + progress, iter_from, len);
+}
+
+static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(i->count < bytes))
+ bytes = i->count;
+ if (unlikely(!bytes))
+ return 0;
+ return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
+}
+
+static __always_inline
+size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (iov_iter_is_copy_mc(i))
- return (void *)copy_mc_to_kernel(to, from, size);
- return memcpy(to, from, size);
+ if (unlikely(iov_iter_is_copy_mc(i)))
+ return __copy_from_iter_mc(addr, bytes, i);
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter, memcpy_from_iter);
}
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
@@ -387,30 +276,44 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, base, len, off,
- copyin(addr + off, base, len),
- memcpy_from_iter(i, addr + off, base, len)
- )
-
- return bytes;
+ return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);
+static __always_inline
+size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
+}
+
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
- iterate_and_advance(i, bytes, base, len, off,
- __copy_from_user_inatomic_nocache(addr + off, base, len),
- memcpy(addr + off, base, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_nocache,
+ memcpy_from_iter);
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+static __always_inline
+size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return __copy_from_user_flushcache(to + progress, iter_from, len);
+}
+
+static __always_inline
+size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ memcpy_flushcache(to + progress, iter_from, len);
+ return 0;
+}
+
/**
* _copy_from_iter_flushcache - write destination through cpu cache
* @addr: destination kernel address
@@ -432,12 +335,9 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
if (WARN_ON_ONCE(!i->data_source))
return 0;
- iterate_and_advance(i, bytes, base, len, off,
- __copy_from_user_flushcache(addr + off, base, len),
- memcpy_flushcache(addr + off, base, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_flushcache,
+ memcpy_from_iter_flushcache);
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
@@ -509,10 +409,9 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
- iterate_and_advance(i, n, base, len, off,
- copyout_nofault(base, kaddr + offset + off, len),
- memcpy(base, kaddr + offset + off, len)
- )
+ n = iterate_and_advance(i, bytes, kaddr,
+ copy_to_user_iter_nofault,
+ memcpy_to_iter);
kunmap_local(kaddr);
res += n;
bytes -= n;
@@ -555,14 +454,25 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
}
EXPORT_SYMBOL(copy_page_from_iter);
-size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+static __always_inline
+size_t zero_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
{
- iterate_and_advance(i, bytes, base, len, count,
- clear_user(base, len),
- memset(base, 0, len)
- )
+ return clear_user(iter_to, len);
+}
+
+static __always_inline
+size_t zero_to_iter(void *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
+{
+ memset(iter_to, 0, len);
+ return 0;
+}
- return bytes;
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+ return iterate_and_advance(i, bytes, NULL,
+ zero_to_user_iter, zero_to_iter);
}
EXPORT_SYMBOL(iov_iter_zero);
@@ -587,10 +497,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
}
p = kmap_atomic(page) + offset;
- iterate_and_advance(i, n, base, len, off,
- copyin(p + off, base, len),
- memcpy_from_iter(i, p + off, base, len)
- )
+ n = __copy_from_iter(p, n, i);
kunmap_atomic(p);
copied += n;
offset += n;
@@ -1181,78 +1088,6 @@ ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
-{
- __wsum sum, next;
- sum = *csum;
- if (WARN_ON_ONCE(!i->data_source))
- return 0;
-
- iterate_and_advance(i, bytes, base, len, off, ({
- next = csum_and_copy_from_user(base, addr + off, len);
- sum = csum_block_add(sum, next, off);
- next ? 0 : len;
- }), ({
- sum = csum_and_memcpy(addr + off, base, len, sum, off);
- })
- )
- *csum = sum;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter);
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
- struct iov_iter *i)
-{
- struct csum_state *csstate = _csstate;
- __wsum sum, next;
-
- if (WARN_ON_ONCE(i->data_source))
- return 0;
- if (unlikely(iov_iter_is_discard(i))) {
- // can't use csum_memcpy() for that one - data is not copied
- csstate->csum = csum_block_add(csstate->csum,
- csum_partial(addr, bytes, 0),
- csstate->off);
- csstate->off += bytes;
- return bytes;
- }
-
- sum = csum_shift(csstate->csum, csstate->off);
- iterate_and_advance(i, bytes, base, len, off, ({
- next = csum_and_copy_to_user(addr + off, base, len);
- sum = csum_block_add(sum, next, off);
- next ? 0 : len;
- }), ({
- sum = csum_and_memcpy(base, addr + off, len, sum, off);
- })
- )
- csstate->csum = csum_shift(sum, csstate->off);
- csstate->off += bytes;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_to_iter);
-
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i)
-{
-#ifdef CONFIG_CRYPTO_HASH
- struct ahash_request *hash = hashp;
- struct scatterlist sg;
- size_t copied;
-
- copied = copy_to_iter(addr, bytes, i);
- sg_init_one(&sg, addr, copied);
- ahash_request_set_crypt(hash, &sg, NULL, copied);
- crypto_ahash_update(hash);
- return copied;
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL(hash_and_copy_to_iter);
-
static int iov_npages(const struct iov_iter *i, int maxpages)
{
size_t skip = i->iov_offset, size = i->count;
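
With the iterate_* macros gone, every copy variant is now a thin wrapper around iterate_and_advance(): it passes a private pointer plus two step callbacks, one for user-backed segments and one for kernel memory, each with the (destination, progress, len, priv, priv2) signature visible above, and gets back the number of bytes actually advanced. A sketch of what a new caller looks like, reusing the step helpers defined in this patch; only the wrapper name is invented, and it deliberately mirrors _copy_to_iter():

/* sketch: a hypothetical copy variant built on the new step-function API */
static size_t copy_to_iter_example(const void *addr, size_t bytes,
				   struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))	/* destination iterators only */
		return 0;
	if (user_backed_iter(i))
		might_fault();			/* user pages may need faulting in */
	/* one call walks every segment: the first callback handles
	 * user-backed segments, the second kernel-backed ones */
	return iterate_and_advance(i, bytes, (void *)addr,
				   copy_to_user_iter, memcpy_to_iter);
}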
diff --git a/lib/llist.c b/lib/llist.c
index 6e668fa5a2c6..f21d0cfbbaaa 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -66,6 +66,34 @@ struct llist_node *llist_del_first(struct llist_head *head)
EXPORT_SYMBOL_GPL(llist_del_first);
/**
+ * llist_del_first_this - delete given entry of lock-less list if it is first
+ * @head: the head for your lock-less list
+ * @this: a list entry.
+ *
+ * If the first entry of the list is @this, delete it and return %true,
+ * otherwise return %false.
+ *
+ * Multiple callers can safely call this concurrently with multiple
+ * llist_add() callers, provided each caller passes a different @this.
+ */
+bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this)
+{
+ struct llist_node *entry, *next;
+
+ /* acquire ensures ordering wrt try_cmpxchg() in llist_del_first() */
+ entry = smp_load_acquire(&head->first);
+ do {
+ if (entry != this)
+ return false;
+ next = READ_ONCE(entry->next);
+ } while (!try_cmpxchg(&head->first, &entry, next));
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(llist_del_first_this);
+
+/**
* llist_reverse_order - reverse order of a llist chain
* @head: first item of the list to be reversed
*
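
llist_del_first_this() lets many consumers share one llist without a lock on the removal side: each consumer owns a node, enqueues it with llist_add(), and later removes it only if that node is still at the head, so concurrent producers and per-owner removal stay race-free. A short kernel-style sketch of that usage; the waiter struct and helper names are invented, only the llist calls come from the API:

#include <linux/llist.h>

struct waiter {
	struct llist_node node;
	int id;
};

static LLIST_HEAD(queue);

/* producer side: may run in any context */
static void waiter_enqueue(struct waiter *w)
{
	llist_add(&w->node, &queue);
}

/* consumer side: a waiter only ever tries to remove itself, and the
 * removal succeeds only while its node is still the first entry */
static bool waiter_try_dequeue_self(struct waiter *w)
{
	return llist_del_first_this(&queue, &w->node);
}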
diff --git a/lib/lwq.c b/lib/lwq.c
new file mode 100644
index 000000000000..57d080a4d53d
--- /dev/null
+++ b/lib/lwq.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Light-weight single-linked queue.
+ *
+ * Entries are enqueued to the head of an llist, with no blocking.
+ * This can happen in any context.
+ *
+ * Entries are dequeued using a spinlock to protect against multiple
+ * access. Entries are staged, in reverse order, on a ready list that is
+ * refilled from the llist whenever it runs empty.
+ *
+ * This is particularly suitable when work items are queued in BH or
+ * IRQ context, and where work items are handled one at a time by
+ * dedicated threads.
+ */
+#include <linux/rcupdate.h>
+#include <linux/lwq.h>
+
+struct llist_node *__lwq_dequeue(struct lwq *q)
+{
+ struct llist_node *this;
+
+ if (lwq_empty(q))
+ return NULL;
+ spin_lock(&q->lock);
+ this = q->ready;
+ if (!this && !llist_empty(&q->new)) {
+ /* ensure the queue doesn't transiently appear lwq_empty */
+ smp_store_release(&q->ready, (void *)1);
+ this = llist_reverse_order(llist_del_all(&q->new));
+ if (!this)
+ q->ready = NULL;
+ }
+ if (this)
+ q->ready = llist_next(this);
+ spin_unlock(&q->lock);
+ return this;
+}
+EXPORT_SYMBOL_GPL(__lwq_dequeue);
+
+/**
+ * lwq_dequeue_all - dequeue all currently enqueued objects
+ * @q: the queue to dequeue from
+ *
+ * Remove and return a linked list of llist_nodes of all the objects that were
+ * in the queue. The first on the list will be the object that was least
+ * recently enqueued.
+ */
+struct llist_node *lwq_dequeue_all(struct lwq *q)
+{
+ struct llist_node *r, *t, **ep;
+
+ if (lwq_empty(q))
+ return NULL;
+
+ spin_lock(&q->lock);
+ r = q->ready;
+ q->ready = NULL;
+ t = llist_del_all(&q->new);
+ spin_unlock(&q->lock);
+ ep = &r;
+ while (*ep)
+ ep = &(*ep)->next;
+ *ep = llist_reverse_order(t);
+ return r;
+}
+EXPORT_SYMBOL_GPL(lwq_dequeue_all);
+
+#if IS_ENABLED(CONFIG_LWQ_TEST)
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait_bit.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+struct tnode {
+ struct lwq_node n;
+ int i;
+ int c;
+};
+
+static int lwq_exercise(void *qv)
+{
+ struct lwq *q = qv;
+ int cnt;
+ struct tnode *t;
+
+ for (cnt = 0; cnt < 10000; cnt++) {
+ wait_var_event(q, (t = lwq_dequeue(q, struct tnode, n)) != NULL);
+ t->c++;
+ if (lwq_enqueue(&t->n, q))
+ wake_up_var(q);
+ }
+ while (!kthread_should_stop())
+ schedule_timeout_idle(1);
+ return 0;
+}
+
+static int lwq_test(void)
+{
+ int i;
+ struct lwq q;
+ struct llist_node *l, **t1, *t2;
+ struct tnode *t;
+ struct task_struct *threads[8];
+
+ printk(KERN_INFO "testing lwq....\n");
+ lwq_init(&q);
+ printk(KERN_INFO " lwq: run some threads\n");
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
+ threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i);
+ for (i = 0; i < 100; i++) {
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ break;
+ t->i = i;
+ t->c = 0;
+ if (lwq_enqueue(&t->n, &q))
+ wake_up_var(&q);
+ }
+ /* wait for threads to exit */
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
+ if (!IS_ERR_OR_NULL(threads[i]))
+ kthread_stop(threads[i]);
+ printk(KERN_INFO " lwq: dequeue first 50:");
+ for (i = 0; i < 50 ; i++) {
+ if (i && (i % 10) == 0) {
+ printk(KERN_CONT "\n");
+ printk(KERN_INFO " lwq: ... ");
+ }
+ t = lwq_dequeue(&q, struct tnode, n);
+ if (t)
+ printk(KERN_CONT " %d(%d)", t->i, t->c);
+ kfree(t);
+ }
+ printk(KERN_CONT "\n");
+ l = lwq_dequeue_all(&q);
+ printk(KERN_INFO " lwq: delete the multiples of 3 (test lwq_for_each_safe())\n");
+ lwq_for_each_safe(t, t1, t2, &l, n) {
+ if ((t->i % 3) == 0) {
+ t->i = -1;
+ kfree(t);
+ t = NULL;
+ }
+ }
+ if (l)
+ lwq_enqueue_batch(l, &q);
+ printk(KERN_INFO " lwq: dequeue remaining:");
+ while ((t = lwq_dequeue(&q, struct tnode, n)) != NULL) {
+ printk(KERN_CONT " %d", t->i);
+ kfree(t);
+ }
+ printk(KERN_CONT "\n");
+ return 0;
+}
+
+module_init(lwq_test);
+#endif /* CONFIG_LWQ_TEST */
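A hedged usage sketch for the lwq API introduced above (not part of the patch), mirroring the pattern in the CONFIG_LWQ_TEST code: a producer, possibly in BH or IRQ context, enqueues work items and uses the return value of lwq_enqueue() to decide whether the consumer thread needs a wakeup, just as the test does. Names other than the lwq and wait_var primitives are hypothetical.

#include <linux/lwq.h>
#include <linux/wait_bit.h>

struct work_item {
	struct lwq_node node;
	int payload;
};

static struct lwq work_queue;

static void producer_init(void)
{
	lwq_init(&work_queue);
}

/*
 * Any context: enqueue and wake the consumer only when lwq_enqueue()
 * reports a wakeup is needed, mirroring the test code above.
 */
static void producer_submit(struct work_item *item)
{
	if (lwq_enqueue(&item->node, &work_queue))
		wake_up_var(&work_queue);
}

/* Dedicated thread: dequeue items one at a time, in FIFO order. */
static void consumer_run_once(void)
{
	struct work_item *item;

	wait_var_event(&work_queue,
		       (item = lwq_dequeue(&work_queue, struct work_item,
					   node)) != NULL);
	/* process item->payload here */
}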
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 0e00a84e8e8f..bb24d84a4922 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5627,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
/* Internal nodes */
nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
/* Add working room for split (2 nodes) + new parents */
- mas_node_count(mas, nr_nodes + 3);
+ mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
/* Detect if allocations run out */
mas->mas_flags |= MA_STATE_PREALLOC;
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 06959165e2f9..464eeb90d5ad 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -9,6 +9,7 @@
#include <linux/maple_tree.h>
#include <linux/module.h>
+#include <linux/rwsem.h>
#define MTREE_ALLOC_MAX 0x2000000000000Ul
#define CONFIG_MAPLE_SEARCH
@@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
mt_set_non_kernel(99999);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
newmas.tree = &newmt;
mas_reset(&newmas);
mas_reset(&mas);
- mas_lock(&newmas);
+ down_write(&newmt_lock);
mas.index = 0;
mas.last = 0;
if (mas_expected_entries(&newmas, nr_entries)) {
@@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
}
rcu_read_unlock();
mas_destroy(&newmas);
- mas_unlock(&newmas);
mt_validate(&newmt);
mt_set_non_kernel(0);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
@@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
mas.index = 0;
mas.last = 0;
rcu_read_lock();
- mas_lock(&newmas);
+ down_write(&newmt_lock);
if (mas_expected_entries(&newmas, nr_entries)) {
printk("OOM!");
BUG_ON(1);
@@ -2005,11 +2014,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
mas_store(&newmas, val);
}
mas_destroy(&newmas);
- mas_unlock(&newmas);
rcu_read_unlock();
mt_validate(&newmt);
mt_set_non_kernel(0);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
}
#endif
@@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
void *tmp;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);
if (!zero_start)
i = 1;
@@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
mtree_store_range(mt, i*10, (i+1)*10 - gap,
xa_mk_value(i), GFP_KERNEL);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mt_set_non_kernel(99999);
- mas_lock(&newmas);
+ down_write(&newmt_lock);
ret = mas_expected_entries(&newmas, nr_entries);
mt_set_non_kernel(0);
MT_BUG_ON(mt, ret != 0);
@@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
}
rcu_read_unlock();
mas_destroy(&newmas);
- mas_unlock(&newmas);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
/* Duplicate many sizes of trees. Mainly to test expected entry values */
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index b86ba7b0a921..f60e56150feb 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1208,6 +1208,8 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
return 0;
}
+static bool damon_sysfs_schemes_regions_updating;
+
static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
struct damon_target *t, *next;
@@ -1219,8 +1221,10 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
cmd = damon_sysfs_cmd_request.cmd;
if (kdamond && ctx == kdamond->damon_ctx &&
(cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS ||
- cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES)) {
+ cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) &&
+ damon_sysfs_schemes_regions_updating) {
damon_sysfs_schemes_update_regions_stop(ctx);
+ damon_sysfs_schemes_regions_updating = false;
mutex_unlock(&damon_sysfs_lock);
}
@@ -1340,7 +1344,6 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
{
struct damon_sysfs_kdamond *kdamond;
- static bool damon_sysfs_schemes_regions_updating;
bool total_bytes_only = false;
int err = 0;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 52d26072dfda..1301ba7b2c9a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -97,6 +97,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
@@ -267,6 +268,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_read(&resv_map->rw_sema);
}
}
@@ -276,6 +281,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_read(&resv_map->rw_sema);
}
}
@@ -285,6 +294,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_write(&resv_map->rw_sema);
}
}
@@ -294,17 +307,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_write(&resv_map->rw_sema);
}
}
int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- if (!__vma_shareable_lock(vma))
- return 1;
+ if (__vma_shareable_lock(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- return down_write_trylock(&vma_lock->rw_sema);
+ return down_write_trylock(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ return down_write_trylock(&resv_map->rw_sema);
+ }
+
+ return 1;
}
void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
@@ -313,6 +336,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
lockdep_assert_held(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ lockdep_assert_held(&resv_map->rw_sema);
}
}
@@ -345,6 +372,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
__hugetlb_vma_unlock_write_put(vma_lock);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ /* no free for anon vmas, but still need to unlock */
+ up_write(&resv_map->rw_sema);
}
}
@@ -1068,6 +1100,7 @@ struct resv_map *resv_map_alloc(void)
kref_init(&resv_map->refs);
spin_lock_init(&resv_map->lock);
INIT_LIST_HEAD(&resv_map->regions);
+ init_rwsem(&resv_map->rw_sema);
resv_map->adds_in_progress = 0;
/*
@@ -1138,8 +1171,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
- set_vma_private_data(vma, (get_vma_private_data(vma) &
- HPAGE_RESV_MASK) | (unsigned long)map);
+ set_vma_private_data(vma, (unsigned long)map);
}
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
@@ -5274,9 +5306,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
return len + old_addr - old_end;
}
-static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page, zap_flags_t zap_flags)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page, zap_flags_t zap_flags)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -5405,16 +5437,25 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
tlb_flush_mmu_tlbonly(tlb);
}
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page,
- zap_flags_t zap_flags)
+void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
{
+ if (!vma->vm_file) /* hugetlbfs_file_mmap error */
+ return;
+
+ adjust_range_if_pmd_sharing_possible(vma, start, end);
hugetlb_vma_lock_write(vma);
- i_mmap_lock_write(vma->vm_file->f_mapping);
+ if (vma->vm_file)
+ i_mmap_lock_write(vma->vm_file->f_mapping);
+}
- /* mmu notification performed in caller */
- __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
+void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ zap_flags_t zap_flags = details ? details->zap_flags : 0;
+
+ if (!vma->vm_file) /* hugetlbfs_file_mmap error */
+ return;
if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
/*
@@ -5427,11 +5468,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
* someone else.
*/
__hugetlb_vma_unlock_write_free(vma);
- i_mmap_unlock_write(vma->vm_file->f_mapping);
} else {
- i_mmap_unlock_write(vma->vm_file->f_mapping);
hugetlb_vma_unlock_write(vma);
}
+
+ if (vma->vm_file)
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
@@ -6811,8 +6853,10 @@ out_err:
*/
if (chg >= 0 && add < 0)
region_abort(resv_map, from, to, regions_needed);
- if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+ if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
kref_put(&resv_map->refs, resv_map_release);
+ set_vma_resv_map(vma, NULL);
+ }
return false;
}
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index ca4b6ff080a6..6e3cb118d20e 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -621,7 +621,7 @@ void kasan_report_async(void)
}
#endif /* CONFIG_KASAN_HW_TAGS */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
* With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
* canonical half of the address space) cause out-of-bounds shadow memory reads
diff --git a/mm/memory.c b/mm/memory.c
index 6c264d2f969c..517221f01303 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1683,7 +1683,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
if (vma->vm_file) {
zap_flags_t zap_flags = details ?
details->zap_flags : 0;
- __unmap_hugepage_range_final(tlb, vma, start, end,
+ __unmap_hugepage_range(tlb, vma, start, end,
NULL, zap_flags);
}
} else
@@ -1728,8 +1728,12 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
start_addr, end_addr);
mmu_notifier_invalidate_range_start(&range);
do {
- unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
+ unsigned long start = start_addr;
+ unsigned long end = end_addr;
+ hugetlb_zap_begin(vma, &start, &end);
+ unmap_single_vma(tlb, vma, start, end, &details,
mm_wr_locked);
+ hugetlb_zap_end(vma, &details);
} while ((vma = mas_find(mas, tree_end - 1)) != NULL);
mmu_notifier_invalidate_range_end(&range);
}
@@ -1753,9 +1757,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
lru_add_drain();
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, end);
- if (is_vm_hugetlb_page(vma))
- adjust_range_if_pmd_sharing_possible(vma, &range.start,
- &range.end);
+ hugetlb_zap_begin(vma, &range.start, &range.end);
tlb_gather_mmu(&tlb, vma->vm_mm);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range);
@@ -1766,6 +1768,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unmap_single_vma(&tlb, vma, address, end, details, false);
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb);
+ hugetlb_zap_end(vma, details);
}
/**
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f1b00d6ac7ee..29ebf1e7898c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1543,8 +1543,10 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
* the home node for vmas we already updated before.
*/
old = vma_policy(vma);
- if (!old)
+ if (!old) {
+ prev = vma;
continue;
+ }
if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
err = -EOPNOTSUPP;
break;
diff --git a/mm/migrate.c b/mm/migrate.c
index 2053b54556ca..06086dc9da28 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2162,6 +2162,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
const int __user *nodes,
int __user *status, int flags)
{
+ compat_uptr_t __user *compat_pages = (void __user *)pages;
int current_node = NUMA_NO_NODE;
LIST_HEAD(pagelist);
int start, i;
@@ -2174,8 +2175,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
int node;
err = -EFAULT;
- if (get_user(p, pages + i))
- goto out_flush;
+ if (in_compat_syscall()) {
+ compat_uptr_t cp;
+
+ if (get_user(cp, compat_pages + i))
+ goto out_flush;
+
+ p = compat_ptr(cp);
+ } else {
+ if (get_user(p, pages + i))
+ goto out_flush;
+ }
if (get_user(node, nodes + i))
goto out_flush;
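A hedged, self-contained illustration (not part of the patch) of the pointer-width issue the do_pages_move() change above addresses: in a compat syscall the user-supplied array holds 32-bit pointers, so each element must be read as a compat_uptr_t and widened with compat_ptr() rather than read as a native void __user *. The helper name below is hypothetical.

#include <linux/compat.h>
#include <linux/uaccess.h>

static int read_user_ptr(const void __user * __user *pages, unsigned long i,
			 const void __user **out)
{
	if (in_compat_syscall()) {
		compat_uptr_t __user *cpages = (void __user *)pages;
		compat_uptr_t cp;

		/* each array slot is only 4 bytes wide here */
		if (get_user(cp, cpages + i))
			return -EFAULT;
		*out = compat_ptr(cp);
	} else {
		const void __user *p;

		if (get_user(p, pages + i))
			return -EFAULT;
		*out = p;
	}
	return 0;
}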
diff --git a/mm/mmap.c b/mm/mmap.c
index b56a7f0c9f85..9e018d8dd7d6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -583,11 +583,12 @@ again:
* dup_anon_vma() - Helper function to duplicate anon_vma
* @dst: The destination VMA
* @src: The source VMA
+ * @dup: Pointer to the destination VMA when successful.
*
* Returns: 0 on success.
*/
static inline int dup_anon_vma(struct vm_area_struct *dst,
- struct vm_area_struct *src)
+ struct vm_area_struct *src, struct vm_area_struct **dup)
{
/*
* Easily overlooked: when mprotect shifts the boundary, make sure the
@@ -595,9 +596,15 @@ static inline int dup_anon_vma(struct vm_area_struct *dst,
* anon pages imported.
*/
if (src->anon_vma && !dst->anon_vma) {
+ int ret;
+
vma_assert_write_locked(dst);
dst->anon_vma = src->anon_vma;
- return anon_vma_clone(dst, src);
+ ret = anon_vma_clone(dst, src);
+ if (ret)
+ return ret;
+
+ *dup = dst;
}
return 0;
@@ -624,6 +631,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff,
struct vm_area_struct *next)
{
+ struct vm_area_struct *anon_dup = NULL;
bool remove_next = false;
struct vma_prepare vp;
@@ -633,7 +641,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
remove_next = true;
vma_start_write(next);
- ret = dup_anon_vma(vma, next);
+ ret = dup_anon_vma(vma, next, &anon_dup);
if (ret)
return ret;
}
@@ -661,6 +669,8 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
return 0;
nomem:
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
return -ENOMEM;
}
@@ -860,6 +870,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
{
struct vm_area_struct *curr, *next, *res;
struct vm_area_struct *vma, *adjust, *remove, *remove2;
+ struct vm_area_struct *anon_dup = NULL;
struct vma_prepare vp;
pgoff_t vma_pgoff;
int err = 0;
@@ -927,18 +938,18 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_start_write(next);
remove = next; /* case 1 */
vma_end = next->vm_end;
- err = dup_anon_vma(prev, next);
+ err = dup_anon_vma(prev, next, &anon_dup);
if (curr) { /* case 6 */
vma_start_write(curr);
remove = curr;
remove2 = next;
if (!next->anon_vma)
- err = dup_anon_vma(prev, curr);
+ err = dup_anon_vma(prev, curr, &anon_dup);
}
} else if (merge_prev) { /* case 2 */
if (curr) {
vma_start_write(curr);
- err = dup_anon_vma(prev, curr);
+ err = dup_anon_vma(prev, curr, &anon_dup);
if (end == curr->vm_end) { /* case 7 */
remove = curr;
} else { /* case 5 */
@@ -954,7 +965,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_end = addr;
adjust = next;
adj_start = -(prev->vm_end - addr);
- err = dup_anon_vma(next, prev);
+ err = dup_anon_vma(next, prev, &anon_dup);
} else {
/*
* Note that cases 3 and 8 are the ONLY ones where prev
@@ -968,14 +979,14 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_pgoff = curr->vm_pgoff;
vma_start_write(curr);
remove = curr;
- err = dup_anon_vma(next, curr);
+ err = dup_anon_vma(next, curr, &anon_dup);
}
}
}
/* Error in anon_vma clone. */
if (err)
- return NULL;
+ goto anon_vma_fail;
if (vma_start < vma->vm_start || vma_end > vma->vm_end)
vma_expanded = true;
@@ -988,7 +999,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
}
if (vma_iter_prealloc(vmi, vma))
- return NULL;
+ goto prealloc_fail;
init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
@@ -1016,6 +1027,15 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_complete(&vp, vmi, mm);
khugepaged_enter_vma(res, vm_flags);
return res;
+
+prealloc_fail:
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
+
+anon_vma_fail:
+ vma_iter_set(vmi, addr);
+ vma_iter_load(vmi);
+ return NULL;
}
/*
@@ -3143,13 +3163,13 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
if (!len)
return 0;
- if (mmap_write_lock_killable(mm))
- return -EINTR;
-
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
return -EINVAL;
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
ret = check_brk_limits(addr, len);
if (ret)
goto limits_failed;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 95546f376302..85741403948f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6475,6 +6475,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
next_page = page;
current_buddy = page + size;
}
+ page = next_page;
if (set_page_guard(zone, current_buddy, high, migratetype))
continue;
@@ -6482,7 +6483,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
if (current_buddy != target) {
add_to_free_list(current_buddy, zone, high, migratetype);
set_buddy_order(current_buddy, high);
- page = next_page;
}
}
}
diff --git a/mm/readahead.c b/mm/readahead.c
index e815c114de21..6925e6959fd3 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -735,7 +735,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
*/
ret = -EINVAL;
if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
- !S_ISREG(file_inode(f.file)->i_mode))
+ (!S_ISREG(file_inode(f.file)->i_mode) &&
+ !S_ISBLK(file_inode(f.file)->i_mode)))
goto out;
ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
diff --git a/mm/shmem.c b/mm/shmem.c
index 69595d341882..6b102965d355 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1112,7 +1112,7 @@ whole_folios:
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
shmem_undo_range(inode, lstart, lend, false);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
@@ -1224,7 +1224,7 @@ static int shmem_setattr(struct mnt_idmap *idmap,
if (!error && update_ctime) {
inode_set_ctime_current(inode);
if (update_mtime)
- inode->i_mtime = inode_get_ctime(inode);
+ inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
inode_inc_iversion(inode);
}
return error;
@@ -2455,7 +2455,7 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
inode->i_ino = ino;
inode_init_owner(idmap, inode, dir, mode);
inode->i_blocks = 0;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_generation = get_random_u32();
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
@@ -2463,7 +2463,7 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
atomic_set(&info->stop_eviction, 0);
info->seals = F_SEAL_SEAL;
info->flags = flags & VM_NORESERVE;
- info->i_crtime = inode->i_mtime;
+ info->i_crtime = inode_get_mtime(inode);
info->fsflags = (dir == NULL) ? 0 :
SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
if (info->fsflags)
@@ -3229,7 +3229,7 @@ shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
goto out_iput;
dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
@@ -3318,8 +3318,8 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
}
dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
inode_inc_iversion(dir);
inc_nlink(inode);
ihold(inode); /* New dentry reference */
@@ -3339,8 +3339,8 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
dir->i_size -= BOGO_DIRENT_SIZE;
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
inode_inc_iversion(dir);
drop_nlink(inode);
dput(dentry); /* Undo the count from "create" - this does all the work */
@@ -3488,7 +3488,7 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
folio_put(folio);
}
dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
d_instantiate(dentry, inode);
dget(dentry);
@@ -3714,7 +3714,7 @@ static const struct xattr_handler shmem_user_xattr_handler = {
.set = shmem_xattr_handler_set,
};
-static const struct xattr_handler *shmem_xattr_handlers[] = {
+static const struct xattr_handler * const shmem_xattr_handlers[] = {
&shmem_security_xattr_handler,
&shmem_trusted_xattr_handler,
&shmem_user_xattr_handler,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8fda308e400d..9bbffe82d65a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -895,10 +895,13 @@ void __init setup_kmalloc_cache_index_table(void)
static unsigned int __kmalloc_minalign(void)
{
+ unsigned int minalign = dma_get_cache_alignment();
+
if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
is_swiotlb_allocated())
- return ARCH_KMALLOC_MINALIGN;
- return dma_get_cache_alignment();
+ minalign = ARCH_KMALLOC_MINALIGN;
+
+ return max(minalign, arch_slab_minalign());
}
void __init
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e52f486834eb..4bc70f459164 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2530,11 +2530,10 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
exit_swap_address_space(p->type);
inode = mapping->host;
- if (S_ISBLK(inode->i_mode)) {
- struct block_device *bdev = I_BDEV(inode);
-
- set_blocksize(bdev, old_block_size);
- blkdev_put(bdev, p);
+ if (p->bdev_handle) {
+ set_blocksize(p->bdev, old_block_size);
+ bdev_release(p->bdev_handle);
+ p->bdev_handle = NULL;
}
inode_lock(inode);
@@ -2764,13 +2763,14 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
int error;
if (S_ISBLK(inode->i_mode)) {
- p->bdev = blkdev_get_by_dev(inode->i_rdev,
+ p->bdev_handle = bdev_open_by_dev(inode->i_rdev,
BLK_OPEN_READ | BLK_OPEN_WRITE, p, NULL);
- if (IS_ERR(p->bdev)) {
- error = PTR_ERR(p->bdev);
- p->bdev = NULL;
+ if (IS_ERR(p->bdev_handle)) {
+ error = PTR_ERR(p->bdev_handle);
+ p->bdev_handle = NULL;
return error;
}
+ p->bdev = p->bdev_handle->bdev;
p->old_block_size = block_size(p->bdev);
error = set_blocksize(p->bdev, PAGE_SIZE);
if (error < 0)
@@ -3206,9 +3206,10 @@ bad_swap:
p->percpu_cluster = NULL;
free_percpu(p->cluster_next_cpu);
p->cluster_next_cpu = NULL;
- if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
+ if (p->bdev_handle) {
set_blocksize(p->bdev, p->old_block_size);
- blkdev_put(p->bdev, p);
+ bdev_release(p->bdev_handle);
+ p->bdev_handle = NULL;
}
inode = NULL;
destroy_swap_extents(p);
diff --git a/mm/zswap.c b/mm/zswap.c
index 083c693602b8..37d2b1cb2ecb 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1383,8 +1383,8 @@ reject:
shrink:
pool = zswap_pool_last_get();
- if (pool)
- queue_work(shrink_wq, &pool->shrink_work);
+ if (pool && !queue_work(shrink_wq, &pool->shrink_work))
+ zswap_pool_put(pool);
goto reject;
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7a6f20338db8..73470cc3518a 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1627,6 +1627,15 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-EOPNOTSUPP);
}
+ /* Reject outgoing connection to device with same BD ADDR against
+ * CVE-2020-26555
+ */
+ if (!bacmp(&hdev->bdaddr, dst)) {
+ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+ dst);
+ return ERR_PTR(-ECONNREFUSED);
+ }
+
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 31d02b54eea1..1e1c9147356c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -26,6 +26,8 @@
/* Bluetooth HCI event handling. */
#include <asm/unaligned.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -3268,6 +3270,16 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
+ /* Reject incoming connection from device with same BD ADDR against
+ * CVE-2020-26555
+ */
+ if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
+ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+ &ev->bdaddr);
+ hci_reject_conn(hdev, &ev->bdaddr);
+ return;
+ }
+
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
&flags);
@@ -4742,6 +4754,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
if (!conn)
goto unlock;
+ /* Ignore NULL link key against CVE-2020-26555 */
+ if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
+ bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
+ &ev->bdaddr);
+ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_drop(conn);
@@ -5274,8 +5295,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
* available, then do not declare that OOB data is
* present.
*/
- if (!memcmp(data->rand256, ZERO_KEY, 16) ||
- !memcmp(data->hash256, ZERO_KEY, 16))
+ if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
+ !crypto_memneq(data->hash256, ZERO_KEY, 16))
return 0x00;
return 0x02;
@@ -5285,8 +5306,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
* not supported by the hardware, then check that if
* P-192 data values are present.
*/
- if (!memcmp(data->rand192, ZERO_KEY, 16) ||
- !memcmp(data->hash192, ZERO_KEY, 16))
+ if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
+ !crypto_memneq(data->hash192, ZERO_KEY, 16))
return 0x00;
return 0x01;
@@ -5303,7 +5324,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
+ if (!conn || !hci_conn_ssp_enabled(conn))
goto unlock;
hci_conn_hold(conn);
@@ -5550,7 +5571,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
+ if (!conn || !hci_conn_ssp_enabled(conn))
goto unlock;
/* Reset the authentication requirement to unknown */
@@ -7021,6 +7042,14 @@ unlock:
hci_dev_unlock(hdev);
}
+static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
+{
+ u8 handle = PTR_UINT(data);
+
+ return hci_le_terminate_big_sync(hdev, handle,
+ HCI_ERROR_LOCAL_HOST_TERM);
+}
+
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -7065,16 +7094,17 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
rcu_read_lock();
}
+ rcu_read_unlock();
+
if (!ev->status && !i)
/* If no BISes have been connected for the BIG,
* terminate. This is in case all bound connections
* have been closed before the BIG creation
* has completed.
*/
- hci_le_terminate_big_sync(hdev, ev->handle,
- HCI_ERROR_LOCAL_HOST_TERM);
+ hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
+ UINT_PTR(ev->handle), NULL);
- rcu_read_unlock();
hci_dev_unlock(hdev);
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5e4f718073b7..3e7cd330d731 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -488,7 +488,8 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
ni->type = hdev->dev_type;
ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr);
- memcpy(ni->name, hdev->name, 8);
+ memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
+ strnlen(hdev->name, sizeof(ni->name)), '\0');
opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
break;
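A hedged illustration (not part of the patch) of the memcpy_and_pad() pattern used above: it copies at most the given number of source bytes and fills the remainder of the destination with the pad byte, so the fixed-size monitor name field is always fully initialised and never reads past the source string. The buffer names below are hypothetical.

#include <linux/string.h>

static void fill_name_field(char dst[8], const char *src)
{
	/* copy up to 8 bytes of src, NUL-padding the rest of dst */
	memcpy_and_pad(dst, 8, src, strnlen(src, 8), '\0');
}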
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index d06e07a0ea5a..a15ab0b874a9 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -5369,6 +5369,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
{
int err = 0;
u16 handle = conn->handle;
+ bool disconnect = false;
struct hci_conn *c;
switch (conn->state) {
@@ -5399,24 +5400,15 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
hci_dev_unlock(hdev);
return 0;
case BT_BOUND:
- hci_dev_lock(hdev);
- hci_conn_failed(conn, reason);
- hci_dev_unlock(hdev);
- return 0;
+ break;
default:
- hci_dev_lock(hdev);
- conn->state = BT_CLOSED;
- hci_disconn_cfm(conn, reason);
- hci_conn_del(conn);
- hci_dev_unlock(hdev);
- return 0;
+ disconnect = true;
+ break;
}
hci_dev_lock(hdev);
- /* Check if the connection hasn't been cleanup while waiting
- * commands to complete.
- */
+ /* Check if the connection has been cleaned up concurrently */
c = hci_conn_hash_lookup_handle(hdev, handle);
if (!c || c != conn) {
err = 0;
@@ -5428,7 +5420,13 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
* or in case of LE it was still scanning so it can be cleanup
* safely.
*/
- hci_conn_failed(conn, reason);
+ if (disconnect) {
+ conn->state = BT_CLOSED;
+ hci_disconn_cfm(conn, reason);
+ hci_conn_del(conn);
+ } else {
+ hci_conn_failed(conn, reason);
+ }
unlock:
hci_dev_unlock(hdev);
diff --git a/net/can/isotp.c b/net/can/isotp.c
index f02b5d3e4733..d1c6f206f429 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -948,21 +948,18 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
return -EADDRNOTAVAIL;
-wait_free_buffer:
- /* we do not support multiple buffers - for now */
- if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
- return -EAGAIN;
+ while (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
+ /* we do not support multiple buffers - for now */
+ if (msg->msg_flags & MSG_DONTWAIT)
+ return -EAGAIN;
- /* wait for complete transmission of current pdu */
- err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
- if (err)
- goto err_event_drop;
-
- if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
if (so->tx.state == ISOTP_SHUTDOWN)
return -EADDRNOTAVAIL;
- goto wait_free_buffer;
+ /* wait for complete transmission of current pdu */
+ err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ if (err)
+ goto err_event_drop;
}
/* PDU size > default => try max_pdu_size */
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 10a41cd9c523..3c8b78d9c4d1 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -459,8 +459,8 @@ int ceph_tcp_connect(struct ceph_connection *con)
set_sock_callbacks(sock, con);
con_sock_state_connecting(con);
- ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
- O_NONBLOCK);
+ ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
+ O_NONBLOCK);
if (ret == -EINPROGRESS) {
dout("connect %s EINPROGRESS sk_state = %u\n",
ceph_pr_addr(&con->peer_addr),
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 176eb5834746..103d46fa0eeb 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -50,7 +50,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
-#include <linux/uio.h>
+#include <linux/iov_iter.h>
#include <linux/indirect_call_wrapper.h>
#include <net/protocol.h>
@@ -61,6 +61,7 @@
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
+#include <crypto/hash.h>
/*
* Is a socket 'connection oriented' ?
@@ -489,6 +490,24 @@ short_copy:
return 0;
}
+static size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i)
+{
+#ifdef CONFIG_CRYPTO_HASH
+ struct ahash_request *hash = hashp;
+ struct scatterlist sg;
+ size_t copied;
+
+ copied = copy_to_iter(addr, bytes, i);
+ sg_init_one(&sg, addr, copied);
+ ahash_request_set_crypt(hash, &sg, NULL, copied);
+ crypto_ahash_update(hash);
+ return copied;
+#else
+ return 0;
+#endif
+}
+
/**
* skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
* and update a hash.
@@ -716,6 +735,60 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
+static __always_inline
+size_t copy_to_user_iter_csum(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ __wsum next, *csum = priv2;
+
+ next = csum_and_copy_to_user(from + progress, iter_to, len);
+ *csum = csum_block_add(*csum, next, progress);
+ return next ? 0 : len;
+}
+
+static __always_inline
+size_t memcpy_to_iter_csum(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ __wsum *csum = priv2;
+ __wsum next = csum_partial_copy_nocheck(from, iter_to, len);
+
+ *csum = csum_block_add(*csum, next, progress);
+ return 0;
+}
+
+struct csum_state {
+ __wsum csum;
+ size_t off;
+};
+
+static size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
+ struct iov_iter *i)
+{
+ struct csum_state *csstate = _csstate;
+ __wsum sum;
+
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ if (unlikely(iov_iter_is_discard(i))) {
+ // can't use csum_memcpy() for that one - data is not copied
+ csstate->csum = csum_block_add(csstate->csum,
+ csum_partial(addr, bytes, 0),
+ csstate->off);
+ csstate->off += bytes;
+ return bytes;
+ }
+
+ sum = csum_shift(csstate->csum, csstate->off);
+
+ bytes = iterate_and_advance2(i, bytes, (void *)addr, &sum,
+ copy_to_user_iter_csum,
+ memcpy_to_iter_csum);
+ csstate->csum = csum_shift(sum, csstate->off);
+ csstate->off += bytes;
+ return bytes;
+}
+
/**
* skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
* and update a checksum.
diff --git a/net/core/dev.c b/net/core/dev.c
index 85df22f05c38..9f3f8930c691 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -345,7 +345,6 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
list_del(&name_node->list);
- netdev_name_node_del(name_node);
kfree(name_node->name);
netdev_name_node_free(name_node);
}
@@ -364,6 +363,8 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
if (name_node == dev->name_node || name_node->dev != dev)
return -EINVAL;
+ netdev_name_node_del(name_node);
+ synchronize_rcu();
__netdev_name_node_alt_destroy(name_node);
return 0;
@@ -380,6 +381,7 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
+ struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
ASSERT_RTNL();
@@ -390,6 +392,10 @@ static void list_netdevice(struct net_device *dev)
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
write_unlock(&dev_base_lock);
+
+ netdev_for_each_altname(dev, name_node)
+ netdev_name_node_add(net, name_node);
+
/* We reserved the ifindex, this can't fail */
WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
@@ -401,12 +407,16 @@ static void list_netdevice(struct net_device *dev)
*/
static void unlist_netdevice(struct net_device *dev, bool lock)
{
+ struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
ASSERT_RTNL();
xa_erase(&net->dev_by_index, dev->ifindex);
+ netdev_for_each_altname(dev, name_node)
+ netdev_name_node_del(name_node);
+
/* Unlink dev from the device chain */
if (lock)
write_lock(&dev_base_lock);
@@ -1086,7 +1096,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
for_each_netdev(net, d) {
struct netdev_name_node *name_node;
- list_for_each_entry(name_node, &d->name_node->list, list) {
+
+ netdev_for_each_altname(d, name_node) {
if (!sscanf(name_node->name, name, &i))
continue;
if (i < 0 || i >= max_netdevices)
@@ -1123,6 +1134,26 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
return -ENFILE;
}
+static int dev_prep_valid_name(struct net *net, struct net_device *dev,
+ const char *want_name, char *out_name)
+{
+ int ret;
+
+ if (!dev_valid_name(want_name))
+ return -EINVAL;
+
+ if (strchr(want_name, '%')) {
+ ret = __dev_alloc_name(net, want_name, out_name);
+ return ret < 0 ? ret : 0;
+ } else if (netdev_name_in_use(net, want_name)) {
+ return -EEXIST;
+ } else if (out_name != want_name) {
+ strscpy(out_name, want_name, IFNAMSIZ);
+ }
+
+ return 0;
+}
+
static int dev_alloc_name_ns(struct net *net,
struct net_device *dev,
const char *name)
@@ -1160,19 +1191,13 @@ EXPORT_SYMBOL(dev_alloc_name);
static int dev_get_valid_name(struct net *net, struct net_device *dev,
const char *name)
{
- BUG_ON(!net);
-
- if (!dev_valid_name(name))
- return -EINVAL;
-
- if (strchr(name, '%'))
- return dev_alloc_name_ns(net, dev, name);
- else if (netdev_name_in_use(net, name))
- return -EEXIST;
- else if (dev->name != name)
- strscpy(dev->name, name, IFNAMSIZ);
+ char buf[IFNAMSIZ];
+ int ret;
- return 0;
+ ret = dev_prep_valid_name(net, dev, name, buf);
+ if (ret >= 0)
+ strscpy(dev->name, buf, IFNAMSIZ);
+ return ret;
}
/**
@@ -3292,15 +3317,19 @@ int skb_checksum_help(struct sk_buff *skb)
offset = skb_checksum_start_offset(skb);
ret = -EINVAL;
- if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
+ if (unlikely(offset >= skb_headlen(skb))) {
DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+ WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
+ offset, skb_headlen(skb));
goto out;
}
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
- if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) {
+ if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+ WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
+ offset + sizeof(__sum16), skb_headlen(skb));
goto out;
}
ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
@@ -11033,7 +11062,9 @@ EXPORT_SYMBOL(unregister_netdev);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat, int new_ifindex)
{
+ struct netdev_name_node *name_node;
struct net *net_old = dev_net(dev);
+ char new_name[IFNAMSIZ] = {};
int err, new_nsid;
ASSERT_RTNL();
@@ -11060,10 +11091,15 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- err = dev_get_valid_name(net, dev, pat);
+ err = dev_prep_valid_name(net, dev, pat, new_name);
if (err < 0)
goto out;
}
+ /* Check that none of the altnames conflicts. */
+ err = -EEXIST;
+ netdev_for_each_altname(dev, name_node)
+ if (netdev_name_in_use(net, name_node->name))
+ goto out;
/* Check that new_ifindex isn't used yet. */
if (new_ifindex) {
@@ -11131,6 +11167,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
netdev_adjacent_add_links(dev);
+ if (new_name[0]) /* Rename the netdev to prepared name */
+ strscpy(dev->name, new_name, IFNAMSIZ);
+
/* Fixup kobjects */
err = device_rename(&dev->dev, dev->name);
WARN_ON(err);
diff --git a/net/core/dev.h b/net/core/dev.h
index e075e198092c..fa2e9c5c4122 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -62,6 +62,9 @@ struct netdev_name_node {
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_change_name(struct net_device *dev, const char *newname);
+#define netdev_for_each_altname(dev, namenode) \
+ list_for_each_entry((namenode), &(dev)->name_node->list, list)
+
int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9c09f091cbff..df81c1f0a570 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
static int neigh_forced_gc(struct neigh_table *tbl)
{
- int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
+ int max_clean = atomic_read(&tbl->gc_entries) -
+ READ_ONCE(tbl->gc_thresh2);
unsigned long tref = jiffies - 5 * HZ;
struct neighbour *n, *tmp;
int shrunk = 0;
@@ -280,7 +281,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
}
}
- tbl->last_flush = jiffies;
+ WRITE_ONCE(tbl->last_flush, jiffies);
write_unlock_bh(&tbl->lock);
@@ -464,17 +465,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
- int entries;
+ int entries, gc_thresh3;
if (exempt_from_gc)
goto do_alloc;
entries = atomic_inc_return(&tbl->gc_entries) - 1;
- if (entries >= tbl->gc_thresh3 ||
- (entries >= tbl->gc_thresh2 &&
- time_after(now, tbl->last_flush + 5 * HZ))) {
- if (!neigh_forced_gc(tbl) &&
- entries >= tbl->gc_thresh3) {
+ gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
+ if (entries >= gc_thresh3 ||
+ (entries >= READ_ONCE(tbl->gc_thresh2) &&
+ time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
+ if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
net_info_ratelimited("%s: neighbor table overflow!\n",
tbl->id);
NEIGH_CACHE_STAT_INC(tbl, table_fulls);
@@ -955,13 +956,14 @@ static void neigh_periodic_work(struct work_struct *work)
if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
- tbl->last_rand = jiffies;
+
+ WRITE_ONCE(tbl->last_rand, jiffies);
list_for_each_entry(p, &tbl->parms_list, list)
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
- if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+ if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
goto out;
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@ -2167,15 +2169,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndtmsg->ndtm_pad2 = 0;
if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
- nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
- nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
- nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
- nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+ nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
+ NDTA_PAD) ||
+ nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
+ nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
+ nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
goto nla_put_failure;
{
unsigned long now = jiffies;
- long flush_delta = now - tbl->last_flush;
- long rand_delta = now - tbl->last_rand;
+ long flush_delta = now - READ_ONCE(tbl->last_flush);
+ long rand_delta = now - READ_ONCE(tbl->last_rand);
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
@@ -2183,7 +2186,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
.ndtc_entries = atomic_read(&tbl->entries),
.ndtc_last_flush = jiffies_to_msecs(flush_delta),
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
- .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
+ .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
};
rcu_read_lock();
@@ -2206,17 +2209,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
struct neigh_statistics *st;
st = per_cpu_ptr(tbl->stats, cpu);
- ndst.ndts_allocs += st->allocs;
- ndst.ndts_destroys += st->destroys;
- ndst.ndts_hash_grows += st->hash_grows;
- ndst.ndts_res_failed += st->res_failed;
- ndst.ndts_lookups += st->lookups;
- ndst.ndts_hits += st->hits;
- ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
- ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
- ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
- ndst.ndts_forced_gc_runs += st->forced_gc_runs;
- ndst.ndts_table_fulls += st->table_fulls;
+ ndst.ndts_allocs += READ_ONCE(st->allocs);
+ ndst.ndts_destroys += READ_ONCE(st->destroys);
+ ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
+ ndst.ndts_res_failed += READ_ONCE(st->res_failed);
+ ndst.ndts_lookups += READ_ONCE(st->lookups);
+ ndst.ndts_hits += READ_ONCE(st->hits);
+ ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
+ ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
+ ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
+ ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
+ ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
}
if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
@@ -2445,16 +2448,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout_tbl_lock;
if (tb[NDTA_THRESH1])
- tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
+ WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
if (tb[NDTA_THRESH2])
- tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
+ WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
if (tb[NDTA_THRESH3])
- tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
+ WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
if (tb[NDTA_GC_INTERVAL])
- tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
+ WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
err = 0;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f56b8d697014..4d1696677c48 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -669,19 +669,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_puts(seq, " Flags: ");
for (i = 0; i < NR_PKT_FLAGS; i++) {
- if (i == F_FLOW_SEQ)
+ if (i == FLOW_SEQ_SHIFT)
if (!pkt_dev->cflows)
continue;
- if (pkt_dev->flags & (1 << i))
+ if (pkt_dev->flags & (1 << i)) {
seq_printf(seq, "%s ", pkt_flag_names[i]);
- else if (i == F_FLOW_SEQ)
- seq_puts(seq, "FLOW_RND ");
-
#ifdef CONFIG_XFRM
- if (i == F_IPSEC && pkt_dev->spi)
- seq_printf(seq, "spi:%u", pkt_dev->spi);
+ if (i == IPSEC_SHIFT && pkt_dev->spi)
+ seq_printf(seq, "spi:%u ", pkt_dev->spi);
#endif
+ } else if (i == FLOW_SEQ_SHIFT) {
+ seq_puts(seq, "FLOW_RND ");
+ }
}
seq_puts(seq, "\n");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4a2ec33bfb51..53c377d054f0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -5503,13 +5503,11 @@ static unsigned int
rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
enum netdev_offload_xstats_type type)
{
- bool enabled = netdev_offload_xstats_enabled(dev, type);
-
return nla_total_size(0) +
/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
nla_total_size(sizeof(u8)) +
/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
- (enabled ? nla_total_size(sizeof(u8)) : 0) +
+ nla_total_size(sizeof(u8)) +
0;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4eaf7ed0d1f4..2bfa6a7ba244 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -62,6 +62,7 @@
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
+#include <linux/iov_iter.h>
#include <net/protocol.h>
#include <net/dst.h>
@@ -6931,3 +6932,42 @@ out:
return spliced ?: ret;
}
EXPORT_SYMBOL(skb_splice_from_iter);
+
+static __always_inline
+size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ __wsum *csum = priv2;
+ __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len);
+
+ *csum = csum_block_add(*csum, next, progress);
+ return 0;
+}
+
+static __always_inline
+size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ __wsum next, *csum = priv2;
+
+ next = csum_and_copy_from_user(iter_from, to + progress, len);
+ *csum = csum_block_add(*csum, next, progress);
+ return next ? 0 : len;
+}
+
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+ __wsum *csum, struct iov_iter *i)
+{
+ size_t copied;
+
+ if (WARN_ON_ONCE(!i->data_source))
+ return false;
+ copied = iterate_and_advance2(i, bytes, addr, csum,
+ copy_from_user_iter_csum,
+ memcpy_from_iter_csum);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter_full);
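A hedged usage sketch (not part of the patch) for the helper moved here: it copies from an iterator into a kernel buffer while folding the data into an internet checksum, reverting the iterator on a short copy. The wrapper name is hypothetical, and the header providing the declaration is assumed.

#include <linux/skbuff.h>
#include <linux/uio.h>

static int copy_payload_with_csum(void *to, size_t len,
				  struct iov_iter *from, __wsum *csum)
{
	if (!csum_and_copy_from_iter_full(to, len, csum, from))
		return -EFAULT;	/* iterator has already been reverted */
	return 0;
}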
diff --git a/net/core/stream.c b/net/core/stream.c
index f5c4e47df165..96fbcb9bbb30 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(sk_stream_wait_close);
*/
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
{
- int err = 0;
+ int ret, err = 0;
long vm_wait = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -142,11 +142,13 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_pending++;
- sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
- (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
- (sk_stream_memory_free(sk) &&
- !vm_wait), &wait);
+ ret = sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
+ (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
+ (sk_stream_memory_free(sk) && !vm_wait),
+ &wait);
sk->sk_write_pending--;
+ if (ret < 0)
+ goto do_error;
if (vm_wait) {
vm_wait -= current_timeo;
diff --git a/net/devlink/health.c b/net/devlink/health.c
index 638cad8d5c65..51e6e81e31bb 100644
--- a/net/devlink/health.c
+++ b/net/devlink/health.c
@@ -58,7 +58,6 @@ struct devlink_health_reporter {
struct devlink *devlink;
struct devlink_port *devlink_port;
struct devlink_fmsg *dump_fmsg;
- struct mutex dump_lock; /* lock parallel read/write from dump buffers */
u64 graceful_period;
bool auto_recover;
bool auto_dump;
@@ -125,7 +124,6 @@ __devlink_health_reporter_create(struct devlink *devlink,
reporter->graceful_period = graceful_period;
reporter->auto_recover = !!ops->recover;
reporter->auto_dump = !!ops->dump;
- mutex_init(&reporter->dump_lock);
return reporter;
}
@@ -226,7 +224,6 @@ EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
static void
devlink_health_reporter_free(struct devlink_health_reporter *reporter)
{
- mutex_destroy(&reporter->dump_lock);
if (reporter->dump_fmsg)
devlink_fmsg_free(reporter->dump_fmsg);
kfree(reporter);
@@ -625,10 +622,10 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
}
if (reporter->auto_dump) {
- mutex_lock(&reporter->dump_lock);
+ devl_lock(devlink);
/* store current dump of current error, for later analysis */
devlink_health_do_dump(reporter, priv_ctx, NULL);
- mutex_unlock(&reporter->dump_lock);
+ devl_unlock(devlink);
}
if (!reporter->auto_recover)
@@ -1262,7 +1259,7 @@ out:
}
static struct devlink_health_reporter *
-devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
+devlink_health_reporter_get_from_cb_lock(struct netlink_callback *cb)
{
const struct genl_info *info = genl_info_dump(cb);
struct devlink_health_reporter *reporter;
@@ -1272,10 +1269,12 @@ devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
if (IS_ERR(devlink))
return NULL;
- devl_unlock(devlink);
reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
- devlink_put(devlink);
+ if (!reporter) {
+ devl_unlock(devlink);
+ devlink_put(devlink);
+ }
return reporter;
}
@@ -1284,16 +1283,20 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_health_reporter *reporter;
+ struct devlink *devlink;
int err;
- reporter = devlink_health_reporter_get_from_cb(cb);
+ reporter = devlink_health_reporter_get_from_cb_lock(cb);
if (!reporter)
return -EINVAL;
- if (!reporter->ops->dump)
+ devlink = reporter->devlink;
+ if (!reporter->ops->dump) {
+ devl_unlock(devlink);
+ devlink_put(devlink);
return -EOPNOTSUPP;
+ }
- mutex_lock(&reporter->dump_lock);
if (!state->idx) {
err = devlink_health_do_dump(reporter, NULL, cb->extack);
if (err)
@@ -1309,7 +1312,8 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
unlock:
- mutex_unlock(&reporter->dump_lock);
+ devl_unlock(devlink);
+ devlink_put(devlink);
return err;
}
@@ -1326,9 +1330,7 @@ int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
if (!reporter->ops->dump)
return -EOPNOTSUPP;
- mutex_lock(&reporter->dump_lock);
devlink_health_dump_clear(reporter);
- mutex_unlock(&reporter->dump_lock);
return 0;
}
diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
index d0bc1dd8e65a..80c7302692c7 100644
--- a/net/handshake/netlink.c
+++ b/net/handshake/netlink.c
@@ -87,29 +87,6 @@ struct nlmsghdr *handshake_genl_put(struct sk_buff *msg,
}
EXPORT_SYMBOL(handshake_genl_put);
-/*
- * dup() a kernel socket for use as a user space file descriptor
- * in the current process. The kernel socket must have an
- * instatiated struct file.
- *
- * Implicit argument: "current()"
- */
-static int handshake_dup(struct socket *sock)
-{
- struct file *file;
- int newfd;
-
- file = get_file(sock->file);
- newfd = get_unused_fd_flags(O_CLOEXEC);
- if (newfd < 0) {
- fput(file);
- return newfd;
- }
-
- fd_install(newfd, file);
- return newfd;
-}
-
int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
@@ -133,17 +110,20 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
goto out_status;
sock = req->hr_sk->sk_socket;
- fd = handshake_dup(sock);
+ fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
err = fd;
goto out_complete;
}
+
err = req->hr_proto->hp_accept(req, info, fd);
if (err) {
- fput(sock->file);
+ put_unused_fd(fd);
goto out_complete;
}
+ fd_install(fd, get_file(sock->file));
+
trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
return 0;
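
The rewritten accept path above uses the usual three-step file-descriptor pattern: reserve a number first, attempt the operation that may fail, and only publish the descriptor with fd_install() once nothing can go wrong, because an installed fd cannot be taken back from user space. A hedged sketch of that ordering; the op callback is illustrative, while get_unused_fd_flags(), put_unused_fd(), fd_install() and get_file() are the real helpers:

    static int publish_sock_fd(struct socket *sock,
                               int (*op)(void *arg, int fd), void *arg)
    {
            int fd, err;

            fd = get_unused_fd_flags(O_CLOEXEC);    /* reserve a number only */
            if (fd < 0)
                    return fd;

            err = op(arg, fd);                      /* may still fail safely */
            if (err) {
                    put_unused_fd(fd);              /* nothing was published */
                    return err;
            }

            fd_install(fd, get_file(sock->file));   /* point of no return */
            return fd;
    }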
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 3d2e30e20473..2713c9b06c4c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -597,7 +597,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
add_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending += writebias;
- sk->sk_wait_pending++;
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -613,7 +612,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
}
remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending -= writebias;
- sk->sk_wait_pending--;
return timeo;
}
@@ -642,6 +640,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
return -EINVAL;
if (uaddr->sa_family == AF_UNSPEC) {
+ sk->sk_disconnects++;
err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
goto out;
@@ -696,6 +695,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
tcp_sk(sk)->fastopen_req &&
tcp_sk(sk)->fastopen_req->data ? 1 : 0;
+ int dis = sk->sk_disconnects;
/* Error code is set above */
if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
@@ -704,6 +704,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
+
+ if (dis != sk->sk_disconnects) {
+ err = -EPIPE;
+ goto out;
+ }
}
/* Connection was closed by RST, timeout, ICMP error
@@ -725,6 +730,7 @@ out:
sock_error:
err = sock_error(sk) ? : -ECONNABORTED;
sock->state = SS_UNCONNECTED;
+ sk->sk_disconnects++;
if (sk->sk_prot->disconnect(sk, flags))
sock->state = SS_DISCONNECTING;
goto out;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 2be2d4922557..4ccfc104f13a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -732,7 +732,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
skb->csum = csum_block_sub(skb->csum, csumdiff,
skb->len - trimlen);
}
- pskb_trim(skb, skb->len - trimlen);
+ ret = pskb_trim(skb, skb->len - trimlen);
+ if (unlikely(ret))
+ return ret;
ret = nexthdr[1];
@@ -784,7 +786,7 @@ int esp_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
- * advertize the change to the keying daemon.
+ * advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 1ea82bc33ef1..5eb1b8d302bb 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1325,15 +1325,18 @@ __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
unsigned char scope)
{
struct fib_nh *nh;
+ __be32 saddr;
if (nhc->nhc_family != AF_INET)
return inet_select_addr(nhc->nhc_dev, 0, scope);
nh = container_of(nhc, struct fib_nh, nh_common);
- nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+ saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
- return nh->nh_saddr;
+ WRITE_ONCE(nh->nh_saddr, saddr);
+ WRITE_ONCE(nh->nh_saddr_genid, atomic_read(&net->ipv4.dev_addr_genid));
+
+ return saddr;
}
__be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
@@ -1347,8 +1350,9 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
struct fib_nh *nh;
nh = container_of(nhc, struct fib_nh, nh_common);
- if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
- return nh->nh_saddr;
+ if (READ_ONCE(nh->nh_saddr_genid) ==
+ atomic_read(&net->ipv4.dev_addr_genid))
+ return READ_ONCE(nh->nh_saddr);
}
return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
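
The WRITE_ONCE()/READ_ONCE() pair above publishes a cached source address together with a generation number, so lockless readers use the cache only while the address generation has not moved. A minimal user-space sketch of that generation-stamped cache, with relaxed C11 atomics standing in for the kernel accessors; every name below is illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint addr_genid;          /* bumped whenever addresses change */

    struct saddr_cache {
            atomic_uint value;
            atomic_uint genid;
    };

    static unsigned int recompute_saddr(void)
    {
            return 0x0a000001;              /* pretend address selection */
    }

    static unsigned int cached_saddr(struct saddr_cache *c)
    {
            unsigned int gen = atomic_load_explicit(&addr_genid, memory_order_relaxed);
            unsigned int v;

            if (atomic_load_explicit(&c->genid, memory_order_relaxed) == gen)
                    return atomic_load_explicit(&c->value, memory_order_relaxed);

            v = recompute_saddr();          /* stale: recompute, then republish */
            atomic_store_explicit(&c->value, v, memory_order_relaxed);
            atomic_store_explicit(&c->genid, gen, memory_order_relaxed);
            return v;
    }

    int main(void)
    {
            struct saddr_cache c = {0};

            atomic_store(&addr_genid, 1);
            printf("%#x\n", cached_saddr(&c));
            return 0;
    }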
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index aeebe8816689..394a498c2823 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1145,7 +1145,6 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
if (newsk) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
- newsk->sk_wait_pending = 0;
inet_sk_set_state(newsk, TCP_SYN_RECV);
newicsk->icsk_bind_hash = NULL;
newicsk->icsk_bind2_hash = NULL;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c32f5e28758b..598c1b114d2c 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -149,8 +149,14 @@ static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family != tb2->family)
- return false;
+ if (sk->sk_family != tb2->family) {
+ if (sk->sk_family == AF_INET)
+ return ipv6_addr_v4mapped(&tb2->v6_rcv_saddr) &&
+ tb2->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
+
+ return ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr) &&
+ sk->sk_v6_rcv_saddr.s6_addr32[3] == tb2->rcv_saddr;
+ }
if (sk->sk_family == AF_INET6)
return ipv6_addr_equal(&tb2->v6_rcv_saddr,
@@ -819,19 +825,7 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
tb->l3mdev != l3mdev)
return false;
-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family != tb->family) {
- if (sk->sk_family == AF_INET)
- return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
- tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
-
- return false;
- }
-
- if (sk->sk_family == AF_INET6)
- return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
-#endif
- return tb->rcv_saddr == sk->sk_rcv_saddr;
+ return inet_bind2_bucket_addr_match(tb, sk);
}
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
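
The new branch in inet_bind2_bucket_addr_match() lets an IPv6 bucket holding a v4-mapped address (::ffff:a.b.c.d) match an IPv4 socket by comparing only the last 32 bits. A small user-space check of that comparison using the standard in6_addr layout:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    /* 1 when a6 is ::ffff:a.b.c.d and its embedded IPv4 part equals v4 */
    static int v4mapped_match(const struct in6_addr *a6, in_addr_t v4)
    {
            return IN6_IS_ADDR_V4MAPPED(a6) &&
                   memcmp(&a6->s6_addr[12], &v4, sizeof(v4)) == 0;
    }

    int main(void)
    {
            struct in6_addr a6;
            in_addr_t v4 = inet_addr("192.0.2.1");

            inet_pton(AF_INET6, "::ffff:192.0.2.1", &a6);
            printf("%d\n", v4mapped_match(&a6, v4));        /* prints 1 */
            return 0;
    }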
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3f66cdeef7de..3d3a24f79573 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -831,7 +831,9 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
*/
if (!skb_queue_empty(&sk->sk_receive_queue))
break;
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0)
+ break;
if (signal_pending(current)) {
ret = sock_intr_errno(timeo);
break;
@@ -925,10 +927,11 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
return mss_now;
}
-/* In some cases, both sendmsg() could have added an skb to the write queue,
- * but failed adding payload on it. We need to remove it to consume less
+/* In some cases, sendmsg() could have added an skb to the write queue,
+ * but failed adding payload on it. We need to remove it to consume less
* memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
- * epoll() users.
+ * epoll() users. Another reason is that tcp_write_xmit() does not like
+ * finding an empty skb in the write queue.
*/
void tcp_remove_empty_skb(struct sock *sk)
{
@@ -1287,6 +1290,7 @@ new_segment:
wait_for_space:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ tcp_remove_empty_skb(sk);
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
@@ -2442,7 +2446,11 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
__sk_flush_backlog(sk);
} else {
tcp_cleanup_rbuf(sk, copied);
- sk_wait_data(sk, &timeo, last);
+ err = sk_wait_data(sk, &timeo, last);
+ if (err < 0) {
+ err = copied ? : err;
+ goto out;
+ }
}
if ((flags & MSG_PEEK) &&
@@ -2966,12 +2974,6 @@ int tcp_disconnect(struct sock *sk, int flags)
int old_state = sk->sk_state;
u32 seq;
- /* Deny disconnect if other threads are blocked in sk_wait_event()
- * or inet_wait_for_connect().
- */
- if (sk->sk_wait_pending)
- return -EBUSY;
-
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 327268203001..53b0d62fd2c2 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -307,6 +307,10 @@ msg_bytes_ready:
}
data = tcp_msg_wait_data(sk, psock, timeo);
+ if (data < 0) {
+ copied = data;
+ goto unlock;
+ }
if (data && !sk_psock_queue_empty(psock))
goto msg_bytes_ready;
copied = -EAGAIN;
@@ -317,6 +321,8 @@ out:
tcp_rcv_space_adjust(sk);
if (copied > 0)
__tcp_cleanup_rbuf(sk, copied);
+
+unlock:
release_sock(sk);
sk_psock_put(sk, psock);
return copied;
@@ -351,6 +357,10 @@ msg_bytes_ready:
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
data = tcp_msg_wait_data(sk, psock, timeo);
+ if (data < 0) {
+ ret = data;
+ goto unlock;
+ }
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
@@ -361,6 +371,8 @@ msg_bytes_ready:
copied = -EAGAIN;
}
ret = copied;
+
+unlock:
release_sock(sk);
sk_psock_put(sk, psock);
return ret;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8afb0950a697..804821d6bd4d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2207,16 +2207,17 @@ void tcp_enter_loss(struct sock *sk)
* restore sanity to the SACK scoreboard. If the apparent reneging
* persists until this RTO then we'll clear the SACK scoreboard.
*/
-static bool tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
{
- if (flag & FLAG_SACK_RENEGING &&
- flag & FLAG_SND_UNA_ADVANCED) {
+ if (*ack_flag & FLAG_SACK_RENEGING &&
+ *ack_flag & FLAG_SND_UNA_ADVANCED) {
struct tcp_sock *tp = tcp_sk(sk);
unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
msecs_to_jiffies(10));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
delay, TCP_RTO_MAX);
+ *ack_flag &= ~FLAG_SET_XMIT_TIMER;
return true;
}
return false;
@@ -2986,7 +2987,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tp->prior_ssthresh = 0;
/* B. In all the states check for reneging SACKs. */
- if (tcp_check_sack_reneging(sk, flag))
+ if (tcp_check_sack_reneging(sk, ack_flag))
return;
/* C. Check consistency of the current state. */
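
tcp_check_sack_reneging() now receives the flag word by pointer so it can clear FLAG_SET_XMIT_TIMER after arming its own, longer timer, preventing the caller from immediately rearming a shorter RTO on top of it. A minimal user-space sketch of that in/out-flag pattern; the bit values here are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_SACK_RENEGING   0x1        /* illustrative values */
    #define FLAG_SET_XMIT_TIMER  0x2

    static bool check_reneging(int *ack_flag)
    {
            if (*ack_flag & FLAG_SACK_RENEGING) {
                    /* ...arm the dedicated reneging timer here... */
                    *ack_flag &= ~FLAG_SET_XMIT_TIMER;  /* caller must not rearm */
                    return true;
            }
            return false;
    }

    int main(void)
    {
            int flag = FLAG_SACK_RENEGING | FLAG_SET_XMIT_TIMER;

            check_reneging(&flag);
            printf("caller rearms: %s\n",
                   (flag & FLAG_SET_XMIT_TIMER) ? "yes" : "no");   /* no */
            return 0;
    }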
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 27140e5cdc06..4167e8a48b60 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1869,6 +1869,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_TLS_DEVICE
tail->decrypted != skb->decrypted ||
#endif
+ !mptcp_skb_can_collapse(tail, skb) ||
thtail->doff != th->doff ||
memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
goto no_coalesce;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index aa0fc8c766e5..f0723460753c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2456,6 +2456,7 @@ static int tcp_mtu_probe(struct sock *sk)
/* build the payload, and be prepared to abort if this fails. */
if (tcp_clone_payload(sk, nskb, probe_size)) {
+ tcp_skb_tsorted_anchor_cleanup(nskb);
consume_skb(nskb);
return -1;
}
@@ -2541,6 +2542,18 @@ static bool tcp_pacing_check(struct sock *sk)
return true;
}
+static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
+{
+ const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
+
+ /* No skb in the rtx queue. */
+ if (!node)
+ return true;
+
+ /* Only one skb in rtx queue. */
+ return !node->rb_left && !node->rb_right;
+}
+
/* TCP Small Queues :
* Control number of packets in qdisc/devices to two packets / or ~1 ms.
* (These limits are doubled for retransmits)
@@ -2578,12 +2591,12 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
limit += extra_bytes;
}
if (refcount_read(&sk->sk_wmem_alloc) > limit) {
- /* Always send skb if rtx queue is empty.
+ /* Always send skb if rtx queue is empty or has one skb.
* No need to wait for TX completion to call us back,
* after softirq/tasklet schedule.
* This helps when TX completions are delayed too much.
*/
- if (tcp_rtx_queue_empty(sk))
+ if (tcp_rtx_queue_empty_or_single_skb(sk))
return false;
set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
@@ -2787,7 +2800,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- u32 timeout, rto_delta_us;
+ u32 timeout, timeout_us, rto_delta_us;
int early_retrans;
/* Don't do any loss probe on a Fast Open connection before 3WHS
@@ -2811,11 +2824,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
* sample is available then probe after TCP_TIMEOUT_INIT.
*/
if (tp->srtt_us) {
- timeout = usecs_to_jiffies(tp->srtt_us >> 2);
+ timeout_us = tp->srtt_us >> 2;
if (tp->packets_out == 1)
- timeout += TCP_RTO_MIN;
+ timeout_us += tcp_rto_min_us(sk);
else
- timeout += TCP_TIMEOUT_MIN;
+ timeout_us += TCP_TIMEOUT_MIN_US;
+ timeout = usecs_to_jiffies(timeout_us);
} else {
timeout = TCP_TIMEOUT_INIT;
}
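
tcp_rtx_queue_empty_or_single_skb() infers "at most one skb" purely from the shape of the rb-tree root: a NULL root means zero entries and a childless root means exactly one, with no counting or locking needed. A stripped-down, user-space version of that shape test:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node {
            struct node *left, *right;
    };

    static bool empty_or_single(const struct node *root)
    {
            if (!root)
                    return true;                    /* no entries at all */
            return !root->left && !root->right;     /* exactly one entry */
    }

    int main(void)
    {
            struct node a = {0}, b = {0};

            printf("%d\n", empty_or_single(NULL));  /* 1 */
            printf("%d\n", empty_or_single(&a));    /* 1 */
            a.left = &b;
            printf("%d\n", empty_or_single(&a));    /* 0: two or more */
            return 0;
    }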
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index acf4869c5d3b..bba10110fbbc 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -104,7 +104,7 @@ bool tcp_rack_mark_lost(struct sock *sk)
tp->rack.advanced = 0;
tcp_rack_detect_loss(sk, &timeout);
if (timeout) {
- timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
+ timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
timeout, inet_csk(sk)->icsk_rto);
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index fddd0cbdede1..2cc1a45742d8 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -770,7 +770,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
skb->csum = csum_block_sub(skb->csum, csumdiff,
skb->len - trimlen);
}
- pskb_trim(skb, skb->len - trimlen);
+ ret = pskb_trim(skb, skb->len - trimlen);
+ if (unlikely(ret))
+ return ret;
ret = nexthdr[1];
@@ -831,7 +833,7 @@ int esp6_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
- * advertize the change to the keying daemon.
+ * advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 41a680c76d2e..42fb6996b077 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -117,10 +117,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- if (likely(xdst->u.rt6.rt6i_idev))
- in6_dev_put(xdst->u.rt6.rt6i_idev);
dst_destroy_metrics_generic(dst);
rt6_uncached_list_del(&xdst->u.rt6);
+ if (likely(xdst->u.rt6.rt6i_idev))
+ in6_dev_put(xdst->u.rt6.rt6i_idev);
xfrm_dst_destroy(xdst);
}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 0665ff5e456e..a2db0585dce0 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -912,7 +912,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
*/
if (ieee80211_key_identical(sdata, old_key, key)) {
ret = -EALREADY;
- goto unlock;
+ goto out;
}
key->local = sdata->local;
@@ -940,7 +940,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
out:
ieee80211_key_free_unused(key);
- unlock:
mutex_unlock(&sdata->local->key_mtx);
return ret;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index e751cda5eef6..8f6b6f56b65b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2468,8 +2468,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
/* drop unicast public action frames when using MPF */
if (is_unicast_ether_addr(mgmt->da) &&
- ieee80211_is_public_action((void *)rx->skb->data,
- rx->skb->len))
+ ieee80211_is_protected_dual_of_public_action(rx->skb))
return -EACCES;
}
diff --git a/net/mctp/route.c b/net/mctp/route.c
index ab62fe447038..7a47a58aa54b 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -737,6 +737,8 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
{
struct mctp_route *tmp, *rt = NULL;
+ rcu_read_lock();
+
list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
/* TODO: add metrics */
if (mctp_rt_match_eid(tmp, dnet, daddr)) {
@@ -747,21 +749,29 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
}
}
+ rcu_read_unlock();
+
return rt;
}
static struct mctp_route *mctp_route_lookup_null(struct net *net,
struct net_device *dev)
{
- struct mctp_route *rt;
+ struct mctp_route *tmp, *rt = NULL;
- list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
- if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
- refcount_inc_not_zero(&rt->refs))
- return rt;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
+ if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL &&
+ refcount_inc_not_zero(&tmp->refs)) {
+ rt = tmp;
+ break;
+ }
}
- return NULL;
+ rcu_read_unlock();
+
+ return rt;
}
static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
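
Both lookup helpers above now follow the standard RCU lookup discipline: the list walk happens entirely inside rcu_read_lock()/rcu_read_unlock(), and the object is only returned after refcount_inc_not_zero() has pinned it while still under RCU. A hedged kernel-style sketch of that pattern, with an illustrative item type and assuming the usual <linux/rculist.h> and <linux/refcount.h> helpers:

    struct item {
            struct list_head list;
            refcount_t refs;
            int key;
    };

    static struct item *item_lookup(struct list_head *head, int key)
    {
            struct item *tmp, *found = NULL;

            rcu_read_lock();
            list_for_each_entry_rcu(tmp, head, list) {
                    if (tmp->key == key &&
                        refcount_inc_not_zero(&tmp->refs)) {
                            found = tmp;    /* reference taken under RCU */
                            break;
                    }
            }
            rcu_read_unlock();

            return found;                   /* caller drops the reference */
    }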
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index c3b83cb390d9..886ab689a8ae 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1298,7 +1298,7 @@ alloc_skb:
if (copy == 0) {
u64 snd_una = READ_ONCE(msk->snd_una);
- if (snd_una != msk->snd_nxt) {
+ if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
tcp_remove_empty_skb(ssk);
return 0;
}
@@ -1306,11 +1306,6 @@ alloc_skb:
zero_window_probe = true;
data_seq = snd_una - 1;
copy = 1;
-
- /* all mptcp-level data is acked, no skbs should be present into the
- * ssk write queue
- */
- WARN_ON_ONCE(reuse_skb);
}
copy = min_t(size_t, copy, info->limit - info->sent);
@@ -1339,7 +1334,6 @@ alloc_skb:
if (reuse_skb) {
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
mpext->data_len += copy;
- WARN_ON_ONCE(zero_window_probe);
goto out;
}
@@ -2354,6 +2348,26 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
#define MPTCP_CF_PUSH BIT(1)
#define MPTCP_CF_FASTCLOSE BIT(2)
+/* Be sure to send a reset only if the caller asked for it; also
+ * completely clean up the subflow status when the subflow reaches
+ * TCP_CLOSE state
+ */
+static void __mptcp_subflow_disconnect(struct sock *ssk,
+ struct mptcp_subflow_context *subflow,
+ unsigned int flags)
+{
+ if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ (flags & MPTCP_CF_FASTCLOSE)) {
+ /* The MPTCP code never waits on the subflow sockets, so a TCP-level
+ * disconnect should never fail
+ */
+ WARN_ON_ONCE(tcp_disconnect(ssk, 0));
+ mptcp_subflow_ctx_reset(subflow);
+ } else {
+ tcp_shutdown(ssk, SEND_SHUTDOWN);
+ }
+}
+
/* subflow sockets can be either outgoing (connect) or incoming
* (accept).
*
@@ -2391,7 +2405,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
- /* be sure to force the tcp_disconnect() path,
+ /* be sure to force the tcp_close path
* to generate the egress reset
*/
ssk->sk_lingertime = 0;
@@ -2401,11 +2415,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
if (!dispose_it) {
- /* The MPTCP code never wait on the subflow sockets, TCP-level
- * disconnect should never fail
- */
- WARN_ON_ONCE(tcp_disconnect(ssk, 0));
- mptcp_subflow_ctx_reset(subflow);
+ __mptcp_subflow_disconnect(ssk, subflow, flags);
release_sock(ssk);
goto out;
@@ -3098,12 +3108,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- /* Deny disconnect if other threads are blocked in sk_wait_event()
- * or inet_wait_for_connect().
- */
- if (sk->sk_wait_pending)
- return -EBUSY;
-
/* We are on the fastopen error path. We can't call straight into the
* subflows cleanup code due to lock nesting (we are already under
* msk->firstsocket lock).
@@ -3173,7 +3177,6 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif
- nsk->sk_wait_pending = 0;
__mptcp_init_sock(nsk);
msk = mptcp_sk(nsk);
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 1d34d700bd09..920a5a29ae1d 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -316,12 +316,6 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
-static bool nf_flow_is_outdated(const struct flow_offload *flow)
-{
- return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
- !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
-}
-
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
return nf_flow_timeout_delta(flow->timeout) <= 0;
@@ -407,12 +401,18 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
return err;
}
+static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
+ const struct flow_offload *flow)
+{
+ return flow_table->type->gc && flow_table->type->gc(flow);
+}
+
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data)
{
if (nf_flow_has_expired(flow) ||
nf_ct_is_dying(flow->ct) ||
- nf_flow_is_outdated(flow))
+ nf_flow_custom_gc(flow_table, flow))
flow_offload_teardown(flow);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index a72b6aeefb1b..29c651804cb2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3166,7 +3166,7 @@ int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
if (err < 0)
return err;
- if (!tb[NFTA_EXPR_DATA])
+ if (!tb[NFTA_EXPR_DATA] || !tb[NFTA_EXPR_NAME])
return -EINVAL;
type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
@@ -5556,7 +5556,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- u64 timeout = 0;
nest = nla_nest_start_noflag(skb, NFTA_LIST_ELEM);
if (nest == NULL)
@@ -5592,15 +5591,11 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
htonl(*nft_set_ext_flags(ext))))
goto nla_put_failure;
- if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) {
- timeout = *nft_set_ext_timeout(ext);
- if (nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
- nf_jiffies64_to_msecs(timeout),
- NFTA_SET_ELEM_PAD))
- goto nla_put_failure;
- } else if (set->flags & NFT_SET_TIMEOUT) {
- timeout = READ_ONCE(set->timeout);
- }
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+ nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
+ nf_jiffies64_to_msecs(*nft_set_ext_timeout(ext)),
+ NFTA_SET_ELEM_PAD))
+ goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
u64 expires, now = get_jiffies_64();
@@ -5615,9 +5610,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
nf_jiffies64_to_msecs(expires),
NFTA_SET_ELEM_PAD))
goto nla_put_failure;
-
- if (reset)
- *nft_set_ext_expiration(ext) = now + timeout;
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) {
@@ -7615,6 +7607,16 @@ nla_put_failure:
return -1;
}
+static void audit_log_obj_reset(const struct nft_table *table,
+ unsigned int base_seq, unsigned int nentries)
+{
+ char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
+
+ audit_log_nfcfg(buf, table->family, nentries,
+ AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
+ kfree(buf);
+}
+
struct nft_obj_filter {
char *table;
u32 type;
@@ -7629,8 +7631,10 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nftables_pernet *nft_net;
+ unsigned int entries = 0;
struct nft_object *obj;
bool reset = false;
+ int rc = 0;
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
reset = true;
@@ -7643,6 +7647,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
+ entries = 0;
list_for_each_entry_rcu(obj, &table->objects, list) {
if (!nft_is_active(net, obj))
goto cont;
@@ -7658,34 +7663,27 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
filter->type != NFT_OBJECT_UNSPEC &&
obj->ops->type->type != filter->type)
goto cont;
- if (reset) {
- char *buf = kasprintf(GFP_ATOMIC,
- "%s:%u",
- table->name,
- nft_net->base_seq);
-
- audit_log_nfcfg(buf,
- family,
- obj->handle,
- AUDIT_NFT_OP_OBJ_RESET,
- GFP_ATOMIC);
- kfree(buf);
- }
- if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NFT_MSG_NEWOBJ,
- NLM_F_MULTI | NLM_F_APPEND,
- table->family, table,
- obj, reset) < 0)
- goto done;
+ rc = nf_tables_fill_obj_info(skb, net,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWOBJ,
+ NLM_F_MULTI | NLM_F_APPEND,
+ table->family, table,
+ obj, reset);
+ if (rc < 0)
+ break;
+ entries++;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
+ if (reset && entries)
+ audit_log_obj_reset(table, nft_net->base_seq, entries);
+ if (rc < 0)
+ break;
}
-done:
rcu_read_unlock();
cb->args[0] = idx;
@@ -7790,7 +7788,7 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
audit_log_nfcfg(buf,
family,
- obj->handle,
+ 1,
AUDIT_NFT_OP_OBJ_RESET,
GFP_ATOMIC);
kfree(buf);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 53c9e76473ba..f03f4d4d7d88 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -698,8 +698,8 @@ nfulnl_log_packet(struct net *net,
unsigned int plen = 0;
struct nfnl_log_net *log = nfnl_log_pernet(net);
const struct nfnl_ct_hook *nfnl_ct = NULL;
+ enum ip_conntrack_info ctinfo = 0;
struct nf_conn *ct = NULL;
- enum ip_conntrack_info ctinfo;
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
li = li_user;
diff --git a/net/netfilter/nft_inner.c b/net/netfilter/nft_inner.c
index 28e2873ba24e..928312d01eb1 100644
--- a/net/netfilter/nft_inner.c
+++ b/net/netfilter/nft_inner.c
@@ -298,6 +298,7 @@ static int nft_inner_init(const struct nft_ctx *ctx,
int err;
if (!tb[NFTA_INNER_FLAGS] ||
+ !tb[NFTA_INNER_NUM] ||
!tb[NFTA_INNER_HDRSIZE] ||
!tb[NFTA_INNER_TYPE] ||
!tb[NFTA_INNER_EXPR])
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 120f6d395b98..0a689c8e0295 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -179,7 +179,7 @@ void nft_payload_eval(const struct nft_expr *expr,
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
- if (!skb_mac_header_was_set(skb))
+ if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
goto err;
if (skb_vlan_tag_present(skb) &&
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
index 25a75591583e..2e164a319945 100644
--- a/net/netfilter/nft_set_pipapo.h
+++ b/net/netfilter/nft_set_pipapo.h
@@ -147,7 +147,7 @@ struct nft_pipapo_match {
unsigned long * __percpu *scratch;
size_t bsize_max;
struct rcu_head rcu;
- struct nft_pipapo_field f[];
+ struct nft_pipapo_field f[] __counted_by(field_count);
};
/**
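
The __counted_by(field_count) annotation above tells the compiler (and the FORTIFY_SOURCE/UBSAN bounds checks) which member bounds the flexible array, so out-of-range accesses to f[] can be caught at run time. A hedged kernel-style sketch of the idiom with illustrative names, assuming the usual struct_size()/kzalloc() helpers:

    struct box {
            unsigned int nr;                        /* set before items[] is used */
            int items[] __counted_by(nr);
    };

    static struct box *box_alloc(unsigned int nr)
    {
            struct box *b = kzalloc(struct_size(b, items, nr), GFP_KERNEL);

            if (b)
                    b->nr = nr;                     /* bounds checks key off this */
            return b;
    }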
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 2660ceab3759..e34662f4a71e 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -568,6 +568,8 @@ static void *nft_rbtree_deactivate(const struct net *net,
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
+ } else if (nft_set_elem_expired(&rbe->ext)) {
+ break;
} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = parent->rb_left;
continue;
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index 6705bb895e23..1dac28136e6a 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -203,17 +203,13 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
llcp_sock = tmp_sock;
+ sock_hold(&llcp_sock->sk);
break;
}
}
read_unlock(&local->sockets.lock);
- if (llcp_sock == NULL)
- return NULL;
-
- sock_hold(&llcp_sock->sk);
-
return llcp_sock;
}
@@ -346,7 +342,8 @@ static int nfc_llcp_wks_sap(const char *service_name, size_t service_name_len)
static
struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
- const u8 *sn, size_t sn_len)
+ const u8 *sn, size_t sn_len,
+ bool needref)
{
struct sock *sk;
struct nfc_llcp_sock *llcp_sock, *tmp_sock;
@@ -382,6 +379,8 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
llcp_sock = tmp_sock;
+ if (needref)
+ sock_hold(&llcp_sock->sk);
break;
}
}
@@ -423,7 +422,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
* to this service name.
*/
if (nfc_llcp_sock_from_sn(local, sock->service_name,
- sock->service_name_len) != NULL) {
+ sock->service_name_len,
+ false) != NULL) {
mutex_unlock(&local->sdp_lock);
return LLCP_SAP_MAX;
@@ -824,16 +824,7 @@ out:
static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
const u8 *sn, size_t sn_len)
{
- struct nfc_llcp_sock *llcp_sock;
-
- llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
-
- if (llcp_sock == NULL)
- return NULL;
-
- sock_hold(&llcp_sock->sk);
-
- return llcp_sock;
+ return nfc_llcp_sock_from_sn(local, sn, sn_len, true);
}
static const u8 *nfc_llcp_connect_sn(const struct sk_buff *skb, size_t *sn_len)
@@ -1298,7 +1289,8 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
}
llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
- service_name_len);
+ service_name_len,
+ true);
if (!llcp_sock) {
sap = 0;
goto add_snl;
@@ -1318,6 +1310,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
if (sap == LLCP_SAP_MAX) {
sap = 0;
+ nfc_llcp_sock_put(llcp_sock);
goto add_snl;
}
@@ -1335,6 +1328,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
pr_debug("%p %d\n", llcp_sock, sap);
+ nfc_llcp_sock_put(llcp_sock);
add_snl:
sdp = nfc_llcp_build_sdres_tlv(tid, sap);
if (sdp == NULL)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index fff755dde30d..6c9592d05120 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -909,6 +909,11 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
return -EINVAL;
}
+ if (protocol >= NFC_PROTO_MAX) {
+ pr_err("the requested nfc protocol is invalid\n");
+ return -EINVAL;
+ }
+
if (!(nci_target->supported_protocols & (1 << protocol))) {
pr_err("target does not support the requested protocol 0x%x\n",
protocol);
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
index 0935527d1d12..b68150c971d0 100644
--- a/net/nfc/nci/spi.c
+++ b/net/nfc/nci/spi.c
@@ -151,6 +151,8 @@ static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
int ret;
skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
/* add the NCI SPI header to the start of the buffer */
hdr = skb_push(skb, NCI_SPI_HDR_LEN);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8f97648d652f..a84e00b5904b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3607,7 +3607,12 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
- memcpy(sll->sll_addr_flex, dev->dev_addr, dev->addr_len);
+
+ /* Let __fortify_memcpy_chk() know the actual buffer size. */
+ memcpy(((struct sockaddr_storage *)sll)->__data +
+ offsetof(struct sockaddr_ll, sll_addr) -
+ offsetofend(struct sockaddr_ll, sll_family),
+ dev->dev_addr, dev->addr_len);
} else {
sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
sll->sll_halen = 0;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 08630896b6c8..14cc8fe8584b 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1180,7 +1180,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
init_waitqueue_head(&data->read_wait);
mutex_lock(&rfkill_global_mutex);
- mutex_lock(&data->mtx);
/*
* start getting events from elsewhere but hold mtx to get
* startup events added first
@@ -1192,10 +1191,11 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
goto free;
rfkill_sync(rfkill);
rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
+ mutex_lock(&data->mtx);
list_add_tail(&ev->list, &data->events);
+ mutex_unlock(&data->mtx);
}
list_add(&data->list, &rfkill_fds);
- mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
file->private_data = data;
@@ -1203,7 +1203,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
return stream_open(inode, file);
free:
- mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
mutex_destroy(&data->mtx);
list_for_each_entry_safe(ev, tmp, &data->events, list)
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index e9d1b2f2ff0a..5a81505fba9a 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -108,13 +108,13 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
rfkill->clk = devm_clk_get(&pdev->dev, NULL);
- gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
rfkill->reset_gpio = gpio;
- gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_ASIS);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 7c652d14528b..fb52d6f9aff9 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -278,7 +278,16 @@ err_nat:
return err;
}
+static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
+{
+ return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
+ test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
+ !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
+ !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
+}
+
static struct nf_flowtable_type flowtable_ct = {
+ .gc = tcf_ct_flow_is_outdated,
.action = tcf_ct_flow_table_fill_actions,
.owner = THIS_MODULE,
};
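
The flowtable hunks above are the producer and consumer of the same optional hook: nf_flow_offload_gc_step() now asks flow_table->type->gc() whether a flow is stale, and act_ct registers tcf_ct_flow_is_outdated() as that hook, so the conntrack-specific staleness test lives with the code that understands it. The shape of the pattern, as a small self-contained sketch with illustrative names:

    #include <stdbool.h>
    #include <stddef.h>

    struct flow_entry;                              /* opaque here */

    struct flowtable_type {
            bool (*gc)(const struct flow_entry *flow);  /* optional hook */
    };

    static bool flow_custom_gc(const struct flowtable_type *type,
                               const struct flow_entry *flow)
    {
            /* expiry and dying-conntrack checks would run first, as in
             * nf_flow_offload_gc_step(); the hook is consulted last */
            return type->gc && type->gc(flow);
    }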
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index da4c179a4d41..6663e971a13e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -366,7 +366,7 @@ static int u32_init(struct tcf_proto *tp)
idr_init(&root_ht->handle_idr);
if (tp_c == NULL) {
- tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
+ tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
if (tp_c == NULL) {
kfree(root_ht);
return -ENOBUFS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 3554085bc2be..880c5f16b29c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -902,6 +902,14 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
cl->cl_flags |= HFSC_USC;
}
+static void
+hfsc_upgrade_rt(struct hfsc_class *cl)
+{
+ cl->cl_fsc = cl->cl_rsc;
+ rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
+ cl->cl_flags |= HFSC_FSC;
+}
+
static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
[TCA_HFSC_RSC] = { .len = sizeof(struct tc_service_curve) },
[TCA_HFSC_FSC] = { .len = sizeof(struct tc_service_curve) },
@@ -1011,10 +1019,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (parent == NULL)
return -ENOENT;
}
- if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
- NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
- return -EINVAL;
- }
if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
return -EINVAL;
@@ -1065,6 +1069,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->cf_tree = RB_ROOT;
sch_tree_lock(sch);
+ /* Check if the inner class is a misconfigured 'rt' */
+ if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
+ NL_SET_ERR_MSG(extack,
+ "Forced curve change on parent 'rt' to 'sc'");
+ hfsc_upgrade_rt(parent);
+ }
qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
list_add_tail(&cl->siblings, &parent->children);
if (parent->level == 0)
diff --git a/net/smc/Kconfig b/net/smc/Kconfig
index 1ab3c5a2c5ad..746be3996768 100644
--- a/net/smc/Kconfig
+++ b/net/smc/Kconfig
@@ -2,6 +2,7 @@
config SMC
tristate "SMC socket protocol family"
depends on INET && INFINIBAND
+ depends on m || ISM != m
help
SMC-R provides a "sockets over RDMA" solution making use of
RDMA over Converged Ethernet (RoCE) technology to upgrade
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index bacdd971615e..35ddebae8894 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1201,6 +1201,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
(struct smc_clc_msg_accept_confirm_v2 *)aclc;
struct smc_clc_first_contact_ext *fce =
smc_get_clc_first_contact_ext(clc_v2, false);
+ struct net *net = sock_net(&smc->sk);
int rc;
if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
@@ -1210,7 +1211,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
ini->smcrv2.uses_gateway = false;
} else {
- if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
+ if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
ini->smcrv2.nexthop_mac,
&ini->smcrv2.uses_gateway))
@@ -2361,7 +2362,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
smc_find_ism_store_rc(rc, ini);
return (!rc) ? 0 : ini->rc;
}
- return SMC_CLC_DECL_NOSMCDEV;
+ return prfx_rc;
}
/* listen worker: finish RDMA setup */
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 9b66d6aeeb1a..89981dbe46c9 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -193,7 +193,7 @@ bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
-int smc_ib_find_route(__be32 saddr, __be32 daddr,
+int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
u8 nexthop_mac[], u8 *uses_gateway)
{
struct neighbour *neigh = NULL;
@@ -205,7 +205,7 @@ int smc_ib_find_route(__be32 saddr, __be32 daddr,
if (daddr == cpu_to_be32(INADDR_NONE))
goto out;
- rt = ip_route_output_flow(&init_net, &fl4, NULL);
+ rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto out;
if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
@@ -235,6 +235,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) {
struct in_device *in_dev = __in_dev_get_rcu(ndev);
+ struct net *net = dev_net(ndev);
const struct in_ifaddr *ifa;
bool subnet_match = false;
@@ -248,7 +249,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
}
if (!subnet_match)
goto out;
- if (smcrv2->daddr && smc_ib_find_route(smcrv2->saddr,
+ if (smcrv2->daddr && smc_ib_find_route(net, smcrv2->saddr,
smcrv2->daddr,
smcrv2->nexthop_mac,
&smcrv2->uses_gateway))
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 4df5f8c8a0a1..ef8ac2b7546d 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -112,7 +112,7 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk,
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
unsigned short vlan_id, u8 gid[], u8 *sgid_index,
struct smc_init_info_smcrv2 *smcrv2);
-int smc_ib_find_route(__be32 saddr, __be32 daddr,
+int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
u8 nexthop_mac[], u8 *uses_gateway);
bool smc_ib_is_valid_local_systemid(void);
int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h
index aa8928975cc6..9d32058db2b5 100644
--- a/net/smc/smc_stats.h
+++ b/net/smc/smc_stats.h
@@ -92,13 +92,14 @@ do { \
typeof(_smc_stats) stats = (_smc_stats); \
typeof(_tech) t = (_tech); \
typeof(_len) l = (_len); \
- int _pos = fls64((l) >> 13); \
+ int _pos; \
typeof(_rc) r = (_rc); \
int m = SMC_BUF_MAX - 1; \
this_cpu_inc((*stats).smc[t].key ## _cnt); \
- if (r <= 0) \
+ if (r <= 0 || l <= 0) \
break; \
- _pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+ _pos = fls64((l - 1) >> 13); \
+ _pos = (_pos <= m) ? _pos : m; \
this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
this_cpu_add((*stats).smc[t].key ## _bytes, r); \
} \
@@ -138,9 +139,12 @@ while (0)
do { \
typeof(_len) _l = (_len); \
typeof(_tech) t = (_tech); \
- int _pos = fls((_l) >> 13); \
+ int _pos; \
int m = SMC_BUF_MAX - 1; \
- _pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+ if (_l <= 0) \
+ break; \
+ _pos = fls((_l - 1) >> 13); \
+ _pos = (_pos <= m) ? _pos : m; \
this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
} \
while (0)
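
The reworked macros above bucket a buffer size l into power-of-two bins starting at 8 KiB using fls64((l - 1) >> 13), which puts exact powers of two into the lower bin without the old special-case expression, and they now skip non-positive sizes entirely. A user-space check of the arithmetic (fls64(x) is 64 minus the leading-zero count for non-zero x); the bucket count of 8 is only illustrative:

    #include <stdio.h>

    static int fls64(unsigned long long x)
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    static int smc_bucket(long long l, int nr_buckets)
    {
            int pos, max = nr_buckets - 1;

            if (l <= 0)
                    return -1;                              /* skipped, as in the macro */
            pos = fls64((unsigned long long)(l - 1) >> 13);
            return pos <= max ? pos : max;                  /* clamp to last bucket */
    }

    int main(void)
    {
            printf("%d %d %d %d\n",
                   smc_bucket(8192, 8),         /* 0: exactly 8 KiB stays low */
                   smc_bucket(8193, 8),         /* 1 */
                   smc_bucket(16384, 8),        /* 1: exactly 16 KiB stays low */
                   smc_bucket(1 << 20, 8));     /* 7 */
            return 0;
    }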
diff --git a/net/socket.c b/net/socket.c
index c4a6f5532955..5740475e084c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -403,7 +403,7 @@ static const struct xattr_handler sockfs_security_xattr_handler = {
.set = sockfs_security_xattr_set,
};
-static const struct xattr_handler *sockfs_xattr_handlers[] = {
+static const struct xattr_handler * const sockfs_xattr_handlers[] = {
&sockfs_xattr_handler,
&sockfs_security_xattr_handler,
NULL
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 65a6c6429a53..caa94cf57123 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -83,7 +83,6 @@ static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
return NULL;
req->rq_xprt = xprt;
- INIT_LIST_HEAD(&req->rq_bc_list);
/* Preallocate one XDR receive buffer */
if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
@@ -349,10 +348,8 @@ found:
}
/*
- * Add callback request to callback list. The callback
- * service sleeps on the sv_cb_waitq waiting for new
- * requests. Wake it up after adding enqueing the
- * request.
+ * Add callback request to callback list. Wake a thread
+ * on the first pool (usually the only pool) to handle it.
*/
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
@@ -369,8 +366,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
dprintk("RPC: add callback request to list\n");
xprt_get(xprt);
- spin_lock(&bc_serv->sv_cb_lock);
- list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
- wake_up(&bc_serv->sv_cb_waitq);
- spin_unlock(&bc_serv->sv_cb_lock);
+ lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
}
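
The backchannel queue above moves from a list_head/spinlock/waitqueue triple to the lockless wait-free queue (lwq): producers call lwq_enqueue() on an embedded llist_node and wake one idle pool thread, and the consumer pops with lwq_dequeue() from svc_recv(). A hedged kernel-style sketch of that producer/consumer shape with illustrative names, assuming <linux/lwq.h>:

    struct work_item {
            struct llist_node node;
            /* payload ... */
    };

    static void work_submit(struct lwq *queue, struct svc_pool *pool,
                            struct work_item *item)
    {
            lwq_enqueue(&item->node, queue);        /* lockless enqueue */
            svc_pool_wake_idle_thread(pool);        /* exported by the hunk above */
    }

    static struct work_item *work_pop(struct lwq *queue)
    {
            return lwq_dequeue(queue, struct work_item, node);      /* or NULL */
    }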
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index f420d8457345..dcc2b4f49e77 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -472,7 +472,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
return NULL;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_fop = &simple_dir_operations;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 812fda9d45dd..3f2ea7a0496f 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -438,9 +438,7 @@ EXPORT_SYMBOL_GPL(svc_bind);
static void
__svc_init_bc(struct svc_serv *serv)
{
- INIT_LIST_HEAD(&serv->sv_cb_list);
- spin_lock_init(&serv->sv_cb_lock);
- init_waitqueue_head(&serv->sv_cb_waitq);
+ lwq_init(&serv->sv_cb_list);
}
#else
static void
@@ -509,9 +507,9 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
i, serv->sv_name);
pool->sp_id = i;
- INIT_LIST_HEAD(&pool->sp_sockets);
+ lwq_init(&pool->sp_xprts);
INIT_LIST_HEAD(&pool->sp_all_threads);
- spin_lock_init(&pool->sp_lock);
+ init_llist_head(&pool->sp_idle_threads);
percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
@@ -575,11 +573,12 @@ svc_destroy(struct kref *ref)
timer_shutdown_sync(&serv->sv_temptimer);
/*
- * The last user is gone and thus all sockets have to be destroyed to
- * the point. Check this.
+ * Remaining transports at this point are not expected.
*/
- BUG_ON(!list_empty(&serv->sv_permsocks));
- BUG_ON(!list_empty(&serv->sv_tempsocks));
+ WARN_ONCE(!list_empty(&serv->sv_permsocks),
+ "SVC: permsocks remain for %s\n", serv->sv_program->pg_name);
+ WARN_ONCE(!list_empty(&serv->sv_tempsocks),
+ "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name);
cache_clean_deferred(serv);
@@ -642,7 +641,6 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
folio_batch_init(&rqstp->rq_fbatch);
- __set_bit(RQ_BUSY, &rqstp->rq_flags);
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
@@ -682,10 +680,13 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
serv->sv_nrthreads += 1;
spin_unlock_bh(&serv->sv_lock);
- spin_lock_bh(&pool->sp_lock);
- pool->sp_nrthreads++;
+ atomic_inc(&pool->sp_nrthreads);
+
+ /* Protected by whatever lock the service uses when calling
+ * svc_set_num_threads()
+ */
list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
- spin_unlock_bh(&pool->sp_lock);
+
return rqstp;
}
@@ -701,23 +702,25 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
struct svc_rqst *rqstp;
+ struct llist_node *ln;
rcu_read_lock();
- list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
- if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
- continue;
-
+ ln = READ_ONCE(pool->sp_idle_threads.first);
+ if (ln) {
+ rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
WRITE_ONCE(rqstp->rq_qtime, ktime_get());
- wake_up_process(rqstp->rq_task);
+ if (!task_is_running(rqstp->rq_task)) {
+ wake_up_process(rqstp->rq_task);
+ trace_svc_wake_up(rqstp->rq_task->pid);
+ percpu_counter_inc(&pool->sp_threads_woken);
+ }
rcu_read_unlock();
- percpu_counter_inc(&pool->sp_threads_woken);
- trace_svc_wake_up(rqstp->rq_task->pid);
return;
}
rcu_read_unlock();
- set_bit(SP_CONGESTED, &pool->sp_flags);
}
+EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);
static struct svc_pool *
svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
@@ -725,36 +728,38 @@ svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
-static struct task_struct *
-svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+static struct svc_pool *
+svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
+ unsigned int *state)
{
+ struct svc_pool *pool;
unsigned int i;
- struct task_struct *task = NULL;
+
+retry:
+ pool = target_pool;
if (pool != NULL) {
- spin_lock_bh(&pool->sp_lock);
+ if (atomic_inc_not_zero(&pool->sp_nrthreads))
+ goto found_pool;
+ return NULL;
} else {
for (i = 0; i < serv->sv_nrpools; i++) {
pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
- spin_lock_bh(&pool->sp_lock);
- if (!list_empty(&pool->sp_all_threads))
+ if (atomic_inc_not_zero(&pool->sp_nrthreads))
goto found_pool;
- spin_unlock_bh(&pool->sp_lock);
}
return NULL;
}
found_pool:
- if (!list_empty(&pool->sp_all_threads)) {
- struct svc_rqst *rqstp;
-
- rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
- set_bit(RQ_VICTIM, &rqstp->rq_flags);
- list_del_rcu(&rqstp->rq_all);
- task = rqstp->rq_task;
- }
- spin_unlock_bh(&pool->sp_lock);
- return task;
+ set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+ set_bit(SP_NEED_VICTIM, &pool->sp_flags);
+ if (!atomic_dec_and_test(&pool->sp_nrthreads))
+ return pool;
+ /* Nothing left in this pool any more */
+ clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
+ clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+ goto retry;
}
static int
@@ -795,18 +800,16 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
- struct svc_rqst *rqstp;
- struct task_struct *task;
unsigned int state = serv->sv_nrthreads-1;
+ struct svc_pool *victim;
do {
- task = svc_pool_victim(serv, pool, &state);
- if (task == NULL)
+ victim = svc_pool_victim(serv, pool, &state);
+ if (!victim)
break;
- rqstp = kthread_data(task);
- /* Did we lose a race to svo_function threadfn? */
- if (kthread_stop(task) == -EINTR)
- svc_exit_thread(rqstp);
+ svc_pool_wake_idle_thread(victim);
+ wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS,
+ TASK_IDLE);
nrservs++;
} while (nrservs < 0);
return 0;
@@ -832,13 +835,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
- if (pool == NULL) {
+ if (!pool)
nrservs -= serv->sv_nrthreads;
- } else {
- spin_lock_bh(&pool->sp_lock);
- nrservs -= pool->sp_nrthreads;
- spin_unlock_bh(&pool->sp_lock);
- }
+ else
+ nrservs -= atomic_read(&pool->sp_nrthreads);
if (nrservs > 0)
return svc_start_kthreads(serv, pool, nrservs);
@@ -924,11 +924,9 @@ svc_exit_thread(struct svc_rqst *rqstp)
struct svc_serv *serv = rqstp->rq_server;
struct svc_pool *pool = rqstp->rq_pool;
- spin_lock_bh(&pool->sp_lock);
- pool->sp_nrthreads--;
- if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
- list_del_rcu(&rqstp->rq_all);
- spin_unlock_bh(&pool->sp_lock);
+ list_del_rcu(&rqstp->rq_all);
+
+ atomic_dec(&pool->sp_nrthreads);
spin_lock_bh(&serv->sv_lock);
serv->sv_nrthreads -= 1;
@@ -938,6 +936,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
svc_rqst_free(rqstp);
svc_put(serv);
+ /* That svc_put() cannot be the last, because the thread
+ * waiting for SP_VICTIM_REMAINS to clear must hold
+ * a reference. So it is still safe to access pool.
+ */
+ clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
@@ -1544,24 +1547,20 @@ out_drop:
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
-/*
- * Process a backchannel RPC request that arrived over an existing
- * outbound connection
+/**
+ * svc_process_bc - process a reverse-direction RPC request
+ * @req: RPC request to be used for client-side processing
+ * @rqstp: server-side execution context
+ *
*/
-int
-bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
- struct svc_rqst *rqstp)
+void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
{
struct rpc_task *task;
int proc_error;
- int error;
-
- dprintk("svc: %s(%p)\n", __func__, req);
/* Build the svc_rqst used by the common processing routine */
rqstp->rq_xid = req->rq_xid;
rqstp->rq_prot = req->rq_xprt->prot;
- rqstp->rq_server = serv;
rqstp->rq_bc_net = req->rq_xprt->xprt_net;
rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
@@ -1590,10 +1589,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
* been processed by the caller.
*/
svcxdr_init_decode(rqstp);
- if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) {
- error = -EINVAL;
- goto out;
- }
+ if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2))
+ return;
/* Parse and execute the bc call */
proc_error = svc_process_common(rqstp);
@@ -1602,26 +1599,18 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
if (!proc_error) {
/* Processing error: drop the request */
xprt_free_bc_request(req);
- error = -EINVAL;
- goto out;
+ return;
}
/* Finally, send the reply synchronously */
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req);
- if (IS_ERR(task)) {
- error = PTR_ERR(task);
- goto out;
- }
+ if (IS_ERR(task))
+ return;
WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
- error = task->tk_status;
rpc_put_task(task);
-
-out:
- dprintk("svc: %s(), error=%d\n", __func__, error);
- return error;
}
-EXPORT_SYMBOL_GPL(bc_svc_process);
+EXPORT_SYMBOL_GPL(svc_process_bc);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/**
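
Thread shutdown in the hunks above becomes a bit-based handshake: svc_pool_victim() marks the pool with SP_NEED_VICTIM and SP_VICTIM_REMAINS, svc_stop_kthreads() sleeps in wait_on_bit() until the chosen thread is gone, and svc_exit_thread() clears the bit with clear_and_wake_up_bit() as its final action, after which the exiting thread must no longer touch the pool. A hedged sketch of the two sides, with the flags word passed in explicitly for illustration:

    static void controller_stop_one(unsigned long *flags, struct svc_pool *pool)
    {
            set_bit(SP_VICTIM_REMAINS, flags);
            set_bit(SP_NEED_VICTIM, flags);
            svc_pool_wake_idle_thread(pool);
            wait_on_bit(flags, SP_VICTIM_REMAINS, TASK_IDLE);   /* until it exits */
    }

    static void victim_last_words(unsigned long *flags)
    {
            /* free per-thread state first; this wake-up must be the last step */
            clear_and_wake_up_bit(SP_VICTIM_REMAINS, flags);
    }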
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 4cfe9640df48..fee83d1024bc 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -9,7 +9,6 @@
#include <linux/sched/mm.h>
#include <linux/errno.h>
#include <linux/freezer.h>
-#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
@@ -17,6 +16,7 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/bc_xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>
@@ -201,7 +201,6 @@ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
kref_init(&xprt->xpt_ref);
xprt->xpt_server = serv;
INIT_LIST_HEAD(&xprt->xpt_list);
- INIT_LIST_HEAD(&xprt->xpt_ready);
INIT_LIST_HEAD(&xprt->xpt_deferred);
INIT_LIST_HEAD(&xprt->xpt_users);
mutex_init(&xprt->xpt_mutex);
@@ -472,9 +471,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
pool = svc_pool_for_cpu(xprt->xpt_server);
percpu_counter_inc(&pool->sp_sockets_queued);
- spin_lock_bh(&pool->sp_lock);
- list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
- spin_unlock_bh(&pool->sp_lock);
+ lwq_enqueue(&xprt->xpt_ready, &pool->sp_xprts);
svc_pool_wake_idle_thread(pool);
}
@@ -487,18 +484,9 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
struct svc_xprt *xprt = NULL;
- if (list_empty(&pool->sp_sockets))
- goto out;
-
- spin_lock_bh(&pool->sp_lock);
- if (likely(!list_empty(&pool->sp_sockets))) {
- xprt = list_first_entry(&pool->sp_sockets,
- struct svc_xprt, xpt_ready);
- list_del_init(&xprt->xpt_ready);
+ xprt = lwq_dequeue(&pool->sp_xprts, struct svc_xprt, xpt_ready);
+ if (xprt)
svc_xprt_get(xprt);
- }
- spin_unlock_bh(&pool->sp_lock);
-out:
return xprt;
}
@@ -674,7 +662,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
continue;
set_current_state(TASK_IDLE);
- if (kthread_should_stop()) {
+ if (svc_thread_should_stop(rqstp)) {
set_current_state(TASK_RUNNING);
return false;
}
@@ -699,7 +687,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
}
static bool
-rqst_should_sleep(struct svc_rqst *rqstp)
+svc_thread_should_sleep(struct svc_rqst *rqstp)
{
struct svc_pool *pool = rqstp->rq_pool;
@@ -708,65 +696,51 @@ rqst_should_sleep(struct svc_rqst *rqstp)
return false;
/* was a socket queued? */
- if (!list_empty(&pool->sp_sockets))
+ if (!lwq_empty(&pool->sp_xprts))
return false;
/* are we shutting down? */
- if (kthread_should_stop())
+ if (svc_thread_should_stop(rqstp))
return false;
- /* are we freezing? */
- if (freezing(current))
- return false;
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ if (svc_is_backchannel(rqstp)) {
+ if (!lwq_empty(&rqstp->rq_server->sv_cb_list))
+ return false;
+ }
+#endif
return true;
}
-static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
+static void svc_thread_wait_for_work(struct svc_rqst *rqstp)
{
- struct svc_pool *pool = rqstp->rq_pool;
-
- /* rq_xprt should be clear on entry */
- WARN_ON_ONCE(rqstp->rq_xprt);
-
- rqstp->rq_xprt = svc_xprt_dequeue(pool);
- if (rqstp->rq_xprt)
- goto out_found;
-
- set_current_state(TASK_IDLE);
- smp_mb__before_atomic();
- clear_bit(SP_CONGESTED, &pool->sp_flags);
- clear_bit(RQ_BUSY, &rqstp->rq_flags);
- smp_mb__after_atomic();
-
- if (likely(rqst_should_sleep(rqstp)))
- schedule();
- else
+ struct svc_pool *pool = rqstp->rq_pool;
+
+ if (svc_thread_should_sleep(rqstp)) {
+ set_current_state(TASK_IDLE | TASK_FREEZABLE);
+ llist_add(&rqstp->rq_idle, &pool->sp_idle_threads);
+ if (likely(svc_thread_should_sleep(rqstp)))
+ schedule();
+
+ while (!llist_del_first_this(&pool->sp_idle_threads,
+ &rqstp->rq_idle)) {
+ /* Work just became available. This thread can only
+ * handle it after removing rqstp from the idle
+ * list. If that attempt failed, some other thread
+ * must have queued itself after finding no
+ * work to do, so that thread has taken responsibility
+ * for this new work. This thread can safely sleep
+ * until woken again.
+ */
+ schedule();
+ set_current_state(TASK_IDLE | TASK_FREEZABLE);
+ }
__set_current_state(TASK_RUNNING);
-
+ } else {
+ cond_resched();
+ }
try_to_freeze();
-
- set_bit(RQ_BUSY, &rqstp->rq_flags);
- smp_mb__after_atomic();
- clear_bit(SP_TASK_PENDING, &pool->sp_flags);
- rqstp->rq_xprt = svc_xprt_dequeue(pool);
- if (rqstp->rq_xprt)
- goto out_found;
-
- if (kthread_should_stop())
- return NULL;
- return NULL;
-out_found:
- clear_bit(SP_TASK_PENDING, &pool->sp_flags);
- /* Normally we will wait up to 5 seconds for any required
- * cache information to be provided.
- */
- if (!test_bit(SP_CONGESTED, &pool->sp_flags))
- rqstp->rq_chandle.thread_wait = 5*HZ;
- else
- rqstp->rq_chandle.thread_wait = 1*HZ;
- trace_svc_xprt_dequeue(rqstp);
- return rqstp->rq_xprt;
}
static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -785,7 +759,7 @@ static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt
svc_xprt_received(newxpt);
}
-static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
+static void svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
struct svc_serv *serv = rqstp->rq_server;
int len = 0;
@@ -826,11 +800,35 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ if (len <= 0)
+ goto out;
+
+ trace_svc_xdr_recvfrom(&rqstp->rq_arg);
+
+ clear_bit(XPT_OLD, &xprt->xpt_flags);
+
+ rqstp->rq_chandle.defer = svc_defer;
+
+ if (serv->sv_stats)
+ serv->sv_stats->netcnt++;
+ percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
+ rqstp->rq_stime = ktime_get();
+ svc_process(rqstp);
} else
svc_xprt_received(xprt);
out:
- return len;
+ rqstp->rq_res.len = 0;
+ svc_xprt_release(rqstp);
+}
+
+static void svc_thread_wake_next(struct svc_rqst *rqstp)
+{
+ if (!svc_thread_should_sleep(rqstp))
+ /* More work pending after I dequeued some,
+ * wake another worker
+ */
+ svc_pool_wake_idle_thread(rqstp->rq_pool);
}
/**
@@ -843,44 +841,51 @@ out:
*/
void svc_recv(struct svc_rqst *rqstp)
{
- struct svc_xprt *xprt = NULL;
- struct svc_serv *serv = rqstp->rq_server;
- int len;
+ struct svc_pool *pool = rqstp->rq_pool;
if (!svc_alloc_arg(rqstp))
- goto out;
+ return;
- try_to_freeze();
- cond_resched();
- if (kthread_should_stop())
- goto out;
+ svc_thread_wait_for_work(rqstp);
- xprt = svc_get_next_xprt(rqstp);
- if (!xprt)
- goto out;
+ clear_bit(SP_TASK_PENDING, &pool->sp_flags);
- len = svc_handle_xprt(rqstp, xprt);
+ if (svc_thread_should_stop(rqstp)) {
+ svc_thread_wake_next(rqstp);
+ return;
+ }
- /* No data, incomplete (TCP) read, or accept() */
- if (len <= 0)
- goto out_release;
+ rqstp->rq_xprt = svc_xprt_dequeue(pool);
+ if (rqstp->rq_xprt) {
+ struct svc_xprt *xprt = rqstp->rq_xprt;
- trace_svc_xdr_recvfrom(&rqstp->rq_arg);
+ svc_thread_wake_next(rqstp);
+ /* Normally we will wait up to 5 seconds for any required
+ * cache information to be provided. When there are no
+ * idle threads, we reduce the wait time.
+ */
+ if (pool->sp_idle_threads.first)
+ rqstp->rq_chandle.thread_wait = 5 * HZ;
+ else
+ rqstp->rq_chandle.thread_wait = 1 * HZ;
- clear_bit(XPT_OLD, &xprt->xpt_flags);
+ trace_svc_xprt_dequeue(rqstp);
+ svc_handle_xprt(rqstp, xprt);
+ }
- rqstp->rq_chandle.defer = svc_defer;
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ if (svc_is_backchannel(rqstp)) {
+ struct svc_serv *serv = rqstp->rq_server;
+ struct rpc_rqst *req;
- if (serv->sv_stats)
- serv->sv_stats->netcnt++;
- percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
- rqstp->rq_stime = ktime_get();
- svc_process(rqstp);
-out:
- return;
-out_release:
- rqstp->rq_res.len = 0;
- svc_xprt_release(rqstp);
+ req = lwq_dequeue(&serv->sv_cb_list,
+ struct rpc_rqst, rq_bc_list);
+ if (req) {
+ svc_thread_wake_next(rqstp);
+ svc_process_bc(req, rqstp);
+ }
+ }
+#endif
}
EXPORT_SYMBOL_GPL(svc_recv);
@@ -890,7 +895,6 @@ EXPORT_SYMBOL_GPL(svc_recv);
void svc_drop(struct svc_rqst *rqstp)
{
trace_svc_drop(rqstp);
- svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);
@@ -906,8 +910,6 @@ void svc_send(struct svc_rqst *rqstp)
int status;
xprt = rqstp->rq_xprt;
- if (!xprt)
- return;
/* calculate over-all length */
xb = &rqstp->rq_res;
@@ -920,7 +922,6 @@ void svc_send(struct svc_rqst *rqstp)
status = xprt->xpt_ops->xpo_sendto(rqstp);
trace_svc_send(rqstp, status);
- svc_xprt_release(rqstp);
}
/*
@@ -1031,7 +1032,6 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
spin_lock_bh(&serv->sv_lock);
list_del_init(&xprt->xpt_list);
- WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
spin_unlock_bh(&serv->sv_lock);
@@ -1082,36 +1082,26 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
return ret;
}
-static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
+static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
- struct svc_pool *pool;
struct svc_xprt *xprt;
- struct svc_xprt *tmp;
int i;
for (i = 0; i < serv->sv_nrpools; i++) {
- pool = &serv->sv_pools[i];
-
- spin_lock_bh(&pool->sp_lock);
- list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
- if (xprt->xpt_net != net)
- continue;
- list_del_init(&xprt->xpt_ready);
- spin_unlock_bh(&pool->sp_lock);
- return xprt;
+ struct svc_pool *pool = &serv->sv_pools[i];
+ struct llist_node *q, **t1, *t2;
+
+ q = lwq_dequeue_all(&pool->sp_xprts);
+ lwq_for_each_safe(xprt, t1, t2, &q, xpt_ready) {
+ if (xprt->xpt_net == net) {
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ svc_delete_xprt(xprt);
+ xprt = NULL;
+ }
}
- spin_unlock_bh(&pool->sp_lock);
- }
- return NULL;
-}
-
-static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
-{
- struct svc_xprt *xprt;
- while ((xprt = svc_dequeue_net(serv, net))) {
- set_bit(XPT_CLOSE, &xprt->xpt_flags);
- svc_delete_xprt(xprt);
+ if (q)
+ lwq_enqueue_batch(q, &pool->sp_xprts);
}
}
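/* Illustrative sketch, not part of the patch: the hunk above drains the whole
 * per-pool queue in one operation, deletes the transports that belong to the
 * dying network namespace, and re-queues the survivors as a batch.  A
 * single-threaded model of that drain/filter/requeue pattern with an ordinary
 * linked list (lwq itself is a lock-less kernel primitive, so this is only an
 * analogy; the names are invented for the example):
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_xprt { int net; struct fake_xprt *next; };

static struct fake_xprt *queue;			/* models pool->sp_xprts */

static void enqueue(int net)
{
	struct fake_xprt *x = malloc(sizeof(*x));

	x->net = net;
	x->next = queue;
	queue = x;
}

static void clean_up_net(int net)
{
	struct fake_xprt *batch = queue;	/* "dequeue all" */
	struct fake_xprt **pp = &batch;

	queue = NULL;
	while (*pp) {
		struct fake_xprt *x = *pp;

		if (x->net == net) {		/* drop entries for @net */
			*pp = x->next;
			free(x);
		} else {
			pp = &x->next;
		}
	}
	if (batch) {				/* re-queue the survivors */
		struct fake_xprt **tail = &batch;

		while (*tail)
			tail = &(*tail)->next;
		*tail = queue;
		queue = batch;
	}
}

int main(void)
{
	enqueue(1); enqueue(2); enqueue(1); enqueue(2);
	clean_up_net(1);
	for (struct fake_xprt *x = queue; x; x = x->next)
		printf("net %d still queued\n", x->net);	/* prints net 2 twice */
	while (queue) {
		struct fake_xprt *x = queue;

		queue = x->next;
		free(x);
	}
	return 0;
}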
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index e4d84a13c566..8c817e755262 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -263,11 +263,9 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
/* Queue rqst for ULP's callback service */
bc_serv = xprt->bc_serv;
xprt_get(xprt);
- spin_lock(&bc_serv->sv_cb_lock);
- list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
- spin_unlock(&bc_serv->sv_cb_lock);
+ lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
- wake_up(&bc_serv->sv_cb_waitq);
+ svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
r_xprt->rx_stats.bcall_count++;
return;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 85c8bcaebb80..3b05f90a3e50 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -852,7 +852,8 @@ out_readfail:
if (ret == -EINVAL)
svc_rdma_send_error(rdma_xprt, ctxt, ret);
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
- return ret;
+ svc_xprt_deferred_close(xprt);
+ return -ENOTCONN;
out_backchannel:
svc_rdma_handle_bc_reply(rqstp, ctxt);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 02f583ff9239..002483e60c19 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -139,8 +139,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
- int rc = 0;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret, rc = 0;
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
@@ -154,9 +154,13 @@ int wait_on_pending_writer(struct sock *sk, long *timeo)
break;
}
- if (sk_wait_event(sk, timeo,
- !READ_ONCE(sk->sk_write_pending), &wait))
+ ret = sk_wait_event(sk, timeo,
+ !READ_ONCE(sk->sk_write_pending), &wait);
+ if (ret) {
+ if (ret < 0)
+ rc = ret;
break;
+ }
}
remove_wait_queue(sk_sleep(sk), &wait);
return rc;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d1fc295b83b5..e9d1e83a859d 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1291,6 +1291,7 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
long timeo;
timeo = sock_rcvtimeo(sk, nonblock);
@@ -1302,6 +1303,9 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
if (sk->sk_err)
return sock_error(sk);
+ if (ret < 0)
+ return ret;
+
if (!skb_queue_empty(&sk->sk_receive_queue)) {
tls_strp_check_rcv(&ctx->strp);
if (tls_strp_msg_ready(ctx))
@@ -1320,10 +1324,10 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
released = true;
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- sk_wait_event(sk, &timeo,
- tls_strp_msg_ready(ctx) ||
- !sk_psock_queue_empty(psock),
- &wait);
+ ret = sk_wait_event(sk, &timeo,
+ tls_strp_msg_ready(ctx) ||
+ !sk_psock_queue_empty(psock),
+ &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
@@ -1852,6 +1856,7 @@ static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
bool nonblock)
{
long timeo;
+ int ret;
timeo = sock_rcvtimeo(sk, nonblock);
@@ -1861,14 +1866,16 @@ static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
ctx->reader_contended = 1;
add_wait_queue(&ctx->wq, &wait);
- sk_wait_event(sk, &timeo,
- !READ_ONCE(ctx->reader_present), &wait);
+ ret = sk_wait_event(sk, &timeo,
+ !READ_ONCE(ctx->reader_present), &wait);
remove_wait_queue(&ctx->wq, &wait);
if (timeo <= 0)
return -EAGAIN;
if (signal_pending(current))
return sock_intr_errno(timeo);
+ if (ret < 0)
+ return ret;
}
WRITE_ONCE(ctx->reader_present, 1);
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index e95df847176b..b80bf681327b 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -555,6 +555,11 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
virtio_device_ready(vdev);
+ return 0;
+}
+
+static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
+{
mutex_lock(&vsock->tx_lock);
vsock->tx_run = true;
mutex_unlock(&vsock->tx_lock);
@@ -569,7 +574,16 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
- return 0;
+ /* virtio_transport_send_pkt() can queue packets once
+ * the_virtio_vsock is set, but they won't be processed until
+ * vsock->tx_run is set to true. We queue vsock->send_pkt_work
+ * when initialization finishes to send those packets queued
+ * earlier.
+ * We don't need to queue the other workers (rx, event) because
+ * as long as we don't fill the queues with empty buffers, the
+ * host can't send us any notification.
+ */
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
@@ -664,6 +678,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
goto out;
rcu_assign_pointer(the_virtio_vsock, vsock);
+ virtio_vsock_vqs_start(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
@@ -736,6 +751,7 @@ static int virtio_vsock_restore(struct virtio_device *vdev)
goto out;
rcu_assign_pointer(the_virtio_vsock, vsock);
+ virtio_vsock_vqs_start(vsock);
out:
mutex_unlock(&the_virtio_vsock_mutex);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 64e861617110..acec41c1809a 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1622,7 +1622,7 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
list_add_tail(&work->entry, &rdev->wiphy_work_list);
spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
- schedule_work(&rdev->wiphy_work);
+ queue_work(system_unbound_wq, &rdev->wiphy_work);
}
EXPORT_SYMBOL_GPL(wiphy_work_queue);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 3e2c398abddc..55a1d3633853 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -43,10 +43,11 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
cr.links[link_id].status = data->links[link_id].status;
+ cr.links[link_id].bss = data->links[link_id].bss;
+
WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS &&
(!cr.ap_mld_addr || !cr.links[link_id].bss));
- cr.links[link_id].bss = data->links[link_id].bss;
if (!cr.links[link_id].bss)
continue;
cr.links[link_id].bssid = data->links[link_id].bss->bssid;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 939deecf0bbe..8210a6090ac1 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2125,7 +2125,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
if (!res)
goto drop;
- rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
+ rdev_inform_bss(rdev, &res->pub, ies, drv_data->drv_data);
if (data->bss_source == BSS_SOURCE_MBSSID) {
/* this is a nontransmitting bss, we need to add it to
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index f8905400ee07..d2c264030017 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -34,6 +34,16 @@ struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
q->ring_mask = nentries - 1;
size = xskq_get_ring_size(q, umem_queue);
+
+	/* A size that overflows or is close to SIZE_MAX becomes 0 in
+	 * PAGE_ALIGN(); checking against SIZE_MAX is enough because of the
+	 * previous is_power_of_2() check, and vmalloc_user() handles the rest
+ */
+ if (unlikely(size == SIZE_MAX)) {
+ kfree(q);
+ return NULL;
+ }
+
size = PAGE_ALIGN(size);
q->ring = vmalloc_user(size);
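/* Illustrative sketch, not part of the patch: the new check above guards
 * against PAGE_ALIGN() wrapping a near-SIZE_MAX ring size around to 0 before
 * the size ever reaches vmalloc_user().  The wraparound shown with the same
 * open-coded alignment (4 KiB pages assumed):
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	size_t size = SIZE_MAX;

	/* SIZE_MAX + 4095 overflows, so the "aligned" size collapses to 0. */
	printf("PAGE_ALIGN(SIZE_MAX) = %zu\n", (size_t)PAGE_ALIGN(size));
	return 0;
}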
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index b86474084690..e21cc71095bb 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -380,8 +380,8 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
skb->dev = dev;
if (err) {
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_dropped);
return 0;
}
@@ -426,7 +426,6 @@ static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
unsigned int length = skb->len;
struct net_device *tdev;
@@ -473,7 +472,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
tdev = dst->dev;
if (tdev == dev) {
- stats->collisions++;
+ DEV_STATS_INC(dev, collisions);
net_warn_ratelimited("%s: Local routing loop detected!\n",
dev->name);
goto tx_err_dst_release;
@@ -512,13 +511,13 @@ xmit:
if (net_xmit_eval(err) == 0) {
dev_sw_netstats_tx_add(dev, 1, length);
} else {
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
}
return 0;
tx_err_link_failure:
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
dst_link_failure(skb);
tx_err_dst_release:
dst_release(dst);
@@ -528,7 +527,6 @@ tx_err_dst_release:
static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
struct flowi fl;
int ret;
@@ -545,7 +543,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
if (dst->error) {
dst_release(dst);
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_err;
}
skb_dst_set(skb, dst);
@@ -561,7 +559,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
if (IS_ERR(rt)) {
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_err;
}
skb_dst_set(skb, &rt->dst);
@@ -580,8 +578,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
tx_err:
- stats->tx_errors++;
- stats->tx_dropped++;
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb);
return NETDEV_TX_OK;
}
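/* Illustrative sketch, not part of the patch: the conversion above replaces
 * plain "stats->field++" updates, which can race and lose counts when several
 * CPUs hit the same counter, with DEV_STATS_INC(), which in the kernel is an
 * atomic increment of the per-field counter.  A minimal user-space C11 model
 * of the difference (the plain counter is deliberately racy):
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static long plain_errors;		/* like the old dev->stats.tx_errors++ */
static atomic_long atomic_errors;	/* like DEV_STATS_INC(dev, tx_errors)   */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		plain_errors++;				/* racy, may lose updates */
		atomic_fetch_add(&atomic_errors, 1);	/* always counted         */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* The plain counter may fall short of 4000000; the atomic one is exact. */
	printf("plain=%ld atomic=%ld\n", plain_errors, atomic_load(&atomic_errors));
	return 0;
}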
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d6b405782b63..d24b4d4f620e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -851,7 +851,7 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
struct hlist_node *newpos = NULL;
bool matches_s, matches_d;
- if (!policy->bydst_reinsert)
+ if (policy->walk.dead || !policy->bydst_reinsert)
continue;
WARN_ON_ONCE(policy->family != family);
@@ -1256,8 +1256,11 @@ static void xfrm_hash_rebuild(struct work_struct *work)
struct xfrm_pol_inexact_bin *bin;
u8 dbits, sbits;
+ if (policy->walk.dead)
+ continue;
+
dir = xfrm_policy_id2dir(policy->index);
- if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
+ if (dir >= XFRM_POLICY_MAX)
continue;
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
@@ -1372,8 +1375,6 @@ EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
- static u32 idx_generator;
-
for (;;) {
struct hlist_head *list;
struct xfrm_policy *p;
@@ -1381,8 +1382,8 @@ static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
int found;
if (!index) {
- idx = (idx_generator | dir);
- idx_generator += 8;
+ idx = (net->xfrm.idx_generator | dir);
+ net->xfrm.idx_generator += 8;
} else {
idx = index;
index = 0;
@@ -1823,9 +1824,11 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
again:
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+ if (pol->walk.dead)
+ continue;
+
dir = xfrm_policy_id2dir(pol->index);
- if (pol->walk.dead ||
- dir >= XFRM_POLICY_MAX ||
+ if (dir >= XFRM_POLICY_MAX ||
pol->type != type)
continue;
@@ -1862,9 +1865,11 @@ int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
again:
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+ if (pol->walk.dead)
+ continue;
+
dir = xfrm_policy_id2dir(pol->index);
- if (pol->walk.dead ||
- dir >= XFRM_POLICY_MAX ||
+ if (dir >= XFRM_POLICY_MAX ||
pol->xdo.dev != dev)
continue;
@@ -3215,7 +3220,7 @@ no_transform:
}
for (i = 0; i < num_pols; i++)
- pols[i]->curlft.use_time = ktime_get_real_seconds();
+ WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
if (num_xfrms < 0) {
/* Prohibit the flow */
diff --git a/rust/Makefile b/rust/Makefile
index 87958e864be0..7dbf9abe0d01 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -93,15 +93,14 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
# and then retouch the generated files.
rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
rustdoc-alloc rustdoc-kernel
- $(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)
- $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)
+ $(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)/static.files/
+ $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)/static.files/
$(Q)find $(rustdoc_output) -name '*.html' -type f -print0 | xargs -0 sed -Ei \
- -e 's:rust-logo\.svg:logo.svg:g' \
- -e 's:rust-logo\.png:logo.svg:g' \
- -e 's:favicon\.svg:logo.svg:g' \
- -e 's:<link rel="alternate icon" type="image/png" href="[./]*favicon-(16x16|32x32)\.png">::g'
- $(Q)echo '.logo-container > img { object-fit: contain; }' \
- >> $(rustdoc_output)/rustdoc.css
+ -e 's:rust-logo-[0-9a-f]+\.svg:logo.svg:g' \
+ -e 's:favicon-[0-9a-f]+\.svg:logo.svg:g' \
+ -e 's:<link rel="alternate icon" type="image/png" href="[/.]+/static\.files/favicon-(16x16|32x32)-[0-9a-f]+\.png">::g'
+ $(Q)for f in $(rustdoc_output)/static.files/rustdoc-*.css; do \
+ echo ".logo-container > img { object-fit: contain; }" >> $$f; done
rustdoc-macros: private rustdoc_host = yes
rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
@@ -290,6 +289,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \
-fzero-call-used-regs=% -fno-stack-clash-protection \
-fno-inline-functions-called-once -fsanitize=bounds-strict \
+ -fstrict-flex-arrays=% \
--param=% --param asan-%
# Derived from `scripts/Makefile.clang`.
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 05fcab6abfe6..032b64543953 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -37,7 +37,7 @@ pub mod code {
declare_err!(E2BIG, "Argument list too long.");
declare_err!(ENOEXEC, "Exec format error.");
declare_err!(EBADF, "Bad file number.");
- declare_err!(ECHILD, "Exec format error.");
+ declare_err!(ECHILD, "No child processes.");
declare_err!(EAGAIN, "Try again.");
declare_err!(ENOMEM, "Out of memory.");
declare_err!(EACCES, "Permission denied.");
@@ -133,7 +133,7 @@ impl Error {
/// Returns the error encoded as a pointer.
#[allow(dead_code)]
pub(crate) fn to_ptr<T>(self) -> *mut T {
- // SAFETY: self.0 is a valid error due to its invariant.
+ // SAFETY: `self.0` is a valid error due to its invariant.
unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
}
diff --git a/scripts/const_structs.checkpatch b/scripts/const_structs.checkpatch
index dc39d938ea77..188412aa2757 100644
--- a/scripts/const_structs.checkpatch
+++ b/scripts/const_structs.checkpatch
@@ -94,3 +94,4 @@ vm_operations_struct
wacom_features
watchdog_ops
wd_ops
+xattr_handler
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index bd6a910f6528..53a0070ff5df 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -226,7 +226,7 @@ static int __aafs_setup_d_inode(struct inode *dir, struct dentry *dentry,
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = iops ? iops : &simple_dir_inode_operations;
@@ -1557,7 +1557,8 @@ void __aafs_profile_migrate_dents(struct aa_profile *old,
if (new->dents[i]) {
struct inode *inode = d_inode(new->dents[i]);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
}
old->dents[i] = NULL;
}
@@ -2543,7 +2544,7 @@ static int aa_mk_null_file(struct dentry *parent)
inode->i_ino = get_next_ino();
inode->i_mode = S_IFCHR | S_IRUGO | S_IWUGO;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO,
MKDEV(MEM_MAJOR, 3));
d_instantiate(dentry, inode);
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 8b8846073e14..913ec8d0eb63 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -89,10 +89,10 @@ void __aa_loaddata_update(struct aa_loaddata *data, long revision)
struct inode *inode;
inode = d_inode(data->dents[AAFS_LOADDATA_DIR]);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
inode = d_inode(data->dents[AAFS_LOADDATA_REVISION]);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}
}
diff --git a/security/inode.c b/security/inode.c
index 3aa75fffa8c9..9e7cde913667 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -145,7 +145,7 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode,
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 6fa640263216..6c596ae7fef9 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1198,7 +1198,7 @@ static struct inode *sel_make_inode(struct super_block *sb, umode_t mode)
if (ret) {
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
+ simple_inode_init_ts(ret);
}
return ret;
}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index bd9ddf412b46..9a69236fa207 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3527,7 +3527,7 @@ static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
if (runtime->state == SNDRV_PCM_STATE_OPEN ||
runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
return -EBADFD;
- if (!to->user_backed)
+ if (!user_backed_iter(to))
return -EINVAL;
if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
return -EINVAL;
@@ -3567,7 +3567,7 @@ static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
if (runtime->state == SNDRV_PCM_STATE_OPEN ||
runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
return -EBADFD;
- if (!from->user_backed)
+ if (!user_backed_iter(from))
return -EINVAL;
if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
!frame_aligned(runtime, iov->iov_len))
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3eeecf67c17b..9677c09cf7a9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7078,6 +7078,24 @@ static void alc287_fixup_bind_dacs(struct hda_codec *codec,
			    0x0); /* Make sure 0x14 was disabled */
}
}
+/* Fix the missing verb table for the Headset Mic pin */
+static void alc_fixup_headset_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x19, 0x03a1103c },
+ { }
+ };
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
+ spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+ break;
+ }
+}
enum {
@@ -7344,6 +7362,7 @@ enum {
ALC245_FIXUP_HP_X360_MUTE_LEDS,
ALC287_FIXUP_THINKPAD_I2S_SPK,
ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+ ALC2XX_FIXUP_HEADSET_MIC,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9448,6 +9467,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
},
+ [ALC2XX_FIXUP_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mic,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9722,6 +9745,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
@@ -9791,6 +9815,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
@@ -10750,6 +10775,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
{0x19, 0x40000000},
{0x1a, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
+ {0x19, 0x40000000}),
{}
};
diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
index f2e7c6d0be46..f9059780b7a7 100644
--- a/sound/soc/codecs/cs35l56.c
+++ b/sound/soc/codecs/cs35l56.c
@@ -706,7 +706,7 @@ static void cs35l56_patch(struct cs35l56_private *cs35l56)
mutex_lock(&cs35l56->base.irq_lock);
- init_completion(&cs35l56->init_completion);
+ reinit_completion(&cs35l56->init_completion);
cs35l56->soft_resetting = true;
cs35l56_system_reset(&cs35l56->base, !!cs35l56->sdw_peripheral);
@@ -1186,6 +1186,12 @@ post_soft_reset:
/* Registers could be dirty after soft reset or SoundWire enumeration */
regcache_sync(cs35l56->base.regmap);
+ /* Set ASP1 DOUT to high-impedance when it is not transmitting audio data. */
+ ret = regmap_set_bits(cs35l56->base.regmap, CS35L56_ASP1_CONTROL3,
+ CS35L56_ASP1_DOUT_HIZ_CTRL_MASK);
+ if (ret)
+ return dev_err_probe(cs35l56->base.dev, ret, "Failed to write ASP1_CONTROL3\n");
+
cs35l56->base.init_done = true;
complete(&cs35l56->init_completion);
diff --git a/sound/soc/codecs/cs42l42-sdw.c b/sound/soc/codecs/cs42l42-sdw.c
index 974bae4abfad..94a66a325303 100644
--- a/sound/soc/codecs/cs42l42-sdw.c
+++ b/sound/soc/codecs/cs42l42-sdw.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
index 92e37bc1df9d..9f5f1a92561d 100644
--- a/sound/soc/codecs/cs42l43-jack.c
+++ b/sound/soc/codecs/cs42l43-jack.c
@@ -34,7 +34,7 @@ static const unsigned int cs42l43_accdet_db_ms[] = {
static const unsigned int cs42l43_accdet_ramp_ms[] = { 10, 40, 90, 170 };
static const unsigned int cs42l43_accdet_bias_sense[] = {
- 14, 23, 41, 50, 60, 68, 86, 95, 0,
+ 14, 24, 43, 52, 61, 71, 90, 99, 0,
};
static int cs42l43_find_index(struct cs42l43_codec *priv, const char * const prop,
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 581b334a6631..3bbe85091649 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -59,9 +59,6 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
bool micbias_up = false;
int retries = 0;
- /* Disable ground switch */
- snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
-
/* Drive headphones/lineout */
snd_soc_component_update_bits(component, DA7219_HP_L_CTRL,
DA7219_HP_L_AMP_OE_MASK,
@@ -155,9 +152,6 @@ static void da7219_aad_hptest_work(struct work_struct *work)
tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
}
- /* Disable ground switch */
- snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
-
/* Ensure gain ramping at fastest rate */
gain_ramp_ctrl = snd_soc_component_read(component, DA7219_GAIN_RAMP_CTRL);
snd_soc_component_write(component, DA7219_GAIN_RAMP_CTRL, DA7219_GAIN_RAMP_RATE_X8);
@@ -421,6 +415,11 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
* handle a removal, and we can check at the end of
* hptest if we have a valid result or not.
*/
+
+ cancel_delayed_work_sync(&da7219_aad->jack_det_work);
+ /* Disable ground switch */
+ snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+
if (statusa & DA7219_JACK_TYPE_STS_MASK) {
report |= SND_JACK_HEADSET;
mask |= SND_JACK_HEADSET | SND_JACK_LINEOUT;
diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
index ec6859ec0d38..fff4a8b862a7 100644
--- a/sound/soc/codecs/lpass-wsa-macro.c
+++ b/sound/soc/codecs/lpass-wsa-macro.c
@@ -1675,12 +1675,12 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
u16 boost_path_ctl, boost_path_cfg1;
u16 reg, reg_mix;
- if (!strcmp(w->name, "WSA_RX INT0 CHAIN")) {
+ if (!snd_soc_dapm_widget_name_cmp(w, "WSA_RX INT0 CHAIN")) {
boost_path_ctl = CDC_WSA_BOOST0_BOOST_PATH_CTL;
boost_path_cfg1 = CDC_WSA_RX0_RX_PATH_CFG1;
reg = CDC_WSA_RX0_RX_PATH_CTL;
reg_mix = CDC_WSA_RX0_RX_PATH_MIX_CTL;
- } else if (!strcmp(w->name, "WSA_RX INT1 CHAIN")) {
+ } else if (!snd_soc_dapm_widget_name_cmp(w, "WSA_RX INT1 CHAIN")) {
boost_path_ctl = CDC_WSA_BOOST1_BOOST_PATH_CTL;
boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
reg = CDC_WSA_RX1_RX_PATH_CTL;
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 1a137ca3f496..7938b52d741d 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3257,6 +3257,8 @@ int rt5645_set_jack_detect(struct snd_soc_component *component,
RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
+ regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
+ RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
}
rt5645_irq(0, rt5645);
diff --git a/sound/soc/codecs/tas2780.c b/sound/soc/codecs/tas2780.c
index 86bd6c18a944..41076be23854 100644
--- a/sound/soc/codecs/tas2780.c
+++ b/sound/soc/codecs/tas2780.c
@@ -39,7 +39,7 @@ static void tas2780_reset(struct tas2780_priv *tas2780)
usleep_range(2000, 2050);
}
- snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
+ ret = snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
TAS2780_RST);
if (ret)
dev_err(tas2780->dev, "%s:errCode:0x%x Reset error!\n",
diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
index 6951120057e5..a1f04010da95 100644
--- a/sound/soc/codecs/wcd938x-sdw.c
+++ b/sound/soc/codecs/wcd938x-sdw.c
@@ -1278,7 +1278,31 @@ static int wcd9380_probe(struct sdw_slave *pdev,
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- return component_add(dev, &wcd938x_sdw_component_ops);
+ ret = component_add(dev, &wcd938x_sdw_component_ops);
+ if (ret)
+ goto err_disable_rpm;
+
+ return 0;
+
+err_disable_rpm:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ return ret;
+}
+
+static int wcd9380_remove(struct sdw_slave *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ component_del(dev, &wcd938x_sdw_component_ops);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ return 0;
}
static const struct sdw_device_id wcd9380_slave_id[] = {
@@ -1320,6 +1344,7 @@ static const struct dev_pm_ops wcd938x_sdw_pm_ops = {
static struct sdw_driver wcd9380_codec_driver = {
.probe = wcd9380_probe,
+ .remove = wcd9380_remove,
.ops = &wcd9380_slave_ops,
.id_table = wcd9380_slave_id,
.driver = {
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
index a3c680661377..d27b919c63b4 100644
--- a/sound/soc/codecs/wcd938x.c
+++ b/sound/soc/codecs/wcd938x.c
@@ -3325,8 +3325,10 @@ static int wcd938x_populate_dt_data(struct wcd938x_priv *wcd938x, struct device
return dev_err_probe(dev, ret, "Failed to get supplies\n");
ret = regulator_bulk_enable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
- if (ret)
+ if (ret) {
+ regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
return dev_err_probe(dev, ret, "Failed to enable supplies\n");
+ }
wcd938x_dt_parse_micbias_info(dev, wcd938x);
@@ -3435,7 +3437,8 @@ static int wcd938x_bind(struct device *dev)
wcd938x->rxdev = wcd938x_sdw_device_get(wcd938x->rxnode);
if (!wcd938x->rxdev) {
dev_err(dev, "could not find slave with matching of node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unbind;
}
wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
@@ -3443,46 +3446,47 @@ static int wcd938x_bind(struct device *dev)
wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
if (!wcd938x->txdev) {
dev_err(dev, "could not find txslave with matching of node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_rxdev;
}
wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
- if (!wcd938x->tx_sdw_dev) {
- dev_err(dev, "could not get txslave with matching of dev\n");
- return -EINVAL;
- }
	/* As TX is the main CSR reg interface, it should not be suspended first.
	 * Explicitly add the dependency link */
if (!device_link_add(wcd938x->rxdev, wcd938x->txdev, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME)) {
dev_err(dev, "could not devlink tx and rx\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_txdev;
}
if (!device_link_add(dev, wcd938x->txdev, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME)) {
dev_err(dev, "could not devlink wcd and tx\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_rxtx_link;
}
if (!device_link_add(dev, wcd938x->rxdev, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME)) {
dev_err(dev, "could not devlink wcd and rx\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_tx_link;
}
wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL);
if (!wcd938x->regmap) {
dev_err(dev, "could not get TX device regmap\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_rx_link;
}
ret = wcd938x_irq_init(wcd938x, dev);
if (ret) {
dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
- return ret;
+ goto err_remove_rx_link;
}
wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
@@ -3491,27 +3495,45 @@ static int wcd938x_bind(struct device *dev)
ret = wcd938x_set_micbias_data(wcd938x);
if (ret < 0) {
dev_err(dev, "%s: bad micbias pdata\n", __func__);
- return ret;
+ goto err_remove_rx_link;
}
ret = snd_soc_register_component(dev, &soc_codec_dev_wcd938x,
wcd938x_dais, ARRAY_SIZE(wcd938x_dais));
- if (ret)
+ if (ret) {
dev_err(dev, "%s: Codec registration failed\n",
__func__);
+ goto err_remove_rx_link;
+ }
- return ret;
+ return 0;
+err_remove_rx_link:
+ device_link_remove(dev, wcd938x->rxdev);
+err_remove_tx_link:
+ device_link_remove(dev, wcd938x->txdev);
+err_remove_rxtx_link:
+ device_link_remove(wcd938x->rxdev, wcd938x->txdev);
+err_put_txdev:
+ put_device(wcd938x->txdev);
+err_put_rxdev:
+ put_device(wcd938x->rxdev);
+err_unbind:
+ component_unbind_all(dev, wcd938x);
+
+ return ret;
}
static void wcd938x_unbind(struct device *dev)
{
struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+ snd_soc_unregister_component(dev);
device_link_remove(dev, wcd938x->txdev);
device_link_remove(dev, wcd938x->rxdev);
device_link_remove(wcd938x->rxdev, wcd938x->txdev);
- snd_soc_unregister_component(dev);
+ put_device(wcd938x->txdev);
+ put_device(wcd938x->rxdev);
component_unbind_all(dev, wcd938x);
}
@@ -3572,13 +3594,13 @@ static int wcd938x_probe(struct platform_device *pdev)
ret = wcd938x_add_slave_components(wcd938x, dev, &match);
if (ret)
- return ret;
+ goto err_disable_regulators;
wcd938x_reset(wcd938x);
ret = component_master_add_with_match(dev, &wcd938x_comp_ops, match);
if (ret)
- return ret;
+ goto err_disable_regulators;
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
@@ -3588,11 +3610,27 @@ static int wcd938x_probe(struct platform_device *pdev)
pm_runtime_idle(dev);
return 0;
+
+err_disable_regulators:
+ regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+
+ return ret;
}
static void wcd938x_remove(struct platform_device *pdev)
{
- component_master_del(&pdev->dev, &wcd938x_comp_ops);
+ struct device *dev = &pdev->dev;
+ struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+
+ component_master_del(dev, &wcd938x_comp_ops);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
}
#if defined(CONFIG_OF)
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index 22c004179214..9ea4be56d3b7 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -917,7 +917,7 @@ static int jh7110_i2stx0_clk_cfg(struct i2s_clk_config_data *config)
static int dw_i2s_probe(struct platform_device *pdev)
{
- const struct i2s_platform_data *pdata = of_device_get_match_data(&pdev->dev);
+ const struct i2s_platform_data *pdata = pdev->dev.platform_data;
struct dw_i2s_dev *dev;
struct resource *res;
int ret, irq;
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index b70034c07eee..b8a3cb8b7597 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -773,7 +773,7 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
if (IS_ERR(priv->extclk)) {
ret = PTR_ERR(priv->extclk);
if (ret == -EPROBE_DEFER)
- return ret;
+ goto err_priv;
priv->extclk = NULL;
}
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index ba7c0ae82e00..566033f7dd2e 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -242,6 +242,7 @@ int snd_soc_component_notify_control(struct snd_soc_component *component,
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
struct snd_kcontrol *kctl;
+ /* When updating, change also snd_soc_dapm_widget_name_cmp() */
if (component->name_prefix)
snprintf(name, ARRAY_SIZE(name), "%s %s", component->name_prefix, ctl);
else
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f07e83678373..312e55579831 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2728,6 +2728,18 @@ int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_update_dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(widget->dapm);
+ const char *wname = widget->name;
+
+ if (component->name_prefix)
+ wname += strlen(component->name_prefix) + 1; /* plus space */
+
+ return strcmp(wname, s);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_widget_name_cmp);
+
/*
* dapm_update_widget_flags() - Re-compute widget sink and source flags
* @w: The widget for which to update the flags
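/* Illustrative sketch, not part of the patch: snd_soc_dapm_widget_name_cmp()
 * above lets callers match a widget against its unprefixed name even when the
 * component registers widgets as "<prefix> <name>".  A standalone model of
 * that comparison (the helper name and strings are invented for the example):
 */
#include <stdio.h>
#include <string.h>

static int widget_name_cmp(const char *wname, const char *prefix, const char *s)
{
	if (prefix)
		wname += strlen(prefix) + 1;	/* skip "<prefix>" plus the space */
	return strcmp(wname, s);
}

int main(void)
{
	/* A prefixed widget still matches its bare name. */
	printf("%d\n", widget_name_cmp("Left WSA_RX INT0 CHAIN", "Left",
				       "WSA_RX INT0 CHAIN"));		/* 0 */
	/* Without a prefix the comparison is a plain strcmp(). */
	printf("%d\n", widget_name_cmp("WSA_RX INT1 CHAIN", NULL,
				       "WSA_RX INT0 CHAIN") != 0);	/* 1 */
	return 0;
}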
diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
index 371943350fdf..666057d50ea0 100644
--- a/sound/soc/ti/ams-delta.c
+++ b/sound/soc/ti/ams-delta.c
@@ -336,8 +336,8 @@ static void cx81801_hangup(struct tty_struct *tty)
}
/* Line discipline .receive_buf() */
-static void cx81801_receive(struct tty_struct *tty, const u8 *cp,
- const char *fp, int count)
+static void cx81801_receive(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+ size_t count)
{
struct snd_soc_component *component = tty->disc_data;
const unsigned char *c;
diff --git a/tools/arch/x86/include/uapi/asm/unistd_32.h b/tools/arch/x86/include/uapi/asm/unistd_32.h
index 4798f9d18fe8..9de35df1afc3 100644
--- a/tools/arch/x86/include/uapi/asm/unistd_32.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_32.h
@@ -26,6 +26,6 @@
#ifndef __NR_setns
#define __NR_setns 346
#endif
-#ifdef __NR_seccomp
+#ifndef __NR_seccomp
#define __NR_seccomp 354
#endif
diff --git a/tools/build/feature/test-llvm.cpp b/tools/build/feature/test-llvm.cpp
new file mode 100644
index 000000000000..88a3d1bdd9f6
--- /dev/null
+++ b/tools/build/feature/test-llvm.cpp
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/raw_ostream.h"
+#define NUM_VERSION (((LLVM_VERSION_MAJOR) << 16) + (LLVM_VERSION_MINOR << 8) + LLVM_VERSION_PATCH)
+
+#if NUM_VERSION < 0x030900
+# error "LLVM version too low"
+#endif
+int main()
+{
+ llvm::errs() << "Hello World!\n";
+ llvm::llvm_shutdown();
+ return 0;
+}
diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
new file mode 100644
index 000000000000..83971b3cbfce
--- /dev/null
+++ b/tools/include/linux/rwsem.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _TOOLS__RWSEM_H
+#define _TOOLS__RWSEM_H
+
+#include <pthread.h>
+
+struct rw_semaphore {
+ pthread_rwlock_t lock;
+};
+
+static inline int init_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_init(&sem->lock, NULL);
+}
+
+static inline int exit_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_destroy(&sem->lock);
+}
+
+static inline int down_read(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_rdlock(&sem->lock);
+}
+
+static inline int up_read(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_unlock(&sem->lock);
+}
+
+static inline int down_write(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_wrlock(&sem->lock);
+}
+
+static inline int up_write(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_unlock(&sem->lock);
+}
+#endif /* _TOOLS_RWSEM_H */
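/* Illustrative usage sketch, not part of the patch: the new header maps the
 * kernel's rw_semaphore API onto pthread rwlocks for tools code.  The wrappers
 * below duplicate the header so the example is self-contained; real users
 * would simply include <linux/rwsem.h> from tools/include:
 */
#include <pthread.h>
#include <stdio.h>

struct rw_semaphore { pthread_rwlock_t lock; };

static int init_rwsem(struct rw_semaphore *sem) { return pthread_rwlock_init(&sem->lock, NULL); }
static int down_read(struct rw_semaphore *sem)  { return pthread_rwlock_rdlock(&sem->lock); }
static int up_read(struct rw_semaphore *sem)    { return pthread_rwlock_unlock(&sem->lock); }
static int down_write(struct rw_semaphore *sem) { return pthread_rwlock_wrlock(&sem->lock); }
static int up_write(struct rw_semaphore *sem)   { return pthread_rwlock_unlock(&sem->lock); }

static struct rw_semaphore sem;
static int shared_value;

static void *reader(void *arg)
{
	(void)arg;
	down_read(&sem);		/* any number of readers may hold this */
	printf("reader sees %d\n", shared_value);
	up_read(&sem);
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_rwsem(&sem);
	down_write(&sem);		/* exclusive writer */
	shared_value = 42;
	up_write(&sem);

	pthread_create(&t, NULL, reader, NULL);
	pthread_join(t, NULL);
	return 0;
}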
diff --git a/tools/include/nolibc/arch-i386.h b/tools/include/nolibc/arch-i386.h
index 64415b9fac77..28c26a00a762 100644
--- a/tools/include/nolibc/arch-i386.h
+++ b/tools/include/nolibc/arch-i386.h
@@ -167,7 +167,9 @@ void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_
__asm__ volatile (
"xor %ebp, %ebp\n" /* zero the stack frame */
"mov %esp, %eax\n" /* save stack pointer to %eax, as arg1 of _start_c */
- "and $-16, %esp\n" /* last pushed argument must be 16-byte aligned */
+ "add $12, %esp\n" /* avoid over-estimating after the 'and' & 'sub' below */
+ "and $-16, %esp\n" /* the %esp must be 16-byte aligned on 'call' */
+ "sub $12, %esp\n" /* sub 12 to keep it aligned after the push %eax */
"push %eax\n" /* push arg1 on stack to support plain stack modes too */
"call _start_c\n" /* transfer to c runtime */
"hlt\n" /* ensure it does not return */
diff --git a/tools/include/nolibc/crt.h b/tools/include/nolibc/crt.h
index a5f33fef1672..a05655b4ce1d 100644
--- a/tools/include/nolibc/crt.h
+++ b/tools/include/nolibc/crt.h
@@ -13,6 +13,7 @@ const unsigned long *_auxv __attribute__((weak));
static void __stack_chk_init(void);
static void exit(int);
+__attribute__((weak))
void _start_c(long *sp)
{
long argc;
diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps
index f842bc66b967..64d139400db1 100644
--- a/tools/net/ynl/Makefile.deps
+++ b/tools/net/ynl/Makefile.deps
@@ -18,3 +18,4 @@ CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h)
CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h)
CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h)
CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h)
+CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_H,nfsd.h)
diff --git a/tools/net/ynl/generated/Makefile b/tools/net/ynl/generated/Makefile
index f8817d2e56e4..c1935b01902e 100644
--- a/tools/net/ynl/generated/Makefile
+++ b/tools/net/ynl/generated/Makefile
@@ -14,7 +14,7 @@ YNL_GEN_ARG_ethtool:=--user-header linux/ethtool_netlink.h \
TOOL:=../ynl-gen-c.py
-GENS:=ethtool devlink handshake fou netdev
+GENS:=ethtool devlink handshake fou netdev nfsd
SRCS=$(patsubst %,%-user.c,${GENS})
HDRS=$(patsubst %,%-user.h,${GENS})
OBJS=$(patsubst %,%-user.o,${GENS})
diff --git a/tools/net/ynl/generated/devlink-user.c b/tools/net/ynl/generated/devlink-user.c
index 3a8d8499fab6..2cb2518500cb 100644
--- a/tools/net/ynl/generated/devlink-user.c
+++ b/tools/net/ynl/generated/devlink-user.c
@@ -16,19 +16,19 @@
static const char * const devlink_op_strmap[] = {
[3] = "get",
[7] = "port-get",
- [DEVLINK_CMD_SB_GET] = "sb-get",
- [DEVLINK_CMD_SB_POOL_GET] = "sb-pool-get",
- [DEVLINK_CMD_SB_PORT_POOL_GET] = "sb-port-pool-get",
- [DEVLINK_CMD_SB_TC_POOL_BIND_GET] = "sb-tc-pool-bind-get",
+ [13] = "sb-get",
+ [17] = "sb-pool-get",
+ [21] = "sb-port-pool-get",
+ [25] = "sb-tc-pool-bind-get",
[DEVLINK_CMD_PARAM_GET] = "param-get",
[DEVLINK_CMD_REGION_GET] = "region-get",
[DEVLINK_CMD_INFO_GET] = "info-get",
[DEVLINK_CMD_HEALTH_REPORTER_GET] = "health-reporter-get",
- [DEVLINK_CMD_TRAP_GET] = "trap-get",
- [DEVLINK_CMD_TRAP_GROUP_GET] = "trap-group-get",
- [DEVLINK_CMD_TRAP_POLICER_GET] = "trap-policer-get",
- [DEVLINK_CMD_RATE_GET] = "rate-get",
- [DEVLINK_CMD_LINECARD_GET] = "linecard-get",
+ [63] = "trap-get",
+ [67] = "trap-group-get",
+ [71] = "trap-policer-get",
+ [76] = "rate-get",
+ [80] = "linecard-get",
[DEVLINK_CMD_SELFTESTS_GET] = "selftests-get",
};
@@ -838,7 +838,7 @@ devlink_sb_get(struct ynl_sock *ys, struct devlink_sb_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_sb_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_SB_GET;
+ yrs.rsp_cmd = 13;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -876,7 +876,7 @@ devlink_sb_get_dump(struct ynl_sock *ys, struct devlink_sb_get_req_dump *req)
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_sb_get_list);
yds.cb = devlink_sb_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_SB_GET;
+ yds.rsp_cmd = 13;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_GET, 1);
@@ -987,7 +987,7 @@ devlink_sb_pool_get(struct ynl_sock *ys, struct devlink_sb_pool_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_sb_pool_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_SB_POOL_GET;
+ yrs.rsp_cmd = 17;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -1026,7 +1026,7 @@ devlink_sb_pool_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_sb_pool_get_list);
yds.cb = devlink_sb_pool_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_SB_POOL_GET;
+ yds.rsp_cmd = 17;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_POOL_GET, 1);
@@ -1147,7 +1147,7 @@ devlink_sb_port_pool_get(struct ynl_sock *ys,
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_sb_port_pool_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET;
+ yrs.rsp_cmd = 21;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -1187,7 +1187,7 @@ devlink_sb_port_pool_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_sb_port_pool_get_list);
yds.cb = devlink_sb_port_pool_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET;
+ yds.rsp_cmd = 21;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_PORT_POOL_GET, 1);
@@ -1316,7 +1316,7 @@ devlink_sb_tc_pool_bind_get(struct ynl_sock *ys,
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_sb_tc_pool_bind_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET;
+ yrs.rsp_cmd = 25;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -1356,7 +1356,7 @@ devlink_sb_tc_pool_bind_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_sb_tc_pool_bind_get_list);
yds.cb = devlink_sb_tc_pool_bind_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET;
+ yds.rsp_cmd = 25;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_TC_POOL_BIND_GET, 1);
@@ -2183,7 +2183,7 @@ devlink_trap_get(struct ynl_sock *ys, struct devlink_trap_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_trap_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_TRAP_GET;
+ yrs.rsp_cmd = 63;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2223,7 +2223,7 @@ devlink_trap_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_trap_get_list);
yds.cb = devlink_trap_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_TRAP_GET;
+ yds.rsp_cmd = 63;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GET, 1);
@@ -2336,7 +2336,7 @@ devlink_trap_group_get(struct ynl_sock *ys,
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_trap_group_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET;
+ yrs.rsp_cmd = 67;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2376,7 +2376,7 @@ devlink_trap_group_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_trap_group_get_list);
yds.cb = devlink_trap_group_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET;
+ yds.rsp_cmd = 67;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GROUP_GET, 1);
@@ -2483,7 +2483,7 @@ devlink_trap_policer_get(struct ynl_sock *ys,
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_trap_policer_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET;
+ yrs.rsp_cmd = 71;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2523,7 +2523,7 @@ devlink_trap_policer_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_trap_policer_get_list);
yds.cb = devlink_trap_policer_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET;
+ yds.rsp_cmd = 71;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_POLICER_GET, 1);
@@ -2642,7 +2642,7 @@ devlink_rate_get(struct ynl_sock *ys, struct devlink_rate_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_rate_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_RATE_GET;
+ yrs.rsp_cmd = 76;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2682,7 +2682,7 @@ devlink_rate_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_rate_get_list);
yds.cb = devlink_rate_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_RATE_GET;
+ yds.rsp_cmd = 76;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_RATE_GET, 1);
@@ -2786,7 +2786,7 @@ devlink_linecard_get(struct ynl_sock *ys, struct devlink_linecard_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_linecard_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_LINECARD_GET;
+ yrs.rsp_cmd = 80;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2825,7 +2825,7 @@ devlink_linecard_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_linecard_get_list);
yds.cb = devlink_linecard_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_LINECARD_GET;
+ yds.rsp_cmd = 80;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_LINECARD_GET, 1);
diff --git a/tools/net/ynl/generated/nfsd-user.c b/tools/net/ynl/generated/nfsd-user.c
new file mode 100644
index 000000000000..fec6828680ce
--- /dev/null
+++ b/tools/net/ynl/generated/nfsd-user.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN user source */
+
+#include <stdlib.h>
+#include <string.h>
+#include "nfsd-user.h"
+#include "ynl.h"
+#include <linux/nfsd_netlink.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/genetlink.h>
+
+/* Enums */
+static const char * const nfsd_op_strmap[] = {
+ [NFSD_CMD_RPC_STATUS_GET] = "rpc-status-get",
+};
+
+const char *nfsd_op_str(int op)
+{
+ if (op < 0 || op >= (int)MNL_ARRAY_SIZE(nfsd_op_strmap))
+ return NULL;
+ return nfsd_op_strmap[op];
+}
+
+/* Policies */
+struct ynl_policy_attr nfsd_rpc_status_policy[NFSD_A_RPC_STATUS_MAX + 1] = {
+ [NFSD_A_RPC_STATUS_XID] = { .name = "xid", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_FLAGS] = { .name = "flags", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_PROG] = { .name = "prog", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_VERSION] = { .name = "version", .type = YNL_PT_U8, },
+ [NFSD_A_RPC_STATUS_PROC] = { .name = "proc", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_SERVICE_TIME] = { .name = "service_time", .type = YNL_PT_U64, },
+ [NFSD_A_RPC_STATUS_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
+ [NFSD_A_RPC_STATUS_SADDR4] = { .name = "saddr4", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_DADDR4] = { .name = "daddr4", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_SADDR6] = { .name = "saddr6", .type = YNL_PT_BINARY,},
+ [NFSD_A_RPC_STATUS_DADDR6] = { .name = "daddr6", .type = YNL_PT_BINARY,},
+ [NFSD_A_RPC_STATUS_SPORT] = { .name = "sport", .type = YNL_PT_U16, },
+ [NFSD_A_RPC_STATUS_DPORT] = { .name = "dport", .type = YNL_PT_U16, },
+ [NFSD_A_RPC_STATUS_COMPOUND_OPS] = { .name = "compound-ops", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest nfsd_rpc_status_nest = {
+ .max_attr = NFSD_A_RPC_STATUS_MAX,
+ .table = nfsd_rpc_status_policy,
+};
+
+/* Common nested types */
+/* ============== NFSD_CMD_RPC_STATUS_GET ============== */
+/* NFSD_CMD_RPC_STATUS_GET - dump */
+void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp)
+{
+ struct nfsd_rpc_status_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.saddr6);
+ free(rsp->obj.daddr6);
+ free(rsp->obj.compound_ops);
+ free(rsp);
+ }
+}
+
+struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct nfsd_rpc_status_get_list);
+ yds.cb = nfsd_rpc_status_get_rsp_parse;
+ yds.rsp_cmd = NFSD_CMD_RPC_STATUS_GET;
+ yds.rsp_policy = &nfsd_rpc_status_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, NFSD_CMD_RPC_STATUS_GET, 1);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ nfsd_rpc_status_get_list_free(yds.first);
+ return NULL;
+}
+
+const struct ynl_family ynl_nfsd_family = {
+ .name = "nfsd",
+};
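/* Illustrative usage sketch, not part of the patch: how the generated dump
 * helper above might be driven from user space.  ynl_sock_create(),
 * ynl_sock_destroy() and the ynl_dump_foreach() iterator are assumed to come
 * from the YNL C library (tools/net/ynl/lib/ynl.h), and the xid/prog/proc
 * field names are assumed from the generated nfsd_rpc_status_get_rsp struct;
 * only the dump and free helpers shown here come from this file:
 */
#include <stdio.h>

#include "nfsd-user.h"
#include "ynl.h"

int main(void)
{
	struct nfsd_rpc_status_get_list *rpcs;
	struct ynl_error yerr;
	struct ynl_sock *ys;

	ys = ynl_sock_create(&ynl_nfsd_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return 1;
	}

	rpcs = nfsd_rpc_status_get_dump(ys);
	if (rpcs) {
		ynl_dump_foreach(rpcs, st)	/* st: struct nfsd_rpc_status_get_rsp * */
			printf("xid 0x%x prog %u proc %u\n",
			       st->xid, st->prog, st->proc);
		nfsd_rpc_status_get_list_free(rpcs);
	}

	ynl_sock_destroy(ys);
	return 0;
}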
diff --git a/tools/net/ynl/generated/nfsd-user.h b/tools/net/ynl/generated/nfsd-user.h
new file mode 100644
index 000000000000..b6b69501031a
--- /dev/null
+++ b/tools/net/ynl/generated/nfsd-user.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN user header */
+
+#ifndef _LINUX_NFSD_GEN_H
+#define _LINUX_NFSD_GEN_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <linux/types.h>
+#include <linux/nfsd_netlink.h>
+
+struct ynl_sock;
+
+extern const struct ynl_family ynl_nfsd_family;
+
+/* Enums */
+const char *nfsd_op_str(int op);
+
+/* Common nested types */
+/* ============== NFSD_CMD_RPC_STATUS_GET ============== */
+/* NFSD_CMD_RPC_STATUS_GET - dump */
+struct nfsd_rpc_status_get_list {
+ struct nfsd_rpc_status_get_list *next;
+ struct nfsd_rpc_status_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp);
+
+struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys);
+
+#endif /* _LINUX_NFSD_GEN_H */
diff --git a/tools/perf/dlfilters/dlfilter-test-api-v0.c b/tools/perf/dlfilters/dlfilter-test-api-v0.c
index 72f263d49121..4083b1abeaab 100644
--- a/tools/perf/dlfilters/dlfilter-test-api-v0.c
+++ b/tools/perf/dlfilters/dlfilter-test-api-v0.c
@@ -289,6 +289,15 @@ static int check_attr(void *ctx)
return 0;
}
+static int check_object_code(void *ctx, const struct perf_dlfilter_sample *sample)
+{
+ __u8 buf[15];
+
+ CHECK(perf_dlfilter_fns.object_code(ctx, sample->ip, buf, sizeof(buf)) > 0);
+
+ return 0;
+}
+
static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
{
struct filter_data *d = data;
@@ -314,7 +323,8 @@ static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void
if (early && !d->do_early)
return 0;
- if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
+ if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample) ||
+ check_object_code(ctx, sample))
return -1;
if (early)
diff --git a/tools/perf/dlfilters/dlfilter-test-api-v2.c b/tools/perf/dlfilters/dlfilter-test-api-v2.c
index 38e593d92920..32ff619e881c 100644
--- a/tools/perf/dlfilters/dlfilter-test-api-v2.c
+++ b/tools/perf/dlfilters/dlfilter-test-api-v2.c
@@ -308,6 +308,15 @@ static int check_attr(void *ctx)
return 0;
}
+static int check_object_code(void *ctx, const struct perf_dlfilter_sample *sample)
+{
+ __u8 buf[15];
+
+ CHECK(perf_dlfilter_fns.object_code(ctx, sample->ip, buf, sizeof(buf)) > 0);
+
+ return 0;
+}
+
static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
{
struct filter_data *d = data;
@@ -333,7 +342,8 @@ static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void
if (early && !d->do_early)
return 0;
- if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
+ if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample) ||
+ check_object_code(ctx, sample))
return -1;
if (early)
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index 1dbf27822ee2..4a1dc21b0450 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -282,13 +282,21 @@ static struct perf_event_attr *dlfilter__attr(void *ctx)
return &d->evsel->core.attr;
}
+static __s32 code_read(__u64 ip, struct map *map, struct machine *machine, void *buf, __u32 len)
+{
+ u64 offset = map__map_ip(map, ip);
+
+ if (ip + len >= map__end(map))
+ len = map__end(map) - ip;
+
+ return dso__data_read_offset(map__dso(map), machine, offset, buf, len);
+}
+
static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
{
struct dlfilter *d = (struct dlfilter *)ctx;
struct addr_location *al;
struct addr_location a;
- struct map *map;
- u64 offset;
__s32 ret;
if (!d->ctx_valid)
@@ -298,27 +306,17 @@ static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
if (!al)
return -1;
- map = al->map;
-
- if (map && ip >= map__start(map) && ip < map__end(map) &&
+ if (al->map && ip >= map__start(al->map) && ip < map__end(al->map) &&
machine__kernel_ip(d->machine, ip) == machine__kernel_ip(d->machine, d->sample->ip))
- goto have_map;
+ return code_read(ip, al->map, d->machine, buf, len);
addr_location__init(&a);
+
thread__find_map_fb(al->thread, d->sample->cpumode, ip, &a);
- if (!a.map) {
- ret = -1;
- goto out;
- }
+ ret = a.map ? code_read(ip, a.map, d->machine, buf, len) : -1;
- map = a.map;
-have_map:
- offset = map__map_ip(map, ip);
- if (ip + len >= map__end(map))
- len = map__end(map) - ip;
- ret = dso__data_read_offset(map__dso(map), d->machine, offset, buf, len);
-out:
addr_location__exit(&a);
+
return ret;
}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 8de6f39abd1b..d515ba8a0e16 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -295,7 +295,7 @@ static int perf_pmu__parse_scale(struct perf_pmu *pmu, struct perf_pmu_alias *al
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.scale", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.scale", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -330,7 +330,7 @@ static int perf_pmu__parse_unit(struct perf_pmu *pmu, struct perf_pmu_alias *ali
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.unit", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.unit", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -364,7 +364,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.per-pkg", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.per-pkg", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -385,7 +385,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu *pmu, struct perf_pmu_alias
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.snapshot", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.snapshot", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
index 6c93215be8a3..67f985f7d215 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h
@@ -45,7 +45,7 @@ static inline __u32 ifindex_from_link_fd(int fd)
return link_info.tcx.ifindex;
}
-static inline void __assert_mprog_count(int target, int expected, bool miniq, int ifindex)
+static inline void __assert_mprog_count(int target, int expected, int ifindex)
{
__u32 count = 0, attach_flags = 0;
int err;
@@ -53,20 +53,22 @@ static inline void __assert_mprog_count(int target, int expected, bool miniq, in
err = bpf_prog_query(ifindex, target, 0, &attach_flags,
NULL, &count);
ASSERT_EQ(count, expected, "count");
- if (!expected && !miniq)
- ASSERT_EQ(err, -ENOENT, "prog_query");
- else
- ASSERT_EQ(err, 0, "prog_query");
+ ASSERT_EQ(err, 0, "prog_query");
}
static inline void assert_mprog_count(int target, int expected)
{
- __assert_mprog_count(target, expected, false, loopback);
+ __assert_mprog_count(target, expected, loopback);
}
static inline void assert_mprog_count_ifindex(int ifindex, int target, int expected)
{
- __assert_mprog_count(target, expected, false, ifindex);
+ __assert_mprog_count(target, expected, ifindex);
+}
+
+static inline void tc_skel_reset_all_seen(struct test_tc_link *skel)
+{
+ memset(skel->bss, 0, sizeof(*skel->bss));
}
#endif /* TC_HELPERS */
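
As a reading aid, a minimal sketch (not part of the patch) of the pattern the tc tests below switch to with the new helper, assuming the test_tc_link skeleton, ping_cmd and the ASSERT_* macros already used by these selftests:

/* Hypothetical helper illustrating the intended pattern: clear every
 * seen_* flag in the skeleton's .bss, generate traffic, then assert
 * that only the programs attached at this point in the test ran.
 */
static void expect_only_tc1(struct test_tc_link *skel)
{
	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);
	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
}
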
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c
index 74fc1fe9ee26..bc9841144685 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_links.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c
@@ -65,6 +65,7 @@ void serial_test_tc_links_basic(void)
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -97,6 +98,7 @@ void serial_test_tc_links_basic(void)
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -187,6 +189,7 @@ static void test_tc_links_before_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -194,9 +197,6 @@ static void test_tc_links_before_target(int target)
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
.relative_fd = bpf_program__fd(skel->progs.tc2),
@@ -246,6 +246,7 @@ static void test_tc_links_before_target(int target)
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -342,6 +343,7 @@ static void test_tc_links_after_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -349,9 +351,6 @@ static void test_tc_links_after_target(int target)
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER,
.relative_fd = bpf_program__fd(skel->progs.tc1),
@@ -401,6 +400,7 @@ static void test_tc_links_after_target(int target)
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -502,6 +502,7 @@ static void test_tc_links_revision_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -581,22 +582,20 @@ static void test_tc_chain_classic(int target, bool chain_tc_old)
assert_mprog_count(target, 2);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_link__detach(skel->links.tc2);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -707,16 +706,13 @@ static void test_tc_links_replace_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_REPLACE,
.relative_fd = bpf_program__fd(skel->progs.tc2),
@@ -781,16 +777,13 @@ static void test_tc_links_replace_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_link__detach(skel->links.tc2);
if (!ASSERT_OK(err, "link_detach"))
goto cleanup;
@@ -812,16 +805,13 @@ static void test_tc_links_replace_target(int target)
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_link__update_program(skel->links.tc1, skel->progs.tc1);
if (!ASSERT_OK(err, "link_update_self"))
goto cleanup;
@@ -843,6 +833,7 @@ static void test_tc_links_replace_target(int target)
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1254,6 +1245,7 @@ static void test_tc_links_prepend_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1261,9 +1253,6 @@ static void test_tc_links_prepend_target(int target)
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_BEFORE,
);
@@ -1311,6 +1300,7 @@ static void test_tc_links_prepend_target(int target)
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1411,6 +1401,7 @@ static void test_tc_links_append_target(int target)
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1418,9 +1409,6 @@ static void test_tc_links_append_target(int target)
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
-
LIBBPF_OPTS_RESET(optl,
.flags = BPF_F_AFTER,
);
@@ -1468,6 +1456,7 @@ static void test_tc_links_append_target(int target)
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1637,38 +1626,33 @@ static void test_tc_chain_mixed(int target)
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
- skel->bss->seen_tc4 = false;
- skel->bss->seen_tc5 = false;
- skel->bss->seen_tc6 = false;
-
err = bpf_link__update_program(skel->links.tc6, skel->progs.tc4);
if (!ASSERT_OK(err, "link_update"))
goto cleanup;
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
- skel->bss->seen_tc4 = false;
- skel->bss->seen_tc5 = false;
- skel->bss->seen_tc6 = false;
-
err = bpf_link__detach(skel->links.tc6);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
- __assert_mprog_count(target, 0, true, loopback);
+ assert_mprog_count(target, 0);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
@@ -1758,22 +1742,20 @@ static void test_tc_links_ingress(int target, bool chain_tc_old,
assert_mprog_count(target, 2);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_link__detach(skel->links.tc2);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
index 99af79ea21a9..ca506d2fcf58 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
@@ -59,6 +59,7 @@ void serial_test_tc_opts_basic(void)
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -83,6 +84,7 @@ void serial_test_tc_opts_basic(void)
ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -163,6 +165,7 @@ static void test_tc_opts_before_target(int target)
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -219,6 +222,7 @@ static void test_tc_opts_before_target(int target)
ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -313,6 +317,7 @@ static void test_tc_opts_after_target(int target)
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -369,6 +374,7 @@ static void test_tc_opts_after_target(int target)
ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -514,6 +520,7 @@ static void test_tc_opts_revision_target(int target)
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -608,22 +615,20 @@ static void test_tc_chain_classic(int target, bool chain_tc_old)
assert_mprog_count(target, 2);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup_detach;
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -635,7 +640,7 @@ cleanup_detach:
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
- __assert_mprog_count(target, 0, chain_tc_old, loopback);
+ assert_mprog_count(target, 0);
cleanup:
if (tc_attached) {
tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
@@ -730,16 +735,13 @@ static void test_tc_opts_replace_target(int target)
ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = fd2,
@@ -767,16 +769,13 @@ static void test_tc_opts_replace_target(int target)
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
- skel->bss->seen_tc1 = false;
- skel->bss->seen_tc2 = false;
- skel->bss->seen_tc3 = false;
-
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE | BPF_F_BEFORE,
.replace_prog_fd = fd3,
@@ -805,6 +804,7 @@ static void test_tc_opts_replace_target(int target)
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1084,6 +1084,7 @@ static void test_tc_opts_prepend_target(int target)
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1124,6 +1125,7 @@ static void test_tc_opts_prepend_target(int target)
ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1222,6 +1224,7 @@ static void test_tc_opts_append_target(int target)
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1262,6 +1265,7 @@ static void test_tc_opts_append_target(int target)
ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -2250,7 +2254,7 @@ static void test_tc_opts_delete_empty(int target, bool chain_tc_old)
BPF_TC_INGRESS : BPF_TC_EGRESS;
err = bpf_tc_hook_create(&tc_hook);
ASSERT_OK(err, "bpf_tc_hook_create");
- __assert_mprog_count(target, 0, true, loopback);
+ assert_mprog_count(target, 0);
}
err = bpf_prog_detach_opts(0, loopback, target, &optd);
ASSERT_EQ(err, -ENOENT, "prog_detach");
@@ -2316,16 +2320,13 @@ static void test_tc_chain_mixed(int target)
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
- skel->bss->seen_tc4 = false;
- skel->bss->seen_tc5 = false;
- skel->bss->seen_tc6 = false;
-
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = fd3,
@@ -2339,21 +2340,19 @@ static void test_tc_chain_mixed(int target)
assert_mprog_count(target, 1);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
- skel->bss->seen_tc4 = false;
- skel->bss->seen_tc5 = false;
- skel->bss->seen_tc6 = false;
-
cleanup_opts:
err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
- __assert_mprog_count(target, 0, true, loopback);
+ assert_mprog_count(target, 0);
+ tc_skel_reset_all_seen(skel);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
@@ -2462,3 +2461,229 @@ void serial_test_tc_opts_max(void)
test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true);
test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false);
}
+
+static void test_tc_opts_query_target(int target)
+{
+ const size_t attr_size = offsetofend(union bpf_attr, query);
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+ struct test_tc_link *skel;
+ union bpf_attr attr;
+ __u32 prog_ids[5];
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ fd2 = bpf_program__fd(skel->progs.tc2);
+ fd3 = bpf_program__fd(skel->progs.tc3);
+ fd4 = bpf_program__fd(skel->progs.tc4);
+
+ id1 = id_from_prog_fd(fd1);
+ id2 = id_from_prog_fd(fd2);
+ id3 = id_from_prog_fd(fd3);
+ id4 = id_from_prog_fd(fd4);
+
+ assert_mprog_count(target, 0);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 1,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ assert_mprog_count(target, 1);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 2,
+ );
+
+ err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup1;
+
+ assert_mprog_count(target, 2);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 3,
+ );
+
+ err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup2;
+
+ assert_mprog_count(target, 3);
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = 4,
+ );
+
+ err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup3;
+
+ assert_mprog_count(target, 4);
+
+ /* Test 1: Double query via libbpf API */
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids, NULL, "prog_ids");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(optq.count, 4, "count");
+ ASSERT_EQ(optq.revision, 5, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+ /* Test 2: Double query via bpf_attr & bpf(2) directly */
+ memset(&attr, 0, attr_size);
+ attr.query.target_ifindex = loopback;
+ attr.query.attach_type = target;
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ attr.query.prog_ids = ptr_to_u64(prog_ids);
+
+ err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup4;
+
+ ASSERT_EQ(attr.query.count, 4, "count");
+ ASSERT_EQ(attr.query.revision, 5, "revision");
+ ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+ ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+ ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+ ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+ ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+ ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+ ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
+ ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
+ ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+ ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+ ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+ ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+cleanup4:
+ err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 3);
+
+cleanup3:
+ err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 2);
+
+cleanup2:
+ err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 1);
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void serial_test_tc_opts_query(void)
+{
+ test_tc_opts_query_target(BPF_TCX_INGRESS);
+ test_tc_opts_query_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_opts_query_attach_target(int target)
+{
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ struct test_tc_link *skel;
+ __u32 prog_ids[2];
+ __u32 fd1, id1;
+ int err;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ fd1 = bpf_program__fd(skel->progs.tc1);
+ id1 = id_from_prog_fd(fd1);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup;
+
+ ASSERT_EQ(optq.count, 0, "count");
+ ASSERT_EQ(optq.revision, 1, "revision");
+
+ LIBBPF_OPTS_RESET(opta,
+ .expected_revision = optq.revision,
+ );
+
+ err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+
+ memset(prog_ids, 0, sizeof(prog_ids));
+ optq.prog_ids = prog_ids;
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ err = bpf_prog_query_opts(loopback, target, &optq);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup1;
+
+ ASSERT_EQ(optq.count, 1, "count");
+ ASSERT_EQ(optq.revision, 2, "revision");
+ ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+ ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+cleanup1:
+ err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+ ASSERT_OK(err, "prog_detach");
+ assert_mprog_count(target, 0);
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void serial_test_tc_opts_query_attach(void)
+{
+ test_tc_opts_query_attach_target(BPF_TCX_INGRESS);
+ test_tc_opts_query_attach_target(BPF_TCX_EGRESS);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c
index 290c21dbe65a..ce2c61d62fc6 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "timer.skel.h"
+#include "timer_failure.skel.h"
static int timer(struct timer *timer_skel)
{
@@ -49,10 +50,11 @@ void serial_test_timer(void)
timer_skel = timer__open_and_load();
if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
- goto cleanup;
+ return;
err = timer(timer_skel);
ASSERT_OK(err, "timer");
-cleanup:
timer__destroy(timer_skel);
+
+ RUN_TESTS(timer_failure);
}
diff --git a/tools/testing/selftests/bpf/progs/timer_failure.c b/tools/testing/selftests/bpf/progs/timer_failure.c
new file mode 100644
index 000000000000..226d33b5a05c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_failure.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+ struct bpf_timer t;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct elem);
+} timer_map SEC(".maps");
+
+static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer)
+{
+ if (bpf_get_smp_processor_id() % 2)
+ return 1;
+ else
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__failure __msg("should have been in (0x0; 0x0)")
+int BPF_PROG2(test_ret_1, int, a)
+{
+ int key = 0;
+ struct bpf_timer *timer;
+
+ timer = bpf_map_lookup_elem(&timer_map, &key);
+ if (timer) {
+ bpf_timer_init(timer, &timer_map, CLOCK_BOOTTIME);
+ bpf_timer_set_callback(timer, timer_cb_ret1);
+ bpf_timer_start(timer, 1000, 0);
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
new file mode 100644
index 000000000000..bc9514428dba
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
@@ -0,0 +1,13 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test failure of registering a kprobe on a non-unique symbol
+# requires: kprobe_events
+
+SYMBOL='name_show'
+
+# Skip this test on kernels where SYMBOL is unique or does not exist.
+if [ "$(grep -c -E "[[:alnum:]]+ t ${SYMBOL}" /proc/kallsyms)" -le '1' ]; then
+ exit_unsupported
+fi
+
+! echo "p:test_non_unique ${SYMBOL}" > kprobe_events
diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h
index 112bc1da732a..ce33d306c2cb 100644
--- a/tools/testing/selftests/kvm/include/ucall_common.h
+++ b/tools/testing/selftests/kvm/include/ucall_common.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * tools/testing/selftests/kvm/include/kvm_util.h
- *
* Copyright (C) 2018, Google LLC.
*/
#ifndef SELFTEST_KVM_UCALL_COMMON_H
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 4fd042112526..25bc61dac5fb 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -68,6 +68,12 @@ struct xstate {
#define XFEATURE_MASK_OPMASK BIT_ULL(5)
#define XFEATURE_MASK_ZMM_Hi256 BIT_ULL(6)
#define XFEATURE_MASK_Hi16_ZMM BIT_ULL(7)
+#define XFEATURE_MASK_PT BIT_ULL(8)
+#define XFEATURE_MASK_PKRU BIT_ULL(9)
+#define XFEATURE_MASK_PASID BIT_ULL(10)
+#define XFEATURE_MASK_CET_USER BIT_ULL(11)
+#define XFEATURE_MASK_CET_KERNEL BIT_ULL(12)
+#define XFEATURE_MASK_LBR BIT_ULL(15)
#define XFEATURE_MASK_XTILE_CFG BIT_ULL(17)
#define XFEATURE_MASK_XTILE_DATA BIT_ULL(18)
@@ -147,6 +153,7 @@ struct kvm_x86_cpu_feature {
#define X86_FEATURE_CLWB KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_UMIP KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
+#define X86_FEATURE_OSPKE KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
#define X86_FEATURE_LA57 KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_SGX_LC KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
@@ -553,6 +560,13 @@ static inline void xsetbv(u32 index, u64 value)
__asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}
+static inline void wrpkru(u32 pkru)
+{
+ /* Note, ECX and EDX are architecturally required to be '0'. */
+ asm volatile(".byte 0x0f,0x01,0xef\n\t"
+ : : "a" (pkru), "c"(0), "d"(0));
+}
+
static inline struct desc_ptr get_gdt(void)
{
struct desc_ptr gdt;
@@ -908,6 +922,15 @@ static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
!kvm_cpu_has(feature.anti_feature);
}
+static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
+{
+ if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
+ return 0;
+
+ return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
+ ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+}
+
static inline size_t kvm_cpuid2_size(int nr_entries)
{
return sizeof(struct kvm_cpuid2) +
diff --git a/tools/testing/selftests/kvm/lib/guest_sprintf.c b/tools/testing/selftests/kvm/lib/guest_sprintf.c
index c4a69d8aeb68..74627514c4d4 100644
--- a/tools/testing/selftests/kvm/lib/guest_sprintf.c
+++ b/tools/testing/selftests/kvm/lib/guest_sprintf.c
@@ -200,6 +200,13 @@ repeat:
++fmt;
}
+ /*
+ * Play nice with %llu, %llx, etc. KVM selftests only support
+ * 64-bit builds, so just treat %ll* the same as %l*.
+ */
+ if (qualifier == 'l' && *fmt == 'l')
+ ++fmt;
+
/* default base */
base = 10;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/apic.c b/tools/testing/selftests/kvm/lib/x86_64/apic.c
index 7168e25c194e..89153a333e83 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/apic.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/apic.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * tools/testing/selftests/kvm/lib/x86_64/processor.c
- *
* Copyright (C) 2021, Google LLC.
*/
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 20eb2e730800..8698d1ab60d0 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -1033,9 +1033,8 @@ static bool test_loop(const struct test_data *data,
struct test_result *rbestruntime)
{
uint64_t maxslots;
- struct test_result result;
+ struct test_result result = {};
- result.nloops = 0;
if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
&result.nloops,
&result.slot_runtime, &result.guest_runtime)) {
@@ -1089,7 +1088,7 @@ int main(int argc, char *argv[])
.seconds = 5,
.runs = 1,
};
- struct test_result rbestslottime;
+ struct test_result rbestslottime = {};
int tctr;
if (!check_memory_sizes())
@@ -1098,11 +1097,10 @@ int main(int argc, char *argv[])
if (!parse_args(argc, argv, &targs))
return -1;
- rbestslottime.slottimens = 0;
for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) {
const struct test_data *data = &tests[tctr];
unsigned int runctr;
- struct test_result rbestruntime;
+ struct test_result rbestruntime = {};
if (tctr > targs.tfirst)
pr_info("\n");
@@ -1110,7 +1108,6 @@ int main(int argc, char *argv[])
pr_info("Testing %s performance with %i runs, %d seconds each\n",
data->name, targs.runs, targs.seconds);
- rbestruntime.runtimens = 0;
for (runctr = 0; runctr < targs.runs; runctr++)
if (!test_loop(data, &targs,
&rbestslottime, &rbestruntime))
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
index e446d76d1c0c..6c1278562090 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * KVM_GET/SET_* tests
- *
* Copyright (C) 2022, Red Hat, Inc.
*
* Tests for Hyper-V extensions to SVM.
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index 7f36c32fa760..18ac5c1952a3 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * tools/testing/selftests/kvm/nx_huge_page_test.c
- *
* Usage: to be run via nx_huge_page_test.sh, which does the necessary
* environment setup and teardown
*
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
index 0560149e66ed..7cbb409801ee 100755
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
@@ -4,7 +4,6 @@
# Wrapper script which performs setup and cleanup for nx_huge_pages_test.
# Makes use of root privileges to set up huge pages and KVM module parameters.
#
-# tools/testing/selftests/kvm/nx_huge_page_test.sh
# Copyright (C) 2022, Google LLC.
set -e
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 4c4925a8ab45..88b58aab7207 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -139,6 +139,83 @@ static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
static void __attribute__((__flatten__)) guest_code(void *arg)
{
GUEST_SYNC(1);
+
+ if (this_cpu_has(X86_FEATURE_XSAVE)) {
+ uint64_t supported_xcr0 = this_cpu_supported_xcr0();
+ uint8_t buffer[4096];
+
+ memset(buffer, 0xcc, sizeof(buffer));
+
+ set_cr4(get_cr4() | X86_CR4_OSXSAVE);
+ GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
+
+ xsetbv(0, xgetbv(0) | supported_xcr0);
+
+ /*
+ * Modify state for all supported xfeatures to take them out of
+ * their "init" state, i.e. to make them show up in XSTATE_BV.
+ *
+ * Note off-by-default features, e.g. AMX, are out of scope for
+ * this particular testcase as they have a different ABI.
+ */
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_FP);
+ asm volatile ("fincstp");
+
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_SSE);
+ asm volatile ("vmovdqu %0, %%xmm0" :: "m" (buffer));
+
+ if (supported_xcr0 & XFEATURE_MASK_YMM)
+ asm volatile ("vmovdqu %0, %%ymm0" :: "m" (buffer));
+
+ if (supported_xcr0 & XFEATURE_MASK_AVX512) {
+ asm volatile ("kmovq %0, %%k1" :: "r" (-1ull));
+ asm volatile ("vmovupd %0, %%zmm0" :: "m" (buffer));
+ asm volatile ("vmovupd %0, %%zmm16" :: "m" (buffer));
+ }
+
+ if (this_cpu_has(X86_FEATURE_MPX)) {
+ uint64_t bounds[2] = { 10, 0xffffffffull };
+ uint64_t output[2] = { };
+
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDREGS);
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDCSR);
+
+ /*
+ * Don't bother trying to get BNDCSR into the INUSE
+ * state. MSR_IA32_BNDCFGS doesn't count as it isn't
+ * managed via XSAVE/XRSTOR, and BNDCFGU can only be
+ * modified by XRSTOR. Stuffing XSTATE_BV in the host
+ * is simpler than doing XRSTOR here in the guest.
+ *
+ * However, temporarily enable MPX in BNDCFGS so that
+ * BNDMOV actually loads BND1. If MPX isn't *fully*
+ * enabled, all MPX instructions are treated as NOPs.
+ *
+ * Hand encode "bndmov (%rax),%bnd1" as support for MPX
+ * mnemonics/registers has been removed from gcc and
+ * clang (and was never fully supported by clang).
+ */
+ wrmsr(MSR_IA32_BNDCFGS, BIT_ULL(0));
+ asm volatile (".byte 0x66,0x0f,0x1a,0x08" :: "a" (bounds));
+ /*
+ * Hand encode "bndmov %bnd1, (%rax)" to sanity check
+ * that BND1 actually got loaded.
+ */
+ asm volatile (".byte 0x66,0x0f,0x1b,0x08" :: "a" (output));
+ wrmsr(MSR_IA32_BNDCFGS, 0);
+
+ GUEST_ASSERT_EQ(bounds[0], output[0]);
+ GUEST_ASSERT_EQ(bounds[1], output[1]);
+ }
+ if (this_cpu_has(X86_FEATURE_PKU)) {
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_PKRU);
+ set_cr4(get_cr4() | X86_CR4_PKE);
+ GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSPKE));
+
+ wrpkru(-1u);
+ }
+ }
+
GUEST_SYNC(2);
if (arg) {
@@ -153,10 +230,11 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
int main(int argc, char *argv[])
{
+ uint64_t *xstate_bv, saved_xstate_bv;
vm_vaddr_t nested_gva = 0;
-
+ struct kvm_cpuid2 empty_cpuid = {};
struct kvm_regs regs1, regs2;
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu, *vcpuN;
struct kvm_vm *vm;
struct kvm_x86_state *state;
struct ucall uc;
@@ -209,6 +287,34 @@ int main(int argc, char *argv[])
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
+
+ /*
+ * Restore XSAVE state in a dummy vCPU, first without doing
+ * KVM_SET_CPUID2, and then with an empty guest CPUID. Except
+ * for off-by-default xfeatures, e.g. AMX, KVM is supposed to
+ * allow KVM_SET_XSAVE regardless of guest CPUID. Manually
+ * load only XSAVE state, MSRs in particular have a much more
+ * convoluted ABI.
+ *
+ * Load two versions of XSAVE state: one with the actual guest
+ * XSAVE state, and one with all supported features forced "on"
+ * in xstate_bv, e.g. to ensure that KVM allows loading all
+ * supported features, even if something goes awry in saving
+ * the original snapshot.
+ */
+ xstate_bv = (void *)&((uint8_t *)state->xsave->region)[512];
+ saved_xstate_bv = *xstate_bv;
+
+ vcpuN = __vm_vcpu_add(vm, vcpu->id + 1);
+ vcpu_xsave_set(vcpuN, state->xsave);
+ *xstate_bv = kvm_cpu_supported_xcr0();
+ vcpu_xsave_set(vcpuN, state->xsave);
+
+ vcpu_init_cpuid(vcpuN, &empty_cpuid);
+ vcpu_xsave_set(vcpuN, state->xsave);
+ *xstate_bv = saved_xstate_bv;
+ vcpu_xsave_set(vcpuN, state->xsave);
+
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));
diff --git a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
index 5b669818e39a..59c7304f805e 100644
--- a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
+++ b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * svm_vmcall_test
- *
* Copyright © 2021 Amazon.com, Inc. or its affiliates.
- *
- * Xen shared_info / pvclock testing
*/
#include "test_util.h"
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 05898ad9f4d9..9ec9ab60b63e 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * svm_vmcall_test
- *
* Copyright © 2021 Amazon.com, Inc. or its affiliates.
- *
- * Xen shared_info / pvclock testing
*/
#include "test_util.h"
diff --git a/tools/testing/selftests/mm/mremap_dontunmap.c b/tools/testing/selftests/mm/mremap_dontunmap.c
index ca2359835e75..a06e73ec8568 100644
--- a/tools/testing/selftests/mm/mremap_dontunmap.c
+++ b/tools/testing/selftests/mm/mremap_dontunmap.c
@@ -7,6 +7,7 @@
*/
#define _GNU_SOURCE
#include <sys/mman.h>
+#include <linux/mman.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 8b017070960d..4a2881d43989 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -34,6 +34,7 @@ TEST_PROGS += gro.sh
TEST_PROGS += gre_gso.sh
TEST_PROGS += cmsg_so_mark.sh
TEST_PROGS += cmsg_time.sh cmsg_ipv6.sh
+TEST_PROGS += netns-name.sh
TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index e7d2a530618a..66d0db7a2614 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -2437,6 +2437,9 @@ ipv4_mpath_list_test()
run_cmd "ip -n ns2 route add 203.0.113.0/24
nexthop via 172.16.201.2 nexthop via 172.16.202.2"
run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.fib_multipath_hash_policy=1"
+ run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.conf.veth2.rp_filter=0"
+ run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=0"
+ run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.conf.default.rp_filter=0"
set +e
local dmac=$(ip -n ns2 -j link show dev veth2 | jq -r '.[]["address"]')
@@ -2449,7 +2452,7 @@ ipv4_mpath_list_test()
# words, the FIB lookup tracepoint needs to be triggered for every
# packet.
local t0_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
- run_cmd "perf stat -e fib:fib_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
+ run_cmd "perf stat -a -e fib:fib_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
local t1_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
local diff=$(echo $t1_rx_pkts - $t0_rx_pkts | bc -l)
list_rcv_eval $tmp_file $diff
@@ -2494,7 +2497,7 @@ ipv6_mpath_list_test()
# words, the FIB lookup tracepoint needs to be triggered for every
# packet.
local t0_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
- run_cmd "perf stat -e fib6:fib6_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
+ run_cmd "perf stat -a -e fib6:fib6_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
local t1_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
local diff=$(echo $t1_rx_pkts - $t0_rx_pkts | bc -l)
list_rcv_eval $tmp_file $diff
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index ee1f89a872b3..dc895b7b94e1 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -1432,7 +1432,9 @@ chk_rst_nr()
count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
if [ -z "$count" ]; then
print_skip
- elif [ $count -lt $rst_tx ]; then
+ # accept more rst than expected, except when we expect none
+ elif { [ $rst_tx -ne 0 ] && [ $count -lt $rst_tx ]; } ||
+ { [ $rst_tx -eq 0 ] && [ $count -ne 0 ]; }; then
fail_test "got $count MP_RST[s] TX expected $rst_tx"
else
print_ok
@@ -1442,7 +1444,9 @@ chk_rst_nr()
count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
if [ -z "$count" ]; then
print_skip
- elif [ "$count" -lt "$rst_rx" ]; then
+ # accept more rst than expected, except when we expect none
+ elif { [ $rst_rx -ne 0 ] && [ $count -lt $rst_rx ]; } ||
+ { [ $rst_rx -eq 0 ] && [ $count -ne 0 ]; }; then
fail_test "got $count MP_RST[s] RX expected $rst_rx"
else
print_ok
@@ -2305,6 +2309,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_rm_tx_nr 1
chk_rm_nr 1 1
+ chk_rst_nr 0 0
fi
# multiple subflows, remove
@@ -2317,6 +2322,7 @@ remove_tests()
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
chk_rm_nr 2 2
+ chk_rst_nr 0 0
fi
# single address, remove
@@ -2329,6 +2335,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_rm_nr 1 1 invert
+ chk_rst_nr 0 0
fi
# subflow and signal, remove
@@ -2342,6 +2349,7 @@ remove_tests()
chk_join_nr 2 2 2
chk_add_nr 1 1
chk_rm_nr 1 1
+ chk_rst_nr 0 0
fi
# subflows and signal, remove
@@ -2356,6 +2364,7 @@ remove_tests()
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_rm_nr 2 2
+ chk_rst_nr 0 0
fi
# addresses remove
@@ -2370,6 +2379,7 @@ remove_tests()
chk_join_nr 3 3 3
chk_add_nr 3 3
chk_rm_nr 3 3 invert
+ chk_rst_nr 0 0
fi
# invalid addresses remove
@@ -2384,6 +2394,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_add_nr 3 3
chk_rm_nr 3 1 invert
+ chk_rst_nr 0 0
fi
# subflows and signal, flush
@@ -2398,6 +2409,7 @@ remove_tests()
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_rm_nr 1 3 invert simult
+ chk_rst_nr 0 0
fi
# subflows flush
@@ -2417,6 +2429,7 @@ remove_tests()
else
chk_rm_nr 3 3
fi
+ chk_rst_nr 0 0
fi
# addresses flush
@@ -2431,6 +2444,7 @@ remove_tests()
chk_join_nr 3 3 3
chk_add_nr 3 3
chk_rm_nr 3 3 invert simult
+ chk_rst_nr 0 0
fi
# invalid addresses flush
@@ -2445,6 +2459,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_add_nr 3 3
chk_rm_nr 3 1 invert
+ chk_rst_nr 0 0
fi
# remove id 0 subflow
@@ -2456,6 +2471,7 @@ remove_tests()
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
chk_rm_nr 1 1
+ chk_rst_nr 0 0
fi
# remove id 0 address
@@ -2468,6 +2484,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_rm_nr 1 1 invert
+ chk_rst_nr 0 0 invert
fi
}
diff --git a/tools/testing/selftests/net/netns-name.sh b/tools/testing/selftests/net/netns-name.sh
new file mode 100755
index 000000000000..7d3d3fc99461
--- /dev/null
+++ b/tools/testing/selftests/net/netns-name.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+set -o pipefail
+
+NS=netns-name-test
+DEV=dummy-dev0
+DEV2=dummy-dev1
+ALT_NAME=some-alt-name
+
+RET_CODE=0
+
+cleanup() {
+ ip netns del $NS
+}
+
+trap cleanup EXIT
+
+fail() {
+ echo "ERROR: ${1:-unexpected return code} (ret: $_)" >&2
+ RET_CODE=1
+}
+
+ip netns add $NS
+
+#
+# Test basic move without a rename
+#
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link set dev $DEV netns 1 ||
+ fail "Can't perform a netns move"
+ip link show dev $DEV >> /dev/null || fail "Device not found after move"
+ip link del $DEV || fail
+
+#
+# Test move with a conflict
+#
+ip link add name $DEV type dummy
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link set dev $DEV netns 1 2> /dev/null &&
+ fail "Performed a netns move with a name conflict"
+ip link show dev $DEV >> /dev/null || fail "Device not found after move"
+ip -netns $NS link del $DEV || fail
+ip link del $DEV || fail
+
+#
+# Test move with a conflict and rename
+#
+ip link add name $DEV type dummy
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link set dev $DEV netns 1 name $DEV2 ||
+ fail "Can't perform a netns move with rename"
+ip link del $DEV2 || fail
+ip link del $DEV || fail
+
+#
+# Test dup alt-name with netns move
+#
+ip link add name $DEV type dummy || fail
+ip link property add dev $DEV altname $ALT_NAME || fail
+ip -netns $NS link add name $DEV2 type dummy || fail
+ip -netns $NS link property add dev $DEV2 altname $ALT_NAME || fail
+
+ip -netns $NS link set dev $DEV2 netns 1 2> /dev/null &&
+ fail "Moved with alt-name dup"
+
+ip link del $DEV || fail
+ip -netns $NS link del $DEV2 || fail
+
+#
+# Test creating alt-name in one net-ns and using in another
+#
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link property add dev $DEV altname $ALT_NAME || fail
+ip -netns $NS link set dev $DEV netns 1 || fail
+ip link show dev $ALT_NAME >> /dev/null || fail "Can't find alt-name after move"
+ip -netns $NS link show dev $ALT_NAME 2> /dev/null &&
+ fail "Can still find alt-name after move"
+ip link del $DEV || fail
+
+echo -ne "$(basename $0) \t\t\t\t"
+if [ $RET_CODE -eq 0 ]; then
+ echo "[ OK ]"
+else
+ echo "[ FAIL ]"
+fi
+exit $RET_CODE
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index 9c2012d70b08..f8499d4c87f3 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -3,6 +3,8 @@
#
# OVS kernel module self tests
+trap ovs_exit_sig EXIT TERM INT ERR
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
@@ -142,6 +144,12 @@ ovs_add_flow () {
return 0
}
+ovs_del_flows () {
+ info "Deleting all flows from DP: sbx:$1 br:$2"
+ ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py del-flows "$2"
+ return 0
+}
+
ovs_drop_record_and_run () {
local sbx=$1
shift
@@ -198,6 +206,17 @@ test_drop_reason() {
ip netns exec server ip addr add 172.31.110.20/24 dev s1
ip netns exec server ip link set s1 up
+ # Check if drop reasons can be sent
+ ovs_add_flow "test_drop_reason" dropreason \
+ 'in_port(1),eth(),eth_type(0x0806),arp()' 'drop(10)' 2>/dev/null
+ if [ $? == 1 ]; then
+ info "no support for drop reasons - skipping"
+ ovs_exit_sig
+ return $ksft_skip
+ fi
+
+ ovs_del_flows "test_drop_reason" dropreason
+
# Allow ARP
ovs_add_flow "test_drop_reason" dropreason \
'in_port(1),eth(),eth_type(0x0806),arp()' '2' || return 1
@@ -525,7 +544,7 @@ run_test() {
fi
if python3 ovs-dpctl.py -h 2>&1 | \
- grep "Need to install the python" >/dev/null 2>&1; then
+ grep -E "Need to (install|upgrade) the python" >/dev/null 2>&1; then
stdbuf -o0 printf "TEST: %-60s [PYLIB]\n" "${tdesc}"
return $ksft_skip
fi
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index 912dc8c49085..b97e621face9 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -28,8 +28,10 @@ try:
from pyroute2.netlink import nlmsg_atoms
from pyroute2.netlink.exceptions import NetlinkError
from pyroute2.netlink.generic import GenericNetlinkSocket
+ import pyroute2
+
except ModuleNotFoundError:
- print("Need to install the python pyroute2 package.")
+ print("Need to install the python pyroute2 package >= 0.6.")
sys.exit(0)
@@ -1117,12 +1119,14 @@ class ovskey(nla):
"src",
lambda x: str(ipaddress.IPv4Address(x)),
int,
+ convert_ipv4,
),
(
"dst",
"dst",
- lambda x: str(ipaddress.IPv6Address(x)),
+ lambda x: str(ipaddress.IPv4Address(x)),
int,
+ convert_ipv4,
),
("tp_src", "tp_src", "%d", int),
("tp_dst", "tp_dst", "%d", int),
@@ -1904,6 +1908,32 @@ class OvsFlow(GenericNetlinkSocket):
raise ne
return reply
+ def del_flows(self, dpifindex):
+ """
+ Send a del message to the kernel that will drop all flows.
+
+ dpifindex should be a valid datapath ifindex obtained by
+ calling into the OvsDatapath lookup
+ """
+
+ flowmsg = OvsFlow.ovs_flow_msg()
+ flowmsg["cmd"] = OVS_FLOW_CMD_DEL
+ flowmsg["version"] = OVS_DATAPATH_VERSION
+ flowmsg["reserved"] = 0
+ flowmsg["dpifindex"] = dpifindex
+
+ try:
+ reply = self.nlm_request(
+ flowmsg,
+ msg_type=self.prid,
+ msg_flags=NLM_F_REQUEST | NLM_F_ACK,
+ )
+ reply = reply[0]
+ except NetlinkError as ne:
+ print(flowmsg)
+ raise ne
+ return reply
+
def dump(self, dpifindex, flowspec=None):
"""
Returns a list of messages containing flows.
@@ -1998,6 +2028,12 @@ def main(argv):
nlmsg_atoms.ovskey = ovskey
nlmsg_atoms.ovsactions = ovsactions
+ # version check for pyroute2
+ prverscheck = pyroute2.__version__.split(".")
+ if int(prverscheck[0]) == 0 and int(prverscheck[1]) < 6:
+ print("Need to upgrade the python pyroute2 package to >= 0.6.")
+ sys.exit(0)
+
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
@@ -2060,6 +2096,9 @@ def main(argv):
addflcmd.add_argument("flow", help="Flow specification")
addflcmd.add_argument("acts", help="Flow actions")
+ delfscmd = subparsers.add_parser("del-flows")
+ delfscmd.add_argument("flsbr", help="Datapath name")
+
args = parser.parse_args()
if args.verbose > 0:
@@ -2143,6 +2182,11 @@ def main(argv):
flow = OvsFlow.ovs_flow_msg()
flow.parse(args.flow, args.acts, rep["dpifindex"])
ovsflow.add_flow(rep["dpifindex"], flow)
+ elif hasattr(args, "flsbr"):
+ rep = ovsdp.info(args.flsbr, 0)
+ if rep is None:
+ print("DP '%s' not found." % args.flsbr)
+ ovsflow.del_flows(rep["dpifindex"])
return 0
diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
index bb34329e02a7..99ed5bd6e840 100755
--- a/tools/testing/selftests/netfilter/nft_audit.sh
+++ b/tools/testing/selftests/netfilter/nft_audit.sh
@@ -11,6 +11,12 @@ nft --version >/dev/null 2>&1 || {
exit $SKIP_RC
}
+# Run everything in a separate network namespace
+[ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; }
+
+# give other scripts a chance to finish - audit_logread sees all activity
+sleep 1
+
logfile=$(mktemp)
rulefile=$(mktemp)
echo "logging into $logfile"
@@ -93,6 +99,12 @@ do_test 'nft add counter t1 c1' \
do_test 'nft add counter t2 c1; add counter t2 c2' \
'table=t2 family=2 entries=2 op=nft_register_obj'
+for ((i = 3; i <= 500; i++)); do
+ echo "add counter t2 c$i"
+done >$rulefile
+do_test "nft -f $rulefile" \
+'table=t2 family=2 entries=498 op=nft_register_obj'
+
# adding/updating quotas
do_test 'nft add quota t1 q1 { 10 bytes }' \
@@ -101,6 +113,12 @@ do_test 'nft add quota t1 q1 { 10 bytes }' \
do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \
'table=t2 family=2 entries=2 op=nft_register_obj'
+for ((i = 3; i <= 500; i++)); do
+ echo "add quota t2 q$i { 10 bytes }"
+done >$rulefile
+do_test "nft -f $rulefile" \
+'table=t2 family=2 entries=498 op=nft_register_obj'
+
# changing the quota value triggers obj update path
do_test 'nft add quota t1 q1 { 20 bytes }' \
'table=t1 family=2 entries=1 op=nft_register_obj'
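The two batch loops above (counters in the previous hunk, quotas here) write 498 "add ..." commands into $rulefile so that a single "nft -f" transaction registers c3..c500 / q3..q500 and produces one audit record with entries=498; together with the two objects added individually, table t2 ends up holding exactly 500 counters and 500 quotas, which the reset checks below depend on. The same batch file could be generated with a short script (illustrative only, the selftest itself stays in shell):

    # Write 498 "add counter" commands for t2 c3..c500 into a batch file,
    # then feed it to nft in one transaction: nft -f counters.nft
    with open("counters.nft", "w") as f:
        for i in range(3, 501):
            f.write("add counter t2 c%d\n" % i)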
@@ -150,6 +168,40 @@ done
do_test 'nft reset set t1 s' \
'table=t1 family=2 entries=3 op=nft_reset_setelem'
+# resetting counters
+
+do_test 'nft reset counter t1 c1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset counters t1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset counters t2' \
+'table=t2 family=2 entries=342 op=nft_reset_obj
+table=t2 family=2 entries=158 op=nft_reset_obj'
+
+do_test 'nft reset counters' \
+'table=t1 family=2 entries=1 op=nft_reset_obj
+table=t2 family=2 entries=341 op=nft_reset_obj
+table=t2 family=2 entries=159 op=nft_reset_obj'
+
+# resetting quotas
+
+do_test 'nft reset quota t1 q1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset quotas t1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset quotas t2' \
+'table=t2 family=2 entries=315 op=nft_reset_obj
+table=t2 family=2 entries=185 op=nft_reset_obj'
+
+do_test 'nft reset quotas' \
+'table=t1 family=2 entries=1 op=nft_reset_obj
+table=t2 family=2 entries=314 op=nft_reset_obj
+table=t2 family=2 entries=186 op=nft_reset_obj'
+
# deleting rules
readarray -t handles < <(nft -a list chain t1 c1 | \
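The multi-line expectations for the t2 resets reflect that all 500 objects are reset but reported across two audit records (presumably because a single record only has room for a limited number of entries); the table-less "reset counters"/"reset quotas" variants additionally log the single t1 object first, which shifts the split by one. A quick arithmetic sanity check on the expected counts (illustrative):

    # Each pair of t2 records in the expectations above covers all 500 objects.
    pairs = [(342, 158), (341, 159), (315, 185), (314, 186)]
    assert all(a + b == 500 for a, b in pairs)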
diff --git a/tools/testing/selftests/riscv/mm/Makefile b/tools/testing/selftests/riscv/mm/Makefile
index 11e0f0568923..c333263f2b27 100644
--- a/tools/testing/selftests/riscv/mm/Makefile
+++ b/tools/testing/selftests/riscv/mm/Makefile
@@ -5,11 +5,11 @@
# Additional include paths needed by kselftest.h and local headers
CFLAGS += -D_GNU_SOURCE -std=gnu99 -I.
-TEST_GEN_FILES := testcases/mmap_default testcases/mmap_bottomup
+TEST_GEN_FILES := mmap_default mmap_bottomup
-TEST_PROGS := testcases/run_mmap.sh
+TEST_PROGS := run_mmap.sh
include ../../lib.mk
-$(OUTPUT)/mm: testcases/mmap_default.c testcases/mmap_bottomup.c testcases/mmap_tests.h
+$(OUTPUT)/mm: mmap_default.c mmap_bottomup.c mmap_tests.h
$(CC) -o$@ $(CFLAGS) $(LDFLAGS) $^
diff --git a/tools/testing/selftests/riscv/mm/testcases/mmap_bottomup.c b/tools/testing/selftests/riscv/mm/mmap_bottomup.c
index b29379f7e478..1757d19ca89b 100644
--- a/tools/testing/selftests/riscv/mm/testcases/mmap_bottomup.c
+++ b/tools/testing/selftests/riscv/mm/mmap_bottomup.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/mman.h>
-#include <testcases/mmap_test.h>
+#include <mmap_test.h>
#include "../../kselftest_harness.h"
diff --git a/tools/testing/selftests/riscv/mm/testcases/mmap_default.c b/tools/testing/selftests/riscv/mm/mmap_default.c
index d1accb91b726..c63c60b9397e 100644
--- a/tools/testing/selftests/riscv/mm/testcases/mmap_default.c
+++ b/tools/testing/selftests/riscv/mm/mmap_default.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/mman.h>
-#include <testcases/mmap_test.h>
+#include <mmap_test.h>
#include "../../kselftest_harness.h"
diff --git a/tools/testing/selftests/riscv/mm/testcases/mmap_test.h b/tools/testing/selftests/riscv/mm/mmap_test.h
index 9b8434f62f57..9b8434f62f57 100644
--- a/tools/testing/selftests/riscv/mm/testcases/mmap_test.h
+++ b/tools/testing/selftests/riscv/mm/mmap_test.h
diff --git a/tools/testing/selftests/riscv/mm/testcases/run_mmap.sh b/tools/testing/selftests/riscv/mm/run_mmap.sh
index ca5ad7c48bad..ca5ad7c48bad 100755
--- a/tools/testing/selftests/riscv/mm/testcases/run_mmap.sh
+++ b/tools/testing/selftests/riscv/mm/run_mmap.sh
diff --git a/tools/testing/selftests/user_events/abi_test.c b/tools/testing/selftests/user_events/abi_test.c
index 8202f1327c39..f5575ef2007c 100644
--- a/tools/testing/selftests/user_events/abi_test.c
+++ b/tools/testing/selftests/user_events/abi_test.c
@@ -47,7 +47,7 @@ static int change_event(bool enable)
return ret;
}
-static int reg_enable(long *enable, int size, int bit)
+static int reg_enable(void *enable, int size, int bit)
{
struct user_reg reg = {0};
int fd = open(data_file, O_RDWR);
@@ -69,7 +69,7 @@ static int reg_enable(long *enable, int size, int bit)
return ret;
}
-static int reg_disable(long *enable, int bit)
+static int reg_disable(void *enable, int bit)
{
struct user_unreg reg = {0};
int fd = open(data_file, O_RDWR);
@@ -90,7 +90,8 @@ static int reg_disable(long *enable, int bit)
}
FIXTURE(user) {
- long check;
+ int check;
+ long check_long;
bool umount;
};
@@ -99,6 +100,7 @@ FIXTURE_SETUP(user) {
change_event(false);
self->check = 0;
+ self->check_long = 0;
}
FIXTURE_TEARDOWN(user) {
@@ -136,9 +138,9 @@ TEST_F(user, bit_sizes) {
#if BITS_PER_LONG == 8
/* Allow 0-64 bits for 64-bit */
- ASSERT_EQ(0, reg_enable(&self->check, sizeof(long), 63));
- ASSERT_NE(0, reg_enable(&self->check, sizeof(long), 64));
- ASSERT_EQ(0, reg_disable(&self->check, 63));
+ ASSERT_EQ(0, reg_enable(&self->check_long, sizeof(long), 63));
+ ASSERT_NE(0, reg_enable(&self->check_long, sizeof(long), 64));
+ ASSERT_EQ(0, reg_disable(&self->check_long, 63));
#endif
/* Disallowed sizes (everything beside 4 and 8) */
@@ -200,7 +202,7 @@ static int clone_check(void *check)
for (i = 0; i < 10; ++i) {
usleep(100000);
- if (*(long *)check)
+ if (*(int *)check)
return 0;
}
diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h
index 834a90bd3270..822ecaa8e4df 100644
--- a/tools/virtio/linux/dma-mapping.h
+++ b/tools/virtio/linux/dma-mapping.h
@@ -24,11 +24,23 @@ enum dma_data_direction {
#define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
#define dma_map_single(d, p, s, dir) (virt_to_phys(p))
+#define dma_map_single_attrs(d, p, s, dir, a) (virt_to_phys(p))
#define dma_mapping_error(...) (0)
#define dma_unmap_single(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
#define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
+#define sg_dma_address(sg) (0)
+#define dma_need_sync(v, a) (0)
+#define dma_unmap_single_attrs(d, a, s, r, t) do { \
+ (void)(d); (void)(a); (void)(s); (void)(r); (void)(t); \
+} while (0)
+#define dma_sync_single_range_for_cpu(d, a, o, s, r) do { \
+ (void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
+#define dma_sync_single_range_for_device(d, a, o, s, r) do { \
+ (void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
#define dma_max_mapping_size(...) SIZE_MAX
#endif