-rw-r--r--Documentation/ABI/testing/sysfs-class-watchdog34
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-slab13
-rw-r--r--Documentation/admin-guide/cgroup-v1/memory.rst4
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt2
-rw-r--r--Documentation/core-api/kernel-api.rst3
-rw-r--r--Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,i2c.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-rcar.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-emev2.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,iic.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,riic.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-riic.txt)0
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-mediatek.txt2
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-sprd.txt40
-rw-r--r--Documentation/devicetree/bindings/thermal/qoriq-thermal.txt1
-rw-r--r--Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml58
-rw-r--r--Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt1
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.txt22
-rw-r--r--Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt22
-rw-r--r--Documentation/devicetree/bindings/watchdog/watchdog.yaml26
-rw-r--r--Documentation/filesystems/ceph.txt14
-rw-r--r--Documentation/vm/split_page_table_lock.rst10
-rw-r--r--Documentation/watchdog/watchdog-parameters.rst19
-rw-r--r--MAINTAINERS21
-rw-r--r--arch/Kconfig11
-rw-r--r--arch/alpha/include/asm/pgalloc.h2
-rw-r--r--arch/alpha/include/asm/pgtable.h5
-rw-r--r--arch/alpha/include/uapi/asm/mman.h3
-rw-r--r--arch/arc/include/asm/pgalloc.h5
-rw-r--r--arch/arc/include/asm/pgtable.h5
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/include/asm/pgalloc.h2
-rw-r--r--arch/arm/include/asm/pgtable-nommu.h5
-rw-r--r--arch/arm/include/asm/pgtable.h2
-rw-r--r--arch/arm/include/asm/processor.h2
-rw-r--r--arch/arm/include/asm/tlb.h2
-rw-r--r--arch/arm/kernel/process.c5
-rw-r--r--arch/arm/mm/flush.c7
-rw-r--r--arch/arm/mm/mmap.c52
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/include/asm/pgalloc.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h2
-rw-r--r--arch/arm64/include/asm/processor.h2
-rw-r--r--arch/arm64/include/asm/tlb.h2
-rw-r--r--arch/arm64/kernel/process.c8
-rw-r--r--arch/arm64/mm/flush.c3
-rw-r--r--arch/arm64/mm/mmap.c72
-rw-r--r--arch/arm64/mm/mmu.c2
-rw-r--r--arch/arm64/mm/pgd.c2
-rw-r--r--arch/c6x/include/asm/pgtable.h5
-rw-r--r--arch/csky/include/asm/pgalloc.h4
-rw-r--r--arch/csky/include/asm/pgtable.h5
-rw-r--r--arch/h8300/include/asm/pgtable.h6
-rw-r--r--arch/hexagon/include/asm/pgalloc.h4
-rw-r--r--arch/hexagon/include/asm/pgtable.h3
-rw-r--r--arch/hexagon/mm/Makefile2
-rw-r--r--arch/hexagon/mm/init.c13
-rw-r--r--arch/hexagon/mm/pgalloc.c10
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/include/asm/pgalloc.h52
-rw-r--r--arch/ia64/include/asm/pgtable.h5
-rw-r--r--arch/ia64/kernel/irq_ia64.c1
-rw-r--r--arch/ia64/mm/contig.c1
-rw-r--r--arch/ia64/mm/discontig.c2
-rw-r--r--arch/ia64/mm/init.c2
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h6
-rw-r--r--arch/m68k/include/asm/motorola_pgalloc.h6
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h7
-rw-r--r--arch/m68k/include/asm/pgtable_no.h7
-rw-r--r--arch/m68k/include/asm/sun3_pgalloc.h2
-rw-r--r--arch/microblaze/Kconfig3
-rw-r--r--arch/microblaze/boot/dts/system.dts16
-rw-r--r--arch/microblaze/configs/mmu_defconfig22
-rw-r--r--arch/microblaze/configs/nommu_defconfig25
-rw-r--r--arch/microblaze/include/asm/io.h1
-rw-r--r--arch/microblaze/include/asm/pgalloc.h122
-rw-r--r--arch/microblaze/include/asm/pgtable.h7
-rw-r--r--arch/microblaze/include/asm/uaccess.h42
-rw-r--r--arch/microblaze/kernel/reset.c87
-rw-r--r--arch/microblaze/mm/consistent.c221
-rw-r--r--arch/microblaze/mm/pgtable.c4
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/include/asm/pgalloc.h4
-rw-r--r--arch/mips/include/asm/pgtable.h5
-rw-r--r--arch/mips/include/asm/processor.h5
-rw-r--r--arch/mips/include/uapi/asm/mman.h3
-rw-r--r--arch/mips/mm/mmap.c84
-rw-r--r--arch/nds32/include/asm/pgalloc.h2
-rw-r--r--arch/nds32/include/asm/pgtable.h2
-rw-r--r--arch/nios2/include/asm/pgalloc.h4
-rw-r--r--arch/nios2/include/asm/pgtable.h2
-rw-r--r--arch/openrisc/include/asm/pgalloc.h8
-rw-r--r--arch/openrisc/include/asm/pgtable.h5
-rw-r--r--arch/parisc/include/asm/pgalloc.h2
-rw-r--r--arch/parisc/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/uapi/asm/mman.h3
-rw-r--r--arch/powerpc/include/asm/pgalloc.h2
-rw-r--r--arch/powerpc/include/asm/pgtable.h1
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c2
-rw-r--r--arch/powerpc/mm/book3s64/iommu_api.c7
-rw-r--r--arch/powerpc/mm/hugetlbpage.c2
-rw-r--r--arch/powerpc/mm/pgtable-frag.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c207
-rw-r--r--arch/riscv/Kconfig12
-rw-r--r--arch/riscv/include/asm/pgalloc.h6
-rw-r--r--arch/riscv/include/asm/pgtable.h5
-rw-r--r--arch/s390/hypfs/inode.c137
-rw-r--r--arch/s390/include/asm/cpu_mf.h10
-rw-r--r--arch/s390/include/asm/perf_event.h2
-rw-r--r--arch/s390/include/asm/pgtable.h6
-rw-r--r--arch/s390/include/uapi/asm/zcrypt.h4
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c165
-rw-r--r--arch/s390/kernel/topology.c3
-rw-r--r--arch/s390/mm/pgalloc.c6
-rw-r--r--arch/sh/include/asm/pgalloc.h46
-rw-r--r--arch/sh/include/asm/pgtable.h5
-rw-r--r--arch/sh/mm/Kconfig3
-rw-r--r--arch/sh/mm/nommu.c4
-rw-r--r--arch/sparc/include/asm/pgalloc_32.h2
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h2
-rw-r--r--arch/sparc/include/asm/pgtable_32.h5
-rw-r--r--arch/sparc/include/asm/pgtable_64.h6
-rw-r--r--arch/sparc/mm/init_32.c1
-rw-r--r--arch/sparc/mm/init_64.c4
-rw-r--r--arch/sparc/mm/srmmu.c4
-rw-r--r--arch/um/include/asm/pgalloc.h4
-rw-r--r--arch/um/include/asm/pgtable.h2
-rw-r--r--arch/unicore32/include/asm/pgalloc.h2
-rw-r--r--arch/unicore32/include/asm/pgtable.h2
-rw-r--r--arch/unicore32/include/asm/tlb.h2
-rw-r--r--arch/x86/include/asm/pgtable_32.h2
-rw-r--r--arch/x86/include/asm/pgtable_64.h3
-rw-r--r--arch/x86/mm/pat_rbtree.c19
-rw-r--r--arch/x86/mm/pgtable.c8
-rw-r--r--arch/xtensa/include/asm/pgalloc.h4
-rw-r--r--arch/xtensa/include/asm/pgtable.h1
-rw-r--r--arch/xtensa/include/asm/tlbflush.h3
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h3
-rw-r--r--block/bfq-iosched.c35
-rw-r--r--block/blk-core.c7
-rw-r--r--block/blk-flush.c10
-rw-r--r--block/blk-integrity.c11
-rw-r--r--block/blk-iocost.c30
-rw-r--r--block/blk-mq-sched.c2
-rw-r--r--block/blk-mq.c11
-rw-r--r--block/blk-sysfs.c14
-rw-r--r--block/blk.h9
-rw-r--r--block/bsg-lib.c10
-rw-r--r--block/elevator.c31
-rw-r--r--block/t10-pi.c169
-rw-r--r--drivers/acpi/acpi_apd.c7
-rw-r--r--drivers/ata/libahci_platform.c9
-rw-r--r--drivers/base/memory.c44
-rw-r--r--drivers/base/node.c55
-rw-r--r--drivers/block/drbd/drbd_interval.c29
-rw-r--r--drivers/block/nbd.c108
-rw-r--r--drivers/block/pktcdvd.c1
-rw-r--r--drivers/block/rbd.c18
-rw-r--r--drivers/char/tpm/tpm-interface.c23
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c5
-rw-r--r--drivers/gpio/gpio-mvebu.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h7
-rw-r--r--drivers/gpu/drm/amd/include/renoir_ip_offset.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c70
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.h25
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c30
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c22
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c3
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_ioctl.c7
-rw-r--r--drivers/gpu/drm/drm_mode_object.c4
-rw-r--r--drivers/gpu/drm/drm_self_refresh_helper.c73
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c8
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c25
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c5
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c10
-rw-r--r--drivers/hid/hid-hyperv.c4
-rw-r--r--drivers/hv/channel_mgmt.c161
-rw-r--r--drivers/hv/connection.c8
-rw-r--r--drivers/hv/hv.c66
-rw-r--r--drivers/hv/hv_balloon.c143
-rw-r--r--drivers/hv/hyperv_vmbus.h30
-rw-r--r--drivers/hv/vmbus_drv.c265
-rw-r--r--drivers/i2c/busses/Kconfig17
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-axxia.c152
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c6
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c11
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c49
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c14
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c41
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c22
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c4
-rw-r--r--drivers/i2c/busses/i2c-fsi.c4
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c145
-rw-r--r--drivers/i2c/busses/i2c-icy.c230
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c17
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-mxs.c4
-rw-r--r--drivers/i2c/busses/i2c-ocores.c5
-rw-r--r--drivers/i2c/busses/i2c-piix4.c34
-rw-r--r--drivers/i2c/busses/i2c-sprd.c25
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c2
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c56
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c26
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c22
-rw-r--r--drivers/i2c/i2c-core-base.c20
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c42
-rw-r--r--drivers/infiniband/core/umem.c5
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c5
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c5
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c10
-rw-r--r--drivers/md/dm-integrity.c10
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c6
-rw-r--r--drivers/media/i2c/adv7604.c22
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c9
-rw-r--r--drivers/misc/eeprom/at24.c1
-rw-r--r--drivers/net/wimax/i2400m/tx.c3
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c22
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.h8
-rw-r--r--drivers/ntb/hw/idt/Kconfig6
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c2
-rw-r--r--drivers/ntb/ntb_transport.c2
-rw-r--r--drivers/ntb/test/ntb_perf.c2
-rw-r--r--drivers/nvme/host/core.c9
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c2
-rw-r--r--drivers/platform/x86/pmc_atom.c7
-rw-r--r--drivers/pwm/Kconfig13
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c40
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c2
-rw-r--r--drivers/pwm/pwm-atmel.c49
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c2
-rw-r--r--drivers/pwm/pwm-bcm2835.c19
-rw-r--r--drivers/pwm/pwm-cros-ec.c2
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c8
-rw-r--r--drivers/pwm/pwm-hibvt.c2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c4
-rw-r--r--drivers/pwm/pwm-imx27.c6
-rw-r--r--drivers/pwm/pwm-jz4740.c7
-rw-r--r--drivers/pwm/pwm-lpss.c2
-rw-r--r--drivers/pwm/pwm-mediatek.c231
-rw-r--r--drivers/pwm/pwm-meson.c4
-rw-r--r--drivers/pwm/pwm-mxs.c4
-rw-r--r--drivers/pwm/pwm-rcar.c4
-rw-r--r--drivers/pwm/pwm-rockchip.c18
-rw-r--r--drivers/pwm/pwm-sifive.c6
-rw-r--r--drivers/pwm/pwm-sprd.c309
-rw-r--r--drivers/pwm/pwm-sti.c4
-rw-r--r--drivers/pwm/pwm-stm32-lp.c8
-rw-r--r--drivers/pwm/pwm-stm32.c4
-rw-r--r--drivers/pwm/pwm-sun4i.c10
-rw-r--r--drivers/pwm/pwm-zx.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/device_ops.c23
-rw-r--r--drivers/s390/crypto/ap_bus.c12
-rw-r--r--drivers/s390/crypto/ap_bus.h3
-rw-r--r--drivers/s390/crypto/pkey_api.c113
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.h3
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c72
-rw-r--r--drivers/scsi/sd.c8
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c4
-rw-r--r--drivers/target/tcm_fc/tfc_io.c3
-rw-r--r--drivers/tee/tee_shm.c1
-rw-r--r--drivers/thermal/armada_thermal.c5
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c6
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c96
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c6
-rw-r--r--drivers/thermal/qcom/tsens-8960.c2
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c12
-rw-r--r--drivers/thermal/qcom/tsens-v1.c1
-rw-r--r--drivers/thermal/qcom/tsens.h1
-rw-r--r--drivers/thermal/qoriq_thermal.c45
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c3
-rw-r--r--drivers/thermal/tegra/soctherm.c2
-rw-r--r--drivers/thermal/thermal_core.c44
-rw-r--r--drivers/thermal/thermal_hwmon.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c233
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c8
-rw-r--r--drivers/vfio/vfio_iommu_type1.c2
-rw-r--r--drivers/watchdog/Kconfig33
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/aspeed_wdt.c69
-rw-r--r--drivers/watchdog/ath79_wdt.c4
-rw-r--r--drivers/watchdog/cpwd.c25
-rw-r--r--drivers/watchdog/diag288_wdt.c3
-rw-r--r--drivers/watchdog/f71808e_wdt.c17
-rw-r--r--drivers/watchdog/iTCO_wdt.c26
-rw-r--r--drivers/watchdog/imx2_wdt.c4
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c243
-rw-r--r--drivers/watchdog/imx_sc_wdt.c9
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/ks8695_wdt.c319
-rw-r--r--drivers/watchdog/nuc900_wdt.c302
-rw-r--r--drivers/watchdog/orion_wdt.c66
-rw-r--r--drivers/watchdog/qcom-wdt.c84
-rw-r--r--drivers/watchdog/sprd_wdt.c4
-rw-r--r--drivers/watchdog/ziirave_wdt.c351
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/pci.c21
-rw-r--r--drivers/xen/swiotlb-xen.c5
-rw-r--r--fs/binfmt_elf.c23
-rw-r--r--fs/ceph/Makefile2
-rw-r--r--fs/ceph/addr.c61
-rw-r--r--fs/ceph/cache.c2
-rw-r--r--fs/ceph/caps.c173
-rw-r--r--fs/ceph/debugfs.c1
-rw-r--r--fs/ceph/export.c60
-rw-r--r--fs/ceph/file.c104
-rw-r--r--fs/ceph/inode.c50
-rw-r--r--fs/ceph/io.c163
-rw-r--r--fs/ceph/io.h12
-rw-r--r--fs/ceph/locks.c8
-rw-r--r--fs/ceph/mds_client.c110
-rw-r--r--fs/ceph/mds_client.h8
-rw-r--r--fs/ceph/super.c52
-rw-r--r--fs/ceph/super.h49
-rw-r--r--fs/ceph/xattr.c76
-rw-r--r--fs/fat/dir.c17
-rw-r--r--fs/fat/fatent.c3
-rw-r--r--fs/fs_context.c14
-rw-r--r--fs/fuse/cuse.c101
-rw-r--r--fs/fuse/dev.c654
-rw-r--r--fs/fuse/dir.c283
-rw-r--r--fs/fuse/file.c1227
-rw-r--r--fs/fuse/fuse_i.h350
-rw-r--r--fs/fuse/inode.c553
-rw-r--r--fs/fuse/readdir.c72
-rw-r--r--fs/fuse/xattr.c76
-rw-r--r--fs/gfs2/incore.h8
-rw-r--r--fs/gfs2/ops_fstype.c495
-rw-r--r--fs/gfs2/super.c333
-rw-r--r--fs/gfs2/super.h3
-rw-r--r--fs/inode.c3
-rw-r--r--fs/io_uring.c291
-rw-r--r--fs/iomap/direct-io.c24
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/jbd2/transaction.c12
-rw-r--r--fs/jffs2/super.c2
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/nfs/dir.c41
-rw-r--r--fs/nfs/filelayout/filelayout.c1
-rw-r--r--fs/nfs/internal.h8
-rw-r--r--fs/nfs/nfs3proc.c45
-rw-r--r--fs/nfs/nfs4_fs.h11
-rw-r--r--fs/nfs/nfs4proc.c315
-rw-r--r--fs/nfs/nfs4state.c22
-rw-r--r--fs/nfs/nfs4xdr.c2
-rw-r--r--fs/nfs/pnfs.c71
-rw-r--r--fs/nfs/pnfs.h17
-rw-r--r--fs/nfs/super.c9
-rw-r--r--fs/ntfs/mft.c12
-rw-r--r--fs/ntfs/namei.c2
-rw-r--r--fs/ntfs/runlist.c2
-rw-r--r--fs/ntfs/super.c2
-rw-r--r--fs/ocfs2/alloc.c20
-rw-r--r--fs/ocfs2/aops.c13
-rw-r--r--fs/ocfs2/blockcheck.c26
-rw-r--r--fs/ocfs2/cluster/heartbeat.c103
-rw-r--r--fs/ocfs2/dir.c3
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h1
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c55
-rw-r--r--fs/ocfs2/dlm/dlmdebug.h16
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c7
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c23
-rw-r--r--fs/ocfs2/dlmglue.c27
-rw-r--r--fs/ocfs2/extent_map.c3
-rw-r--r--fs/ocfs2/file.c13
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/journal.h42
-rw-r--r--fs/ocfs2/namei.c2
-rw-r--r--fs/ocfs2/ocfs2.h3
-rw-r--r--fs/ocfs2/super.c10
-rw-r--r--fs/open.c10
-rw-r--r--fs/proc/meminfo.c8
-rw-r--r--fs/proc/task_mmu.c6
-rw-r--r--fs/proc_namespace.c2
-rw-r--r--fs/reiserfs/do_balan.c15
-rw-r--r--fs/reiserfs/fix_node.c6
-rw-r--r--fs/reiserfs/journal.c22
-rw-r--r--fs/reiserfs/lbalance.c3
-rw-r--r--fs/reiserfs/objectid.c3
-rw-r--r--fs/reiserfs/prints.c3
-rw-r--r--fs/reiserfs/stree.c4
-rw-r--r--fs/super.c5
-rw-r--r--fs/userfaultfd.c22
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h7
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c13
-rw-r--r--fs/xfs/libxfs/xfs_sb.c2
-rw-r--r--fs/xfs/scrub/alloc.c3
-rw-r--r--fs/xfs/xfs_buf.c4
-rw-r--r--fs/xfs/xfs_file.c14
-rw-r--r--fs/xfs/xfs_sysfs.c13
-rw-r--r--include/asm-generic/bug.h53
-rw-r--r--include/asm-generic/pgalloc.h13
-rw-r--r--include/asm-generic/pgtable.h7
-rw-r--r--include/drm/drm_crtc.h10
-rw-r--r--include/drm/drm_self_refresh_helper.h6
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/ceph/libceph.h1
-rw-r--r--include/linux/ceph/messenger.h1
-rw-r--r--include/linux/ceph/mon_client.h1
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/compaction.h22
-rw-r--r--include/linux/cpumask.h14
-rw-r--r--include/linux/fs.h32
-rw-r--r--include/linux/fs_context.h1
-rw-r--r--include/linux/huge_mm.h9
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/hyperv.h16
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/interval_tree_generic.h22
-rw-r--r--include/linux/iomap.h10
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/kgdb.h2
-rw-r--r--include/linux/khugepaged.h12
-rw-r--r--include/linux/memcontrol.h23
-rw-r--r--include/linux/memory.h7
-rw-r--r--include/linux/mm.h41
-rw-r--r--include/linux/mm_types.h1
-rw-r--r--include/linux/mm_types_task.h4
-rw-r--r--include/linux/mmzone.h14
-rw-r--r--include/linux/nfs_fs.h3
-rw-r--r--include/linux/page_ext.h1
-rw-r--r--include/linux/pagemap.h10
-rw-r--r--include/linux/printk.h22
-rw-r--r--include/linux/pwm.h4
-rw-r--r--include/linux/quicklist.h94
-rw-r--r--include/linux/rbtree_augmented.h88
-rw-r--r--include/linux/shrinker.h7
-rw-r--r--include/linux/slab.h62
-rw-r--r--include/linux/string.h5
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/linux/sunrpc/xdr.h2
-rw-r--r--include/linux/sunrpc/xprt.h1
-rw-r--r--include/linux/sunrpc/xprtrdma.h4
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/t10-pi.h14
-rw-r--r--include/linux/thread_info.h2
-rw-r--r--include/linux/uaccess.h21
-rw-r--r--include/linux/vmalloc.h20
-rw-r--r--include/linux/zpool.h3
-rw-r--r--include/trace/events/rpcrdma.h88
-rw-r--r--include/trace/events/writeback.h38
-rw-r--r--include/uapi/asm-generic/mman-common.h3
-rw-r--r--include/uapi/linux/coff.h5
-rw-r--r--include/uapi/linux/fuse.h4
-rw-r--r--include/uapi/linux/io_uring.h2
-rw-r--r--init/main.c6
-rw-r--r--ipc/mqueue.c22
-rw-r--r--ipc/sem.c3
-rw-r--r--kernel/bpf/inode.c92
-rw-r--r--kernel/debug/debug_core.c31
-rw-r--r--kernel/elfcore.c1
-rw-r--r--kernel/events/core.c6
-rw-r--r--kernel/events/uprobes.c81
-rw-r--r--kernel/fork.c16
-rw-r--r--kernel/kexec_core.c2
-rw-r--r--kernel/panic.c42
-rw-r--r--kernel/resource.c4
-rw-r--r--kernel/sched/idle.c1
-rw-r--r--kernel/sysctl.c6
-rw-r--r--kernel/time/timer.c8
-rw-r--r--kernel/trace/trace_kprobe.c5
-rw-r--r--kernel/trace/trace_uprobe.c5
-rw-r--r--lib/Kconfig.debug19
-rw-r--r--lib/Kconfig.kasan8
-rw-r--r--lib/bug.c11
-rw-r--r--lib/extable.c1
-rw-r--r--lib/generic-radix-tree.c4
-rw-r--r--lib/hexdump.c21
-rw-r--r--lib/iov_iter.c2
-rw-r--r--lib/lzo/lzo1x_compress.c14
-rw-r--r--lib/rbtree_test.c37
-rw-r--r--lib/show_mem.c5
-rw-r--r--lib/string.c12
-rw-r--r--lib/strncpy_from_user.c3
-rw-r--r--lib/strnlen_user.c3
-rw-r--r--lib/test_kasan.c41
-rw-r--r--mm/Kconfig16
-rw-r--r--mm/Kconfig.debug4
-rw-r--r--mm/Makefile4
-rw-r--r--mm/compaction.c50
-rw-r--r--mm/filemap.c168
-rw-r--r--mm/frame_vector.c2
-rw-r--r--mm/gup.c129
-rw-r--r--mm/huge_memory.c123
-rw-r--r--mm/hugetlb.c89
-rw-r--r--mm/hugetlb_cgroup.c2
-rw-r--r--mm/init-mm.c2
-rw-r--r--mm/internal.h2
-rw-r--r--mm/kasan/common.c32
-rw-r--r--mm/kasan/kasan.h14
-rw-r--r--mm/kasan/report.c44
-rw-r--r--mm/kasan/tags_report.c24
-rw-r--r--mm/khugepaged.c366
-rw-r--r--mm/kmemleak.c326
-rw-r--r--mm/ksm.c18
-rw-r--r--mm/madvise.c320
-rw-r--r--mm/memcontrol.c198
-rw-r--r--mm/memfd.c2
-rw-r--r--mm/memory.c13
-rw-r--r--mm/memory_hotplug.c103
-rw-r--r--mm/mempolicy.c7
-rw-r--r--mm/migrate.c15
-rw-r--r--mm/mincore.c2
-rw-r--r--mm/mlock.c4
-rw-r--r--mm/mmap.c46
-rw-r--r--mm/mmu_gather.c2
-rw-r--r--mm/mprotect.c2
-rw-r--r--mm/mremap.c3
-rw-r--r--mm/msync.c2
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/oom_kill.c26
-rw-r--r--mm/page_alloc.c27
-rw-r--r--mm/page_owner.c123
-rw-r--r--mm/page_poison.c2
-rw-r--r--mm/page_vma_mapped.c3
-rw-r--r--mm/quicklist.c103
-rw-r--r--mm/rmap.c25
-rw-r--r--mm/shmem.c12
-rw-r--r--mm/slab.h64
-rw-r--r--mm/slab_common.c37
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slub.c22
-rw-r--r--mm/sparse.c25
-rw-r--r--mm/swap.c58
-rw-r--r--mm/swap_state.c6
-rw-r--r--mm/usercopy.c8
-rw-r--r--mm/util.c122
-rw-r--r--mm/vmalloc.c89
-rw-r--r--mm/vmscan.c211
-rw-r--r--mm/vmstat.c2
-rw-r--r--mm/z3fold.c154
-rw-r--r--mm/zpool.c16
-rw-r--r--mm/zsmalloc.c23
-rw-r--r--mm/zswap.c15
-rw-r--r--net/ceph/ceph_common.c37
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/ceph/mon_client.c7
-rw-r--r--net/ceph/osd_client.c65
-rw-r--r--net/ceph/osdmap.c69
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/clnt.c26
-rw-r--r--net/sunrpc/sched.c32
-rw-r--r--net/sunrpc/xdr.c65
-rw-r--r--net/sunrpc/xprt.c61
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c4
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c166
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c71
-rw-r--r--net/sunrpc/xprtrdma/transport.c15
-rw-r--r--net/sunrpc/xprtrdma/verbs.c263
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h59
-rw-r--r--net/sunrpc/xprtsock.c8
-rw-r--r--net/xdp/xdp_umem.c9
-rw-r--r--net/xdp/xsk.c2
-rwxr-xr-xscripts/checkpatch.pl69
-rw-r--r--scripts/gdb/linux/symbols.py4
-rw-r--r--security/keys/trusted.c5
-rw-r--r--sound/firewire/dice/dice-alesis.c2
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_analog.c1
-rw-r--r--sound/pci/hda/patch_realtek.c28
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c12
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.h1
-rw-r--r--sound/soc/codecs/pcm3168a.c3
-rw-r--r--sound/soc/fsl/fsl_sai.c15
-rw-r--r--sound/soc/fsl/fsl_sai.h1
-rw-r--r--sound/soc/sh/rcar/ssi.c10
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--sound/soc/ti/Kconfig11
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h3
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd.h2
-rw-r--r--tools/hv/Build3
-rw-r--r--tools/hv/Makefile51
-rw-r--r--tools/include/asm/bug.h1
-rw-r--r--tools/include/linux/rbtree.h71
-rw-r--r--tools/include/linux/rbtree_augmented.h119
-rw-r--r--tools/include/uapi/asm-generic/unistd.h2
-rw-r--r--tools/include/uapi/linux/prctl.h7
-rw-r--r--tools/lib/rbtree.c37
-rw-r--r--tools/lib/traceevent/Build11
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_print.txt130
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt10
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-handle.txt8
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-plugins.txt99
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent.txt15
-rw-r--r--tools/lib/traceevent/Makefile94
-rw-r--r--tools/lib/traceevent/event-parse.c22
-rw-r--r--tools/lib/traceevent/event-parse.h2
-rw-r--r--tools/lib/traceevent/plugins/Build10
-rw-r--r--tools/lib/traceevent/plugins/Makefile222
-rw-r--r--tools/lib/traceevent/plugins/plugin_cfg80211.c (renamed from tools/lib/traceevent/plugin_cfg80211.c)0
-rw-r--r--tools/lib/traceevent/plugins/plugin_function.c (renamed from tools/lib/traceevent/plugin_function.c)0
-rw-r--r--tools/lib/traceevent/plugins/plugin_hrtimer.c (renamed from tools/lib/traceevent/plugin_hrtimer.c)0
-rw-r--r--tools/lib/traceevent/plugins/plugin_jbd2.c (renamed from tools/lib/traceevent/plugin_jbd2.c)0
-rw-r--r--tools/lib/traceevent/plugins/plugin_kmem.c (renamed from tools/lib/traceevent/plugin_kmem.c)0
-rw-r--r--tools/lib/traceevent/plugins/plugin_kvm.c (renamed from tools/lib/traceevent/plugin_kvm.c)0
-rw-r--r--tools/lib/traceevent/plugins/plugin_mac80211.c (renamed from tools/lib/traceevent/plugin_mac80211.c)0
-rw-r--r--tools/lib/traceevent/plugins/plugin_sched_switch.c (renamed from tools/lib/traceevent/plugin_sched_switch.c)0
-rw-r--r--tools/lib/traceevent/plugins/plugin_scsi.c (renamed from tools/lib/traceevent/plugin_scsi.c)0
-rw-r--r--tools/lib/traceevent/plugins/plugin_xen.c (renamed from tools/lib/traceevent/plugin_xen.c)0
-rw-r--r--tools/perf/Makefile.config2
-rw-r--r--tools/perf/Makefile.perf6
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c7
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c6
-rw-r--r--tools/perf/arch/arm64/util/dwarf-regs.c1
-rw-r--r--tools/perf/arch/arm64/util/header.c4
-rw-r--r--tools/perf/arch/arm64/util/unwind-libunwind.c2
-rw-r--r--tools/perf/arch/powerpc/util/dwarf-regs.c1
-rw-r--r--tools/perf/arch/powerpc/util/header.c1
-rw-r--r--tools/perf/arch/powerpc/util/kvm-stat.c45
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c1
-rw-r--r--tools/perf/arch/powerpc/util/sym-handling.c1
-rw-r--r--tools/perf/arch/s390/Makefile1
-rw-r--r--tools/perf/arch/s390/util/auxtrace.c1
-rw-r--r--tools/perf/arch/s390/util/machine.c2
-rw-r--r--tools/perf/arch/x86/tests/intel-cqm.c6
-rw-r--r--tools/perf/arch/x86/tests/perf-time-to-tsc.c12
-rw-r--r--tools/perf/arch/x86/tests/rdpmc.c2
-rw-r--r--tools/perf/arch/x86/util/archinsn.c1
-rw-r--r--tools/perf/arch/x86/util/event.c2
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c9
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c17
-rw-r--r--tools/perf/arch/x86/util/machine.c3
-rw-r--r--tools/perf/arch/x86/util/tsc.c2
-rw-r--r--tools/perf/arch/x86/util/unwind-libunwind.c2
-rw-r--r--tools/perf/bench/epoll-ctl.c2
-rw-r--r--tools/perf/bench/epoll-wait.c2
-rw-r--r--tools/perf/bench/futex-hash.c2
-rw-r--r--tools/perf/bench/futex-lock-pi.c2
-rw-r--r--tools/perf/bench/futex-requeue.c2
-rw-r--r--tools/perf/bench/futex-wake-parallel.c3
-rw-r--r--tools/perf/bench/futex-wake.c2
-rw-r--r--tools/perf/bench/numa.c1
-rw-r--r--tools/perf/bench/sched-messaging.c2
-rw-r--r--tools/perf/bench/sched-pipe.c2
-rw-r--r--tools/perf/builtin-annotate.c6
-rw-r--r--tools/perf/builtin-buildid-cache.c5
-rw-r--r--tools/perf/builtin-buildid-list.c5
-rw-r--r--tools/perf/builtin-c2c.c7
-rw-r--r--tools/perf/builtin-config.c1
-rw-r--r--tools/perf/builtin-diff.c9
-rw-r--r--tools/perf/builtin-evlist.c8
-rw-r--r--tools/perf/builtin-inject.c6
-rw-r--r--tools/perf/builtin-kmem.c5
-rw-r--r--tools/perf/builtin-kvm.c37
-rw-r--r--tools/perf/builtin-list.c4
-rw-r--r--tools/perf/builtin-lock.c5
-rw-r--r--tools/perf/builtin-mem.c5
-rw-r--r--tools/perf/builtin-record.c117
-rw-r--r--tools/perf/builtin-report.c6
-rw-r--r--tools/perf/builtin-sched.c17
-rw-r--r--tools/perf/builtin-script.c20
-rw-r--r--tools/perf/builtin-stat.c41
-rw-r--r--tools/perf/builtin-timechart.c5
-rw-r--r--tools/perf/builtin-top.c28
-rw-r--r--tools/perf/builtin-trace.c22
-rw-r--r--tools/perf/jvmti/Build9
-rw-r--r--tools/perf/lib/Makefile36
-rw-r--r--tools/perf/lib/core.c13
-rw-r--r--tools/perf/lib/cpumap.c12
-rw-r--r--tools/perf/lib/evlist.c124
-rw-r--r--tools/perf/lib/evsel.c30
-rw-r--r--tools/perf/lib/include/internal/evlist.h33
-rw-r--r--tools/perf/lib/include/internal/evsel.h33
-rw-r--r--tools/perf/lib/include/internal/lib.h4
-rw-r--r--tools/perf/lib/include/internal/mmap.h32
-rw-r--r--tools/perf/lib/include/perf/core.h2
-rw-r--r--tools/perf/lib/include/perf/cpumap.h1
-rw-r--r--tools/perf/lib/include/perf/evlist.h1
-rw-r--r--tools/perf/lib/lib.c2
-rw-r--r--tools/perf/lib/libperf.map4
-rw-r--r--tools/perf/lib/tests/test-cpumap.c10
-rw-r--r--tools/perf/lib/tests/test-evlist.c10
-rw-r--r--tools/perf/lib/tests/test-evsel.c10
-rw-r--r--tools/perf/lib/tests/test-threadmap.c10
-rw-r--r--tools/perf/perf.c13
-rw-r--r--tools/perf/pmu-events/README22
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json24
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json207
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json52
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json108
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json7
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/cache.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/core.json2
-rw-r--r--tools/perf/pmu-events/jevents.c1
-rw-r--r--tools/perf/tests/backward-ring-buffer.c11
-rw-r--r--tools/perf/tests/bitmap.c2
-rw-r--r--tools/perf/tests/bpf.c9
-rw-r--r--tools/perf/tests/clang.c2
-rw-r--r--tools/perf/tests/code-reading.c13
-rw-r--r--tools/perf/tests/cpumap.c1
-rw-r--r--tools/perf/tests/dso-data.c1
-rw-r--r--tools/perf/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/tests/event-times.c15
-rw-r--r--tools/perf/tests/event_update.c10
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c2
-rw-r--r--tools/perf/tests/hists_common.c2
-rw-r--r--tools/perf/tests/hists_cumulate.c2
-rw-r--r--tools/perf/tests/hists_link.c5
-rw-r--r--tools/perf/tests/hists_output.c2
-rw-r--r--tools/perf/tests/keep-tracking.c14
-rw-r--r--tools/perf/tests/llvm.c1
-rw-r--r--tools/perf/tests/make8
-rw-r--r--tools/perf/tests/mem2node.c2
-rw-r--r--tools/perf/tests/mmap-basic.c8
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c4
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c5
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c11
-rw-r--r--tools/perf/tests/parse-events.c117
-rw-r--r--tools/perf/tests/parse-no-sample-id-all.c2
-rw-r--r--tools/perf/tests/perf-hooks.c1
-rw-r--r--tools/perf/tests/perf-record.c13
-rw-r--r--tools/perf/tests/pmu.c1
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rw-r--r--tools/perf/tests/sdt.c1
-rw-r--r--tools/perf/tests/stat.c1
-rw-r--r--tools/perf/tests/sw-clock.c5
-rw-r--r--tools/perf/tests/switch-tracking.c30
-rw-r--r--tools/perf/tests/task-exit.c11
-rw-r--r--tools/perf/tests/thread-map.c1
-rw-r--r--tools/perf/tests/topology.c7
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c2
-rw-r--r--tools/perf/ui/browser.c1
-rw-r--r--tools/perf/ui/browsers/annotate.c1
-rw-r--r--tools/perf/ui/browsers/header.c1
-rw-r--r--tools/perf/ui/browsers/hists.c6
-rw-r--r--tools/perf/ui/browsers/map.c1
-rw-r--r--tools/perf/ui/browsers/res_sample.c2
-rw-r--r--tools/perf/ui/browsers/scripts.c3
-rw-r--r--tools/perf/ui/gtk/helpline.c1
-rw-r--r--tools/perf/ui/gtk/hists.c1
-rw-r--r--tools/perf/ui/gtk/progress.c1
-rw-r--r--tools/perf/ui/gtk/setup.c3
-rw-r--r--tools/perf/ui/gtk/util.c1
-rw-r--r--tools/perf/ui/helpline.c2
-rw-r--r--tools/perf/ui/hist.c1
-rw-r--r--tools/perf/ui/setup.c2
-rw-r--r--tools/perf/ui/stdio/hist.c1
-rw-r--r--tools/perf/ui/tui/helpline.c1
-rw-r--r--tools/perf/ui/tui/setup.c2
-rw-r--r--tools/perf/ui/tui/util.c1
-rw-r--r--tools/perf/util/Build3
-rw-r--r--tools/perf/util/annotate.c3
-rw-r--r--tools/perf/util/arm-spe.c1
-rw-r--r--tools/perf/util/auxtrace.c12
-rw-r--r--tools/perf/util/auxtrace.h26
-rw-r--r--tools/perf/util/bpf-event.c1
-rw-r--r--tools/perf/util/bpf-event.h15
-rw-r--r--tools/perf/util/bpf-loader.c2
-rw-r--r--tools/perf/util/branch.c2
-rw-r--r--tools/perf/util/branch.h9
-rw-r--r--tools/perf/util/build-id.c3
-rw-r--r--tools/perf/util/callchain.c1
-rw-r--r--tools/perf/util/callchain.h5
-rw-r--r--tools/perf/util/cloexec.c2
-rw-r--r--tools/perf/util/copyfile.c144
-rw-r--r--tools/perf/util/copyfile.h16
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c1
-rw-r--r--tools/perf/util/cs-etm.c4
-rw-r--r--tools/perf/util/data-convert-bt.c5
-rw-r--r--tools/perf/util/data.c3
-rw-r--r--tools/perf/util/debug.c1
-rw-r--r--tools/perf/util/debug.h2
-rw-r--r--tools/perf/util/demangle-java.c1
-rw-r--r--tools/perf/util/demangle-rust.c1
-rw-r--r--tools/perf/util/dwarf-regs.c1
-rw-r--r--tools/perf/util/env.h3
-rw-r--r--tools/perf/util/event.c1109
-rw-r--r--tools/perf/util/event.h77
-rw-r--r--tools/perf/util/evlist.c295
-rw-r--r--tools/perf/util/evlist.h81
-rw-r--r--tools/perf/util/evsel.c484
-rw-r--r--tools/perf/util/evsel.h126
-rw-r--r--tools/perf/util/evsel_config.h50
-rw-r--r--tools/perf/util/evsel_fprintf.c16
-rw-r--r--tools/perf/util/evsel_fprintf.h50
-rw-r--r--tools/perf/util/genelf.h3
-rw-r--r--tools/perf/util/header.c424
-rw-r--r--tools/perf/util/header.h60
-rw-r--r--tools/perf/util/hist.h1
-rw-r--r--tools/perf/util/intel-bts.c6
-rw-r--r--tools/perf/util/intel-pt.c11
-rw-r--r--tools/perf/util/jitdump.c4
-rw-r--r--tools/perf/util/kvm-stat.h4
-rw-r--r--tools/perf/util/libunwind/arm64.c1
-rw-r--r--tools/perf/util/libunwind/x86_32.c1
-rw-r--r--tools/perf/util/llvm-utils.c1
-rw-r--r--tools/perf/util/lzma.c2
-rw-r--r--tools/perf/util/machine.c16
-rw-r--r--tools/perf/util/machine.h15
-rw-r--r--tools/perf/util/memswap.h7
-rw-r--r--tools/perf/util/mmap.c185
-rw-r--r--tools/perf/util/mmap.h77
-rw-r--r--tools/perf/util/namespaces.c18
-rw-r--r--tools/perf/util/namespaces.h2
-rw-r--r--tools/perf/util/parse-events.c9
-rw-r--r--tools/perf/util/parse-events.y4
-rw-r--r--tools/perf/util/perf-hooks.c1
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c148
-rw-r--r--tools/perf/util/pmu.c1
-rw-r--r--tools/perf/util/probe-event.c1
-rw-r--r--tools/perf/util/probe-file.c1
-rw-r--r--tools/perf/util/probe-finder.c19
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/python.c28
-rw-r--r--tools/perf/util/record.c8
-rw-r--r--tools/perf/util/rwsem.c1
-rw-r--r--tools/perf/util/s390-cpumsf.c1
-rw-r--r--tools/perf/util/s390-sample-raw.c1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/session.c92
-rw-r--r--tools/perf/util/session.h5
-rw-r--r--tools/perf/util/sort.c2
-rw-r--r--tools/perf/util/srccode.c2
-rw-r--r--tools/perf/util/stat-shadow.c4
-rw-r--r--tools/perf/util/stat.c62
-rw-r--r--tools/perf/util/stat.h9
-rw-r--r--tools/perf/util/svghelper.c2
-rw-r--r--tools/perf/util/symbol-elf.c5
-rw-r--r--tools/perf/util/symbol-minimal.c3
-rw-r--r--tools/perf/util/symbol.c2
-rw-r--r--tools/perf/util/synthetic-events.c1884
-rw-r--r--tools/perf/util/synthetic-events.h103
-rw-r--r--tools/perf/util/target.c2
-rw-r--r--tools/perf/util/top.c3
-rw-r--r--tools/perf/util/trace-event-info.c2
-rw-r--r--tools/perf/util/trace-event-read.c1
-rw-r--r--tools/perf/util/trace-event.c1
-rw-r--r--tools/perf/util/tsc.h14
-rw-r--r--tools/perf/util/unwind-libdw.c1
-rw-r--r--tools/perf/util/unwind-libunwind-local.c1
-rw-r--r--tools/perf/util/usage.c1
-rw-r--r--tools/perf/util/util.c136
-rw-r--r--tools/perf/util/util.h8
-rw-r--r--tools/perf/util/vdso.c2
-rw-r--r--tools/perf/util/zlib.c4
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c122
-rw-r--r--tools/power/x86/intel-speed-select/isst-core.c25
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c71
-rw-r--r--tools/power/x86/intel-speed-select/isst.h10
-rw-r--r--tools/testing/selftests/.gitignore2
-rw-r--r--tools/testing/selftests/Makefile22
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc2
-rw-r--r--tools/testing/selftests/livepatch/config2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c5
-rw-r--r--tools/testing/selftests/tpm2/Makefile1
-rw-r--r--tools/testing/selftests/watchdog/watchdog-test.c41
-rw-r--r--usr/Makefile3
899 files changed, 17001 insertions, 12537 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-watchdog b/Documentation/ABI/testing/sysfs-class-watchdog
index 6317ade5ad19..675f9b537661 100644
--- a/Documentation/ABI/testing/sysfs-class-watchdog
+++ b/Documentation/ABI/testing/sysfs-class-watchdog
@@ -72,3 +72,37 @@ Description:
It is a read/write file. When read, the currently assigned
pretimeout governor is returned. When written, it sets
the pretimeout governor.
+
+What: /sys/class/watchdog/watchdog1/access_cs0
+Date: August 2019
+Contact: Ivan Mikhaylov <[email protected]>,
+ Alexander Amelkin <[email protected]>
+Description:
+		It is a read/write file. This attribute exists only if the
+		system has booted from the alternate flash chip due to the
+		expiration of a watchdog timer on AST2400/AST2500 when the
+		alternate boot function was enabled with the 'aspeed,alt-boot'
+		devicetree option for that watchdog or with appropriate
+		hardware strapping (WDT2 only).
+
+		When booted from the alternate flash, the 'access_cs0' sysfs node provides:
+ ast2400: a way to get access to the primary SPI flash
+ chip at CS0 after booting from the alternate
+ chip at CS1.
+ ast2500: a way to restore the normal address mapping
+ from (CS0->CS1, CS1->CS0) to (CS0->CS0,
+ CS1->CS1).
+
+		Clearing the boot code selection and timeout counter also
+		resets the chip select line mapping to its initial state. When
+		the SoC is in the normal mapping state (i.e. booted from CS0),
+		clearing those bits has no effect on either version of the SoC.
+		In alternate boot mode (booted from CS1 due to WDT2
+		expiration) the behavior differs as described above.
+
+ This option can be used with wdt2 (watchdog1) only.
+
+ When read, the current status of the boot code selection is
+ shown. When written with any non-zero value, it clears
+ the boot code selection and the timeout counter, which results
+ in chipselect reset for AST2400/AST2500.
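[Illustration, not part of the patch: a minimal userspace sketch of using the attribute described above. It clears the boot code selection by writing a non-zero value; the path and error handling are examples only.]

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Any non-zero value clears the boot code selection and the
	 * timeout counter, resetting the chip select mapping. */
	int fd = open("/sys/class/watchdog/watchdog1/access_cs0", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}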
diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
index 29601d93a1c2..ed35833ad7f0 100644
--- a/Documentation/ABI/testing/sysfs-kernel-slab
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -429,10 +429,15 @@ KernelVersion: 2.6.22
Contact: Pekka Enberg <[email protected]>,
Christoph Lameter <[email protected]>
Description:
- The shrink file is written when memory should be reclaimed from
- a cache. Empty partial slabs are freed and the partial list is
- sorted so the slabs with the fewest available objects are used
- first.
+ The shrink file is used to reclaim unused slab cache
+ memory from a cache. Empty per-cpu or partial slabs
+ are freed and the partial list is sorted so the slabs
+ with the fewest available objects are used first.
+		Only a value of "1" is accepted on write; any other
+		input is considered invalid. Shrinking slab caches
+		can be expensive and may adversely impact other
+		running applications, so it should be used with
+		care.
What: /sys/kernel/slab/cache/slab_size
Date: May 2007
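[Illustration, not part of the patch: shrinking a cache from userspace is a single write of "1" to its shrink file; the cache name "dentry" is only an example.]

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Only the string "1" is accepted; anything else is invalid. */
	int fd = open("/sys/kernel/slab/dentry/shrink", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}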
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 41bdc038dad9..0ae4f564c2d6 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -85,8 +85,10 @@ Brief summary of control files.
memory.oom_control set/show oom controls.
memory.numa_stat show the number of memory usage per numa
node
-
memory.kmem.limit_in_bytes set/show hard limit for kernel memory
+				 This knob is deprecated and should not be
+				 used. It is planned to be removed in the
+				 foreseeable future.
memory.kmem.usage_in_bytes show current kernel memory allocation
memory.kmem.failcnt show the number of kernel memory usage
hits limits
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 254d8a369f32..944e03e29f65 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -809,6 +809,8 @@
enables the feature at boot time. By default, it is
disabled and the system will work mostly the same as a
kernel built without CONFIG_DEBUG_PAGEALLOC.
+			Note: to get the most out of debug_pagealloc error reports, it
+			is useful to also enable the page_owner functionality.
on: enable the feature
debugpat [X86] Enable PAT debugging
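[Illustration, not part of the patch: a boot command line that enables both features could look like the following.]

	debug_pagealloc=on page_owner=on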
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 08af5caf036d..f77de49b1d51 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -42,6 +42,9 @@ String Manipulation
.. kernel-doc:: lib/string.c
:export:
+.. kernel-doc:: include/linux/string.h
+ :internal:
+
.. kernel-doc:: mm/util.c
:functions: kstrdup kstrdup_const kstrndup kmemdup kmemdup_nul memdup_user
vmemdup_user strndup_user memdup_user_nul
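[Illustration, not part of the patch: a kernel-internal sketch of the duplication helpers documented above; struct foo_ctx and foo_set_name() are hypothetical.]

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct foo_ctx {
	const char *name;
};

/* kstrdup_const() avoids copying strings that live in .rodata;
 * kfree_const() knows not to free such strings. */
static int foo_set_name(struct foo_ctx *ctx, const char *src)
{
	const char *name = kstrdup_const(src, GFP_KERNEL);

	if (!name)
		return -ENOMEM;
	kfree_const(ctx->name);
	ctx->name = name;
	return 0;
}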
diff --git a/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
index e9de3756752b..c9a6587fe4bb 100644
--- a/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
@@ -1,7 +1,9 @@
Broadcom BCM2835 I2C controller
Required properties:
-- compatible : Should be "brcm,bcm2835-i2c".
+- compatible : Should be one of:
+ "brcm,bcm2711-i2c"
+ "brcm,bcm2835-i2c"
- reg: Should contain register location and length.
- interrupts: Should contain interrupt.
- clocks : The clock feeding the I2C controller.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
index 3ee5e8f6ee01..3ee5e8f6ee01 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-emev2.txt b/Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt
index 5ed1ea1c7e14..5ed1ea1c7e14 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-emev2.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt b/Documentation/devicetree/bindings/i2c/renesas,iic.txt
index 202602e6e837..202602e6e837 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,iic.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-riic.txt b/Documentation/devicetree/bindings/i2c/renesas,riic.txt
index e26fe3ad86a9..e26fe3ad86a9 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-riic.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,riic.txt
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
index 991728cb46cb..c8501530173c 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
@@ -6,6 +6,8 @@ Required properties:
- "mediatek,mt7622-pwm": found on mt7622 SoC.
- "mediatek,mt7623-pwm": found on mt7623 SoC.
- "mediatek,mt7628-pwm": found on mt7628 SoC.
+ - "mediatek,mt7629-pwm", "mediatek,mt7622-pwm": found on mt7629 SoC.
+ - "mediatek,mt8516-pwm": found on mt8516 SoC.
- reg: physical base address and length of the controller's registers.
- #pwm-cells: must be 2. See pwm.txt in this directory for a description of
the cell format.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sprd.txt b/Documentation/devicetree/bindings/pwm/pwm-sprd.txt
new file mode 100644
index 000000000000..16fa5a096206
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-sprd.txt
@@ -0,0 +1,40 @@
+Spreadtrum PWM controller
+
+The PWM controller on Spreadtrum SoCs provides 4 PWM channels.
+
+Required properties:
+- compatible : Should be "sprd,ums512-pwm".
+- reg: Physical base address and length of the controller's registers.
+- clocks: The phandle and specifier referencing the controller's clocks.
+- clock-names: Should contain the following entries:
+	"pwmn": used to derive the functional clock for PWM channel n (n range: 0 ~ 3).
+	"enablen": the enable clock for PWM channel n (n range: 0 ~ 3).
+- #pwm-cells: Should be 2. See pwm.txt in this directory for a description of
+ the cells format.
+
+Optional properties:
+- assigned-clocks: Reference to the PWM clock entries.
+- assigned-clock-parents: The phandle of the parent clock of each PWM clock.
+
+Example:
+ pwms: pwm@32260000 {
+ compatible = "sprd,ums512-pwm";
+ reg = <0 0x32260000 0 0x10000>;
+ clock-names = "pwm0", "enable0",
+ "pwm1", "enable1",
+ "pwm2", "enable2",
+ "pwm3", "enable3";
+ clocks = <&aon_clk CLK_PWM0>, <&aonapb_gate CLK_PWM0_EB>,
+ <&aon_clk CLK_PWM1>, <&aonapb_gate CLK_PWM1_EB>,
+ <&aon_clk CLK_PWM2>, <&aonapb_gate CLK_PWM2_EB>,
+ <&aon_clk CLK_PWM3>, <&aonapb_gate CLK_PWM3_EB>;
+ assigned-clocks = <&aon_clk CLK_PWM0>,
+ <&aon_clk CLK_PWM1>,
+ <&aon_clk CLK_PWM2>,
+ <&aon_clk CLK_PWM3>;
+ assigned-clock-parents = <&ext_26m>,
+ <&ext_26m>,
+ <&ext_26m>,
+ <&ext_26m>;
+ #pwm-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt b/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt
index 04cbb90a5d3e..28f2cbaf1702 100644
--- a/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt
@@ -23,6 +23,7 @@ Required properties:
Optional property:
- little-endian : If present, the TMU registers are little endian. If absent,
the default is big endian.
+- clocks : the clock that drives the TMU silicon.
Example:
diff --git a/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
new file mode 100644
index 000000000000..3a54f58683a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/allwinner,sun4i-a10-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Watchdog Device Tree Bindings
+
+allOf:
+ - $ref: "watchdog.yaml#"
+
+maintainers:
+ - Chen-Yu Tsai <[email protected]>
+ - Maxime Ripard <[email protected]>
+
+properties:
+ compatible:
+ oneOf:
+ - const: allwinner,sun4i-a10-wdt
+ - const: allwinner,sun6i-a31-wdt
+ - items:
+ - const: allwinner,sun50i-a64-wdt
+ - const: allwinner,sun6i-a31-wdt
+ - items:
+ - const: allwinner,sun50i-h6-wdt
+ - const: allwinner,sun6i-a31-wdt
+ - items:
+ - const: allwinner,suniv-f1c100s-wdt
+ - const: allwinner,sun4i-a10-wdt
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ wdt: watchdog@1c20c90 {
+ compatible = "allwinner,sun4i-a10-wdt";
+ reg = <0x01c20c90 0x10>;
+ interrupts = <24>;
+ clocks = <&osc24M>;
+ timeout-sec = <10>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
index c5077a1f5cb3..d78d4a8fb868 100644
--- a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: must be one of:
- "aspeed,ast2400-wdt"
- "aspeed,ast2500-wdt"
+ - "aspeed,ast2600-wdt"
- reg: physical base address of the controller and length of memory mapped
region
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.txt b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.txt
new file mode 100644
index 000000000000..f902508d6cac
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.txt
@@ -0,0 +1,22 @@
+* Freescale i.MX7ULP Watchdog Timer (WDT) Controller
+
+Required properties:
+- compatible : Should be "fsl,imx7ulp-wdt"
+- reg : Should contain WDT registers location and length
+- interrupts : Should contain WDT interrupt
+- clocks: Should contain a phandle pointing to the gated peripheral clock.
+
+Optional properties:
+- timeout-sec : Contains the watchdog timeout in seconds
+
+Examples:
+
+wdog1: watchdog@403d0000 {
+ compatible = "fsl,imx7ulp-wdt";
+ reg = <0x403d0000 0x10000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
+ assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
+ assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
+ timeout-sec = <40>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt b/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt
deleted file mode 100644
index e65198d82a2b..000000000000
--- a/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Allwinner SoCs Watchdog timer
-
-Required properties:
-
-- compatible : should be one of
- "allwinner,sun4i-a10-wdt"
- "allwinner,sun6i-a31-wdt"
- "allwinner,sun50i-a64-wdt","allwinner,sun6i-a31-wdt"
- "allwinner,sun50i-h6-wdt","allwinner,sun6i-a31-wdt"
- "allwinner,suniv-f1c100s-wdt", "allwinner,sun4i-a10-wdt"
-- reg : Specifies base physical address and size of the registers.
-
-Optional properties:
-- timeout-sec : Contains the watchdog timeout in seconds
-
-Example:
-
-wdt: watchdog@1c20c90 {
- compatible = "allwinner,sun4i-a10-wdt";
- reg = <0x01c20c90 0x10>;
- timeout-sec = <10>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/watchdog.yaml b/Documentation/devicetree/bindings/watchdog/watchdog.yaml
new file mode 100644
index 000000000000..187bf6cb62bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/watchdog.yaml
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/watchdog.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Watchdog Generic Bindings
+
+maintainers:
+ - Guenter Roeck <[email protected]>
+ - Wim Van Sebroeck <[email protected]>
+
+description: |
+ This document describes generic bindings which can be used to
+ describe watchdog devices in a device tree.
+
+properties:
+ $nodename:
+ pattern: "^watchdog(@.*|-[0-9a-f])?$"
+
+ timeout-sec:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Contains the watchdog timeout in seconds.
+
+...
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
index d2c6a5ccf0f5..b19b6a03f91c 100644
--- a/Documentation/filesystems/ceph.txt
+++ b/Documentation/filesystems/ceph.txt
@@ -158,6 +158,20 @@ Mount Options
copies. Currently, it's only used in copy_file_range, which will revert
to the default VFS implementation if this option is used.
+ recover_session=<no|clean>
+	Set the auto-reconnect mode for the case where the client is blacklisted.
+	The available modes are "no" and "clean". The default is "no".
+
+	* no: never attempt to reconnect when the client detects that it has
+	been blacklisted. Operations will generally fail after being blacklisted.
+
+	* clean: the client reconnects to the ceph cluster automatically when it
+	detects that it has been blacklisted. During reconnect, the client drops
+	dirty data/metadata and invalidates page caches and writable file handles.
+	After reconnect, file locks become stale because the MDS loses track
+	of them. If an inode contains any stale file locks, reads and writes on
+	the inode are not allowed until applications release all stale file locks.
+
More Information
================
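[Illustration, not part of the patch: passing the new option to mount(2); the monitor address, mount point and name= credential are placeholders.]

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Reconnect automatically, dropping dirty state, if this
	 * client gets blacklisted. */
	const char *opts = "name=admin,recover_session=clean";

	if (mount("192.168.0.1:6789:/", "/mnt/ceph", "ceph", 0, opts)) {
		perror("mount");
		return 1;
	}
	return 0;
}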
diff --git a/Documentation/vm/split_page_table_lock.rst b/Documentation/vm/split_page_table_lock.rst
index 889b00be469f..ff51f4a5494d 100644
--- a/Documentation/vm/split_page_table_lock.rst
+++ b/Documentation/vm/split_page_table_lock.rst
@@ -54,9 +54,9 @@ Hugetlb-specific helpers:
Support of split page table lock by an architecture
===================================================
-There's no need in special enabling of PTE split page table lock:
-everything required is done by pgtable_page_ctor() and pgtable_page_dtor(),
-which must be called on PTE table allocation / freeing.
+There is no need to specially enable the PTE split page table lock: everything
+required is done by pgtable_pte_page_ctor() and pgtable_pte_page_dtor(), which
+must be called on PTE table allocation / freeing.
Make sure the architecture doesn't use slab allocator for page table
allocation: slab uses page->slab_cache for its pages.
paths: e.g. X86_PAE preallocates a few PMDs on pgd_alloc().
With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
-NOTE: pgtable_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must
+NOTE: pgtable_pte_page_ctor() and pgtable_pmd_page_ctor() can fail -- this must
be handled properly.
page->ptl
@@ -94,7 +94,7 @@ trick:
split lock with enabled DEBUG_SPINLOCK or DEBUG_LOCK_ALLOC, but costs
one more cache line for indirect access;
-The spinlock_t allocated in pgtable_page_ctor() for PTE table and in
+The spinlock_t is allocated in pgtable_pte_page_ctor() for the PTE table and in
pgtable_pmd_page_ctor() for PMD table.
Please, never access page->ptl directly -- use appropriate helper.
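[Illustration, not part of the patch: a simplified sketch of the constructor/destructor pairing described above, following the generic pattern rather than any particular architecture.]

#include <linux/mm.h>

pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	/* Can fail when the split ptl spinlock cannot be allocated. */
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

void pte_free(struct mm_struct *mm, pgtable_t page)
{
	pgtable_pte_page_dtor(page);	/* must run before the free */
	__free_page(page);
}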
diff --git a/Documentation/watchdog/watchdog-parameters.rst b/Documentation/watchdog/watchdog-parameters.rst
index a3985cc5aeda..223c99361a30 100644
--- a/Documentation/watchdog/watchdog-parameters.rst
+++ b/Documentation/watchdog/watchdog-parameters.rst
@@ -301,15 +301,6 @@ ixp4xx_wdt:
-------------------------------------------------
-ks8695_wdt:
- wdt_time:
- Watchdog time in seconds. (default=5)
- nowayout:
- Watchdog cannot be stopped once started
- (default=kernel config parameter)
-
--------------------------------------------------
-
machzwd:
nowayout:
Watchdog cannot be stopped once started
@@ -375,16 +366,6 @@ nic7018_wdt:
-------------------------------------------------
-nuc900_wdt:
- heartbeat:
- Watchdog heartbeats in seconds.
- (default = 15)
- nowayout:
- Watchdog cannot be stopped once started
- (default=kernel config parameter)
-
--------------------------------------------------
-
omap_wdt:
timer_margin:
initial watchdog timeout (in seconds)
diff --git a/MAINTAINERS b/MAINTAINERS
index 54f1286087e9..f6ff76864649 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7865,6 +7865,12 @@ S: Maintained
F: drivers/mfd/lpc_ich.c
F: drivers/gpio/gpio-ich.c
+ICY I2C DRIVER
+M: Max Staudt <[email protected]>
+S: Maintained
+F: drivers/i2c/busses/i2c-icy.c
+
IDE SUBSYSTEM
M: "David S. Miller" <[email protected]>
@@ -9050,10 +9056,11 @@ S: Supported
F: Documentation/security/keys/trusted-encrypted.rst
F: include/keys/trusted-type.h
F: security/keys/trusted.c
-F: security/keys/trusted.h
+F: include/keys/trusted.h
KEYS/KEYRINGS:
M: David Howells <[email protected]>
+M: Jarkko Sakkinen <[email protected]>
S: Maintained
F: Documentation/security/keys/core.rst
@@ -13239,9 +13246,11 @@ F: drivers/media/rc/pwm-ir-tx.c
PWM SUBSYSTEM
M: Thierry Reding <[email protected]>
+R: Uwe Kleine-König <[email protected]>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/thierry.reding/linux-pwm.git
+Q: https://patchwork.ozlabs.org/project/linux-pwm/list/
F: Documentation/driver-api/pwm.rst
F: Documentation/devicetree/bindings/pwm/
F: include/linux/pwm.h
@@ -13250,6 +13259,7 @@ F: drivers/video/backlight/pwm_bl.c
F: include/linux/pwm_backlight.h
F: drivers/gpio/gpio-mvebu.c
F: Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+K: pwm_(config|apply_state|ops)
PXA GPIO DRIVER
M: Robert Jarzmik <[email protected]>
@@ -13791,7 +13801,7 @@ F: drivers/clk/renesas/
RENESAS EMEV2 I2C DRIVER
M: Wolfram Sang <[email protected]>
S: Supported
-F: Documentation/devicetree/bindings/i2c/i2c-emev2.txt
+F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt
F: drivers/i2c/busses/i2c-emev2.c
RENESAS ETHERNET DRIVERS
@@ -13813,15 +13823,15 @@ F: drivers/iio/adc/rcar-gyroadc.c
RENESAS R-CAR I2C DRIVERS
M: Wolfram Sang <[email protected]>
S: Supported
-F: Documentation/devicetree/bindings/i2c/i2c-rcar.txt
-F: Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
+F: Documentation/devicetree/bindings/i2c/renesas,i2c.txt
+F: Documentation/devicetree/bindings/i2c/renesas,iic.txt
F: drivers/i2c/busses/i2c-rcar.c
F: drivers/i2c/busses/i2c-sh_mobile.c
RENESAS RIIC DRIVER
M: Chris Brandt <[email protected]>
S: Supported
-F: Documentation/devicetree/bindings/i2c/i2c-riic.txt
+F: Documentation/devicetree/bindings/i2c/renesas,riic.txt
F: drivers/i2c/busses/i2c-riic.c
RENESAS USB PHY DRIVER
@@ -16065,6 +16075,7 @@ THERMAL
M: Zhang Rui <[email protected]>
M: Eduardo Valentin <[email protected]>
R: Daniel Lezcano <[email protected]>
+R: Amit Kucheria <[email protected]>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
diff --git a/arch/Kconfig b/arch/Kconfig
index 0fcf8ec1e098..5f8a5d84dbbe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -706,6 +706,17 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
and vice-versa 32-bit applications to call 64-bit mmap().
Required for applications doing different bitness syscalls.
+# This allows an architecture to use a set of generic functions to
+# determine the mmap base address, preferring the top-down scheme unless
+# the process is in legacy mode (compat task, unlimited stack size, or
+# sysctl_legacy_va_layout).
+# An architecture that selects this option can provide its own version of:
+# - STACK_RND_MASK
+config ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+ bool
+ depends on MMU
+ select ARCH_HAS_ELF_RANDOMIZE
+
config HAVE_COPY_THREAD_TLS
bool
help
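For reference, the per-architecture arch_pick_mmap_layout() copies deleted
further down (arm, arm64, mips) all share the same shape, which is roughly
what the generic helper gated by this option is expected to provide (a
sketch based on the removed arm64 version; the generic code in mm/ may
differ in detail):

	void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
	{
		unsigned long random_factor = 0UL;

		if (current->flags & PF_RANDOMIZE)
			random_factor = arch_mmap_rnd();

		if (mmap_is_legacy(rlim_stack)) {
			/* compat task, unlimited stack or sysctl_legacy_va_layout */
			mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
			mm->get_unmapped_area = arch_get_unmapped_area;
		} else {
			mm->mmap_base = mmap_base(random_factor, rlim_stack);
			mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		}
	}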
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index 71ded3b7d82d..eb91f1e85629 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -53,6 +53,4 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_page((unsigned long)pmd);
}
-#define check_pgt_cache() do { } while (0)
-
#endif /* _ALPHA_PGALLOC_H */
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 89c2032f9960..065b57f408c3 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -359,11 +359,6 @@ extern void paging_init(void);
#include <asm-generic/pgtable.h>
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index ac23379b7a87..a18ec7f63888 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -68,6 +68,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
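A minimal user-space illustration of the two new hints: MADV_COLD marks the
pages of a range as cold so they are deactivated and reclaimed before other
memory, while MADV_PAGEOUT asks for them to be reclaimed immediately. The
constants may not be in libc headers yet, so a fallback definition is shown;
error handling is elided:

	#include <sys/mman.h>

	#ifndef MADV_COLD
	#define MADV_COLD	20
	#define MADV_PAGEOUT	21
	#endif

	/* hint that buf will not be needed for a while */
	static void drop_range(void *buf, size_t len)
	{
		madvise(buf, len, MADV_COLD);		/* deactivate the pages */
		madvise(buf, len, MADV_PAGEOUT);	/* reclaim them now */
	}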
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 9bdb8ed5b0db..b747f2ec2928 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -108,7 +108,7 @@ pte_alloc_one(struct mm_struct *mm)
return 0;
memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
page = virt_to_page(pte_pg);
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return 0;
}
@@ -123,13 +123,12 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
- pgtable_page_dtor(virt_to_page(ptep));
+ pgtable_pte_page_dtor(virt_to_page(ptep));
free_pages((unsigned long)ptep, __get_order_pte());
}
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
-#define check_pgt_cache() do { } while (0)
#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 1d87c18a2976..7addd0301c51 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -395,11 +395,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 229f2cdd81ca..8a50efb559f3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -34,6 +34,7 @@ config ARM
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
select BUILDTIME_EXTABLE_SORT if MMU
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index a2a68b751971..069da393110c 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -15,8 +15,6 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#define check_pgt_cache() do { } while (0)
-
#ifdef CONFIG_MMU
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index d0de24f06724..010fa1a35a68 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -71,11 +71,6 @@ typedef pte_t *pte_addr_t;
extern unsigned int kobjsize(const void *objp);
/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
*/
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f2e990dc27e7..3ae120cd1715 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -368,8 +368,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#define pgtable_cache_init() do { } while (0)
-
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 20c2f42454b8..614bf829e454 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -140,8 +140,6 @@ static inline void prefetchw(const void *ptr)
#endif
#endif
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
#endif
#endif /* __ASM_ARM_PROCESSOR_H */
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index b75ea15b85c0..669474add486 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -44,7 +44,7 @@ static inline void __tlb_remove_table(void *_table)
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
#ifndef CONFIG_ARM_LPAE
/*
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f934a6739fc0..9485acc520a4 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -319,11 +319,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- return randomize_page(mm->brk, 0x02000000);
-}
-
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6ecbda87ee46..6d89db7895d1 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -204,18 +204,17 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
* coherent with the kernels mapping.
*/
if (!PageHighMem(page)) {
- size_t page_size = PAGE_SIZE << compound_order(page);
- __cpuc_flush_dcache_area(page_address(page), page_size);
+ __cpuc_flush_dcache_area(page_address(page), page_size(page));
} else {
unsigned long i;
if (cache_is_vipt_nonaliasing()) {
- for (i = 0; i < (1 << compound_order(page)); i++) {
+ for (i = 0; i < compound_nr(page); i++) {
void *addr = kmap_atomic(page + i);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_atomic(addr);
}
} else {
- for (i = 0; i < (1 << compound_order(page)); i++) {
+ for (i = 0; i < compound_nr(page); i++) {
void *addr = kmap_high_get(page + i);
if (addr) {
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
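The conversions above (and the matching ones in arm64 and ia64 below) rely
on two new helpers from <linux/mm.h> that fold the open-coded
compound_order() arithmetic; approximately (a sketch of the expected
definitions, details may differ):

	/* number of base pages in a (possibly compound) page */
	static inline unsigned long compound_nr(struct page *page)
	{
		return 1UL << compound_order(page);
	}

	/* size of a (possibly compound) page in bytes */
	static inline unsigned long page_size(struct page *page)
	{
		return PAGE_SIZE << compound_order(page);
	}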
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index f866870db749..b8d912ac9e61 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -17,33 +17,6 @@
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (rlim_stack->rlim_cur == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
- unsigned long gap = rlim_stack->rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return PAGE_ALIGN(TASK_SIZE - gap - rnd);
-}
-
/*
* We need to ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches. We need to ensure that
@@ -171,31 +144,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
-unsigned long arch_mmap_rnd(void)
-{
- unsigned long rnd;
-
- rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-
- return rnd << PAGE_SHIFT;
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
- unsigned long random_factor = 0UL;
-
- if (current->flags & PF_RANDOMIZE)
- random_factor = arch_mmap_rnd();
-
- if (mmap_is_legacy(rlim_stack)) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(random_factor, rlim_stack);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
-}
-
/*
* You really shouldn't be using read() or write() on /dev/mem. This
* might go away in the future.
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25da9b2d9610..48c2888297dd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -731,7 +731,7 @@ static void *__init late_alloc(unsigned long sz)
{
void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));
- if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+ if (!ptr || !pgtable_pte_page_ctor(virt_to_page(ptr)))
BUG();
return ptr;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 37c610963eee..866e05882799 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -15,7 +15,6 @@ config ARM64
select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
- select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -71,6 +70,7 @@ config ARM64
select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
select ARCH_HAS_UBSAN_SANITIZE_ALL
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 14d0bc44d451..172d76fa0245 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -15,8 +15,6 @@
#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
-#define check_pgt_cache() do { } while (0)
-
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
#if CONFIG_PGTABLE_LEVELS > 2
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 57427d17580e..7576df00eb50 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -861,8 +861,6 @@ extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
-static inline void pgtable_cache_init(void) { }
-
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
*/
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index c67848c55009..5623685c7d13 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -280,8 +280,6 @@ static inline void spin_lock_prefetch(const void *ptr)
"nop") : : "p" (ptr));
}
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a95d1fcb7e21..b76df828e6b7 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -44,7 +44,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
tlb_remove_table(tlb, pte);
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 03689c0beb34..a47462def04b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -557,14 +557,6 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ~0xf;
}
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- if (is_compat_task())
- return randomize_page(mm->brk, SZ_32M);
- else
- return randomize_page(mm->brk, SZ_1G);
-}
-
/*
* Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
*/
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index dc19300309d2..ac485163a4a7 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
struct page *page = pte_page(pte);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
- sync_icache_aliases(page_address(page),
- PAGE_SIZE << compound_order(page));
+ sync_icache_aliases(page_address(page), page_size(page));
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index b050641b5139..3028bacbc4e9 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -21,78 +21,6 @@
#include <asm/cputype.h>
/*
- * Leave enough space between the mmap area and the stack to honour ulimit in
- * the face of randomisation.
- */
-#define MIN_GAP (SZ_128M)
-#define MAX_GAP (STACK_TOP/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (rlim_stack->rlim_cur == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-unsigned long arch_mmap_rnd(void)
-{
- unsigned long rnd;
-
-#ifdef CONFIG_COMPAT
- if (test_thread_flag(TIF_32BIT))
- rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
- else
-#endif
- rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
- return rnd << PAGE_SHIFT;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
- unsigned long gap = rlim_stack->rlim_cur;
- unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
-
- /* Values close to RLIM_INFINITY can overflow. */
- if (gap + pad > gap)
- gap += pad;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return PAGE_ALIGN(STACK_TOP - gap - rnd);
-}
-
-/*
- * This function, called very early during the creation of a new process VM
- * image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
- unsigned long random_factor = 0UL;
-
- if (current->flags & PF_RANDOMIZE)
- random_factor = arch_mmap_rnd();
-
- /*
- * Fall back to the standard layout if the personality bit is set, or
- * if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy(rlim_stack)) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(random_factor, rlim_stack);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
-}
-
-/*
* You really shouldn't be using read() or write() on /dev/mem. This might go
* away in the future.
*/
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 53dc6f24cfb7..60c929f3683b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -384,7 +384,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
* folded, and if so pgtable_pmd_page_ctor() becomes nop.
*/
if (shift == PAGE_SHIFT)
- BUG_ON(!pgtable_page_ctor(phys_to_page(pa)));
+ BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
else if (shift == PMD_SHIFT)
BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7548f9ca1f11..4a64089e5771 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -35,7 +35,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
kmem_cache_free(pgd_cache, pgd);
}
-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
{
if (PGD_SIZE == PAGE_SIZE)
return;
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
index 0bd805964ea6..0b6919c00413 100644
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -60,11 +60,6 @@ extern unsigned long empty_zero_page;
#define swapper_pg_dir ((pgd_t *) 0)
/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
 * c6x is !MMU, so define the simplest implementation
*/
#define pgprot_writecombine pgprot_noncached
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index 98c5716708d6..c7c1ed27e348 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -71,12 +71,10 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page(tlb, pte); \
} while (0)
-#define check_pgt_cache() do {} while (0)
-
extern void pagetable_init(void);
extern void pre_mmu_init(void);
extern void pre_trap_init(void);
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index c429a6f347de..0040b3a05b61 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -296,11 +296,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do {} while (0)
-
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index a99caa49d265..4d00152fab58 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -4,7 +4,6 @@
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm-generic/pgtable.h>
-#define pgtable_cache_init() do { } while (0)
extern void paging_init(void);
#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
@@ -35,11 +34,6 @@ extern unsigned int kobjsize(const void *objp);
extern int is_in_rom(unsigned long);
/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
*/
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index d6544dc71258..cc9be514a676 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -13,8 +13,6 @@
#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
-#define check_pgt_cache() do {} while (0)
-
extern unsigned long long kmap_generation;
/*
@@ -96,7 +94,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pgtable_page_dtor((pte)); \
+ pgtable_pte_page_dtor((pte)); \
tlb_remove_page((tlb), (pte)); \
} while (0)
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index a3ff6d24c09e..2fec20ad939e 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -431,9 +431,6 @@ static inline int pte_exec(pte_t pte)
#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-/* I think this is in case we have page table caches; needed by init/main.c */
-#define pgtable_cache_init() do { } while (0)
-
/*
* Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
* interpreted as swap information. The remaining free bits are interpreted as
diff --git a/arch/hexagon/mm/Makefile b/arch/hexagon/mm/Makefile
index 1894263ae5bc..893838499591 100644
--- a/arch/hexagon/mm/Makefile
+++ b/arch/hexagon/mm/Makefile
@@ -3,5 +3,5 @@
# Makefile for Hexagon memory management subsystem
#
-obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
+obj-y := init.o ioremap.o uaccess.o vm_fault.o cache.o
obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index f1f6ebd537b7..c961773a6fff 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -71,19 +71,6 @@ void __init mem_init(void)
init_mm.context.ptbase = __pa(init_mm.pgd);
}
-/*
- * free_initrd_mem - frees... initrd memory.
- * @start - start of init memory
- * @end - end of init memory
- *
- * Apparently has to be passed the address of the initrd memory.
- *
- * Wrapped by #ifdef CONFIG_BLKDEV_INITRD
- */
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-}
-
void sync_icache_dcache(pte_t pte)
{
unsigned long addr;
diff --git a/arch/hexagon/mm/pgalloc.c b/arch/hexagon/mm/pgalloc.c
deleted file mode 100644
index 4d4316140237..000000000000
--- a/arch/hexagon/mm/pgalloc.c
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/init.h>
-
-void __init pgtable_cache_init(void)
-{
-}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 685a3df126ca..16714477eef4 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -72,10 +72,6 @@ config 64BIT
config ZONE_DMA32
def_bool y
-config QUICKLIST
- bool
- default y
-
config MMU
bool
default y
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index c9e481023c25..f4c491044882 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -19,18 +19,19 @@
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>
-#include <linux/quicklist.h>
+
+#include <asm-generic/pgalloc.h>
#include <asm/mmu_context.h>
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return quicklist_alloc(0, GFP_KERNEL, NULL);
+ return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- quicklist_free(0, NULL, pgd);
+ free_page((unsigned long)pgd);
}
#if CONFIG_PGTABLE_LEVELS == 4
@@ -42,12 +43,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return quicklist_alloc(0, GFP_KERNEL, NULL);
+ return (pud_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
- quicklist_free(0, NULL, pud);
+ free_page((unsigned long)pud);
}
#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
#endif /* CONFIG_PGTABLE_LEVELS == 4 */
@@ -60,12 +61,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return quicklist_alloc(0, GFP_KERNEL, NULL);
+ return (pmd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
- quicklist_free(0, NULL, pmd);
+ free_page((unsigned long)pmd);
}
#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
@@ -83,43 +84,6 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
pmd_val(*pmd_entry) = __pa(pte);
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
- struct page *page;
- void *pg;
-
- pg = quicklist_alloc(0, GFP_KERNEL, NULL);
- if (!pg)
- return NULL;
- page = virt_to_page(pg);
- if (!pgtable_page_ctor(page)) {
- quicklist_free(0, NULL, pg);
- return NULL;
- }
- return page;
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- return quicklist_alloc(0, GFP_KERNEL, NULL);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- quicklist_free_page(0, NULL, pte);
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- quicklist_free(0, NULL, pte);
-}
-
-static inline void check_pgt_cache(void)
-{
- quicklist_trim(0, NULL, 25, 16);
-}
-
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
#endif /* _ASM_IA64_PGALLOC_H */
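The quicklist calls are replaced with plain page allocations; the deleted
PTE-level helpers now come from <asm-generic/pgalloc.h>, which does the same
ctor/dtor pairing sketched for the split-ptl documentation above. The gfp
constants used there are, approximately (assumed definitions, not a verbatim
copy of the header):

	#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
	#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)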
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index b1e7468eb65a..d602e7c622db 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -566,11 +566,6 @@ extern struct page *zero_page_memmap_ptr;
#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index f10208478131..8e91c86e8072 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -583,6 +583,7 @@ void ia64_process_pending_intr(void)
static irqreturn_t dummy_handler (int irq, void *dev_id)
{
BUG();
+ return IRQ_NONE;
}
static struct irqaction ipi_irqaction = {
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index db09a693f094..5b00dc3898e1 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -108,7 +108,6 @@ setup_per_cpu_areas(void)
struct pcpu_group_info *gi;
unsigned int cpu;
ssize_t static_size, reserved_size, dyn_size;
- int rc;
ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
if (!ai)
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 219fc640414b..4f33f6e7e206 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -186,7 +186,7 @@ void __init setup_per_cpu_areas(void)
unsigned long base_offset;
unsigned int cpu;
ssize_t static_size, reserved_size, dyn_size;
- int node, prev_node, unit, nr_units, rc;
+ int node, prev_node, unit, nr_units;
ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
if (!ai)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 678b98a09c85..bf9df2625bc8 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -64,7 +64,7 @@ __ia64_sync_icache_dcache (pte_t pte)
if (test_bit(PG_arch_1, &page->flags))
return; /* i-cache is already coherent with d-cache */
- flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+ flush_icache_range(addr, addr + page_size(page));
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 4399d712f6db..b34d44d666a4 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -41,7 +41,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
@@ -54,7 +54,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
if (!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -73,7 +73,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
static inline void pte_free(struct mm_struct *mm, struct page *page)
{
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index d04d9ba9b976..acab315c851f 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -36,7 +36,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if(!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -51,7 +51,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
static inline void pte_free(struct mm_struct *mm, pgtable_t page)
{
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
@@ -60,7 +60,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index fde4534b974f..646c174fff99 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -176,11 +176,4 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot);
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-#define check_pgt_cache() do { } while (0)
-
#endif /* _M68K_PGTABLE_H */
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index fc3a96c77bd8..c18165b0d904 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -45,11 +45,6 @@ extern void paging_init(void);
#define ZERO_PAGE(vaddr) (virt_to_page(0))
/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init() do { } while (0)
-
-/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
*/
@@ -60,6 +55,4 @@ extern void paging_init(void);
#include <asm-generic/pgtable.h>
-#define check_pgt_cache() do { } while (0)
-
#endif /* _M68KNOMMU_PGTABLE_H */
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 1a8ddbd0d23c..856121122b91 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -21,7 +21,7 @@ extern const char bad_pmd_string[];
#define __pte_free_tlb(tlb,pte,addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 632c9477a0f6..c9c4be822456 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -5,15 +5,18 @@ config MICROBLAZE
select ARCH_NO_SWAP
select ARCH_HAS_BINFMT_FLAT if !MMU
select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
+ select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_UNCACHED_SEGMENT if !MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select TIMER_OF
select CLONE_BACKWARDS3
select COMMON_CLK
+ select DMA_DIRECT_REMAP if MMU
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
index 5a8a9d090c37..5b236527176e 100644
--- a/arch/microblaze/boot/dts/system.dts
+++ b/arch/microblaze/boot/dts/system.dts
@@ -18,7 +18,6 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "xlnx,microblaze";
- hard-reset-gpios = <&LEDs_8Bit 2 1>;
model = "testing";
DDR2_SDRAM: memory@90000000 {
device_type = "memory";
@@ -281,6 +280,21 @@
gpios = <&LEDs_8Bit 7 1>;
};
} ;
+
+ gpio-restart {
+ compatible = "gpio-restart";
+ /*
+	 * FIXME: is this GPIO active low or active high?
+	 * The current flag (1) indicates active low.
+	 * The delay values are placeholders; adjust them to the
+	 * datasheet or by trial and error on real hardware.
+ */
+ gpios = <&LEDs_8Bit 2 1>;
+ active-delay = <100>;
+ inactive-delay = <10>;
+ wait-delay = <100>;
+ };
+
RS232_Uart_1: serial@84000000 {
clock-frequency = <125000000>;
compatible = "xlnx,xps-uartlite-1.00.a";
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 92fd4e95b488..654edfdc7867 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -5,15 +5,10 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_KALLSYMS_ALL=y
# CONFIG_BASE_FULL is not set
+CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_EFI_PARTITION is not set
CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
@@ -25,14 +20,19 @@ CONFIG_MMU=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE_FORCE=y
CONFIG_HIGHMEM=y
-CONFIG_PCI=y
CONFIG_PCI_XILINX=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_EFI_PARTITION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
CONFIG_BRIDGE=m
+CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -41,6 +41,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
CONFIG_XILINX_LL_TEMAC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -59,6 +60,8 @@ CONFIG_SPI_XILINX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_XILINX=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_XILINX_WATCHDOG=y
@@ -74,8 +77,8 @@ CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
CONFIG_CIFS=y
-CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
+CONFIG_ENCRYPTED_KEYS=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SLAB=y
CONFIG_DETECT_HUNG_TASK=y
@@ -83,6 +86,3 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_KGDB=y
CONFIG_KGDB_TESTS=y
CONFIG_KGDB_KDB=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_KEYS=y
-CONFIG_ENCRYPTED_KEYS=y
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index 06d69a6e192d..377de39ccb8c 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -7,15 +7,10 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_KALLSYMS_ALL=y
# CONFIG_BASE_FULL is not set
+CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_EFI_PARTITION is not set
CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
@@ -25,13 +20,18 @@ CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
CONFIG_HZ_100=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE_FORCE=y
-CONFIG_PCI=y
CONFIG_PCI_XILINX=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_EFI_PARTITION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
@@ -62,6 +62,8 @@ CONFIG_SPI_XILINX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_XILINX=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_XILINX_WATCHDOG=y
@@ -75,11 +77,6 @@ CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_EARLY_PRINTK=y
CONFIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=y
CONFIG_CRYPTO_ECB=y
@@ -87,3 +84,7 @@ CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_DES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_SPINLOCK=y
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index c7968139486f..86c95b2a1ce1 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -40,7 +40,6 @@ extern void iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_fullcache(addr, size) ioremap((addr), (size))
#define ioremap_wc(addr, size) ioremap((addr), (size))
#define ioremap_wt(addr, size) ioremap((addr), (size))
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index f4cc9ffc449e..7ecb05baa601 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -21,83 +21,23 @@
#include <asm/cache.h>
#include <asm/pgtable.h>
-#define PGDIR_ORDER 0
-
-/*
- * This is handled very differently on MicroBlaze since out page tables
- * are all 0's and I want to be able to use these zero'd pages elsewhere
- * as well - it gives us quite a speedup.
- * -- Cort
- */
-extern struct pgtable_cache_struct {
- unsigned long *pgd_cache;
- unsigned long *pte_cache;
- unsigned long pgtable_cache_sz;
-} quicklists;
-
-#define pgd_quicklist (quicklists.pgd_cache)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (quicklists.pte_cache)
-#define pgtable_cache_size (quicklists.pgtable_cache_sz)
-
-extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
-extern atomic_t zero_sz; /* # currently pre-zero'd pages */
-extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
-extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
-extern atomic_t zerototal; /* # pages zero'd over time */
-
-#define zero_quicklist (zero_cache)
-#define zero_cache_sz (zero_sz)
-#define zero_cache_calls (zeropage_calls)
-#define zero_cache_hits (zeropage_hits)
-#define zero_cache_total (zerototal)
-
-/*
- * return a pre-zero'd page from the list,
- * return NULL if none available -- Cort
- */
-extern unsigned long get_zero_page_fast(void);
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#include <asm-generic/pgalloc.h>
extern void __bad_pte(pmd_t *pmd);
-static inline pgd_t *get_pgd_slow(void)
+static inline pgd_t *get_pgd(void)
{
- pgd_t *ret;
-
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
- if (ret != NULL)
- clear_page(ret);
- return ret;
+ return (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0);
}
-static inline pgd_t *get_pgd_fast(void)
-{
- unsigned long *ret;
-
- ret = pgd_quicklist;
- if (ret != NULL) {
- pgd_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
- return (pgd_t *)ret;
-}
-
-static inline void free_pgd_fast(pgd_t *pgd)
-{
- *(unsigned long **)pgd = pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd(pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
-#define pgd_free(mm, pgd) free_pgd_fast(pgd)
-#define pgd_alloc(mm) get_pgd_fast()
+#define pgd_free(mm, pgd) free_pgd(pgd)
+#define pgd_alloc(mm) get_pgd()
#define pmd_pgtable(pmd) pmd_page(pmd)
@@ -110,50 +50,6 @@ static inline void free_pgd_slow(pgd_t *pgd)
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
-static inline struct page *pte_alloc_one(struct mm_struct *mm)
-{
- struct page *ptepage;
-
-#ifdef CONFIG_HIGHPTE
- int flags = GFP_KERNEL | __GFP_HIGHMEM;
-#else
- int flags = GFP_KERNEL;
-#endif
-
- ptepage = alloc_pages(flags, 0);
- if (!ptepage)
- return NULL;
- clear_highpage(ptepage);
- if (!pgtable_page_ctor(ptepage)) {
- __free_page(ptepage);
- return NULL;
- }
- return ptepage;
-}
-
-static inline void pte_free_fast(pte_t *pte)
-{
- *(unsigned long **)pte = pte_quicklist;
- pte_quicklist = (unsigned long *) pte;
- pgtable_cache_size++;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free_slow(struct page *ptepage)
-{
- __free_page(ptepage);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
-{
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
#define pmd_populate(mm, pmd, pte) \
@@ -171,10 +67,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
#define pgd_populate(mm, pmd, pte) BUG()
-extern int do_check_pgt_cache(int, int);
-
#endif /* CONFIG_MMU */
-#define check_pgt_cache() do { } while (0)
-
#endif /* _ASM_MICROBLAZE_PGALLOC_H */
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 142d3f004848..954b69af451f 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -46,8 +46,6 @@ extern int mem_init_done;
#define swapper_pg_dir ((pgd_t *) NULL)
-#define pgtable_cache_init() do {} while (0)
-
#define arch_enter_lazy_cpu_mode() do {} while (0)
#define pgprot_noncached_wc(prot) prot
@@ -526,11 +524,6 @@ extern unsigned long iopa(unsigned long addr);
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
void do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index bff2a71c828a..a1f206b90753 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -163,44 +163,15 @@ extern long __user_bad(void);
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
-#define get_user(x, ptr) \
- __get_user_check((x), (ptr), sizeof(*(ptr)))
-
-#define __get_user_check(x, ptr, size) \
-({ \
- unsigned long __gu_val = 0; \
- const typeof(*(ptr)) __user *__gu_addr = (ptr); \
- int __gu_err = 0; \
- \
- if (access_ok(__gu_addr, size)) { \
- switch (size) { \
- case 1: \
- __get_user_asm("lbu", __gu_addr, __gu_val, \
- __gu_err); \
- break; \
- case 2: \
- __get_user_asm("lhu", __gu_addr, __gu_val, \
- __gu_err); \
- break; \
- case 4: \
- __get_user_asm("lw", __gu_addr, __gu_val, \
- __gu_err); \
- break; \
- default: \
- __gu_err = __user_bad(); \
- break; \
- } \
- } else { \
- __gu_err = -EFAULT; \
- } \
- x = (__force typeof(*(ptr)))__gu_val; \
- __gu_err; \
+#define get_user(x, ptr) ({ \
+ const typeof(*(ptr)) __user *__gu_ptr = (ptr); \
+ access_ok(__gu_ptr, sizeof(*__gu_ptr)) ? \
+ __get_user(x, __gu_ptr) : -EFAULT; \
})
#define __get_user(x, ptr) \
({ \
unsigned long __gu_val = 0; \
- /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
long __gu_err; \
switch (sizeof(*(ptr))) { \
case 1: \
@@ -212,6 +183,11 @@ extern long __user_bad(void);
case 4: \
__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
break; \
+ case 8: \
+ __gu_err = __copy_from_user(&__gu_val, ptr, 8); \
+ if (__gu_err) \
+ __gu_err = -EFAULT; \
+ break; \
default: \
/* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
} \
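With the new case 8, a 64-bit get_user() is copied via __copy_from_user()
instead of falling through to __user_bad(); for example (a hypothetical
caller):

	u64 val;

	if (get_user(val, (u64 __user *)arg))
		return -EFAULT;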
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index fcbe1daf6316..5f4722908164 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -8,83 +8,9 @@
*/
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/of_platform.h>
-
-/* Trigger specific functions */
-#ifdef CONFIG_GPIOLIB
-
-#include <linux/of_gpio.h>
-
-static int handle; /* reset pin handle */
-static unsigned int reset_val;
-
-static int of_platform_reset_gpio_probe(void)
-{
- int ret;
- handle = of_get_named_gpio(of_find_node_by_path("/"),
- "hard-reset-gpios", 0);
-
- if (!gpio_is_valid(handle)) {
- pr_info("Skipping unavailable RESET gpio %d (%s)\n",
- handle, "reset");
- return -ENODEV;
- }
-
- ret = gpio_request(handle, "reset");
- if (ret < 0) {
- pr_info("GPIO pin is already allocated\n");
- return ret;
- }
-
- /* get current setup value */
- reset_val = gpio_get_value(handle);
- /* FIXME maybe worth to perform any action */
- pr_debug("Reset: Gpio output state: 0x%x\n", reset_val);
-
- /* Setup GPIO as output */
- ret = gpio_direction_output(handle, 0);
- if (ret < 0)
- goto err;
-
- /* Setup output direction */
- gpio_set_value(handle, 0);
-
- pr_info("RESET: Registered gpio device: %d, current val: %d\n",
- handle, reset_val);
- return 0;
-err:
- gpio_free(handle);
- return ret;
-}
-device_initcall(of_platform_reset_gpio_probe);
-
-
-static void gpio_system_reset(void)
-{
- if (gpio_is_valid(handle))
- gpio_set_value(handle, 1 - reset_val);
- else
- pr_notice("Reset GPIO unavailable - halting!\n");
-}
-#else
-static void gpio_system_reset(void)
-{
- pr_notice("No reset GPIO present - halting!\n");
-}
-
-void of_platform_reset_gpio_probe(void)
-{
- return;
-}
-#endif
-
-void machine_restart(char *cmd)
-{
- pr_notice("Machine restart...\n");
- gpio_system_reset();
- while (1)
- ;
-}
+#include <linux/reboot.h>
void machine_shutdown(void)
{
@@ -106,3 +32,12 @@ void machine_power_off(void)
while (1)
;
}
+
+void machine_restart(char *cmd)
+{
+ do_kernel_restart(cmd);
+ /* Give the restart hook 1 s to take us down */
+ mdelay(1000);
+ pr_emerg("Reboot failed -- System halted\n");
+ while (1);
+}
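machine_restart() now defers to the restart notifier chain that
do_kernel_restart() walks; the gpio-restart node added to the device tree
above is serviced by a driver that registers such a handler. A hedged sketch
of a restart-handler registration (names are illustrative):

	#include <linux/notifier.h>
	#include <linux/reboot.h>

	static int board_restart(struct notifier_block *nb,
				 unsigned long action, void *data)
	{
		/* toggle the board's reset line here */
		return NOTIFY_DONE;
	}

	static struct notifier_block board_restart_nb = {
		.notifier_call	= board_restart,
		.priority	= 128,	/* default restart priority */
	};

	/* typically from a driver's probe(): */
	register_restart_handler(&board_restart_nb);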
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index bc7042209c57..8c5f0c332d8b 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -4,217 +4,56 @@
* Copyright (C) 2010 Michal Simek <[email protected]>
* Copyright (C) 2010 PetaLogix
* Copyright (C) 2005 John Williams <[email protected]>
- *
- * Based on PowerPC version derived from arch/arm/mm/consistent.c
- * Copyright (C) 2001 Dan Malek ([email protected])
- * Copyright (C) 2000 Russell King
*/
-#include <linux/export.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/stddef.h>
-#include <linux/vmalloc.h>
#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/memblock.h>
-#include <linux/highmem.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
#include <linux/dma-noncoherent.h>
-
-#include <asm/pgalloc.h>
-#include <linux/io.h>
-#include <linux/hardirq.h>
-#include <linux/mmu_context.h>
-#include <asm/mmu.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
-#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
-#ifndef CONFIG_MMU
-/* I have to use dcache values because I can't relate on ram size */
-# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
-#endif
-
-/*
- * Consistent memory allocators. Used for DMA devices that want to
- * share uncached memory with the processor core.
- * My crufty no-MMU approach is simple. In the HW platform we can optionally
- * mirror the DDR up above the processor cacheable region. So, memory accessed
- * in this mirror region will not be cached. It's alloced from the same
- * pool as normal memory, but the handle we return is shifted up into the
- * uncached region. This will no doubt cause big problems if memory allocated
- * here is not also freed properly. -- JW
- */
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
+void arch_dma_prep_coherent(struct page *page, size_t size)
{
- unsigned long order, vaddr;
- void *ret;
- unsigned int i, err = 0;
- struct page *page, *end;
-
-#ifdef CONFIG_MMU
- phys_addr_t pa;
- struct vm_struct *area;
- unsigned long va;
-#endif
-
- if (in_interrupt())
- BUG();
-
- /* Only allocate page size areas. */
- size = PAGE_ALIGN(size);
- order = get_order(size);
-
- vaddr = __get_free_pages(gfp | __GFP_ZERO, order);
- if (!vaddr)
- return NULL;
+ phys_addr_t paddr = page_to_phys(page);
- /*
- * we need to ensure that there are no cachelines in use,
- * or worse dirty in this area.
- */
- flush_dcache_range(virt_to_phys((void *)vaddr),
- virt_to_phys((void *)vaddr) + size);
+ flush_dcache_range(paddr, paddr + size);
+}
#ifndef CONFIG_MMU
- ret = (void *)vaddr;
- /*
- * Here's the magic! Note if the uncached shadow is not implemented,
- * it's up to the calling code to also test that condition and make
- * other arranegments, such as manually flushing the cache and so on.
- */
-# ifdef CONFIG_XILINX_UNCACHED_SHADOW
- ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
-# endif
- if ((unsigned int)ret > cpuinfo.dcache_base &&
- (unsigned int)ret < cpuinfo.dcache_high)
- pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
-
- /* dma_handle is same as physical (shadowed) address */
- *dma_handle = (dma_addr_t)ret;
+/*
+ * Consistent memory allocators. Used for DMA devices that want to share
+ * uncached memory with the processor core. My crufty no-MMU approach is
+ * simple. In the HW platform we can optionally mirror the DDR up above the
+ * processor cacheable region. So, memory accessed in this mirror region will
+ * not be cached. It's alloced from the same pool as normal memory, but the
+ * handle we return is shifted up into the uncached region. This will no doubt
+ * cause big problems if memory allocated here is not also freed properly. -- JW
+ *
+ * I have to use dcache values because I can't rely on the RAM size:
+ */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#else
- /* Allocate some common virtual space to map the new pages. */
- area = get_vm_area(size, VM_ALLOC);
- if (!area) {
- free_pages(vaddr, order);
- return NULL;
- }
- va = (unsigned long) area->addr;
- ret = (void *)va;
-
- /* This gives us the real physical address of the first page. */
- *dma_handle = pa = __virt_to_phys(vaddr);
-#endif
-
- /*
- * free wasted pages. We skip the first page since we know
- * that it will have count = 1 and won't require freeing.
- * We also mark the pages in use as reserved so that
- * remap_page_range works.
- */
- page = virt_to_page(vaddr);
- end = page + (1 << order);
-
- split_page(page, order);
-
- for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
-#ifdef CONFIG_MMU
- /* MS: This is the whole magic - use cache inhibit pages */
- err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
-#endif
+#define UNCACHED_SHADOW_MASK 0
+#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
- SetPageReserved(page);
- page++;
- }
-
- /* Free the otherwise unused pages. */
- while (page < end) {
- __free_page(page);
- page++;
- }
-
- if (err) {
- free_pages(vaddr, order);
- return NULL;
- }
-
- return ret;
-}
-
-#ifdef CONFIG_MMU
-static pte_t *consistent_virt_to_pte(void *vaddr)
+void *uncached_kernel_address(void *ptr)
{
- unsigned long addr = (unsigned long)vaddr;
-
- return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
-}
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
- dma_addr_t dma_addr)
-{
- pte_t *ptep = consistent_virt_to_pte(vaddr);
-
- if (pte_none(*ptep) || !pte_present(*ptep))
- return 0;
+ unsigned long addr = (unsigned long)ptr;
- return pte_pfn(*ptep);
+ addr |= UNCACHED_SHADOW_MASK;
+ if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
+ pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
+ return (void *)addr;
}
-#endif
-/*
- * free page(s) as defined by the above mapping.
- */
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, unsigned long attrs)
+void *cached_kernel_address(void *ptr)
{
- struct page *page;
-
- if (in_interrupt())
- BUG();
-
- size = PAGE_ALIGN(size);
-
-#ifndef CONFIG_MMU
- /* Clear SHADOW_MASK bit in address, and free as per usual */
-# ifdef CONFIG_XILINX_UNCACHED_SHADOW
- vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
-# endif
- page = virt_to_page(vaddr);
-
- do {
- __free_reserved_page(page);
- page++;
- } while (size -= PAGE_SIZE);
-#else
- do {
- pte_t *ptep = consistent_virt_to_pte(vaddr);
- unsigned long pfn;
-
- if (!pte_none(*ptep) && pte_present(*ptep)) {
- pfn = pte_pfn(*ptep);
- pte_clear(&init_mm, (unsigned int)vaddr, ptep);
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- __free_reserved_page(page);
- }
- }
- vaddr += PAGE_SIZE;
- } while (size -= PAGE_SIZE);
+ unsigned long addr = (unsigned long)ptr;
- /* flush tlb */
- flush_tlb_all();
-#endif
+ return (void *)(addr & ~UNCACHED_SHADOW_MASK);
}
+#endif /* CONFIG_MMU */
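With ARCH_HAS_UNCACHED_SEGMENT selected, the common DMA code takes over the
allocation itself and is expected to use the two hooks kept here roughly as
follows (a sketch of the generic !MMU path, not the exact kernel/dma code):

	struct page *page = alloc_pages(gfp | __GFP_ZERO, get_order(size));

	if (!page)
		return NULL;
	arch_dma_prep_coherent(page, size);	/* write the cachelines back */
	*dma_handle = page_to_phys(page);	/* phys == dma on microblaze */
	return uncached_kernel_address(page_address(page));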
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 8fe54fda31dc..010bb9cee2e4 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -44,10 +44,6 @@ unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct quicklists;
-#endif
-
static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cc8e2b1032a5..a0bd9bdb5f83 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -5,7 +5,6 @@ config MIPS
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
select ARCH_CLOCKSOURCE_DATA
- select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_SUPPORTS_UPROBES
@@ -13,6 +12,7 @@ config MIPS
select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index aa16b85ddffc..166842337eb2 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define __pte_free_tlb(tlb,pte,address) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
@@ -105,8 +105,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
#endif /* __PAGETABLE_PUD_FOLDED */
-#define check_pgt_cache() do { } while (0)
-
extern void pagetable_init(void);
#endif /* _ASM_PGALLOC_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 4dca733d5076..f85bd5b15f51 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -661,9 +661,4 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
#endif /* _ASM_PGTABLE_H */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index aca909bd7841..fba18d4a9190 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -29,11 +29,6 @@
extern unsigned int vced_count, vcei_count;
-/*
- * MIPS does have an arch_pick_mmap_layout()
- */
-#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
-
#ifdef CONFIG_32BIT
#ifdef CONFIG_KVM_GUEST
/* User space process size is limited to 1GB in KVM Guest Mode */
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index c2b40969eb1f..57dc2ac4f8bd 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -95,6 +95,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index d79f2b432318..00fe90c6db3e 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -20,33 +20,6 @@
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (rlim_stack->rlim_cur == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
- unsigned long gap = rlim_stack->rlim_cur;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return PAGE_ALIGN(TASK_SIZE - gap - rnd);
-}
-
#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))
@@ -144,63 +117,6 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
addr0, len, pgoff, flags, DOWN);
}
-unsigned long arch_mmap_rnd(void)
-{
- unsigned long rnd;
-
-#ifdef CONFIG_COMPAT
- if (TASK_IS_32BIT_ADDR)
- rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
- else
-#endif /* CONFIG_COMPAT */
- rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-
- return rnd << PAGE_SHIFT;
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
- unsigned long random_factor = 0UL;
-
- if (current->flags & PF_RANDOMIZE)
- random_factor = arch_mmap_rnd();
-
- if (mmap_is_legacy(rlim_stack)) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(random_factor, rlim_stack);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
-}
-
-static inline unsigned long brk_rnd(void)
-{
- unsigned long rnd = get_random_long();
-
- rnd = rnd << PAGE_SHIFT;
- /* 8MB for 32bit, 256MB for 64bit */
- if (TASK_IS_32BIT_ADDR)
- rnd = rnd & 0x7ffffful;
- else
- rnd = rnd & 0xffffffful;
-
- return rnd;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- unsigned long base = mm->brk;
- unsigned long ret;
-
- ret = PAGE_ALIGN(base + brk_rnd());
-
- if (ret < mm->brk)
- return mm->brk;
-
- return ret;
-}
-
bool __virt_addr_valid(const volatile void *kaddr)
{
unsigned long vaddr = (unsigned long)kaddr;
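
With ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT selected in the Kconfig hunk above,
the helpers deleted here are supplied by common code instead. The shared logic
has essentially the shape of what is removed; a sketch of the generic
behaviour (not the verbatim mm/util.c source, which additionally accounts for
the stack guard gap):

/* Sketch: top-down mmap base selection under an assumed MIN_GAP/MAX_GAP
 * clamp, matching the MIPS code removed above.
 */
static unsigned long mmap_base_sketch(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	/* mmap grows down from just below the clamped, randomized stack */
	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
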
diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h
index e78b43d8389f..37125e6884d7 100644
--- a/arch/nds32/include/asm/pgalloc.h
+++ b/arch/nds32/include/asm/pgalloc.h
@@ -23,8 +23,6 @@
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
-#define check_pgt_cache() do { } while (0)
-
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
pgtable_t pte;
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index c70cc56bec09..0588ec99725c 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -403,8 +403,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
* into virtual address `from'
*/
-#define pgtable_cache_init() do { } while (0)
-
#endif /* !__ASSEMBLY__ */
#endif /* _ASMNDS32_PGTABLE_H */
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 4bc8cf72067e..0b146d773c85 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -41,10 +41,8 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
-#define check_pgt_cache() do { } while (0)
-
#endif /* _ASM_NIOS2_PGALLOC_H */
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 95237b7f6fc1..99985d8b7166 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -291,8 +291,6 @@ static inline void pte_clear(struct mm_struct *mm,
#include <asm-generic/pgtable.h>
-#define pgtable_cache_init() do { } while (0)
-
extern void __init paging_init(void);
extern void __init mmu_init(void);
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 3d4b397c2d06..da12a4c38c4b 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -75,7 +75,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
if (!pte)
return NULL;
clear_page(page_address(pte));
- if (!pgtable_page_ctor(pte)) {
+ if (!pgtable_pte_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
@@ -89,18 +89,16 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
__free_page(pte);
}
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
#define pmd_pgtable(pmd) pmd_page(pmd)
-#define check_pgt_cache() do { } while (0)
-
#endif
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 2fe9ff5b5d6f..248d22d8faa7 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -443,11 +443,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#include <asm-generic/pgtable.h>
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
typedef pte_t *pte_addr_t;
#endif /* __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 4f2059a50fae..d98647c29b74 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -124,6 +124,4 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)
-#define check_pgt_cache() do { } while (0)
-
#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 6d58c1739b42..4ac374b3a99f 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -132,8 +132,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define PTRS_PER_PTE (1UL << BITS_PER_PTE)
/* Definitions for 2nd level */
-#define pgtable_cache_init() do { } while (0)
-
#define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index c98162f494db..6fd8871e4081 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -48,6 +48,9 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 2b2c60a1a66d..6dd78a2dc03a 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -64,8 +64,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) pgtable_cache[shift]
-static inline void check_pgt_cache(void) { }
-
#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgalloc.h>
#else
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 8b7865a2d576..4053b2ab427c 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -87,7 +87,6 @@ extern unsigned long ioremap_bot;
unsigned long vmalloc_to_phys(void *vmalloc_addr);
void pgtable_cache_add(unsigned int shift);
-void pgtable_cache_init(void);
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 3410ea9f4de1..6c123760164e 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1748,7 +1748,7 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
/*
* IF we try to do a HUGE PTE update after a withdraw is done.
* we will find the below NULL. This happens when we do
- * split_huge_page_pmd
+ * split_huge_pmd
*/
if (!hpte_slot_array)
return;
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index b056cae3388b..56cc84520577 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -129,11 +129,8 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
* Allow to use larger than 64k IOMMU pages. Only do that
* if we are backed by hugetlb.
*/
- if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
- struct page *head = compound_head(page);
-
- pageshift = compound_order(head) + PAGE_SHIFT;
- }
+ if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+ pageshift = page_shift(compound_head(page));
mem->pageshift = min(mem->pageshift, pageshift);
/*
* We don't need struct page reference any more, switch
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a8953f108808..73d4873fc7f8 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -667,7 +667,7 @@ void flush_dcache_icache_hugepage(struct page *page)
BUG_ON(!PageCompound(page));
- for (i = 0; i < (1UL << compound_order(page)); i++) {
+ for (i = 0; i < compound_nr(page); i++) {
if (!PageHighMem(page)) {
__flush_dcache_icache(page_address(page+i));
} else {
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index a7b05214760c..ee4bd6d38602 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -25,7 +25,7 @@ void pte_frag_destroy(void *pte_frag)
count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
/* We allow PTE_FRAG_NR fragments from a PTE page */
if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
}
@@ -61,7 +61,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
if (!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -113,7 +113,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
if (atomic_dec_and_test(&page->pt_frag_refcount)) {
if (!kernel)
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 065ff14b76e1..1d93e55a2de1 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -10,6 +10,8 @@
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
@@ -20,7 +22,6 @@
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>
-#include <linux/parser.h>
#include <asm/prom.h>
#include <asm/spu.h>
@@ -30,7 +31,7 @@
#include "spufs.h"
struct spufs_sb_info {
- int debug;
+ bool debug;
};
static struct kmem_cache *spufs_inode_cache;
@@ -574,16 +575,27 @@ long spufs_create(struct path *path, struct dentry *dentry,
}
/* File system initialization */
+struct spufs_fs_context {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+};
+
enum {
- Opt_uid, Opt_gid, Opt_mode, Opt_debug, Opt_err,
+ Opt_uid, Opt_gid, Opt_mode, Opt_debug,
+};
+
+static const struct fs_parameter_spec spufs_param_specs[] = {
+ fsparam_u32 ("gid", Opt_gid),
+ fsparam_u32oct ("mode", Opt_mode),
+ fsparam_u32 ("uid", Opt_uid),
+ fsparam_flag ("debug", Opt_debug),
+ {}
};
-static const match_table_t spufs_tokens = {
- { Opt_uid, "uid=%d" },
- { Opt_gid, "gid=%d" },
- { Opt_mode, "mode=%o" },
- { Opt_debug, "debug" },
- { Opt_err, NULL },
+static const struct fs_parameter_description spufs_fs_parameters = {
+ .name = "spufs",
+ .specs = spufs_param_specs,
};
static int spufs_show_options(struct seq_file *m, struct dentry *root)
@@ -604,47 +616,41 @@ static int spufs_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
-static int
-spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
-{
- char *p;
- substring_t args[MAX_OPT_ARGS];
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token, option;
-
- if (!*p)
- continue;
-
- token = match_token(p, spufs_tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- return 0;
- root->i_uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(root->i_uid))
- return 0;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return 0;
- root->i_gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(root->i_gid))
- return 0;
- break;
- case Opt_mode:
- if (match_octal(&args[0], &option))
- return 0;
- root->i_mode = option | S_IFDIR;
- break;
- case Opt_debug:
- spufs_get_sb_info(sb)->debug = 1;
- break;
- default:
- return 0;
- }
+static int spufs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct spufs_fs_context *ctx = fc->fs_private;
+ struct spufs_sb_info *sbi = fc->s_fs_info;
+ struct fs_parse_result result;
+ kuid_t uid;
+ kgid_t gid;
+ int opt;
+
+ opt = fs_parse(fc, &spufs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ return invalf(fc, "Unknown uid");
+ ctx->uid = uid;
+ break;
+ case Opt_gid:
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ return invalf(fc, "Unknown gid");
+ ctx->gid = gid;
+ break;
+ case Opt_mode:
+ ctx->mode = result.uint_32 & S_IALLUGO;
+ break;
+ case Opt_debug:
+ sbi->debug = true;
+ break;
}
- return 1;
+
+ return 0;
}
static void spufs_exit_isolated_loader(void)
@@ -678,79 +684,98 @@ spufs_init_isolated_loader(void)
printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}
-static int
-spufs_create_root(struct super_block *sb, void *data)
+static int spufs_create_root(struct super_block *sb, struct fs_context *fc)
{
+ struct spufs_fs_context *ctx = fc->fs_private;
struct inode *inode;
- int ret;
- ret = -ENODEV;
if (!spu_management_ops)
- goto out;
+ return -ENODEV;
- ret = -ENOMEM;
- inode = spufs_new_inode(sb, S_IFDIR | 0775);
+ inode = spufs_new_inode(sb, S_IFDIR | ctx->mode);
if (!inode)
- goto out;
+ return -ENOMEM;
+ inode->i_uid = ctx->uid;
+ inode->i_gid = ctx->gid;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
SPUFS_I(inode)->i_ctx = NULL;
inc_nlink(inode);
- ret = -EINVAL;
- if (!spufs_parse_options(sb, data, inode))
- goto out_iput;
-
- ret = -ENOMEM;
sb->s_root = d_make_root(inode);
if (!sb->s_root)
- goto out;
-
+ return -ENOMEM;
return 0;
-out_iput:
- iput(inode);
-out:
- return ret;
}
-static int
-spufs_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct spufs_sb_info *info;
- static const struct super_operations s_ops = {
- .alloc_inode = spufs_alloc_inode,
- .free_inode = spufs_free_inode,
- .statfs = simple_statfs,
- .evict_inode = spufs_evict_inode,
- .show_options = spufs_show_options,
- };
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
+static const struct super_operations spufs_ops = {
+ .alloc_inode = spufs_alloc_inode,
+ .free_inode = spufs_free_inode,
+ .statfs = simple_statfs,
+ .evict_inode = spufs_evict_inode,
+ .show_options = spufs_show_options,
+};
+static int spufs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = SPUFS_MAGIC;
- sb->s_op = &s_ops;
- sb->s_fs_info = info;
+ sb->s_op = &spufs_ops;
- return spufs_create_root(sb, data);
+ return spufs_create_root(sb, fc);
+}
+
+static int spufs_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, spufs_fill_super);
}
-static struct dentry *
-spufs_mount(struct file_system_type *fstype, int flags,
- const char *name, void *data)
+static void spufs_free_fc(struct fs_context *fc)
{
- return mount_single(fstype, flags, data, spufs_fill_super);
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations spufs_context_ops = {
+ .free = spufs_free_fc,
+ .parse_param = spufs_parse_param,
+ .get_tree = spufs_get_tree,
+};
+
+static int spufs_init_fs_context(struct fs_context *fc)
+{
+ struct spufs_fs_context *ctx;
+ struct spufs_sb_info *sbi;
+
+ ctx = kzalloc(sizeof(struct spufs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ goto nomem;
+
+ sbi = kzalloc(sizeof(struct spufs_sb_info), GFP_KERNEL);
+ if (!sbi)
+ goto nomem_ctx;
+
+ ctx->uid = current_uid();
+ ctx->gid = current_gid();
+ ctx->mode = 0755;
+
+ fc->s_fs_info = sbi;
+ fc->ops = &spufs_context_ops;
+ return 0;
+
+nomem_ctx:
+ kfree(ctx);
+nomem:
+ return -ENOMEM;
}
static struct file_system_type spufs_type = {
.owner = THIS_MODULE,
.name = "spufs",
- .mount = spufs_mount,
+ .init_fs_context = spufs_init_fs_context,
+ .parameters = &spufs_fs_parameters,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("spufs");
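
This conversion and the s390 hypfs one below follow the same recipe: replace
match_table_t string scanning with typed fs_parameter specs, move per-mount
defaults into a private context allocated in init_fs_context(), and let
fs_parse() do matching and type checking in one step. A minimal sketch of the
recipe for a hypothetical "examplefs" (illustrative names; the API calls are
the ones used in this diff):

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_owner, };

static const struct fs_parameter_spec examplefs_param_specs[] = {
	fsparam_u32("owner", Opt_owner),	/* accepts "owner=<u32>" */
	{}
};

static const struct fs_parameter_description examplefs_fs_parameters = {
	.name	= "examplefs",
	.specs	= examplefs_param_specs,
};

static int examplefs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	/* fs_parse() matches the key and type-checks the value in one step */
	opt = fs_parse(fc, &examplefs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_owner:
		/* result.uint_32 already holds the validated value */
		break;
	}
	return 0;
}
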
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 71d29fb4008a..8eebbc8860bb 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -59,6 +59,18 @@ config RISCV
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select SPARSEMEM_STATIC if 32BIT
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
+ select HAVE_ARCH_MMAP_RND_BITS
+
+config ARCH_MMAP_RND_BITS_MIN
+ default 18 if 64BIT
+ default 8
+
+# max bits determined by the following formula:
+# VA_BITS - PAGE_SHIFT - 3
+config ARCH_MMAP_RND_BITS_MAX
+ default 24 if 64BIT # SV39 based
+ default 17
config MMU
def_bool y
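
Plugging PAGE_SHIFT = 12 (4 KiB pages) into the formula from the comment
reproduces both defaults:

	64BIT (SV39): VA_BITS - PAGE_SHIFT - 3 = 39 - 12 - 3 = 24
	32BIT (SV32): VA_BITS - PAGE_SHIFT - 3 = 32 - 12 - 3 = 17
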
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 56a67d66f72f..d59ea92285ec 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -78,12 +78,8 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#define __pte_free_tlb(tlb, pte, buf) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
-static inline void check_pgt_cache(void)
-{
-}
-
#endif /* _ASM_RISCV_PGALLOC_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 80905b27ee98..c60123f018f5 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -424,11 +424,6 @@ extern void *dtb_early_va;
extern void setup_bootmem(void);
extern void paging_init(void);
-static inline void pgtable_cache_init(void)
-{
- /* No page table caches to initialize */
-}
-
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index a4418fc425b8..70139d0791b6 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -12,17 +12,17 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/namei.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/time.h>
-#include <linux/parser.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/seq_file.h>
-#include <linux/mount.h>
#include <linux/uio.h>
#include <asm/ebcdic.h>
#include "hypfs.h"
@@ -207,52 +207,44 @@ static int hypfs_release(struct inode *inode, struct file *filp)
return 0;
}
-enum { opt_uid, opt_gid, opt_err };
+enum { Opt_uid, Opt_gid, };
-static const match_table_t hypfs_tokens = {
- {opt_uid, "uid=%u"},
- {opt_gid, "gid=%u"},
- {opt_err, NULL}
+static const struct fs_parameter_spec hypfs_param_specs[] = {
+ fsparam_u32("gid", Opt_gid),
+ fsparam_u32("uid", Opt_uid),
+ {}
};
-static int hypfs_parse_options(char *options, struct super_block *sb)
+static const struct fs_parameter_description hypfs_fs_parameters = {
+ .name = "hypfs",
+ .specs = hypfs_param_specs,
+};
+
+static int hypfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *str;
- substring_t args[MAX_OPT_ARGS];
+ struct hypfs_sb_info *hypfs_info = fc->s_fs_info;
+ struct fs_parse_result result;
kuid_t uid;
kgid_t gid;
-
- if (!options)
- return 0;
- while ((str = strsep(&options, ",")) != NULL) {
- int token, option;
- struct hypfs_sb_info *hypfs_info = sb->s_fs_info;
-
- if (!*str)
- continue;
- token = match_token(str, hypfs_tokens, args);
- switch (token) {
- case opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return -EINVAL;
- hypfs_info->uid = uid;
- break;
- case opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return -EINVAL;
- hypfs_info->gid = gid;
- break;
- case opt_err:
- default:
- pr_err("%s is not a valid mount option\n", str);
- return -EINVAL;
- }
+ int opt;
+
+ opt = fs_parse(fc, &hypfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ return invalf(fc, "Unknown uid");
+ hypfs_info->uid = uid;
+ break;
+ case Opt_gid:
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ return invalf(fc, "Unknown gid");
+ hypfs_info->gid = gid;
+ break;
}
return 0;
}
@@ -266,26 +258,18 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
return 0;
}
-static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+static int hypfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct hypfs_sb_info *sbi = sb->s_fs_info;
struct inode *root_inode;
- struct dentry *root_dentry;
- int rc = 0;
- struct hypfs_sb_info *sbi;
+ struct dentry *root_dentry, *update_file;
+ int rc;
- sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
- mutex_init(&sbi->lock);
- sbi->uid = current_uid();
- sbi->gid = current_gid();
- sb->s_fs_info = sbi;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = HYPFS_MAGIC;
sb->s_op = &hypfs_s_ops;
- if (hypfs_parse_options(data, sb))
- return -EINVAL;
+
root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
if (!root_inode)
return -ENOMEM;
@@ -300,18 +284,46 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
rc = hypfs_diag_create_files(root_dentry);
if (rc)
return rc;
- sbi->update_file = hypfs_create_update_file(root_dentry);
- if (IS_ERR(sbi->update_file))
- return PTR_ERR(sbi->update_file);
+ update_file = hypfs_create_update_file(root_dentry);
+ if (IS_ERR(update_file))
+ return PTR_ERR(update_file);
+ sbi->update_file = update_file;
hypfs_update_update(sb);
pr_info("Hypervisor filesystem mounted\n");
return 0;
}
-static struct dentry *hypfs_mount(struct file_system_type *fst, int flags,
- const char *devname, void *data)
+static int hypfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, hypfs_fill_super);
+}
+
+static void hypfs_free_fc(struct fs_context *fc)
{
- return mount_single(fst, flags, data, hypfs_fill_super);
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations hypfs_context_ops = {
+ .free = hypfs_free_fc,
+ .parse_param = hypfs_parse_param,
+ .get_tree = hypfs_get_tree,
+};
+
+static int hypfs_init_fs_context(struct fs_context *fc)
+{
+ struct hypfs_sb_info *sbi;
+
+ sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+
+ mutex_init(&sbi->lock);
+ sbi->uid = current_uid();
+ sbi->gid = current_gid();
+
+ fc->s_fs_info = sbi;
+ fc->ops = &hypfs_context_ops;
+ return 0;
}
static void hypfs_kill_super(struct super_block *sb)
@@ -442,7 +454,8 @@ static const struct file_operations hypfs_file_ops = {
static struct file_system_type hypfs_type = {
.owner = THIS_MODULE,
.name = "s390_hypfs",
- .mount = hypfs_mount,
+ .init_fs_context = hypfs_init_fs_context,
+ .parameters = &hypfs_fs_parameters,
.kill_sb = hypfs_kill_super
};
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index ae3e3221d4b5..ceeb552d3472 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -70,7 +70,7 @@ struct hws_qsi_info_block { /* Bit(s) */
unsigned long tear; /* 24-31: TEAR contents */
unsigned long dear; /* 32-39: DEAR contents */
unsigned int rsvrd0; /* 40-43: reserved */
- unsigned int cpu_speed; /* 44-47: CPU speed */
+ unsigned int cpu_speed; /* 44-47: CPU speed */
unsigned long long rsvrd1; /* 48-55: reserved */
unsigned long long rsvrd2; /* 56-63: reserved */
} __packed;
@@ -89,10 +89,10 @@ struct hws_lsctl_request_block {
unsigned long tear; /* 16-23: TEAR contents */
unsigned long dear; /* 24-31: DEAR contents */
/* 32-63: */
- unsigned long rsvrd1; /* reserved */
- unsigned long rsvrd2; /* reserved */
- unsigned long rsvrd3; /* reserved */
- unsigned long rsvrd4; /* reserved */
+ unsigned long rsvrd1; /* reserved */
+ unsigned long rsvrd2; /* reserved */
+ unsigned long rsvrd3; /* reserved */
+ unsigned long rsvrd4; /* reserved */
} __packed;
struct hws_basic_entry {
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 560d8f766ddf..4652ffffe0b2 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -60,6 +60,7 @@ struct perf_sf_sde_regs {
#define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
PERF_CPUM_SF_DIAG_MODE)
#define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */
+#define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */
#define REG_NONE 0
#define REG_OVERFLOW 1
@@ -70,5 +71,6 @@ struct perf_sf_sde_regs {
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
+#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0c4600725fc2..36c578c0ff96 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1682,12 +1682,6 @@ extern void s390_reset_cmma(struct mm_struct *mm);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-/*
- * No page table caches to initialise
- */
-static inline void pgtable_cache_init(void) { }
-static inline void check_pgt_cache(void) { }
-
#include <asm-generic/pgtable.h>
#endif /* _S390_PAGE_H */
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index 8c5755f41dde..f9e5e1f0821d 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -4,7 +4,7 @@
*
* zcrypt 2.2.1 (user-visible header)
*
- * Copyright IBM Corp. 2001, 2018
+ * Copyright IBM Corp. 2001, 2019
* Author(s): Robert Burroughs
* Eric Rossman ([email protected])
*
@@ -286,7 +286,7 @@ struct zcrypt_device_matrix_ext {
* 0x08: CEX3A
* 0x0a: CEX4
* 0x0b: CEX5
- * 0x0c: CEX6
+ * 0x0c: CEX6 and CEX7
* 0x0d: device is disabled
*
* ZCRYPT_QDEPTH_MASK
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 292a452cd1f3..544a02e944c6 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -673,13 +673,89 @@ out:
rcu_read_unlock();
}
+static unsigned long getrate(bool freq, unsigned long sample,
+ struct hws_qsi_info_block *si)
+{
+ unsigned long rate;
+
+ if (freq) {
+ rate = freq_to_sample_rate(si, sample);
+ rate = hw_limit_rate(si, rate);
+ } else {
+		/* The min/max sampling rates specify the valid range
+ * of sample periods. If the specified sample period is
+ * out of range, limit the period to the range boundary.
+ */
+ rate = hw_limit_rate(si, sample);
+
+ /* The perf core maintains a maximum sample rate that is
+ * configurable through the sysctl interface. Ensure the
+ * sampling rate does not exceed this value. This also helps
+ * to avoid throttling when pushing samples with
+ * perf_event_overflow().
+ */
+ if (sample_rate_to_freq(si, rate) >
+ sysctl_perf_event_sample_rate) {
+ debug_sprintf_event(sfdbg, 1,
+ "Sampling rate exceeds maximum "
+ "perf sample rate\n");
+ rate = 0;
+ }
+ }
+ return rate;
+}
+
+/* The sampling information (si) contains information about the
+ * min/max sampling intervals and the CPU speed. So calculate the
+ * correct sampling interval and avoid the whole period adjust
+ * feedback loop.
+ *
+ * Since the CPU Measurement sampling facility cannot handle frequency
+ * directly, calculate the sampling interval from a specified frequency
+ * using this formula:
+ * interval := cpu_speed * 1000000 / sample_freq
+ *
+ * Returns errno on bad input and zero on success, with the event's
+ * sample period set to the calculated sampling interval.
+ *
+ * Note: This function turns off the freq bit to avoid calling
+ * perf_adjust_period(); frequency adjustment in the common code would
+ * otherwise cause tremendous variations in the counter values.
+ */
+static int __hw_perf_event_init_rate(struct perf_event *event,
+ struct hws_qsi_info_block *si)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long rate;
+
+ if (attr->freq) {
+ if (!attr->sample_freq)
+ return -EINVAL;
+ rate = getrate(attr->freq, attr->sample_freq, si);
+ attr->freq = 0; /* Don't call perf_adjust_period() */
+ SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE;
+ } else {
+ rate = getrate(attr->freq, attr->sample_period, si);
+ if (!rate)
+ return -EINVAL;
+ }
+ attr->sample_period = rate;
+ SAMPL_RATE(hwc) = rate;
+ hw_init_period(hwc, SAMPL_RATE(hwc));
+ debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:"
+ "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu,
+ event->attr.sample_period, event->attr.freq,
+ SAMPLE_FREQ_MODE(hwc));
+ return 0;
+}
+
static int __hw_perf_event_init(struct perf_event *event)
{
struct cpu_hw_sf *cpuhw;
struct hws_qsi_info_block si;
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
- unsigned long rate;
int cpu, err;
/* Reserve CPU-measurement sampling facility */
@@ -745,43 +821,9 @@ static int __hw_perf_event_init(struct perf_event *event)
if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
- /* The sampling information (si) contains information about the
- * min/max sampling intervals and the CPU speed. So calculate the
- * correct sampling interval and avoid the whole period adjust
- * feedback loop.
- */
- rate = 0;
- if (attr->freq) {
- if (!attr->sample_freq) {
- err = -EINVAL;
- goto out;
- }
- rate = freq_to_sample_rate(&si, attr->sample_freq);
- rate = hw_limit_rate(&si, rate);
- attr->freq = 0;
- attr->sample_period = rate;
- } else {
- /* The min/max sampling rates specifies the valid range
- * of sample periods. If the specified sample period is
- * out of range, limit the period to the range boundary.
- */
- rate = hw_limit_rate(&si, hwc->sample_period);
-
- /* The perf core maintains a maximum sample rate that is
- * configurable through the sysctl interface. Ensure the
- * sampling rate does not exceed this value. This also helps
- * to avoid throttling when pushing samples with
- * perf_event_overflow().
- */
- if (sample_rate_to_freq(&si, rate) >
- sysctl_perf_event_sample_rate) {
- err = -EINVAL;
- debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n");
- goto out;
- }
- }
- SAMPL_RATE(hwc) = rate;
- hw_init_period(hwc, SAMPL_RATE(hwc));
+ err = __hw_perf_event_init_rate(event, &si);
+ if (err)
+ goto out;
/* Initialize sample data overflow accounting */
hwc->extra_reg.reg = REG_OVERFLOW;
@@ -904,6 +946,8 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
extend_sampling_buffer(&cpuhw->sfb, hwc);
}
+ /* Rate may be adjusted with ioctl() */
+ cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
}
/* (Re)enable the PMU and sampling facility */
@@ -922,8 +966,9 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
lpp(&S390_lowcore.lpp);
debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
- "tear=%p dear=%p\n", cpuhw->lsctl.es,
- cpuhw->lsctl.cs, cpuhw->lsctl.ed, cpuhw->lsctl.cd,
+ "interval:%lx tear=%p dear=%p\n",
+ cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
+ cpuhw->lsctl.cd, cpuhw->lsctl.interval,
(void *) cpuhw->lsctl.tear,
(void *) cpuhw->lsctl.dear);
}
@@ -1717,6 +1762,44 @@ static void cpumsf_pmu_read(struct perf_event *event)
/* Nothing to do ... updates are interrupt-driven */
}
+/* Check if the new sampling period/frequency is appropriate.
+ *
+ * Return non-zero on error and zero if the checks passed.
+ */
+static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
+{
+ struct hws_qsi_info_block si;
+ unsigned long rate;
+ bool do_freq;
+
+ memset(&si, 0, sizeof(si));
+ if (event->cpu == -1) {
+ if (qsi(&si))
+ return -ENODEV;
+ } else {
+ /* Event is pinned to a particular CPU, retrieve the per-CPU
+ * sampling structure for accessing the CPU-specific QSI.
+ */
+ struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
+
+ si = cpuhw->qsi;
+ }
+
+ do_freq = !!SAMPLE_FREQ_MODE(&event->hw);
+ rate = getrate(do_freq, value, &si);
+ if (!rate)
+ return -EINVAL;
+
+ event->attr.sample_period = rate;
+ SAMPL_RATE(&event->hw) = rate;
+ hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
+ debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:"
+ "cpu:%d value:%llx period:%llx freq:%d\n",
+ event->cpu, value,
+ event->attr.sample_period, do_freq);
+ return 0;
+}
+
/* Activate sampling control.
* Next call of pmu_enable() starts sampling.
*/
@@ -1908,6 +1991,8 @@ static struct pmu cpumf_sampling = {
.setup_aux = aux_buffer_setup,
.free_aux = aux_buffer_free,
+
+ .check_period = cpumsf_pmu_check_period,
};
static void cpumf_measurement_alert(struct ext_code ext_code,
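
For the frequency path in getrate(), the interval formula from the comment
works out as follows (the cpu_speed value is hypothetical, and the result is
still clamped by hw_limit_rate() against the QSI min/max intervals):

	interval := cpu_speed * 1000000 / sample_freq
	e.g. cpu_speed = 5000, sample_freq = 4000 Hz
	     interval  = 5000 * 1000000 / 4000 = 1250000
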
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 2db6fb405a9a..3627953007ed 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -311,7 +311,8 @@ int arch_update_cpu_topology(void)
on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
- kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ if (dev)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
return rc;
}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 54fcdf66ae96..3dd253f81a77 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -210,7 +210,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
page = alloc_page(GFP_KERNEL);
if (!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -256,7 +256,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
atomic_xor_bits(&page->_refcount, 3U << 24);
}
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
@@ -308,7 +308,7 @@ void __tlb_remove_table(void *_table)
case 3: /* 4K page table with pgstes */
if (mask & 3)
atomic_xor_bits(&page->_refcount, 3 << 24);
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
break;
}
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index b56f908b1395..22d968bfe9bb 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -2,10 +2,8 @@
#ifndef __ASM_SH_PGALLOC_H
#define __ASM_SH_PGALLOC_H
-#include <linux/quicklist.h>
#include <asm/page.h>
-
-#define QUICK_PT 0 /* Other page table pages that are zero on free */
+#include <asm-generic/pgalloc.h>
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
@@ -29,44 +27,9 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
}
#define pmd_pgtable(pmd) pmd_page(pmd)
-/*
- * Allocate and free page tables.
- */
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
- struct page *page;
- void *pg;
-
- pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
- if (!pg)
- return NULL;
- page = virt_to_page(pg);
- if (!pgtable_page_ctor(page)) {
- quicklist_free(QUICK_PT, NULL, pg);
- return NULL;
- }
- return page;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- quicklist_free(QUICK_PT, NULL, pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- quicklist_free_page(QUICK_PT, NULL, pte);
-}
-
#define __pte_free_tlb(tlb,pte,addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
@@ -79,9 +42,4 @@ do { \
} while (0);
#endif
-static inline void check_pgt_cache(void)
-{
- quicklist_trim(QUICK_PT, NULL, 25, 16);
-}
-
#endif /* __ASM_SH_PGALLOC_H */
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 9085d1142fa3..cbd0f3c55a0c 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -123,11 +123,6 @@ typedef pte_t *pte_addr_t;
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
-/*
- * Initialise the page table caches
- */
-extern void pgtable_cache_init(void);
-
struct vm_area_struct;
struct mm_struct;
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 02ed2df25a54..5c8a2ebfc720 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
menu "Memory management options"
-config QUICKLIST
- def_bool y
-
config MMU
bool "Support for memory management hardware"
depends on !CPU_SH2
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index cc779a90d917..dca946f426c6 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -97,7 +97,3 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
}
-
-void pgtable_cache_init(void)
-{
-}
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 282be50a4adf..10538a4d1a1e 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -17,8 +17,6 @@ void srmmu_free_nocache(void *addr, int size);
extern struct resource sparc_iomap;
-#define check_pgt_cache() do { } while (0)
-
pgd_t *get_pgd_fast(void);
static inline void free_pgd_fast(pgd_t *pgd)
{
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 48abccba4991..9d3e5cc95bbb 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -69,8 +69,6 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
#define pmd_pgtable(PMD) ((pte_t *)__pmd_page(PMD))
-#define check_pgt_cache() do { } while (0)
-
void pgtable_free(void *table, bool is_page);
#ifdef CONFIG_SMP
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 4eebed6c6781..31da44826645 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -445,9 +445,4 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
#endif /* !(_SPARC_PGTABLE_H) */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1599de730532..6ae8016ef4ec 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -1078,7 +1078,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
}
#define io_remap_pfn_range io_remap_pfn_range
-static inline unsigned long untagged_addr(unsigned long start)
+static inline unsigned long __untagged_addr(unsigned long start)
{
if (adi_capable()) {
long addr = start;
@@ -1098,7 +1098,8 @@ static inline unsigned long untagged_addr(unsigned long start)
return start;
}
-#define untagged_addr untagged_addr
+#define untagged_addr(addr) \
+ ((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
static inline bool pte_access_permitted(pte_t pte, bool write)
{
@@ -1135,7 +1136,6 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA
-void pgtable_cache_init(void);
void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
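
The switch from an inline function to a macro is what makes untagged_addr()
type-transparent: __typeof__ casts the stripped value back to the argument's
own type, so pointer callers no longer need explicit casts. A standalone
illustration of the same pattern (hypothetical names):

/* Assume __strip_tag() clears tag bits and returns an unsigned long. */
unsigned long __strip_tag(unsigned long addr);

#define strip_tag(addr) \
	((__typeof__(addr))(__strip_tag((unsigned long)(addr))))

/*
 * char *p = ...;
 * char *q = strip_tag(p);            -- result is still a char *
 * unsigned long v = strip_tag(1UL);  -- result is still unsigned long
 */
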
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 046ab116cc8c..906eda1158b4 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -31,7 +31,6 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
-#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/prom.h>
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 4b099dd7a767..e6d91819da92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2903,7 +2903,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
free_unref_page(page);
return NULL;
}
@@ -2919,7 +2919,7 @@ static void __pte_free(pgtable_t pte)
{
struct page *page = virt_to_page(pte);
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index aaebbc00d262..cc3ad64479ac 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -378,7 +378,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
return NULL;
page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -389,7 +389,7 @@ void pte_free(struct mm_struct *mm, pgtable_t pte)
{
unsigned long p;
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
p = (unsigned long)page_address(pte); /* Cached address (for test) */
if (p == 0)
BUG();
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 023599c3fa51..881e76da1938 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
#define __pte_free_tlb(tlb,pte, address) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb),(pte)); \
} while (0)
@@ -43,7 +43,5 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
#endif
-#define check_pgt_cache() do { } while (0)
-
#endif
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index e4d3ed980d82..36a44d58f373 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -32,8 +32,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;
-#define pgtable_cache_init() do ; while (0)
-
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index 3f0903bd98e9..ba1c9a79993b 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -18,8 +18,6 @@
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h>
-#define check_pgt_cache() do { } while (0)
-
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_PRESENT)
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_PRESENT)
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
index 126e961a8cb0..c8f7ba12f309 100644
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -285,8 +285,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#include <asm-generic/pgtable.h>
-#define pgtable_cache_init() do { } while (0)
-
#endif /* !__ASSEMBLY__ */
#endif /* __UNICORE_PGTABLE_H__ */
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
index 10d2356bfddd..4663d8cc80ef 100644
--- a/arch/unicore32/include/asm/tlb.h
+++ b/arch/unicore32/include/asm/tlb.h
@@ -15,7 +15,7 @@
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index c78da8eda8f2..0dca7f7aeff2 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -29,8 +29,6 @@ extern pgd_t swapper_pg_dir[1024];
extern pgd_t initial_page_table[1024];
extern pmd_t initial_pg_pmd[];
-static inline void pgtable_cache_init(void) { }
-static inline void check_pgt_cache(void) { }
void paging_init(void);
void sync_initial_page_table(void);
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 4990d26dfc73..0b6c4042942a 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -241,9 +241,6 @@ extern void cleanup_highmap(void);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#define pgtable_cache_init() do { } while (0)
-#define check_pgt_cache() do { } while (0)
-
#define PAGE_AGP PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index fa16036fa592..65ebe4b88f7c 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -54,23 +54,10 @@ static u64 get_subtree_max_end(struct rb_node *node)
return ret;
}
-static u64 compute_subtree_max_end(struct memtype *data)
-{
- u64 max_end = data->end, child_max_end;
-
- child_max_end = get_subtree_max_end(data->rb.rb_right);
- if (child_max_end > max_end)
- max_end = child_max_end;
-
- child_max_end = get_subtree_max_end(data->rb.rb_left);
- if (child_max_end > max_end)
- max_end = child_max_end;
-
- return max_end;
-}
+#define NODE_END(node) ((node)->end)
-RB_DECLARE_CALLBACKS(static, memtype_rb_augment_cb, struct memtype, rb,
- u64, subtree_max_end, compute_subtree_max_end)
+RB_DECLARE_CALLBACKS_MAX(static, memtype_rb_augment_cb,
+ struct memtype, rb, u64, subtree_max_end, NODE_END)
/* Find the first (lowest start addr) overlapping range from rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
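
RB_DECLARE_CALLBACKS_MAX (from <linux/rbtree_augmented.h>) generates the
propagate/copy/rotate callbacks that the removed compute_subtree_max_end()
open-coded: given a per-node scalar accessor, it keeps the augmented field
equal to the maximum of that scalar over each subtree. A sketch with a
hypothetical interval type, following the memtype pattern above:

#include <linux/rbtree_augmented.h>

struct interval {
	struct rb_node	rb;
	u64		start, end;
	u64		subtree_max_end;	/* max of 'end' in this subtree */
};

#define INTERVAL_END(n) ((n)->end)

RB_DECLARE_CALLBACKS_MAX(static, interval_augment_cb,
			 struct interval, rb, u64, subtree_max_end,
			 INTERVAL_END)

/* Pass &interval_augment_cb to rb_insert_augmented() and
 * rb_erase_augmented() so subtree_max_end stays current across rotations.
 */
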
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 44816ff6411f..3e4b9035bb9a 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -45,7 +45,7 @@ early_param("userpte", setup_userpte);
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
paravirt_release_pte(page_to_pfn(pte));
paravirt_tlb_remove_table(tlb, pte);
}
@@ -357,7 +357,7 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
static struct kmem_cache *pgd_cache;
-void __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
{
/*
* When PAE kernel is running as a Xen domain, it does not use
@@ -402,10 +402,6 @@ static inline void _pgd_free(pgd_t *pgd)
}
#else
-void __init pgd_cache_init(void)
-{
-}
-
static inline pgd_t *_pgd_alloc(void)
{
return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
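
The empty pgtable_cache_init()/check_pgt_cache() stubs deleted throughout
this diff are replaced by a single generic default, so only architectures
with a real page-table cache (x86 here, plus e.g. powerpc and sh) keep an
implementation. A sketch of the weak-default pattern this relies on (the
location of the generic definition is assumed, not shown in this hunk):

/* Generic fallback: does nothing unless an architecture overrides it. */
void __init __weak pgtable_cache_init(void)
{
}
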
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index dd744aa450fa..1d38f0e755ba 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -55,7 +55,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
if (!pte)
return NULL;
page = virt_to_page(pte);
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -69,7 +69,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
__free_page(pte);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index ce3ff5e591b9..3f7fe5a8c286 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -238,7 +238,6 @@ extern void paging_init(void);
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
#endif
-static inline void pgtable_cache_init(void) { }
/*
* The pmd contains the kernel virtual address of the pte page.
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
index 06875feb27c2..856e2da2e397 100644
--- a/arch/xtensa/include/asm/tlbflush.h
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -160,9 +160,6 @@ static inline void invalidate_dtlb_mapping (unsigned address)
invalidate_dtlb_entry(tlb_entry);
}
-#define check_pgt_cache() do { } while (0)
-
-
/*
* DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
* ISA and exist only for test purposes..
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index ebbb48842190..e5e643752947 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -103,6 +103,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index b33be928d164..0319d6339822 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2016,7 +2016,7 @@ static void bfq_add_request(struct request *rq)
(bfqq->last_serv_time_ns > 0 &&
bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
time_is_before_eq_jiffies(bfqq->decrease_time_jif +
- msecs_to_jiffies(100))) {
+ msecs_to_jiffies(10))) {
bfqd->last_empty_occupied_ns = ktime_get_ns();
/*
* Start the state machine for measuring the
@@ -2025,7 +2025,21 @@ static void bfq_add_request(struct request *rq)
* be set when rq will be dispatched.
*/
bfqd->wait_dispatch = true;
- bfqd->rqs_injected = false;
+ /*
+		 * then any injection that occurred before the
+ * then possible injection occurred before the
+ * arrival of rq will not affect the total
+ * service time of rq. So the injection limit
+ * must not be updated as a function of such
+ * total service time, unless new injection
+ * occurs before rq is completed. To have the
+ * injection limit updated only in the latter
+ * case, reset rqs_injected here (rqs_injected
+ * will be set in case injection is performed
+ * on bfqq before rq is completed).
+ */
+ if (bfqd->rq_in_driver == 0)
+ bfqd->rqs_injected = false;
}
}
@@ -5784,14 +5798,14 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
unsigned int old_limit = bfqq->inject_limit;
- if (bfqq->last_serv_time_ns > 0) {
+ if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
if (tot_time_ns >= threshold && old_limit > 0) {
bfqq->inject_limit--;
bfqq->decrease_time_jif = jiffies;
} else if (tot_time_ns < threshold &&
- old_limit < bfqd->max_rq_in_driver<<1)
+ old_limit <= bfqd->max_rq_in_driver)
bfqq->inject_limit++;
}
@@ -5809,12 +5823,14 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
*/
if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
tot_time_ns < bfqq->last_serv_time_ns) {
+ if (bfqq->last_serv_time_ns == 0) {
+ /*
+ * Now we certainly have a base value: make sure we
+ * start trying injection.
+ */
+ bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
+ }
bfqq->last_serv_time_ns = tot_time_ns;
- /*
- * Now we certainly have a base value: make sure we
- * start trying injection.
- */
- bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
} else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
/*
* No I/O injected and no request still in service in
@@ -5830,6 +5846,7 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
/* update complete, not waiting for any request completion any longer */
bfqd->waited_rq = NULL;
+ bfqd->rqs_injected = false;
}
/*
diff --git a/block/blk-core.c b/block/blk-core.c
index 875e8d105067..d5e668ec751b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -34,6 +34,7 @@
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
+#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
@@ -1436,6 +1437,12 @@ bool blk_update_request(struct request *req, blk_status_t error,
if (!req->bio)
return false;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+ error == BLK_STS_OK)
+ req->q->integrity.profile->complete_fn(req, nr_bytes);
+#endif
+
if (unlikely(error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)))
print_req_error(req, error, __func__);
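
Paired with the blk_mq_start_request() hunk later in this series, the
integrity profile now has two symmetric hooks: prepare_fn runs before a WRITE
is issued, and the complete_fn call added here runs once a READ finishes
without error. A sketch of a profile wiring both hooks (hypothetical names;
real users would be protection-information profiles such as T10-PI):

#include <linux/blkdev.h>

static blk_status_t example_pi_nop_fn(struct blk_integrity_iter *iter)
{
	return BLK_STS_OK;		/* no generation/verification */
}

static void example_pi_prepare(struct request *rq)
{
	/* runs for WRITEs from blk_mq_start_request(), e.g. to remap
	 * reference tags before the data goes out */
}

static void example_pi_complete(struct request *rq, unsigned int nr_bytes)
{
	/* runs for successfully completed READ bytes from
	 * blk_update_request(), e.g. to undo the remapping */
}

static const struct blk_integrity_profile example_pi_profile = {
	.name		= "example-pi",
	.generate_fn	= example_pi_nop_fn,
	.verify_fn	= example_pi_nop_fn,
	.prepare_fn	= example_pi_prepare,
	.complete_fn	= example_pi_complete,
};
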
diff --git a/block/blk-flush.c b/block/blk-flush.c
index aedd9320e605..1eec9cbe5a0a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -214,6 +214,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
+
+ if (!refcount_dec_and_test(&flush_rq->ref)) {
+ fq->rq_status = error;
+ spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+ return;
+ }
+
+ if (fq->rq_status != BLK_STS_OK)
+ error = fq->rq_status;
+
hctx = flush_rq->mq_hctx;
if (!q->elevator) {
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index ca39b4624cf8..ff1070edbb40 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -368,10 +368,21 @@ static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
return BLK_STS_OK;
}
+static void blk_integrity_nop_prepare(struct request *rq)
+{
+}
+
+static void blk_integrity_nop_complete(struct request *rq,
+ unsigned int nr_bytes)
+{
+}
+
static const struct blk_integrity_profile nop_profile = {
.name = "nop",
.generate_fn = blk_integrity_nop_fn,
.verify_fn = blk_integrity_nop_fn,
+ .prepare_fn = blk_integrity_nop_prepare,
+ .complete_fn = blk_integrity_nop_complete,
};
/**
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 3b39deb8b9f8..2a3db80c1dce 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -529,8 +529,8 @@ struct iocg_wake_ctx {
static const struct ioc_params autop[] = {
[AUTOP_HDD] = {
.qos = {
- [QOS_RLAT] = 50000, /* 50ms */
- [QOS_WLAT] = 50000,
+ [QOS_RLAT] = 250000, /* 250ms */
+ [QOS_WLAT] = 250000,
[QOS_MIN] = VRATE_MIN_PPM,
[QOS_MAX] = VRATE_MAX_PPM,
},
@@ -1343,7 +1343,7 @@ static void ioc_timer_fn(struct timer_list *timer)
u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
u32 missed_ppm[2], rq_wait_pct;
u64 period_vtime;
- int i;
+ int prev_busy_level, i;
/* how were the latencies during the period? */
ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
@@ -1407,7 +1407,8 @@ static void ioc_timer_fn(struct timer_list *timer)
* comparing vdone against period start. If lagging behind
* IOs from past periods, don't increase vrate.
*/
- if (!atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
+ if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
+ !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
time_after64(vtime, vdone) &&
time_after64(vtime, now.vnow -
MAX_LAGGING_PERIODS * period_vtime) &&
@@ -1531,26 +1532,29 @@ skip_surplus_transfers:
* and experiencing shortages but not surpluses, we're too stingy
* and should increase vtime rate.
*/
+ prev_busy_level = ioc->busy_level;
if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
missed_ppm[READ] > ppm_rthr ||
missed_ppm[WRITE] > ppm_wthr) {
ioc->busy_level = max(ioc->busy_level, 0);
ioc->busy_level++;
- } else if (nr_lagging) {
- ioc->busy_level = max(ioc->busy_level, 0);
- } else if (nr_shortages && !nr_surpluses &&
- rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
+ } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
- ioc->busy_level = min(ioc->busy_level, 0);
- ioc->busy_level--;
+ /* take action iff there is contention */
+ if (nr_shortages && !nr_lagging) {
+ ioc->busy_level = min(ioc->busy_level, 0);
+ /* redistribute surpluses first */
+ if (!nr_surpluses)
+ ioc->busy_level--;
+ }
} else {
ioc->busy_level = 0;
}
ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
- if (ioc->busy_level) {
+ if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
u64 vrate = atomic64_read(&ioc->vtime_rate);
u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
@@ -1592,6 +1596,10 @@ skip_surplus_transfers:
atomic64_set(&ioc->vtime_rate, vrate);
ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
+ } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
+ trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
+ &missed_ppm, rq_wait_pct, nr_lagging,
+ nr_shortages, nr_surpluses);
}
ioc_refresh_params(ioc, false);
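The rework above changes when the vtime rate is adjusted: busy_level now only drops when there are genuine shortages, and a negative busy_level is acted on only if no cgroup is lagging behind its vtime. A stand-alone restatement of that gating condition (illustrative only; names and values are not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the new gate: raise vrate when over-busy; lower it only
 * when the device looks idle and nothing is lagging behind its vtime. */
static bool should_adjust_vrate(int busy_level, int nr_lagging)
{
	return busy_level > 0 || (busy_level < 0 && !nr_lagging);
}

int main(void)
{
	printf("%d\n", should_adjust_vrate(3, 2));	/* 1: over-busy, raise */
	printf("%d\n", should_adjust_vrate(-2, 1));	/* 0: laggers, hold */
	printf("%d\n", should_adjust_vrate(-2, 0));	/* 1: idle, lower */
	return 0;
}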
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c9d183d6c499..ca22afd47b3d 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -555,8 +555,6 @@ void blk_mq_sched_free_requests(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i;
- lockdep_assert_held(&q->sysfs_lock);
-
queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->sched_tags)
blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 20a49be536b5..6e3b15f70cd7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -30,6 +30,7 @@
#include <trace/events/block.h>
#include <linux/blk-mq.h>
+#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
@@ -700,6 +701,11 @@ void blk_mq_start_request(struct request *rq)
*/
rq->nr_phys_segments++;
}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
+ q->integrity.profile->prepare_fn(rq);
+#endif
}
EXPORT_SYMBOL(blk_mq_start_request);
@@ -912,7 +918,10 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
*/
if (blk_mq_req_expired(rq, next))
blk_mq_rq_timed_out(rq, reserved);
- if (refcount_dec_and_test(&rq->ref))
+
+ if (is_flush_rq(rq, hctx))
+ rq->end_io(rq, 0);
+ else if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
return true;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b82736c781c5..46f5198be017 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -482,7 +482,6 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
blk_mq_quiesce_queue(q);
wbt_set_min_lat(q, val);
- wbt_update_limits(q);
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
@@ -989,13 +988,11 @@ int blk_register_queue(struct gendisk *disk)
blk_mq_debugfs_register(q);
}
- /*
- * The flag of QUEUE_FLAG_REGISTERED isn't set yet, so elevator
- * switch won't happen at all.
- */
+ mutex_lock(&q->sysfs_lock);
if (q->elevator) {
ret = elv_register_queue(q, false);
if (ret) {
+ mutex_unlock(&q->sysfs_lock);
mutex_unlock(&q->sysfs_dir_lock);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(dev);
@@ -1005,7 +1002,6 @@ int blk_register_queue(struct gendisk *disk)
has_elevator = true;
}
- mutex_lock(&q->sysfs_lock);
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
wbt_enable_default(q);
blk_throtl_register_queue(q);
@@ -1062,12 +1058,10 @@ void blk_unregister_queue(struct gendisk *disk)
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
- /*
- * q->kobj has been removed, so it is safe to check if elevator
- * exists without holding q->sysfs_lock.
- */
+ mutex_lock(&q->sysfs_lock);
if (q->elevator)
elv_unregister_queue(q);
+ mutex_unlock(&q->sysfs_lock);
mutex_unlock(&q->sysfs_dir_lock);
kobject_put(&disk_to_dev(disk)->kobj);
diff --git a/block/blk.h b/block/blk.h
index ed347f7a97b1..47fba9362e60 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -19,6 +19,7 @@ struct blk_flush_queue {
unsigned int flush_queue_delayed:1;
unsigned int flush_pending_idx:1;
unsigned int flush_running_idx:1;
+ blk_status_t rq_status;
unsigned long flush_pending_since;
struct list_head flush_queue[2];
struct list_head flush_data_in_flight;
@@ -47,6 +48,12 @@ static inline void __blk_get_queue(struct request_queue *q)
kobject_get(&q->kobj);
}
+static inline bool
+is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
+{
+ return hctx->fq->flush_rq == req;
+}
+
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
@@ -194,6 +201,8 @@ void elv_unregister_queue(struct request_queue *q);
static inline void elevator_exit(struct request_queue *q,
struct elevator_queue *e)
{
+ lockdep_assert_held(&q->sysfs_lock);
+
blk_mq_sched_free_requests(q);
__elevator_exit(q, e);
}
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 785dd58947f1..347dda16c2f4 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -266,6 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq;
struct bsg_set *bset =
container_of(q->tag_set, struct bsg_set, tag_set);
+ int sts = BLK_STS_IOERR;
int ret;
blk_mq_start_request(req);
@@ -274,14 +275,15 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_IOERR;
if (!bsg_prepare_job(dev, req))
- return BLK_STS_IOERR;
+ goto out;
ret = bset->job_fn(blk_mq_rq_to_pdu(req));
- if (ret)
- return BLK_STS_IOERR;
+ if (!ret)
+ sts = BLK_STS_OK;
+out:
put_device(dev);
- return BLK_STS_OK;
+ return sts;
}
/* called right after the request is allocated for the request_queue */
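The bsg_queue_rq() change above funnels the failure paths through a single exit label so that put_device() runs whether or not the job was prepared and dispatched. A minimal stand-alone sketch of the same goto-out idiom, with hypothetical names and status values:

#include <stdio.h>

/* Single-exit variant of the queue path: the device reference is
 * dropped on every outcome, success or failure. */
static int queue_rq(int prepare_ok, int job_ok)
{
	int sts = 1;			/* pessimistic default, like BLK_STS_IOERR */

	if (!prepare_ok)
		goto out;		/* skip the job but still put the device */
	if (job_ok)
		sts = 0;		/* like BLK_STS_OK */
out:
	printf("put_device()\n");	/* cleanup shared by all paths */
	return sts;
}

int main(void)
{
	queue_rq(0, 0);			/* prepare fails: still puts the device */
	queue_rq(1, 1);			/* success path */
	return 0;
}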
diff --git a/block/elevator.c b/block/elevator.c
index bba10e83478a..5437059c9261 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -503,9 +503,7 @@ int elv_register_queue(struct request_queue *q, bool uevent)
if (uevent)
kobject_uevent(&e->kobj, KOBJ_ADD);
- mutex_lock(&q->sysfs_lock);
e->registered = 1;
- mutex_unlock(&q->sysfs_lock);
}
return error;
}
@@ -523,11 +521,9 @@ void elv_unregister_queue(struct request_queue *q)
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
- mutex_lock(&q->sysfs_lock);
e->registered = 0;
/* Re-enable throttling in case elevator disabled it */
wbt_enable_default(q);
- mutex_unlock(&q->sysfs_lock);
}
}
@@ -590,32 +586,11 @@ int elevator_switch_mq(struct request_queue *q,
lockdep_assert_held(&q->sysfs_lock);
if (q->elevator) {
- if (q->elevator->registered) {
- mutex_unlock(&q->sysfs_lock);
-
- /*
- * Concurrent elevator switch can't happen becasue
- * sysfs write is always exclusively on same file.
- *
- * Also the elevator queue won't be freed after
- * sysfs_lock is released becasue kobject_del() in
- * blk_unregister_queue() waits for completion of
- * .store & .show on its attributes.
- */
+ if (q->elevator->registered)
elv_unregister_queue(q);
- mutex_lock(&q->sysfs_lock);
- }
ioc_clear_queue(q);
elevator_exit(q, q->elevator);
-
- /*
- * sysfs_lock may be dropped, so re-check if queue is
- * unregistered. If yes, don't switch to new elevator
- * any more
- */
- if (!blk_queue_registered(q))
- return 0;
}
ret = blk_mq_init_sched(q, new_e);
@@ -623,11 +598,7 @@ int elevator_switch_mq(struct request_queue *q,
goto out;
if (new_e) {
- mutex_unlock(&q->sysfs_lock);
-
ret = elv_register_queue(q, true);
-
- mutex_lock(&q->sysfs_lock);
if (ret) {
elevator_exit(q, q->elevator);
goto out;
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 0c0094609dd6..9803c7e0376e 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -27,7 +27,7 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len)
* tag.
*/
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
- csum_fn *fn, unsigned int type)
+ csum_fn *fn, enum t10_dif_type type)
{
unsigned int i;
@@ -37,7 +37,7 @@ static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
pi->guard_tag = fn(iter->data_buf, iter->interval);
pi->app_tag = 0;
- if (type == 1)
+ if (type == T10_PI_TYPE1_PROTECTION)
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
else
pi->ref_tag = 0;
@@ -51,17 +51,18 @@ static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
}
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
- csum_fn *fn, unsigned int type)
+ csum_fn *fn, enum t10_dif_type type)
{
unsigned int i;
+ BUG_ON(type == T10_PI_TYPE0_PROTECTION);
+
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf;
__be16 csum;
- switch (type) {
- case 1:
- case 2:
+ if (type == T10_PI_TYPE1_PROTECTION ||
+ type == T10_PI_TYPE2_PROTECTION) {
if (pi->app_tag == T10_PI_APP_ESCAPE)
goto next;
@@ -73,12 +74,10 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
iter->seed, be32_to_cpu(pi->ref_tag));
return BLK_STS_PROTECTION;
}
- break;
- case 3:
+ } else if (type == T10_PI_TYPE3_PROTECTION) {
if (pi->app_tag == T10_PI_APP_ESCAPE &&
pi->ref_tag == T10_PI_REF_ESCAPE)
goto next;
- break;
}
csum = fn(iter->data_buf, iter->interval);
@@ -102,94 +101,40 @@ next:
static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
- return t10_pi_generate(iter, t10_pi_crc_fn, 1);
+ return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
- return t10_pi_generate(iter, t10_pi_ip_fn, 1);
+ return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
- return t10_pi_verify(iter, t10_pi_crc_fn, 1);
+ return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
- return t10_pi_verify(iter, t10_pi_ip_fn, 1);
-}
-
-static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_crc_fn, 3);
-}
-
-static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_ip_fn, 3);
-}
-
-static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_crc_fn, 3);
+ return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
-static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_ip_fn, 3);
-}
-
-const struct blk_integrity_profile t10_pi_type1_crc = {
- .name = "T10-DIF-TYPE1-CRC",
- .generate_fn = t10_pi_type1_generate_crc,
- .verify_fn = t10_pi_type1_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type1_crc);
-
-const struct blk_integrity_profile t10_pi_type1_ip = {
- .name = "T10-DIF-TYPE1-IP",
- .generate_fn = t10_pi_type1_generate_ip,
- .verify_fn = t10_pi_type1_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type1_ip);
-
-const struct blk_integrity_profile t10_pi_type3_crc = {
- .name = "T10-DIF-TYPE3-CRC",
- .generate_fn = t10_pi_type3_generate_crc,
- .verify_fn = t10_pi_type3_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type3_crc);
-
-const struct blk_integrity_profile t10_pi_type3_ip = {
- .name = "T10-DIF-TYPE3-IP",
- .generate_fn = t10_pi_type3_generate_ip,
- .verify_fn = t10_pi_type3_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type3_ip);
-
/**
- * t10_pi_prepare - prepare PI prior submitting request to device
+ * t10_pi_type1_prepare - prepare PI prior to submitting request to device
* @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
*
* For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to
* partitioning, MD/DM cloning, etc. the actual physical start sector is
* likely to be different. Remap protection information to match the
* physical LBA.
- *
- * Type 3 does not have a reference tag so no remapping is required.
*/
-void t10_pi_prepare(struct request *rq, u8 protection_type)
+static void t10_pi_type1_prepare(struct request *rq)
{
const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio;
- if (protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
__rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -222,13 +167,11 @@ void t10_pi_prepare(struct request *rq, u8 protection_type)
bip->bip_flags |= BIP_MAPPED_INTEGRITY;
}
}
-EXPORT_SYMBOL(t10_pi_prepare);
/**
- * t10_pi_complete - prepare PI prior returning request to the block layer
+ * t10_pi_type1_complete - prepare PI prior to returning request to the block layer
* @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
- * @intervals: total elements to prepare
+ * @nr_bytes: total bytes to prepare
*
* For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to
@@ -236,19 +179,14 @@ EXPORT_SYMBOL(t10_pi_prepare);
* likely to be different. Since the physical start sector was submitted
* to the device, we should remap it back to virtual values expected by the
* block layer.
- *
- * Type 3 does not have a reference tag so no remapping is required.
*/
-void t10_pi_complete(struct request *rq, u8 protection_type,
- unsigned int intervals)
+static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
+ unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio;
- if (protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
__rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -276,4 +214,73 @@ void t10_pi_complete(struct request *rq, u8 protection_type,
}
}
}
-EXPORT_SYMBOL(t10_pi_complete);
+
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+{
+ return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+{
+ return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+{
+ return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+{
+ return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+/*
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_prepare(struct request *rq)
+{
+}
+
+/*
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
+{
+}
+
+const struct blk_integrity_profile t10_pi_type1_crc = {
+ .name = "T10-DIF-TYPE1-CRC",
+ .generate_fn = t10_pi_type1_generate_crc,
+ .verify_fn = t10_pi_type1_verify_crc,
+ .prepare_fn = t10_pi_type1_prepare,
+ .complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_crc);
+
+const struct blk_integrity_profile t10_pi_type1_ip = {
+ .name = "T10-DIF-TYPE1-IP",
+ .generate_fn = t10_pi_type1_generate_ip,
+ .verify_fn = t10_pi_type1_verify_ip,
+ .prepare_fn = t10_pi_type1_prepare,
+ .complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_ip);
+
+const struct blk_integrity_profile t10_pi_type3_crc = {
+ .name = "T10-DIF-TYPE3-CRC",
+ .generate_fn = t10_pi_type3_generate_crc,
+ .verify_fn = t10_pi_type3_verify_crc,
+ .prepare_fn = t10_pi_type3_prepare,
+ .complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_crc);
+
+const struct blk_integrity_profile t10_pi_type3_ip = {
+ .name = "T10-DIF-TYPE3-IP",
+ .generate_fn = t10_pi_type3_generate_ip,
+ .verify_fn = t10_pi_type3_verify_ip,
+ .prepare_fn = t10_pi_type3_prepare,
+ .complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_ip);
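With the prepare_fn/complete_fn hooks, the completion side no longer receives an interval count from the driver; it derives it from the completed byte count as intervals = nr_bytes >> interval_exp, where interval_exp is log2 of the protection interval size. A quick stand-alone check of that arithmetic, assuming a 512-byte interval:

#include <stdio.h>

int main(void)
{
	unsigned int interval_exp = 9;		/* log2(512): one PI tuple per 512 bytes */
	unsigned int nr_bytes = 4096;		/* bytes completed so far */
	unsigned int intervals = nr_bytes >> interval_exp;

	printf("%u intervals\n", intervals);	/* 8: one ref_tag step each */
	return 0;
}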
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 7cd0c9ac71ea..71511ae2dfcd 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -160,11 +160,17 @@ static const struct apd_device_desc hip08_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 250000000,
};
+
static const struct apd_device_desc thunderx2_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 125000000,
};
+static const struct apd_device_desc nxp_i2c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 350000000,
+};
+
static const struct apd_device_desc hip08_spi_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 250000000,
@@ -238,6 +244,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "HISI02A1", APD_ADDR(hip07_i2c_desc) },
{ "HISI02A2", APD_ADDR(hip08_i2c_desc) },
{ "HISI0173", APD_ADDR(hip08_spi_desc) },
+ { "NXP0001", APD_ADDR(nxp_i2c_desc) },
#endif
{ }
};
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 9e9583a6bba9..e742780950de 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -497,6 +497,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (of_property_read_u32(child, "reg", &port)) {
rc = -EINVAL;
+ of_node_put(child);
goto err_out;
}
@@ -514,14 +515,18 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (port_dev) {
rc = ahci_platform_get_regulator(hpriv, port,
&port_dev->dev);
- if (rc == -EPROBE_DEFER)
+ if (rc == -EPROBE_DEFER) {
+ of_node_put(child);
goto err_out;
+ }
}
#endif
rc = ahci_platform_get_phy(hpriv, port, dev, child);
- if (rc)
+ if (rc) {
+ of_node_put(child);
goto err_out;
+ }
enabled_ports++;
}
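Each early exit above now drops the child node reference before leaving the loop: for_each_child_of_node() releases the current node only when the iteration advances normally, so a break or goto leaks it otherwise. A hedged kernel-style sketch of the pattern (not stand-alone; handle_port() is a hypothetical helper, of_node_put() and the iterator are real kernel API):

/* Sketch (kernel context, not stand-alone): leaving the iterator early
 * must pair with of_node_put(), since for_each_child_of_node() only
 * drops the current child's reference when the loop advances. */
for_each_child_of_node(dev->of_node, child) {
	rc = handle_port(child);	/* hypothetical per-port setup */
	if (rc) {
		of_node_put(child);	/* release the ref held for us */
		goto err_out;
	}
	enabled_ports++;
}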
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 20c39d1bcef8..6bea4f3f8040 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -100,26 +100,9 @@ unsigned long __weak memory_block_size_bytes(void)
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);
-static unsigned long get_memory_block_size(void)
-{
- unsigned long block_sz;
-
- block_sz = memory_block_size_bytes();
-
- /* Validate blk_sz is a power of 2 and not less than section size */
- if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
- WARN_ON(1);
- block_sz = MIN_MEMORY_BLOCK_SIZE;
- }
-
- return block_sz;
-}
-
/*
- * use this as the physical section index that this memsection
- * uses.
+ * Show the first physical section index (number) of this memory block.
*/
-
static ssize_t phys_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -131,7 +114,10 @@ static ssize_t phys_index_show(struct device *dev,
}
/*
- * Show whether the section of memory is likely to be hot-removable
+ * Show whether the memory block is likely to be offlineable (or is already
+ * offline). Once offline, the memory block could be removed. The return
+ * value does, however, not indicate that there is a way to remove the
+ * memory block.
*/
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -455,12 +441,12 @@ static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);
/*
- * Block size attribute stuff
+ * Show the memory block size (shared by all memory blocks).
*/
static ssize_t block_size_bytes_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%lx\n", get_memory_block_size());
+ return sprintf(buf, "%lx\n", memory_block_size_bytes());
}
static DEVICE_ATTR_RO(block_size_bytes);
@@ -670,10 +656,10 @@ static int init_memory_block(struct memory_block **memory,
return -ENOMEM;
mem->start_section_nr = block_id * sections_per_block;
- mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
mem->state = state;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
mem->phys_device = arch_get_memory_phys_device(start_pfn);
+ mem->nid = NUMA_NO_NODE;
ret = register_memory(mem);
@@ -810,19 +796,22 @@ static const struct attribute_group *memory_root_attr_groups[] = {
/*
* Initialize the sysfs support for memory devices...
*/
-int __init memory_dev_init(void)
+void __init memory_dev_init(void)
{
int ret;
int err;
unsigned long block_sz, nr;
+ /* Validate the configured memory block size */
+ block_sz = memory_block_size_bytes();
+ if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
+ panic("Memory block size not suitable: 0x%lx\n", block_sz);
+ sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+
ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
goto out;
- block_sz = get_memory_block_size();
- sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
-
/*
* Create entries for memory sections that were found
* during boot and have been initialized
@@ -838,8 +827,7 @@ int __init memory_dev_init(void)
out:
if (ret)
- printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
- return ret;
+ panic("%s() failed: %d\n", __func__, ret);
}
/**
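memory_dev_init() now validates memory_block_size_bytes() once at boot and panics on a bad value instead of silently clamping it on every sysfs read. A stand-alone version of the same power-of-two-and-minimum check (the 128 MiB MIN_MEMORY_BLOCK_SIZE here is an assumption; the real value is per-architecture):

#include <stdbool.h>
#include <stdio.h>

#define MIN_MEMORY_BLOCK_SIZE (1UL << 27)	/* assumed 128 MiB section size */

/* The boot-time check: a power of two, no smaller than one section. */
static bool block_size_ok(unsigned long sz)
{
	return sz && !(sz & (sz - 1)) && sz >= MIN_MEMORY_BLOCK_SIZE;
}

int main(void)
{
	printf("%d\n", block_size_ok(1UL << 30));	/* 1: 1 GiB blocks are fine */
	printf("%d\n", block_size_ok((1UL << 30) + 1));	/* 0: not a power of two */
	return 0;
}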
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 75b7e6f6535b..296546ffed6c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -427,6 +427,8 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d AnonHugePages: %8lu kB\n"
"Node %d ShmemHugePages: %8lu kB\n"
"Node %d ShmemPmdMapped: %8lu kB\n"
+ "Node %d FileHugePages: %8lu kB\n"
+ "Node %d FilePmdMapped: %8lu kB\n"
#endif
,
nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
@@ -452,6 +454,10 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
HPAGE_PMD_NR),
nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
+ HPAGE_PMD_NR),
+ nid, K(node_page_state(pgdat, NR_FILE_THPS) *
+ HPAGE_PMD_NR),
+ nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
HPAGE_PMD_NR)
#endif
);
@@ -756,15 +762,13 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
static int register_mem_sect_under_node(struct memory_block *mem_blk,
void *arg)
{
+ unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
+ unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+ unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
int ret, nid = *(int *)arg;
- unsigned long pfn, sect_start_pfn, sect_end_pfn;
+ unsigned long pfn;
- mem_blk->nid = nid;
-
- sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
- sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
- sect_end_pfn += PAGES_PER_SECTION - 1;
- for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+ for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
int page_nid;
/*
@@ -789,6 +793,13 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
if (page_nid != nid)
continue;
}
+
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node.
+ */
+ mem_blk->nid = nid;
+
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
@@ -804,32 +815,18 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
}
/*
- * Unregister memory block device under all nodes that it spans.
- * Has to be called with mem_sysfs_mutex held (due to unlinked_nodes).
+ * Unregister a memory block device under the node it spans. Memory blocks
+ * with multiple nodes cannot be offlined and therefore also never be removed.
*/
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
- unsigned long pfn, sect_start_pfn, sect_end_pfn;
- static nodemask_t unlinked_nodes;
-
- nodes_clear(unlinked_nodes);
- sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
- sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
- for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
- int nid;
+ if (mem_blk->nid == NUMA_NO_NODE)
+ return;
- nid = get_nid_for_pfn(pfn);
- if (nid < 0)
- continue;
- if (!node_online(nid))
- continue;
- if (node_test_and_set(nid, unlinked_nodes))
- continue;
- sysfs_remove_link(&node_devices[nid]->dev.kobj,
- kobject_name(&mem_blk->dev.kobj));
- sysfs_remove_link(&mem_blk->dev.kobj,
- kobject_name(&node_devices[nid]->dev.kobj));
- }
+ sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+ sysfs_remove_link(&mem_blk->dev.kobj,
+ kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
index c58986556161..651bd0236a99 100644
--- a/drivers/block/drbd/drbd_interval.c
+++ b/drivers/block/drbd/drbd_interval.c
@@ -13,33 +13,10 @@ sector_t interval_end(struct rb_node *node)
return this->end;
}
-/**
- * compute_subtree_last - compute end of @node
- *
- * The end of an interval is the highest (start + (size >> 9)) value of this
- * node and of its children. Called for @node and its parents whenever the end
- * may have changed.
- */
-static inline sector_t
-compute_subtree_last(struct drbd_interval *node)
-{
- sector_t max = node->sector + (node->size >> 9);
-
- if (node->rb.rb_left) {
- sector_t left = interval_end(node->rb.rb_left);
- if (left > max)
- max = left;
- }
- if (node->rb.rb_right) {
- sector_t right = interval_end(node->rb.rb_right);
- if (right > max)
- max = right;
- }
- return max;
-}
+#define NODE_END(node) ((node)->sector + ((node)->size >> 9))
-RB_DECLARE_CALLBACKS(static, augment_callbacks, struct drbd_interval, rb,
- sector_t, end, compute_subtree_last);
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct drbd_interval, rb, sector_t, end, NODE_END);
/**
* drbd_insert_interval - insert a new interval into a tree
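The conversion above replaces drbd's hand-rolled subtree-max computation with RB_DECLARE_CALLBACKS_MAX, which generates the propagate/copy/rotate callbacks from a single per-node "end" expression. A hedged sketch of the macro's use for a made-up interval type (the macro and its signature are the real <linux/rbtree_augmented.h> API; struct my_node is hypothetical):

/* Sketch (kernel context, not stand-alone). The macro only needs an
 * expression for a node's own interval end; it generates the callbacks
 * that keep subtree_last equal to the maximum end in each subtree. */
struct my_node {
	struct rb_node rb;
	unsigned long start, len;
	unsigned long subtree_last;	/* maintained by the callbacks */
};

#define MY_NODE_END(n) ((n)->start + (n)->len)

RB_DECLARE_CALLBACKS_MAX(static, my_callbacks,
			 struct my_node, rb,
			 unsigned long, subtree_last, MY_NODE_END);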
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a8e3815295fe..ac07e8c94c79 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -26,6 +26,7 @@
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
+#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -71,14 +72,17 @@ struct link_dead_args {
int index;
};
-#define NBD_TIMEDOUT 0
+#define NBD_RT_TIMEDOUT 0
+#define NBD_RT_DISCONNECT_REQUESTED 1
+#define NBD_RT_DISCONNECTED 2
+#define NBD_RT_HAS_PID_FILE 3
+#define NBD_RT_HAS_CONFIG_REF 4
+#define NBD_RT_BOUND 5
+#define NBD_RT_DESTROY_ON_DISCONNECT 6
+#define NBD_RT_DISCONNECT_ON_CLOSE 7
+
+#define NBD_DESTROY_ON_DISCONNECT 0
#define NBD_DISCONNECT_REQUESTED 1
-#define NBD_DISCONNECTED 2
-#define NBD_HAS_PID_FILE 3
-#define NBD_HAS_CONFIG_REF 4
-#define NBD_BOUND 5
-#define NBD_DESTROY_ON_DISCONNECT 6
-#define NBD_DISCONNECT_ON_CLOSE 7
struct nbd_config {
u32 flags;
@@ -113,6 +117,9 @@ struct nbd_device {
struct list_head list;
struct task_struct *task_recv;
struct task_struct *task_setup;
+
+ struct completion *destroy_complete;
+ unsigned long flags;
};
#define NBD_CMD_REQUEUED 1
@@ -223,6 +230,16 @@ static void nbd_dev_remove(struct nbd_device *nbd)
disk->private_data = NULL;
put_disk(disk);
}
+
+ /*
+ * Do this last, just before the nbd is freed, to make sure
+ * that the disk and its kobject have been completely removed
+ * and the same device cannot be created twice.
+ */
+ if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
+ complete(nbd->destroy_complete);
+
kfree(nbd);
}
@@ -238,8 +255,8 @@ static void nbd_put(struct nbd_device *nbd)
static int nbd_disconnected(struct nbd_config *config)
{
- return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
- test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
+ return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
+ test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
@@ -257,9 +274,9 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
if (!nsock->dead) {
kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
if (atomic_dec_return(&nbd->config->live_connections) == 0) {
- if (test_and_clear_bit(NBD_DISCONNECT_REQUESTED,
+ if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
&nbd->config->runtime_flags)) {
- set_bit(NBD_DISCONNECTED,
+ set_bit(NBD_RT_DISCONNECTED,
&nbd->config->runtime_flags);
dev_info(nbd_to_dev(nbd),
"Disconnected due to user request.\n");
@@ -333,7 +350,7 @@ static void sock_shutdown(struct nbd_device *nbd)
if (config->num_connections == 0)
return;
- if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
+ if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
return;
for (i = 0; i < config->num_connections; i++) {
@@ -427,7 +444,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
- set_bit(NBD_TIMEDOUT, &config->runtime_flags);
+ set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock);
sock_shutdown(nbd);
@@ -795,7 +812,7 @@ static int find_fallback(struct nbd_device *nbd, int index)
struct nbd_sock *nsock = config->socks[index];
int fallback = nsock->fallback_index;
- if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
+ if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
return new_index;
if (config->num_connections <= 1) {
@@ -836,7 +853,7 @@ static int wait_for_reconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
if (!config->dead_conn_timeout)
return 0;
- if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
+ if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
return 0;
return wait_event_timeout(config->conn_wait,
atomic_read(&config->live_connections) > 0,
@@ -969,12 +986,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
return err;
if (!netlink && !nbd->task_setup &&
- !test_bit(NBD_BOUND, &config->runtime_flags))
+ !test_bit(NBD_RT_BOUND, &config->runtime_flags))
nbd->task_setup = current;
if (!netlink &&
(nbd->task_setup != current ||
- test_bit(NBD_BOUND, &config->runtime_flags))) {
+ test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
dev_err(disk_to_dev(nbd->disk),
"Device being setup by another task");
sockfd_put(sock);
@@ -1053,7 +1070,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
- clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
+ clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
/* We take the tx_mutex in an error path in the recv_work, so we
* need to queue_work outside of the tx_mutex.
@@ -1124,7 +1141,8 @@ static int nbd_disconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
- set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
+ set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
+ set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
send_disconnects(nbd);
return 0;
}
@@ -1143,7 +1161,7 @@ static void nbd_config_put(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
nbd_dev_dbg_close(nbd);
nbd_size_clear(nbd);
- if (test_and_clear_bit(NBD_HAS_PID_FILE,
+ if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
&config->runtime_flags))
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
nbd->task_recv = NULL;
@@ -1209,7 +1227,7 @@ static int nbd_start_device(struct nbd_device *nbd)
dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
return error;
}
- set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);
+ set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
nbd_dev_dbg_init(nbd);
for (i = 0; i < num_connections; i++) {
@@ -1256,9 +1274,9 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(bdev);
/* user requested, ignore socket errors */
- if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
+ if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
ret = 0;
- if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
+ if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
ret = -ETIMEDOUT;
return ret;
}
@@ -1269,7 +1287,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
sock_shutdown(nbd);
__invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
- if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+ if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
}
@@ -1364,7 +1382,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
/* Don't allow ioctl operations on a nbd device that was created with
* netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
*/
- if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
+ if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
(cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
error = __nbd_ioctl(bdev, nbd, cmd, arg);
else
@@ -1435,7 +1453,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
struct nbd_device *nbd = disk->private_data;
struct block_device *bdev = bdget_disk(disk, 0);
- if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+ if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
bdev->bd_openers == 0)
nbd_disconnect_and_put(nbd);
@@ -1636,6 +1654,7 @@ static int nbd_dev_add(int index)
nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
+ nbd->destroy_complete = NULL;
err = blk_mq_alloc_tag_set(&nbd->tag_set);
if (err)
@@ -1750,6 +1769,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
+ DECLARE_COMPLETION_ONSTACK(destroy_complete);
struct nbd_device *nbd = NULL;
struct nbd_config *config;
int index = -1;
@@ -1801,6 +1821,17 @@ again:
mutex_unlock(&nbd_index_mutex);
return -EINVAL;
}
+
+ if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
+ test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
+ nbd->destroy_complete = &destroy_complete;
+ mutex_unlock(&nbd_index_mutex);
+
+ /* Wait until the nbd device is completely destroyed */
+ wait_for_completion(&destroy_complete);
+ goto again;
+ }
+
if (!refcount_inc_not_zero(&nbd->refs)) {
mutex_unlock(&nbd_index_mutex);
if (index == -1)
@@ -1833,7 +1864,7 @@ again:
return -ENOMEM;
}
refcount_set(&nbd->config_refs, 1);
- set_bit(NBD_BOUND, &config->runtime_flags);
+ set_bit(NBD_RT_BOUND, &config->runtime_flags);
ret = nbd_genl_size_set(info, nbd);
if (ret)
@@ -1853,12 +1884,15 @@ again:
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
- set_bit(NBD_DESTROY_ON_DISCONNECT,
+ set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
&config->runtime_flags);
+ set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
put_dev = true;
+ } else {
+ clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
}
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
- set_bit(NBD_DISCONNECT_ON_CLOSE,
+ set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
&config->runtime_flags);
}
}
@@ -1897,7 +1931,7 @@ again:
out:
mutex_unlock(&nbd->config_lock);
if (!ret) {
- set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
+ set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
refcount_inc(&nbd->config_refs);
nbd_connect_reply(info, nbd->index);
}
@@ -1919,7 +1953,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
* queue.
*/
flush_workqueue(nbd->recv_workq);
- if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+ if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
}
@@ -2003,7 +2037,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&nbd->config_lock);
config = nbd->config;
- if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
+ if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
!nbd->task_recv) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
@@ -2026,20 +2060,22 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
- if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
+ if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
&config->runtime_flags))
put_dev = true;
+ set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
} else {
- if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
+ if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
&config->runtime_flags))
refcount_inc(&nbd->refs);
+ clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
}
if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
- set_bit(NBD_DISCONNECT_ON_CLOSE,
+ set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
&config->runtime_flags);
} else {
- clear_bit(NBD_DISCONNECT_ON_CLOSE,
+ clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
&config->runtime_flags);
}
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 024060165afa..76457003f140 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2594,7 +2594,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (ret)
return ret;
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
- WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index c8fb886aebd4..7c4350c0fb77 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1754,8 +1754,6 @@ static struct rbd_img_request *rbd_img_request_create(
mutex_init(&img_request->state_mutex);
kref_init(&img_request->kref);
- dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
- obj_op_name(op_type), img_request);
return img_request;
}
@@ -2944,6 +2942,9 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
child_img_req->obj_request = obj_req;
+ dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
+ obj_req);
+
if (!rbd_img_is_write(img_req)) {
switch (img_req->data_type) {
case OBJ_REQUEST_BIO:
@@ -4877,6 +4878,9 @@ static void rbd_queue_workfn(struct work_struct *work)
img_request->rq = rq;
snapc = NULL; /* img_request consumes a ref */
+ dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
+ img_request, obj_op_name(op_type), offset, length);
+
if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
result = rbd_img_fill_nodata(img_request, offset, length);
else
@@ -5669,17 +5673,20 @@ static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
+ size_t size;
void *reply_buf;
int ret;
void *p;
- reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
+ /* Response will be an encoded string, which includes a length */
+ size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
+ reply_buf = kzalloc(size, GFP_KERNEL);
if (!reply_buf)
return -ENOMEM;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_object_prefix",
- NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
+ NULL, 0, reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
@@ -6696,7 +6703,6 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
dout("rbd id object name is %s\n", oid.name);
/* Response will be an encoded string, which includes a length */
-
size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
response = kzalloc(size, GFP_NOIO);
if (!response) {
@@ -6708,7 +6714,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
"get_id", NULL, 0,
- response, RBD_IMAGE_ID_LEN_MAX);
+ response, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret == -ENOENT) {
image_id = kstrdup("", GFP_KERNEL);
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1b4f95c13e00..d7a3888ad80f 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -320,18 +320,22 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
if (!chip)
return -ENODEV;
- for (i = 0; i < chip->nr_allocated_banks; i++)
- if (digests[i].alg_id != chip->allocated_banks[i].alg_id)
- return -EINVAL;
+ for (i = 0; i < chip->nr_allocated_banks; i++) {
+ if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
rc = tpm2_pcr_extend(chip, pcr_idx, digests);
- tpm_put_ops(chip);
- return rc;
+ goto out;
}
rc = tpm1_pcr_extend(chip, pcr_idx, digests[0].digest,
"attempting extend a PCR value");
+
+out:
tpm_put_ops(chip);
return rc;
}
@@ -354,14 +358,9 @@ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
if (!chip)
return -ENODEV;
- rc = tpm_buf_init(&buf, 0, 0);
- if (rc)
- goto out;
-
- memcpy(buf.data, cmd, buflen);
+ buf.data = cmd;
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to a send a command");
- tpm_buf_destroy(&buf);
-out:
+
tpm_put_ops(chip);
return rc;
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index c70cb5f272cf..0891ab829b1b 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1078,7 +1078,7 @@ new_buf:
bool merge;
if (page)
- pg_size <<= compound_order(page);
+ pg_size = page_size(page);
if (off < pg_size &&
skb_can_coalesce(skb, i, page, off)) {
merge = 1;
@@ -1105,8 +1105,7 @@ new_buf:
__GFP_NORETRY,
order);
if (page)
- pg_size <<=
- compound_order(page);
+ pg_size <<= order;
}
if (!page) {
page = alloc_page(gfp);
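page_size(page) is shorthand for PAGE_SIZE << compound_order(page); the first hunk switches to the helper, while the second keeps an explicit shift because the allocation order is already in hand. A stand-alone check of the equivalence, assuming 4 KiB base pages:

#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed 4 KiB base pages */
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* page_size(page) in the kernel: PAGE_SIZE << compound_order(page) */
static unsigned long page_size_of_order(unsigned int order)
{
	return PAGE_SIZE << order;
}

int main(void)
{
	printf("%lu\n", page_size_of_order(0));	/* 4096: plain page */
	printf("%lu\n", page_size_of_order(2));	/* 16384: order-2 compound page */
	return 0;
}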
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 869d47f89599..6c0687694341 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -694,7 +694,7 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
}
static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip);
struct mvebu_gpio_chip *mvchip = mvpwm->mvchip;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 42b936b6bbf1..6d021ecc8d59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1103,7 +1103,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
alloc_flags = 0;
if (!offset || !*offset)
return -EINVAL;
- user_addr = *offset;
+ user_addr = untagged_addr(*offset);
} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
ALLOC_MEM_FLAGS_MMIO_REMAP)) {
domain = AMDGPU_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 61bd10310604..5803fcbae22f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -948,6 +948,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_VCE:
+ case AMD_IP_BLOCK_TYPE_SDMA:
if (swsmu)
ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
else
@@ -956,7 +957,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
break;
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
- case AMD_IP_BLOCK_TYPE_SDMA:
ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index bdf849da32e4..264677ab248a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1012,11 +1012,16 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
- {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ /* Navi12 */
+ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b174bd5eb38e..8ceb44925947 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -291,6 +291,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
+ args->addr = untagged_addr(args->addr);
+
if (offset_in_page(args->addr | args->size))
return -EINVAL;
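untagged_addr() strips the architecture's pointer-tag bits (for example arm64 top-byte-ignore tags) from a user-supplied address before it is validated and handed to the MM layer, which is what both amdgpu ioctls above now do first. A simplified stand-alone model of the masking (the real helper is arch-specific and, on arm64, sign-extends from bit 55; this demo just clears the top byte):

#include <stdint.h>
#include <stdio.h>

/* Simplified model: clear the tag byte in bits 63:56 of a user pointer. */
static uint64_t untag(uint64_t addr)
{
	return addr & ~(0xffULL << 56);
}

int main(void)
{
	uint64_t tagged = 0x1200007fdeadb000ULL;	/* tag 0x12 in the top byte */

	printf("%#llx\n", (unsigned long long)untag(tagged));	/* 0x7fdeadb000 */
	return 0;
}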
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7850084a05e3..60655834d649 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -143,7 +143,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* ring tests don't use a job */
if (job) {
vm = job->vm;
- fence_ctx = job->base.s_fence->scheduled.context;
+ fence_ctx = job->base.s_fence ?
+ job->base.s_fence->scheduled.context : 0;
} else {
vm = NULL;
fence_ctx = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 0e2ec608530b..f6147528be64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -677,6 +677,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
+ if (info->read_mmr_reg.count > 128)
+ return -EINVAL;
+
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
if (!regs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index db28823891ac..638c821611ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -70,6 +70,11 @@ MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");
+MODULE_FIRMWARE("amdgpu/navi14_ce_wks.bin");
+MODULE_FIRMWARE("amdgpu/navi14_pfp_wks.bin");
+MODULE_FIRMWARE("amdgpu/navi14_me_wks.bin");
+MODULE_FIRMWARE("amdgpu/navi14_mec_wks.bin");
+MODULE_FIRMWARE("amdgpu/navi14_mec2_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_ce.bin");
MODULE_FIRMWARE("amdgpu/navi14_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi14_me.bin");
@@ -594,7 +599,8 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
+ char fw_name[40];
+ char wks[10];
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
@@ -607,12 +613,16 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
+ memset(wks, 0, sizeof(wks));
switch (adev->asic_type) {
case CHIP_NAVI10:
chip_name = "navi10";
break;
case CHIP_NAVI14:
chip_name = "navi14";
+ if (!(adev->pdev->device == 0x7340 &&
+ adev->pdev->revision != 0x00))
+ snprintf(wks, sizeof(wks), "_wks");
break;
case CHIP_NAVI12:
chip_name = "navi12";
@@ -621,7 +631,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -632,7 +642,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -643,7 +653,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -708,7 +718,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
if (adev->gfx.rlc.is_rlc_v2_1)
gfx_v10_0_init_rlc_ext_microcode(adev);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -719,7 +729,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 83d45f98a461..dcadc73bffd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1650,7 +1650,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_RAVEN:
- case CHIP_RENOIR:
gfx_v9_0_init_lbpw(adev);
break;
case CHIP_VEGA20:
@@ -3026,7 +3025,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_RAVEN:
- case CHIP_RENOIR:
if (amdgpu_lbpw == 0)
gfx_v9_0_enable_lbpw(adev, false);
else
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ff18b3a57892..78452cf0115d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1889,8 +1889,9 @@ static int sdma_v4_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_powergating_by_smu) ||
+ adev->asic_type == CHIP_RENOIR)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
if (!amdgpu_sriov_vf(adev))
@@ -1917,8 +1918,9 @@ static int sdma_v4_0_hw_fini(void *handle)
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
- if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs
- && adev->powerplay.pp_funcs->set_powergating_by_smu)
+ if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs
+ && adev->powerplay.pp_funcs->set_powergating_by_smu) ||
+ adev->asic_type == CHIP_RENOIR)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index 4a5951036927..c44723c267c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -493,7 +493,15 @@ static void smu_v11_0_i2c_fini(struct i2c_adapter *control)
}
/* Restore clock gating */
- smu_v11_0_i2c_set_clock_gating(control, true);
+
+ /*
+ * TODO: re-enabling clock gating seems to break subsequent SMU
+ * operations on the I2C bus, likely because the SMU does not disable
+ * clock gating, as we do here, before working with the bus. Leave it
+ * disabled for now and follow up with the SMU team to confirm the
+ * issue and update their code if needed.
+ */
+ /* smu_v11_0_i2c_set_clock_gating(control, true); */
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index a8cf82d46109..901fe3590165 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -694,10 +694,10 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x003f8000, 0x8f6f896f,
0x88776f77, 0x8a6eff6e,
0x023f8000, 0xb9eef807,
- 0xb970f812, 0xb971f813,
- 0x8ff08870, 0xf4051bb8,
+ 0xb97af812, 0xb97bf813,
+ 0x8ffa887a, 0xf4051bbd,
0xfa000000, 0xbf8cc07f,
- 0xf4051c38, 0xfa000008,
+ 0xf4051ebd, 0xfa000008,
0xbf8cc07f, 0x87ee6e6e,
0xbf840001, 0xbe80206e,
0xb971f803, 0x8771ff71,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 35986219ce5f..cdaa523ce6be 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -187,12 +187,12 @@ L_FETCH_2ND_TRAP:
// Read second-level TBA/TMA from first-level TMA and jump if available.
// ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
// ttmp12 holds SQ_WAVE_STATUS
- s_getreg_b32 ttmp4, hwreg(HW_REG_SHADER_TMA_LO)
- s_getreg_b32 ttmp5, hwreg(HW_REG_SHADER_TMA_HI)
- s_lshl_b64 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8
- s_load_dwordx2 [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA
+ s_getreg_b32 ttmp14, hwreg(HW_REG_SHADER_TMA_LO)
+ s_getreg_b32 ttmp15, hwreg(HW_REG_SHADER_TMA_HI)
+ s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+ s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
s_waitcnt lgkmcnt(0)
- s_load_dwordx2 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA
+ s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
s_waitcnt lgkmcnt(0)
s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e1b09bb432bd..8cab6da512a0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2113,6 +2113,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
.get_brightness = amdgpu_dm_backlight_get_brightness,
.update_status = amdgpu_dm_backlight_update_status,
};
@@ -2384,6 +2385,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ if (adev->asic_type == CHIP_RENOIR)
+ dm->dc->debug.disable_stutter = true;
return 0;
fail:
@@ -5770,8 +5773,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* change FB pitch, DCC state, rotation or mirroing.
*/
bundle->flip_addrs[planes_count].flip_immediate =
- (crtc->state->pageflip_flags &
- DRM_MODE_PAGE_FLIP_ASYNC) != 0 &&
+ crtc->state->async_flip &&
acrtc_state->update_type == UPDATE_TYPE_FAST;
timestamp_ns = ktime_get_ns();
@@ -6348,7 +6350,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
amdgpu_dm_enable_crtc_interrupts(dev, state, true);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
- if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ if (new_crtc_state->async_flip)
wait_for_vblank = false;
/* update planes when needed per crtc*/
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 383f4f8db8f4..9b2cb57bf2ba 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -708,6 +708,10 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
{
+ /* for dali, the highest voltage level we want is 0 */
+ if (ASICREV_IS_DALI(hw_internal_rev))
+ return 0;
+
/* we are ok with all levels */
return 4;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index 5cc3acccda2a..b1e657e137a9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -98,11 +98,14 @@ uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
struct dc_stream_state *stream = context->streams[j];
uint32_t vertical_blank_in_pixels = 0;
uint32_t vertical_blank_time = 0;
+ uint32_t vertical_total_min = stream->timing.v_total;
+ struct dc_crtc_timing_adjust adjust = stream->adjust;
+ if (adjust.v_total_max != adjust.v_total_min)
+ vertical_total_min = adjust.v_total_min;
vertical_blank_in_pixels = stream->timing.h_total *
- (stream->timing.v_total
+ (vertical_total_min
- stream->timing.v_addressable);
-
vertical_blank_time = vertical_blank_in_pixels
* 10000 / stream->timing.pix_clk_100hz;
@@ -171,6 +174,10 @@ void dce11_pplib_apply_display_requirements(
struct dc_state *context)
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
+
+ if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm)
+ memory_type_multiplier = MEMORY_TYPE_HBM;
pp_display_cfg->all_displays_in_sync =
context->bw_ctx.bw.dce.all_displays_in_sync;
@@ -183,8 +190,20 @@ void dce11_pplib_apply_display_requirements(
pp_display_cfg->cpu_pstate_separation_time =
context->bw_ctx.bw.dce.blackout_recovery_time_us;
- pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
- / MEMORY_TYPE_MULTIPLIER_CZ;
+ /*
+ * TODO: determine whether the bandwidth has reached the memory's
+ * limitation, then adjust the minimum memory clock based on the
+ * real-time bandwidth limit.
+ */
+ if (ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) {
+ pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz,
+ (uint32_t) div64_s64(
+ div64_s64(dc->bw_vbios->high_yclk.value,
+ memory_type_multiplier), 10000));
+ } else {
+ pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
+ / memory_type_multiplier;
+ }
pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
dc,
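
The two hunks above replace the hard-coded MEMORY_TYPE_MULTIPLIER_CZ with a runtime multiplier chosen from the vBIOS memory type, and clamp Vega20's multi-display minimum memory clock using the top yclk level. A minimal sketch of the selection logic, with the struct layout simplified for illustration (only the two multiplier defines come from the diff):

/* Sketch: choose the UMACLK->YCLK multiplier from the vBIOS memory type. */
#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define MEMORY_TYPE_HBM           2

enum bw_defines_memtype { bw_def_gddr5, bw_def_hbm };   /* illustrative */

struct bw_vbios_info { enum bw_defines_memtype memory_type; };

static int pick_memory_type_multiplier(const struct bw_vbios_info *vbios)
{
	/* HBM uses a smaller UMACLK->YCLK factor than CZ-style DDR/GDDR5. */
	if (vbios && vbios->memory_type == bw_def_hbm)
		return MEMORY_TYPE_HBM;
	return MEMORY_TYPE_MULTIPLIER_CZ;
}
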
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 1488ffddf4e3..31b698bf9cfc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -148,7 +148,7 @@ static void dce_mi_program_pte_vm(
pte->min_pte_before_flip_horiz_scan;
REG_UPDATE(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT,
- GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0xff);
+ GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0x7f);
REG_UPDATE_3(DVMM_PTE_CONTROL,
DVMM_PAGE_WIDTH, page_width,
@@ -157,7 +157,7 @@ static void dce_mi_program_pte_vm(
REG_UPDATE_2(DVMM_PTE_ARB_CONTROL,
DVMM_PTE_REQ_PER_CHUNK, pte->pte_req_per_chunk,
- DVMM_MAX_PTE_REQ_OUTSTANDING, 0xff);
+ DVMM_MAX_PTE_REQ_OUTSTANDING, 0x7f);
}
static void program_urgency_watermark(
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index afc61055eca1..1787b9bf800a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -1091,6 +1091,7 @@ struct resource_pool *dce100_create_resource_pool(
if (construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
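
This one-line kfree() (repeated in the dce110/dce112/dce120/dcn10 hunks below) plugs the same leak in each *_create_resource_pool(): when construct() failed, the freshly allocated pool object was never freed. The fixed shape of the error path, sketched with hypothetical names:

struct resource_pool *example_create_resource_pool(struct dc *dc)
{
	struct dce100_resource_pool *pool =
		kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (construct(0 /* num_virtual_links */, dc, pool))
		return &pool->base;	/* success: caller owns the pool */

	kfree(pool);			/* failure: free before returning */
	BREAK_TO_DEBUGGER();
	return NULL;
}
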
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index c66fe170e1e8..318e9c2e2ca8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -1462,6 +1462,7 @@ struct resource_pool *dce110_create_resource_pool(
if (construct(num_virtual_links, dc, pool, asic_id))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 3ac4c7e73050..83e1878161c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -987,6 +987,10 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
struct dm_pp_clock_levels_with_latency mem_clks = {0};
struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
struct dm_pp_clock_levels clks = {0};
+ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
+
+ if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm)
+ memory_type_multiplier = MEMORY_TYPE_HBM;
/*do system clock TODO PPLIB: after PPLIB implement,
* then remove old way
@@ -1026,12 +1030,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
+ clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
+ clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
1000);
return;
@@ -1067,12 +1071,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1338,6 +1342,7 @@ struct resource_pool *dce112_create_resource_pool(
if (construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 7d08154e9662..8b85e5274bba 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -847,6 +847,8 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
int i;
unsigned int clk;
unsigned int latency;
+ /* original logic in dal3 */
+ int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
/*do system clock*/
if (!dm_pp_get_clock_levels_by_type_with_latency(
@@ -905,13 +907,16 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula):
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
+ if (dc->bw_vbios->memory_type == bw_def_hbm)
+ memory_type_multiplier = MEMORY_TYPE_HBM;
+
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
+ mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1203,6 +1208,7 @@ struct resource_pool *dce120_create_resource_pool(
if (construct(num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 5a89e462e7cc..59305e411a66 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1570,6 +1570,7 @@ struct resource_pool *dcn10_create_resource_pool(
if (construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
+ kfree(pool);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 3ca5139f1273..de182185fe1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/slab.h>
+
#include "dm_services.h"
#include "dc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
index 34485d9de78a..8572678f8d4f 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
@@ -35,12 +35,10 @@
#include "hw_factory_dcn21.h"
-
#include "dcn/dcn_2_1_0_offset.h"
#include "dcn/dcn_2_1_0_sh_mask.h"
#include "renoir_ip_offset.h"
-
#include "reg_helper.h"
#include "../hpd_regs.h"
/* begin *********************
@@ -136,6 +134,39 @@ static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 6)
};
+#include "../generic_regs.h"
+
+/* set field name */
+#define SF_GENERIC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define generic_regs(id) \
+{\
+ GENERIC_REG_LIST(id)\
+}
+
+static const struct generic_registers generic_regs[] = {
+ generic_regs(A),
+};
+
+static const struct generic_sh_mask generic_shift[] = {
+ GENERIC_MASK_SH_LIST(__SHIFT, A),
+};
+
+static const struct generic_sh_mask generic_mask[] = {
+ GENERIC_MASK_SH_LIST(_MASK, A),
+};
+
+static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
+
+ generic->regs = &generic_regs[en];
+ generic->shifts = &generic_shift[en];
+ generic->masks = &generic_mask[en];
+ generic->base.regs = &generic_regs[en].gpio;
+}
+
static void define_ddc_registers(
struct hw_gpio_pin *pin,
uint32_t en)
@@ -181,7 +212,8 @@ static const struct hw_factory_funcs funcs = {
.get_hpd_pin = dal_hw_hpd_get_pin,
.get_generic_pin = dal_hw_generic_get_pin,
.define_hpd_registers = define_hpd_registers,
- .define_ddc_registers = define_ddc_registers
+ .define_ddc_registers = define_ddc_registers,
+ .define_generic_registers = define_generic_registers
};
/*
* dal_hw_factory_dcn10_init
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
index ad7c43746291..fbb58fb8c318 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
@@ -58,7 +58,6 @@
#define SF_HPD(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
-
/* macros to expend register list macro defined in HW object header file
* end *********************/
@@ -71,7 +70,7 @@ static bool offset_to_id(
{
switch (offset) {
/* GENERIC */
- case REG(DC_GENERICA):
+ case REG(DC_GPIO_GENERIC_A):
*id = GPIO_ID_GENERIC;
switch (mask) {
case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 1cc1c8ce633b..bef224bf803e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -31,6 +31,8 @@
#include "dm_pp_smu.h"
#define MEMORY_TYPE_MULTIPLIER_CZ 4
+#define MEMORY_TYPE_HBM 2
+
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 1f16892f0add..1be6c44fd32f 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -137,10 +137,13 @@
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
+#define PICASSO_15D8_REV_E3 0xE3
+#define PICASSO_15D8_REV_E4 0xE4
+
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 0xF0))
-
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < PICASSO_15D8_REV_E3))
+#define ASICREV_IS_DALI(eChipRev) ((eChipRev >= PICASSO_15D8_REV_E3) && (eChipRev < RAVEN1_F0))
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
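
With the new Picasso 15D8 rev markers, the [RAVEN2_A0, RAVEN1_F0) rev range is split in two: revs below 0xE3 remain RAVEN2, and 0xE3..0xEF are classified as Dali. A small classifier sketch using the new boundaries (RAVEN2_A0's value is assumed here, as it is not shown in this hunk):

#define RAVEN2_A0           0x81	/* assumed for illustration */
#define PICASSO_15D8_REV_E3 0xE3
#define RAVEN1_F0           0xF0

enum rv_variant { RV_RAVEN2, RV_DALI, RV_OTHER };

static enum rv_variant classify_rv_rev(unsigned int rev)
{
	if (rev >= RAVEN2_A0 && rev < PICASSO_15D8_REV_E3)
		return RV_RAVEN2;	/* matches ASICREV_IS_RAVEN2() */
	if (rev >= PICASSO_15D8_REV_E3 && rev < RAVEN1_F0)
		return RV_DALI;		/* matches ASICREV_IS_DALI() */
	return RV_OTHER;
}
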
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 554714c8e000..094648cac392 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -155,7 +155,7 @@ static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0x0243FC00, 0x00DC0000
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP1_BASE ={ { { { 0x00016200, 0x02400400, 0x00E80000, 0x00EC0000, 0x00F00000 } },
+static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0x02400400, 0x00E80000, 0x00EC0000, 0x00F00000 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index fa636cb462c1..fa8ad7db2b3a 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1531,6 +1531,7 @@ static int pp_asic_reset_mode_2(void *handle)
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1540,7 +1541,11 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
return -EINVAL;
}
- return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
+ mutex_lock(&hwmgr->smu_lock);
+ ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
}
static const struct amd_pm_funcs pp_dpm_funcs = {
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 22f3c60d380f..33960fb38a5d 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -354,6 +354,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
case AMD_IP_BLOCK_TYPE_GFX:
ret = smu_gfx_off_control(smu, gate);
break;
+ case AMD_IP_BLOCK_TYPE_SDMA:
+ ret = smu_powergate_sdma(smu, gate);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 2a6da546fb55..e62bfba51562 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -177,12 +177,82 @@ static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock,
}
+static int renoir_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics = {0};
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+ (void *)&metrics, false);
+ if (ret)
+ return ret;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ /* the frequencies in the retrieved metrics table are in MHz */
+ cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max);
+ if (!ret) {
+ /* the driver only knows min/max gfx_clk; add level 1 for all other gfx clks */
+ if (cur_value == max)
+ i = 2;
+ else if (cur_value == min)
+ i = 0;
+ else
+ i = 1;
+
+ size += sprintf(buf + size, "0: %uMhz %s\n", min,
+ i == 0 ? "*" : "");
+ size += sprintf(buf + size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : RENOIR_UMD_PSTATE_GFXCLK,
+ i == 1 ? "*" : "");
+ size += sprintf(buf + size, "2: %uMhz %s\n", max,
+ i == 2 ? "*" : "");
+ }
+ return size;
+ case SMU_SOCCLK:
+ count = NUM_SOCCLK_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_SOCCLK];
+ break;
+ case SMU_MCLK:
+ count = NUM_MEMCLK_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_UMCCLK];
+ break;
+ case SMU_DCEFCLK:
+ count = NUM_DCFCLK_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_DCFCLK];
+ break;
+ case SMU_FCLK:
+ count = NUM_FCLK_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_FCLK];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ }
+
+ return size;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_table_index = renoir_get_smu_table_index,
.tables_init = renoir_tables_init,
.set_power_state = NULL,
.get_dpm_uclk_limited = renoir_get_dpm_uclk_limited,
+ .print_clk_levels = renoir_print_clk_levels,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
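
renoir_print_clk_levels() fills the buffer that userspace reads back from the pp_dpm_* sysfs files; for SMU_SCLK it synthesizes a three-level min/current/max view, while the table-backed clocks walk the DPM table via GET_DPM_CUR_FREQ(). A hypothetical read (values invented) would look like:

$ cat /sys/class/drm/card0/device/pp_dpm_sclk
0: 200Mhz
1: 700Mhz *
2: 1400Mhz

The "*" marks the level whose frequency matches the current clock, exactly as the sprintf() calls above emit it ("Mhz" spelling included, matching the other SMU backends).
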
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index e9b7237c0f7f..2a390ddd37dd 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -25,4 +25,29 @@
extern void renoir_set_ppt_funcs(struct smu_context *smu);
+/* UMD PState Renoir Msg Parameters in MHz */
+#define RENOIR_UMD_PSTATE_GFXCLK 700
+#define RENOIR_UMD_PSTATE_SOCCLK 678
+#define RENOIR_UMD_PSTATE_FCLK 800
+
+#define GET_DPM_CUR_FREQ(table, clk_type, dpm_level, freq) \
+ do { \
+ switch (clk_type) { \
+ case SMU_SOCCLK: \
+ freq = table->SocClocks[dpm_level].Freq; \
+ break; \
+ case SMU_MCLK: \
+ freq = table->MemClocks[dpm_level].Freq; \
+ break; \
+ case SMU_DCEFCLK: \
+ freq = table->DcfClocks[dpm_level].Freq; \
+ break; \
+ case SMU_FCLK: \
+ freq = table->FClocks[dpm_level].Freq; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
#endif
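
GET_DPM_CUR_FREQ() is wrapped in do { } while (0) so it behaves like a single statement inside the print loop above. A minimal usage sketch, assuming a populated DpmClocks_t table:

uint32_t freq = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;

/* Reads clk_table->SocClocks[2].Freq into freq. */
GET_DPM_CUR_FREQ(clk_table, SMU_SOCCLK, 2, freq);

/* For an unhandled clk_type, the macro's default case leaves freq as-is. */
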
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 98bccace8c1c..9e13e466e72c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -874,6 +874,9 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
&adv7511_connector_helper_funcs);
drm_connector_attach_encoder(&adv->connector, bridge->encoder);
+ if (adv->type == ADV7533)
+ ret = adv7533_attach_dsi(adv);
+
if (adv->i2c_main->irq)
regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0),
ADV7511_INT0_HPD);
@@ -978,10 +981,10 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
{
int ret;
- adv->i2c_cec = i2c_new_secondary_device(adv->i2c_main, "cec",
+ adv->i2c_cec = i2c_new_ancillary_device(adv->i2c_main, "cec",
ADV7511_CEC_I2C_ADDR_DEFAULT);
- if (!adv->i2c_cec)
- return -EINVAL;
+ if (IS_ERR(adv->i2c_cec))
+ return PTR_ERR(adv->i2c_cec);
i2c_set_clientdata(adv->i2c_cec, adv);
adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
@@ -1162,20 +1165,20 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511_packet_disable(adv7511, 0xffff);
- adv7511->i2c_edid = i2c_new_secondary_device(i2c, "edid",
+ adv7511->i2c_edid = i2c_new_ancillary_device(i2c, "edid",
ADV7511_EDID_I2C_ADDR_DEFAULT);
- if (!adv7511->i2c_edid) {
- ret = -EINVAL;
+ if (IS_ERR(adv7511->i2c_edid)) {
+ ret = PTR_ERR(adv7511->i2c_edid);
goto uninit_regulators;
}
regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
adv7511->i2c_edid->addr << 1);
- adv7511->i2c_packet = i2c_new_secondary_device(i2c, "packet",
+ adv7511->i2c_packet = i2c_new_ancillary_device(i2c, "packet",
ADV7511_PACKET_I2C_ADDR_DEFAULT);
- if (!adv7511->i2c_packet) {
- ret = -EINVAL;
+ if (IS_ERR(adv7511->i2c_packet)) {
+ ret = PTR_ERR(adv7511->i2c_packet);
goto err_i2c_unregister_edid;
}
@@ -1219,17 +1222,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
drm_bridge_add(&adv7511->bridge);
adv7511_audio_init(dev, adv7511);
-
- if (adv7511->type == ADV7533) {
- ret = adv7533_attach_dsi(adv7511);
- if (ret)
- goto err_remove_bridge;
- }
-
return 0;
-err_remove_bridge:
- drm_bridge_remove(&adv7511->bridge);
err_unregister_cec:
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
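
i2c_new_ancillary_device() reports failure as an ERR_PTR-encoded pointer instead of NULL, so each call site above trades its NULL check for IS_ERR()/PTR_ERR() and stops collapsing every failure into -EINVAL. The conversion pattern in isolation:

/* Before: NULL on failure, real error code lost. */
client = i2c_new_secondary_device(i2c, "cec", ADV7511_CEC_I2C_ADDR_DEFAULT);
if (!client)
	return -EINVAL;

/* After: the cause (-ENOMEM, -EBUSY, ...) travels in the pointer. */
client = i2c_new_ancillary_device(i2c, "cec", ADV7511_CEC_I2C_ADDR_DEFAULT);
if (IS_ERR(client))
	return PTR_ERR(client);
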
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index aa16ea17ff9b..3ef2ac52ce94 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -26,6 +26,7 @@
*/
#include <linux/dma-fence.h>
+#include <linux/ktime.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -1580,9 +1581,23 @@ static void commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
const struct drm_mode_config_helper_funcs *funcs;
+ ktime_t start;
+ s64 commit_time_ms;
funcs = dev->mode_config.helper_private;
+ /*
+ * We're measuring the _entire_ commit, so the time will vary depending
+ * on how many fences and objects are involved. For the purposes of self
+ * refresh, this is desirable since it'll give us an idea of how
+ * congested things are. This will inform our decision on how often we
+ * should enter self refresh after idle.
+ *
+ * These times will be averaged out in the self refresh helpers to avoid
+ * overreacting over one outlier frame
+ */
+ start = ktime_get();
+
drm_atomic_helper_wait_for_fences(dev, old_state, false);
drm_atomic_helper_wait_for_dependencies(old_state);
@@ -1592,6 +1607,11 @@ static void commit_tail(struct drm_atomic_state *old_state)
else
drm_atomic_helper_commit_tail(old_state);
+ commit_time_ms = ktime_ms_delta(ktime_get(), start);
+ if (commit_time_ms > 0)
+ drm_self_refresh_helper_update_avg_times(old_state,
+ (unsigned long)commit_time_ms);
+
drm_atomic_helper_commit_cleanup_done(old_state);
drm_atomic_state_put(old_state);
@@ -3275,7 +3295,7 @@ static int page_flip_common(struct drm_atomic_state *state,
return PTR_ERR(crtc_state);
crtc_state->event = event;
- crtc_state->pageflip_flags = flags;
+ crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
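
The timing added to commit_tail() is a plain ktime bracket around the whole commit; only positive deltas are fed to the self-refresh averages. The same pattern reduced to its essentials (do_commit() and record_sample() are placeholders):

ktime_t start = ktime_get();

do_commit();				/* the span being measured */

s64 elapsed_ms = ktime_ms_delta(ktime_get(), start);
if (elapsed_ms > 0)			/* ignore zero/negative deltas */
	record_sample((unsigned long)elapsed_ms);
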
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 46dc264a248b..d0a937fb0c56 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -128,7 +128,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
state->zpos_changed = false;
state->commit = NULL;
state->event = NULL;
- state->pageflip_flags = 0;
+ state->async_flip = false;
/* Self refresh should be canceled when a new update is available */
state->active = drm_atomic_crtc_effectively_active(state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 5a5b42db6f2a..7a26bfb5329c 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1305,8 +1305,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
if (arg->reserved)
return -EINVAL;
- if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
- !dev->mode_config.async_page_flip)
+ if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
return -EINVAL;
/* can't test and expect an event at the same time. */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index c456c3d3def2..769feefeeeef 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -976,14 +976,14 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_minors;
+ dev->registered = true;
+
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
goto err_minors;
}
- dev->registered = true;
-
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_register_all(dev);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f675a3bb2c88..fcd728d7cf72 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -336,7 +336,12 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CLIENT_CAP_ATOMIC:
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return -EOPNOTSUPP;
- if (req->value > 1)
+ /* The modesetting DDX has a totally broken idea of atomic. */
+ if (current->comm[0] == 'X' && req->value == 1) {
+ pr_info("broken atomic modeset userspace detected, disabling atomic\n");
+ return -EOPNOTSUPP;
+ }
+ if (req->value > 2)
return -EINVAL;
file_priv->atomic = req->value;
file_priv->universal_planes = req->value;
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index c355ba8e6d5d..6a23e36ed4fe 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -42,7 +42,7 @@ int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
{
int ret;
- WARN_ON(dev->registered && !obj_free_cb);
+ WARN_ON(!dev->driver->load && dev->registered && !obj_free_cb);
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? obj : NULL,
@@ -104,7 +104,7 @@ void drm_mode_object_register(struct drm_device *dev,
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object)
{
- WARN_ON(dev->registered && !object->free_cb);
+ WARN_ON(!dev->driver->load && dev->registered && !object->free_cb);
mutex_lock(&dev->mode_config.idr_mutex);
if (object->id) {
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
index 4b9424a8f1f1..68f4765a5896 100644
--- a/drivers/gpu/drm/drm_self_refresh_helper.c
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -5,6 +5,7 @@
* Authors:
* Sean Paul <[email protected]>
*/
+#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -50,11 +51,17 @@
* atomic_check when &drm_crtc_state.self_refresh_active is true.
*/
+#define SELF_REFRESH_AVG_SEED_MS 200
+
+DECLARE_EWMA(psr_time, 4, 4)
+
struct drm_self_refresh_data {
struct drm_crtc *crtc;
struct delayed_work entry_work;
- struct drm_atomic_state *save_state;
- unsigned int entry_delay_ms;
+
+ struct mutex avg_mutex;
+ struct ewma_psr_time entry_avg_ms;
+ struct ewma_psr_time exit_avg_ms;
};
static void drm_self_refresh_helper_entry_work(struct work_struct *work)
@@ -123,6 +130,44 @@ out_drop_locks:
}
/**
+ * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
+ * @state: the state which has just been applied to hardware
+ * @commit_time_ms: the amount of time in ms that this commit took to complete
+ *
+ * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
+ * update the average entry/exit self refresh times on self refresh transitions.
+ * These averages will be used when calculating how long to delay before
+ * entering self refresh mode after activity.
+ */
+void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
+ unsigned int commit_time_ms)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
+ struct ewma_psr_time *time;
+
+ if (old_crtc_state->self_refresh_active ==
+ new_crtc_state->self_refresh_active)
+ continue;
+
+ if (new_crtc_state->self_refresh_active)
+ time = &sr_data->entry_avg_ms;
+ else
+ time = &sr_data->exit_avg_ms;
+
+ mutex_lock(&sr_data->avg_mutex);
+ ewma_psr_time_add(time, commit_time_ms);
+ mutex_unlock(&sr_data->avg_mutex);
+ }
+}
+EXPORT_SYMBOL(drm_self_refresh_helper_update_avg_times);
+
+/**
* drm_self_refresh_helper_alter_state - Alters the atomic state for SR exit
* @state: the state currently being checked
*
@@ -153,6 +198,7 @@ void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_self_refresh_data *sr_data;
+ unsigned int delay;
/* Don't trigger the entry timer when we're already in SR */
if (crtc_state->self_refresh_active)
@@ -162,8 +208,13 @@ void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
if (!sr_data)
continue;
+ mutex_lock(&sr_data->avg_mutex);
+ delay = (ewma_psr_time_read(&sr_data->entry_avg_ms) +
+ ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2;
+ mutex_unlock(&sr_data->avg_mutex);
+
mod_delayed_work(system_wq, &sr_data->entry_work,
- msecs_to_jiffies(sr_data->entry_delay_ms));
+ msecs_to_jiffies(delay));
}
}
EXPORT_SYMBOL(drm_self_refresh_helper_alter_state);
@@ -171,12 +222,10 @@ EXPORT_SYMBOL(drm_self_refresh_helper_alter_state);
/**
* drm_self_refresh_helper_init - Initializes self refresh helpers for a crtc
* @crtc: the crtc which supports self refresh supported displays
- * @entry_delay_ms: amount of inactivity to wait before entering self refresh
*
* Returns zero if successful or -errno on failure
*/
-int drm_self_refresh_helper_init(struct drm_crtc *crtc,
- unsigned int entry_delay_ms)
+int drm_self_refresh_helper_init(struct drm_crtc *crtc)
{
struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
@@ -190,8 +239,18 @@ int drm_self_refresh_helper_init(struct drm_crtc *crtc,
INIT_DELAYED_WORK(&sr_data->entry_work,
drm_self_refresh_helper_entry_work);
- sr_data->entry_delay_ms = entry_delay_ms;
sr_data->crtc = crtc;
+ mutex_init(&sr_data->avg_mutex);
+ ewma_psr_time_init(&sr_data->entry_avg_ms);
+ ewma_psr_time_init(&sr_data->exit_avg_ms);
+
+ /*
+ * Seed the averages so they're non-zero (and sufficiently large
+ * for even poorly performing panels). As time goes on, this will be
+ * averaged out and the values will trend to their true value.
+ */
+ ewma_psr_time_add(&sr_data->entry_avg_ms, SELF_REFRESH_AVG_SEED_MS);
+ ewma_psr_time_add(&sr_data->exit_avg_ms, SELF_REFRESH_AVG_SEED_MS);
crtc->self_refresh_data = sr_data;
return 0;
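
DECLARE_EWMA(psr_time, 4, 4) generates struct ewma_psr_time together with ewma_psr_time_init/add/read: samples are kept in fixed point with 4 fractional bits, and each new sample contributes 1/4 of the running average. A standalone sketch of the same machinery, under those assumptions:

#include <linux/average.h>

DECLARE_EWMA(latency, 4, 4)	/* 4 fraction bits; new-sample weight 1/4 */

static struct ewma_latency avg;

static void demo(void)
{
	ewma_latency_init(&avg);	/* average starts at zero */
	ewma_latency_add(&avg, 200);	/* seed, as the helper does above */
	ewma_latency_add(&avg, 16);
	ewma_latency_add(&avg, 18);

	/* Smoothed value; trends toward recent samples over time. */
	pr_info("avg = %lu ms\n", ewma_latency_read(&avg));
}

The helper then uses (entry_avg + exit_avg) * 2 as the idle delay, so a slow panel automatically gets a longer grace period before self refresh is attempted.
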
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 2db029371c91..5193b6257061 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -267,7 +267,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
asyw->image.pitch[0] = fb->base.pitches[0];
}
- if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
+ if (!asyh->state.async_flip)
asyw->image.interval = 1;
else
asyw->image.interval = 0;
@@ -383,7 +383,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
}
/* Can't do an immediate flip while changing the LUT. */
- asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
+ asyh->state.async_flip = false;
}
static int
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index a1f5fa6a742a..12ff77dacc95 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -39,7 +39,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
* If frequency scaling from low to high, adjust voltage first.
* If frequency scaling from high to low, adjust frequency first.
*/
- if (old_clk_rate < target_rate && pfdev->regulator) {
+ if (old_clk_rate < target_rate) {
err = regulator_set_voltage(pfdev->regulator, target_volt,
target_volt);
if (err) {
@@ -53,14 +53,12 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
if (err) {
dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
err);
- if (pfdev->regulator)
- regulator_set_voltage(pfdev->regulator,
- pfdev->devfreq.cur_volt,
- pfdev->devfreq.cur_volt);
+ regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
+ pfdev->devfreq.cur_volt);
return err;
}
- if (old_clk_rate > target_rate && pfdev->regulator) {
+ if (old_clk_rate > target_rate) {
err = regulator_set_voltage(pfdev->regulator, target_volt,
target_volt);
if (err)
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 46b0b02e4289..238fb6d54df4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -89,12 +89,9 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
int ret;
- pfdev->regulator = devm_regulator_get_optional(pfdev->dev, "mali");
+ pfdev->regulator = devm_regulator_get(pfdev->dev, "mali");
if (IS_ERR(pfdev->regulator)) {
ret = PTR_ERR(pfdev->regulator);
- pfdev->regulator = NULL;
- if (ret == -ENODEV)
- return 0;
dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
return ret;
}
@@ -110,8 +107,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
static void panfrost_regulator_fini(struct panfrost_device *pfdev)
{
- if (pfdev->regulator)
- regulator_disable(pfdev->regulator);
+ regulator_disable(pfdev->regulator);
}
int panfrost_device_init(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 6010f9ee7c1f..bdd990568476 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -394,28 +394,40 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
free_io_pgtable_ops(mmu->pgtbl_ops);
}
-static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_object *
+addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
{
- struct drm_mm_node *node = NULL;
+ struct panfrost_gem_object *bo = NULL;
+ struct panfrost_file_priv *priv;
+ struct drm_mm_node *node;
u64 offset = addr >> PAGE_SHIFT;
struct panfrost_mmu *mmu;
spin_lock(&pfdev->as_lock);
list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
- struct panfrost_file_priv *priv;
- if (as != mmu->as)
- continue;
+ if (as == mmu->as)
+ break;
+ }
+ if (as != mmu->as)
+ goto out;
+
+ priv = container_of(mmu, struct panfrost_file_priv, mmu);
- priv = container_of(mmu, struct panfrost_file_priv, mmu);
- drm_mm_for_each_node(node, &priv->mm) {
- if (offset >= node->start && offset < (node->start + node->size))
- goto out;
+ spin_lock(&priv->mm_lock);
+
+ drm_mm_for_each_node(node, &priv->mm) {
+ if (offset >= node->start &&
+ offset < (node->start + node->size)) {
+ bo = drm_mm_node_to_panfrost_bo(node);
+ drm_gem_object_get(&bo->base.base);
+ break;
}
}
+ spin_unlock(&priv->mm_lock);
out:
spin_unlock(&pfdev->as_lock);
- return node;
+ return bo;
}
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -423,29 +435,28 @@ out:
int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
{
int ret, i;
- struct drm_mm_node *node;
struct panfrost_gem_object *bo;
struct address_space *mapping;
pgoff_t page_offset;
struct sg_table *sgt;
struct page **pages;
- node = addr_to_drm_mm_node(pfdev, as, addr);
- if (!node)
+ bo = addr_to_drm_mm_node(pfdev, as, addr);
+ if (!bo)
return -ENOENT;
- bo = drm_mm_node_to_panfrost_bo(node);
if (!bo->is_heap) {
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
- node->start << PAGE_SHIFT);
- return -EINVAL;
+ bo->node.start << PAGE_SHIFT);
+ ret = -EINVAL;
+ goto err_bo;
}
WARN_ON(bo->mmu->as != as);
/* Assume 2MB alignment and size multiple */
addr &= ~((u64)SZ_2M - 1);
page_offset = addr >> PAGE_SHIFT;
- page_offset -= node->start;
+ page_offset -= bo->node.start;
mutex_lock(&bo->base.pages_lock);
@@ -454,7 +465,8 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
if (!bo->sgts) {
mutex_unlock(&bo->base.pages_lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_bo;
}
pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -463,7 +475,8 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
kfree(bo->sgts);
bo->sgts = NULL;
mutex_unlock(&bo->base.pages_lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_bo;
}
bo->base.pages = pages;
bo->base.pages_use_count = 1;
@@ -501,12 +514,16 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+ drm_gem_object_put_unlocked(&bo->base.base);
+
return 0;
err_map:
sg_free_table(sgt);
err_pages:
drm_gem_shmem_put_pages(&bo->base);
+err_bo:
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
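
The reworked lookup now returns the BO with a reference taken while the locks are still held, and every exit path of the fault handler drops it; that reference is what keeps the BO alive after as_lock/mm_lock are released. The core of the pattern, with find_bo() and do_fault_work() as stand-ins:

spin_lock(&lock);
bo = find_bo(addr);
if (bo)
	drm_gem_object_get(&bo->base.base);	/* pin while still locked */
spin_unlock(&lock);

if (!bo)
	return -ENOENT;

ret = do_fault_work(bo);			/* safe: we hold a reference */

drm_gem_object_put_unlocked(&bo->base.base);	/* every path drops it */
return ret;
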
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 431e6b64b77d..d0bc91ed7c90 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -324,8 +324,39 @@ bool radeon_device_is_virtual(void);
static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ unsigned long flags = 0;
int ret;
+ if (!ent)
+ return -ENODEV; /* Avoid NULL-ptr deref in drm_get_pci_dev */
+
+ flags = ent->driver_data;
+
+ if (!radeon_si_support) {
+ switch (flags & RADEON_FAMILY_MASK) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ dev_info(&pdev->dev,
+ "SI support disabled by module param\n");
+ return -ENODEV;
+ }
+ }
+ if (!radeon_cik_support) {
+ switch (flags & RADEON_FAMILY_MASK) {
+ case CHIP_KAVERI:
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dev_info(&pdev->dev,
+ "CIK support disabled by module param\n");
+ return -ENODEV;
+ }
+ }
+
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 4cf58dbbe439..b2b076606f54 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -296,6 +296,8 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
+ args->addr = untagged_addr(args->addr);
+
if (offset_in_page(args->addr | args->size))
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 07f7ace42c4b..e85c554eeaa9 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -100,31 +100,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
struct radeon_device *rdev;
int r, acpi_status;
- if (!radeon_si_support) {
- switch (flags & RADEON_FAMILY_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- dev_info(dev->dev,
- "SI support disabled by module param\n");
- return -ENODEV;
- }
- }
- if (!radeon_cik_support) {
- switch (flags & RADEON_FAMILY_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dev_info(dev->dev,
- "CIK support disabled by module param\n");
- return -ENODEV;
- }
- }
-
rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
if (rdev == NULL) {
return -ENOMEM;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 2f821c58007c..613404f86668 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -39,8 +39,6 @@
#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
-#define VOP_SELF_REFRESH_ENTRY_DELAY_MS 100
-
#define VOP_WIN_SET(vop, win, name, v) \
vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
#define VOP_SCL_SET(vop, win, name, v) \
@@ -1563,8 +1561,7 @@ static int vop_create_crtc(struct vop *vop)
init_completion(&vop->line_flag_completion);
crtc->port = port;
- ret = drm_self_refresh_helper_init(crtc,
- VOP_SELF_REFRESH_ENTRY_DELAY_MS);
+ ret = drm_self_refresh_helper_init(crtc);
if (ret)
DRM_DEV_DEBUG_KMS(vop->dev,
"Failed to init %s with SR helpers %d, ignoring\n",
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index feaa538026a0..3db000aacd26 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -174,7 +174,6 @@ via_map_blit_for_device(struct pci_dev *pdev,
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
- struct page *page;
int i;
switch (vsg->state) {
@@ -189,13 +188,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
kfree(vsg->desc_pages);
/* fall through */
case dr_via_pages_locked:
- for (i = 0; i < vsg->num_pages; ++i) {
- if (NULL != (page = vsg->pages[i])) {
- if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
- SetPageDirty(page);
- put_page(page);
- }
- }
+ put_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+ (vsg->direction == DMA_FROM_DEVICE));
/* fall through */
case dr_via_pages_alloc:
vfree(vsg->pages);
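
put_user_pages_dirty_lock() condenses the removed loop into one call: it dirties each page when the last argument is true and releases the pin. It also uses set_page_dirty_lock() internally, which fixes the unlocked SetPageDirty() the old loop performed. Roughly the open-coded equivalent (minus the PageReserved special case):

for (i = 0; i < vsg->num_pages; i++) {
	struct page *page = vsg->pages[i];

	if (!page)
		continue;
	if (vsg->direction == DMA_FROM_DEVICE)
		set_page_dirty_lock(page);	/* lock the page to dirty it */
	put_page(page);
}
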
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 7795831d37c2..cc5b09b87ab0 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -104,8 +104,8 @@ struct synthhid_input_report {
#pragma pack(pop)
-#define INPUTVSC_SEND_RING_BUFFER_SIZE (10*PAGE_SIZE)
-#define INPUTVSC_RECV_RING_BUFFER_SIZE (10*PAGE_SIZE)
+#define INPUTVSC_SEND_RING_BUFFER_SIZE (40 * 1024)
+#define INPUTVSC_RECV_RING_BUFFER_SIZE (40 * 1024)
enum pipe_prot_msg_type {
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index addcef50df7a..8eb167540b4f 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -407,7 +407,15 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
cpumask_clear_cpu(channel->target_cpu,
&primary_channel->alloced_cpus_in_node);
- vmbus_release_relid(channel->offermsg.child_relid);
+ /*
+ * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
+ * the relid is invalidated; after hibernation, when the user-space app
+ * destroys the channel, the relid is INVALID_RELID, and in this case
+ * it's unnecessary and unsafe to release the old relid, since the same
+ * relid can refer to a completely different channel now.
+ */
+ if (channel->offermsg.child_relid != INVALID_RELID)
+ vmbus_release_relid(channel->offermsg.child_relid);
free_channel(channel);
}
@@ -545,6 +553,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
mutex_lock(&vmbus_connection.channel_mutex);
+ /* Remember the channels that should be cleaned up upon suspend. */
+ if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
+ atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
+
/*
* Now that we have acquired the channel_mutex,
* we can release the potentially racing rescind thread.
@@ -847,6 +859,67 @@ void vmbus_initiate_unload(bool crash)
vmbus_wait_for_unload();
}
+static void check_ready_for_resume_event(void)
+{
+ /*
+ * If all the old primary channels have been fixed up, then it's safe
+ * to resume.
+ */
+ if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
+ complete(&vmbus_connection.ready_for_resume_event);
+}
+
+static void vmbus_setup_channel_state(struct vmbus_channel *channel,
+ struct vmbus_channel_offer_channel *offer)
+{
+ /*
+ * Setup state for signalling the host.
+ */
+ channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
+
+ if (vmbus_proto_version != VERSION_WS2008) {
+ channel->is_dedicated_interrupt =
+ (offer->is_dedicated_interrupt != 0);
+ channel->sig_event = offer->connection_id;
+ }
+
+ memcpy(&channel->offermsg, offer,
+ sizeof(struct vmbus_channel_offer_channel));
+ channel->monitor_grp = (u8)offer->monitorid / 32;
+ channel->monitor_bit = (u8)offer->monitorid % 32;
+}
+
+/*
+ * find_primary_channel_by_offer - Get the channel object given the new offer.
+ * This is only used in the resume path of hibernation.
+ */
+static struct vmbus_channel *
+find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
+{
+ struct vmbus_channel *channel = NULL, *iter;
+ const guid_t *inst1, *inst2;
+
+ /* Ignore sub-channel offers. */
+ if (offer->offer.sub_channel_index != 0)
+ return NULL;
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
+ inst1 = &iter->offermsg.offer.if_instance;
+ inst2 = &offer->offer.if_instance;
+
+ if (guid_equal(inst1, inst2)) {
+ channel = iter;
+ break;
+ }
+ }
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ return channel;
+}
+
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
@@ -854,12 +927,58 @@ void vmbus_initiate_unload(bool crash)
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_offer_channel *offer;
- struct vmbus_channel *newchannel;
+ struct vmbus_channel *oldchannel, *newchannel;
+ size_t offer_sz;
offer = (struct vmbus_channel_offer_channel *)hdr;
trace_vmbus_onoffer(offer);
+ oldchannel = find_primary_channel_by_offer(offer);
+
+ if (oldchannel != NULL) {
+ atomic_dec(&vmbus_connection.offer_in_progress);
+
+ /*
+ * We're resuming from hibernation: all the sub-channel and
+ * hv_sock channels we had before the hibernation should have
+ * been cleaned up, and now we must be seeing a re-offered
+ * primary channel that we had before the hibernation.
+ */
+
+ WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
+ /* Fix up the relid. */
+ oldchannel->offermsg.child_relid = offer->child_relid;
+
+ offer_sz = sizeof(*offer);
+ if (memcmp(offer, &oldchannel->offermsg, offer_sz) == 0) {
+ check_ready_for_resume_event();
+ return;
+ }
+
+ /*
+ * This is not an error, since the host can also change the
+ * other field(s) of the offer, e.g. on WS RS5 (Build 17763),
+ * the offer->connection_id of the Mellanox VF vmbus device
+ * can change when the host reoffers the device upon resume.
+ */
+ pr_debug("vmbus offer changed: relid=%d\n",
+ offer->child_relid);
+
+ print_hex_dump_debug("Old vmbus offer: ", DUMP_PREFIX_OFFSET,
+ 16, 4, &oldchannel->offermsg, offer_sz,
+ false);
+ print_hex_dump_debug("New vmbus offer: ", DUMP_PREFIX_OFFSET,
+ 16, 4, offer, offer_sz, false);
+
+ /* Fix up the old channel. */
+ vmbus_setup_channel_state(oldchannel, offer);
+
+ check_ready_for_resume_event();
+
+ return;
+ }
+
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
@@ -869,25 +988,21 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
return;
}
- /*
- * Setup state for signalling the host.
- */
- newchannel->sig_event = VMBUS_EVENT_CONNECTION_ID;
-
- if (vmbus_proto_version != VERSION_WS2008) {
- newchannel->is_dedicated_interrupt =
- (offer->is_dedicated_interrupt != 0);
- newchannel->sig_event = offer->connection_id;
- }
-
- memcpy(&newchannel->offermsg, offer,
- sizeof(struct vmbus_channel_offer_channel));
- newchannel->monitor_grp = (u8)offer->monitorid / 32;
- newchannel->monitor_bit = (u8)offer->monitorid % 32;
+ vmbus_setup_channel_state(newchannel, offer);
vmbus_process_offer(newchannel);
}
+static void check_ready_for_suspend_event(void)
+{
+ /*
+ * If all the sub-channels or hv_sock channels have been cleaned up,
+ * then it's safe to suspend.
+ */
+ if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
+ complete(&vmbus_connection.ready_for_suspend_event);
+}
+
/*
* vmbus_onoffer_rescind - Rescind offer handler.
*
@@ -898,6 +1013,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel;
struct device *dev;
+ bool clean_up_chan_for_suspend;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
@@ -937,6 +1053,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
return;
}
+ clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
+ is_sub_channel(channel);
/*
* Before setting channel->rescind in vmbus_rescind_cleanup(), we
* should make sure the channel callback is not running any more.
@@ -962,6 +1080,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
+
+ if (clean_up_chan_for_suspend)
+ check_ready_for_suspend_event();
+
return;
}
/*
@@ -994,6 +1116,11 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
}
mutex_unlock(&vmbus_connection.channel_mutex);
}
+
+ /* The "channel" may have been freed. Do not access it any longer. */
+
+ if (clean_up_chan_for_suspend)
+ check_ready_for_suspend_event();
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 09829e15d4a0..6e4c015783ff 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -26,6 +26,11 @@
struct vmbus_connection vmbus_connection = {
.conn_state = DISCONNECTED,
.next_gpadl_handle = ATOMIC_INIT(0xE1E10),
+
+ .ready_for_suspend_event = COMPLETION_INITIALIZER(
+ vmbus_connection.ready_for_suspend_event),
+ .ready_for_resume_event = COMPLETION_INITIALIZER(
+ vmbus_connection.ready_for_resume_event),
};
EXPORT_SYMBOL_GPL(vmbus_connection);
@@ -59,8 +64,7 @@ static __u32 vmbus_get_next_version(__u32 current_version)
}
}
-static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
- __u32 version)
+int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
{
int ret = 0;
unsigned int cur_cpu;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 6188fb7dda42..fcc52797c169 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -154,7 +154,7 @@ void hv_synic_free(void)
* retrieve the initialized message and event pages. Otherwise, we create and
* initialize the message and event pages.
*/
-int hv_synic_init(unsigned int cpu)
+void hv_synic_enable_regs(unsigned int cpu)
{
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
@@ -196,6 +196,11 @@ int hv_synic_init(unsigned int cpu)
sctrl.enable = 1;
hv_set_synic_state(sctrl.as_uint64);
+}
+
+int hv_synic_init(unsigned int cpu)
+{
+ hv_synic_enable_regs(cpu);
hv_stimer_init(cpu);
@@ -205,20 +210,45 @@ int hv_synic_init(unsigned int cpu)
/*
* hv_synic_cleanup - Cleanup routine for hv_synic_init().
*/
-int hv_synic_cleanup(unsigned int cpu)
+void hv_synic_disable_regs(unsigned int cpu)
{
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_scontrol sctrl;
+
+ hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+
+ shared_sint.masked = 1;
+
+ /* Need to correctly cleanup in the case of SMP!!! */
+ /* Disable the interrupt */
+ hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+
+ hv_get_simp(simp.as_uint64);
+ simp.simp_enabled = 0;
+ simp.base_simp_gpa = 0;
+
+ hv_set_simp(simp.as_uint64);
+
+ hv_get_siefp(siefp.as_uint64);
+ siefp.siefp_enabled = 0;
+ siefp.base_siefp_gpa = 0;
+
+ hv_set_siefp(siefp.as_uint64);
+
+ /* Disable the global synic bit */
+ hv_get_synic_state(sctrl.as_uint64);
+ sctrl.enable = 0;
+ hv_set_synic_state(sctrl.as_uint64);
+}
+
+int hv_synic_cleanup(unsigned int cpu)
+{
struct vmbus_channel *channel, *sc;
bool channel_found = false;
unsigned long flags;
- hv_get_synic_state(sctrl.as_uint64);
- if (sctrl.enable != 1)
- return -EFAULT;
-
/*
* Search for channels which are bound to the CPU we're about to
* cleanup. In case we find one and vmbus is still connected we need to
@@ -249,29 +279,7 @@ int hv_synic_cleanup(unsigned int cpu)
hv_stimer_cleanup(cpu);
- hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
-
- shared_sint.masked = 1;
-
- /* Need to correctly cleanup in the case of SMP!!! */
- /* Disable the interrupt */
- hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
-
- hv_get_simp(simp.as_uint64);
- simp.simp_enabled = 0;
- simp.base_simp_gpa = 0;
-
- hv_set_simp(simp.as_uint64);
-
- hv_get_siefp(siefp.as_uint64);
- siefp.siefp_enabled = 0;
- siefp.base_siefp_gpa = 0;
-
- hv_set_siefp(siefp.as_uint64);
-
- /* Disable the global synic bit */
- sctrl.enable = 0;
- hv_set_synic_state(sctrl.as_uint64);
+ hv_synic_disable_regs(cpu);
return 0;
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 6fb4ea5f0304..34bd73526afd 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -494,7 +494,7 @@ enum hv_dm_state {
static __u8 recv_buffer[PAGE_SIZE];
-static __u8 *send_buffer;
+static __u8 balloon_up_send_buffer[PAGE_SIZE];
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)
@@ -1292,8 +1292,8 @@ static void balloon_up(struct work_struct *dummy)
}
while (!done) {
- bl_resp = (struct dm_balloon_response *)send_buffer;
- memset(send_buffer, 0, PAGE_SIZE);
+ memset(balloon_up_send_buffer, 0, PAGE_SIZE);
+ bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
bl_resp->hdr.type = DM_BALLOON_RESPONSE;
bl_resp->hdr.size = sizeof(struct dm_balloon_response);
bl_resp->more_pages = 1;
@@ -1564,58 +1564,18 @@ static void balloon_onchannelcallback(void *context)
}
-static int balloon_probe(struct hv_device *dev,
- const struct hv_vmbus_device_id *dev_id)
+static int balloon_connect_vsp(struct hv_device *dev)
{
- int ret;
- unsigned long t;
struct dm_version_request version_req;
struct dm_capabilities cap_msg;
-
-#ifdef CONFIG_MEMORY_HOTPLUG
- do_hot_add = hot_add;
-#else
- do_hot_add = false;
-#endif
-
- /*
- * First allocate a send buffer.
- */
-
- send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!send_buffer)
- return -ENOMEM;
+ unsigned long t;
+ int ret;
ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
- balloon_onchannelcallback, dev);
-
+ balloon_onchannelcallback, dev);
if (ret)
- goto probe_error0;
+ return ret;
- dm_device.dev = dev;
- dm_device.state = DM_INITIALIZING;
- dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
- init_completion(&dm_device.host_event);
- init_completion(&dm_device.config_event);
- INIT_LIST_HEAD(&dm_device.ha_region_list);
- spin_lock_init(&dm_device.ha_lock);
- INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
- INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
- dm_device.host_specified_ha_region = false;
-
- dm_device.thread =
- kthread_run(dm_thread_func, &dm_device, "hv_balloon");
- if (IS_ERR(dm_device.thread)) {
- ret = PTR_ERR(dm_device.thread);
- goto probe_error1;
- }
-
-#ifdef CONFIG_MEMORY_HOTPLUG
- set_online_page_callback(&hv_online_page);
- register_memory_notifier(&hv_memory_nb);
-#endif
-
- hv_set_drvdata(dev, &dm_device);
/*
* Initiate the handshake with the host and negotiate
* a version that the host can support. We start with the
@@ -1631,16 +1591,15 @@ static int balloon_probe(struct hv_device *dev,
dm_device.version = version_req.version.version;
ret = vmbus_sendpacket(dev->channel, &version_req,
- sizeof(struct dm_version_request),
- (unsigned long)NULL,
- VM_PKT_DATA_INBAND, 0);
+ sizeof(struct dm_version_request),
+ (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
if (ret)
- goto probe_error2;
+ goto out;
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
- goto probe_error2;
+ goto out;
}
/*
@@ -1648,8 +1607,8 @@ static int balloon_probe(struct hv_device *dev,
* fail the probe function.
*/
if (dm_device.state == DM_INIT_ERROR) {
- ret = -ETIMEDOUT;
- goto probe_error2;
+ ret = -EPROTO;
+ goto out;
}
pr_info("Using Dynamic Memory protocol version %u.%u\n",
@@ -1682,16 +1641,15 @@ static int balloon_probe(struct hv_device *dev,
cap_msg.max_page_number = -1;
ret = vmbus_sendpacket(dev->channel, &cap_msg,
- sizeof(struct dm_capabilities),
- (unsigned long)NULL,
- VM_PKT_DATA_INBAND, 0);
+ sizeof(struct dm_capabilities),
+ (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
if (ret)
- goto probe_error2;
+ goto out;
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
- goto probe_error2;
+ goto out;
}
/*
@@ -1699,25 +1657,65 @@ static int balloon_probe(struct hv_device *dev,
* fail the probe function.
*/
if (dm_device.state == DM_INIT_ERROR) {
- ret = -ETIMEDOUT;
- goto probe_error2;
+ ret = -EPROTO;
+ goto out;
}
+ return 0;
+out:
+ vmbus_close(dev->channel);
+ return ret;
+}
+
+static int balloon_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ int ret;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ do_hot_add = hot_add;
+#else
+ do_hot_add = false;
+#endif
+ dm_device.dev = dev;
+ dm_device.state = DM_INITIALIZING;
+ dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
+ init_completion(&dm_device.host_event);
+ init_completion(&dm_device.config_event);
+ INIT_LIST_HEAD(&dm_device.ha_region_list);
+ spin_lock_init(&dm_device.ha_lock);
+ INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
+ INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
+ dm_device.host_specified_ha_region = false;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ set_online_page_callback(&hv_online_page);
+ register_memory_notifier(&hv_memory_nb);
+#endif
+
+ hv_set_drvdata(dev, &dm_device);
+
+ ret = balloon_connect_vsp(dev);
+ if (ret != 0)
+ return ret;
+
dm_device.state = DM_INITIALIZED;
- last_post_time = jiffies;
+
+ dm_device.thread =
+ kthread_run(dm_thread_func, &dm_device, "hv_balloon");
+ if (IS_ERR(dm_device.thread)) {
+ ret = PTR_ERR(dm_device.thread);
+ goto probe_error;
+ }
return 0;
-probe_error2:
+probe_error:
+ vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
+ unregister_memory_notifier(&hv_memory_nb);
restore_online_page_callback(&hv_online_page);
#endif
- kthread_stop(dm_device.thread);
-
-probe_error1:
- vmbus_close(dev->channel);
-probe_error0:
- kfree(send_buffer);
return ret;
}
@@ -1734,12 +1732,11 @@ static int balloon_remove(struct hv_device *dev)
cancel_work_sync(&dm->balloon_wrk.wrk);
cancel_work_sync(&dm->ha_wrk.wrk);
- vmbus_close(dev->channel);
kthread_stop(dm->thread);
- kfree(send_buffer);
+ vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
- restore_online_page_callback(&hv_online_page);
unregister_memory_notifier(&hv_memory_nb);
+ restore_online_page_callback(&hv_online_page);
#endif
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 50eaa1fd6e45..af9379a3bf89 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -169,8 +169,10 @@ extern int hv_synic_alloc(void);
extern void hv_synic_free(void);
+extern void hv_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);
+extern void hv_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);
/* Interface */
@@ -256,6 +258,32 @@ struct vmbus_connection {
struct workqueue_struct *work_queue;
struct workqueue_struct *handle_primary_chan_wq;
struct workqueue_struct *handle_sub_chan_wq;
+
+ /*
+ * The number of sub-channels and hv_sock channels that should be
+ * cleaned up upon suspend: sub-channels will be re-created upon
+ * resume, and hv_sock channels should not survive suspend.
+ */
+ atomic_t nr_chan_close_on_suspend;
+ /*
+ * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
+ * drop to zero.
+ */
+ struct completion ready_for_suspend_event;
+
+ /*
+ * The number of primary channels that should be "fixed up"
+ * upon resume: these channels are re-offered upon resume, and some
+ * fields of the channel offers (i.e. child_relid and connection_id)
+ * can change, so the old offermsg must be fixed up before the resume
+ * callbacks of the VSC drivers start to further touch the channels.
+ */
+ atomic_t nr_chan_fixup_on_resume;
+ /*
+ * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
+ * drop to zero.
+ */
+ struct completion ready_for_resume_event;
};
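
The counter-plus-completion pattern documented in these new fields is worth seeing in isolation. Below is a minimal sketch, assuming illustrative names (pending, all_cleaned) rather than the actual vmbus symbols: each cleaned-up channel decrements an atomic counter, and the last decrement signals the waiter.

	#include <linux/atomic.h>
	#include <linux/completion.h>

	static atomic_t pending;              /* channels still to clean up */
	static struct completion all_cleaned; /* signalled when pending hits 0 */

	static void cleanup_init(int nr_channels)
	{
		atomic_set(&pending, nr_channels);
		init_completion(&all_cleaned);
	}

	static void channel_cleaned(void)
	{
		/* The last decrement wakes the suspend path. */
		if (atomic_dec_and_test(&pending))
			complete(&all_cleaned);
	}

	static void wait_for_cleanup(void)
	{
		/* Skip the wait if nothing is outstanding, mirroring the
		 * nr_chan_close_on_suspend note above. */
		if (atomic_read(&pending) > 0)
			wait_for_completion(&all_cleaned);
	}
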
@@ -270,6 +298,8 @@ struct vmbus_msginfo {
extern struct vmbus_connection vmbus_connection;
+int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);
+
static inline void vmbus_send_interrupt(u32 relid)
{
sync_set_bit(relid, vmbus_connection.send_int_page);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index ebd35fc35290..391f0b225c9a 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -24,12 +24,14 @@
#include <linux/sched/task_stack.h>
#include <asm/mshyperv.h>
+#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
+#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
@@ -910,6 +912,43 @@ static void vmbus_shutdown(struct device *child_device)
drv->shutdown(dev);
}
+/*
+ * vmbus_suspend - Suspend a vmbus device
+ */
+static int vmbus_suspend(struct device *child_device)
+{
+ struct hv_driver *drv;
+ struct hv_device *dev = device_to_hv_device(child_device);
+
+ /* The device may not be attached yet */
+ if (!child_device->driver)
+ return 0;
+
+ drv = drv_to_hv_drv(child_device->driver);
+ if (!drv->suspend)
+ return -EOPNOTSUPP;
+
+ return drv->suspend(dev);
+}
+
+/*
+ * vmbus_resume - Resume a vmbus device
+ */
+static int vmbus_resume(struct device *child_device)
+{
+ struct hv_driver *drv;
+ struct hv_device *dev = device_to_hv_device(child_device);
+
+ /* The device may not be attached yet */
+ if (!child_device->driver)
+ return 0;
+
+ drv = drv_to_hv_drv(child_device->driver);
+ if (!drv->resume)
+ return -EOPNOTSUPP;
+
+ return drv->resume(dev);
+}
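
Since vmbus_suspend()/vmbus_resume() return -EOPNOTSUPP when the bound driver lacks the callbacks, a child driver opts in by filling the new hooks in its struct hv_driver. A hypothetical sketch (the mydrv_* names are invented for illustration):

	static int mydrv_suspend(struct hv_device *dev)
	{
		/* Quiesce the device and tear down its channel state here. */
		return 0;
	}

	static int mydrv_resume(struct hv_device *dev)
	{
		/* Re-open the channel and restore device state here. */
		return 0;
	}

	static struct hv_driver mydrv = {
		.name    = "mydrv",
		.suspend = mydrv_suspend,
		.resume  = mydrv_resume,
		/* .id_table, .probe and .remove omitted for brevity */
	};
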
/*
* vmbus_device_release - Final callback release of the vmbus child device
@@ -925,6 +964,14 @@ static void vmbus_device_release(struct device *device)
kfree(hv_dev);
}
+/*
+ * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
+ * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm.
+ */
+static const struct dev_pm_ops vmbus_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume)
+};
+
/* The one and only one */
static struct bus_type hv_bus = {
.name = "vmbus",
@@ -935,6 +982,7 @@ static struct bus_type hv_bus = {
.uevent = vmbus_uevent,
.dev_groups = vmbus_dev_groups,
.drv_groups = vmbus_drv_groups,
+ .pm = &vmbus_pm,
};
struct onmessage_work_context {
@@ -1022,6 +1070,41 @@ msg_handled:
vmbus_signal_eom(msg, message_type);
}
+/*
+ * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
+ * hibernation, because hv_sock connections cannot persist across hibernation.
+ */
+static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
+{
+ struct onmessage_work_context *ctx;
+ struct vmbus_channel_rescind_offer *rescind;
+
+ WARN_ON(!is_hvsock_channel(channel));
+
+ /*
+ * sizeof(*ctx) is small and the allocation should really not fail,
+ * otherwise the state of the hv_sock connections ends up in limbo.
+ */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
+
+ /*
+ * So far, these are not really used by Linux. Just set them to the
+ * reasonable values conforming to the definitions of the fields.
+ */
+ ctx->msg.header.message_type = 1;
+ ctx->msg.header.payload_size = sizeof(*rescind);
+
+ /* These values are actually used by Linux. */
+ rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.u.payload;
+ rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
+ rescind->child_relid = channel->offermsg.child_relid;
+
+ INIT_WORK(&ctx->work, vmbus_onmessage_work);
+
+ queue_work_on(vmbus_connection.connect_cpu,
+ vmbus_connection.work_queue,
+ &ctx->work);
+}
/*
* Direct callback for channels using other deferred processing
@@ -2042,6 +2125,129 @@ acpi_walk_err:
return ret_val;
}
+static int vmbus_bus_suspend(struct device *dev)
+{
+ struct vmbus_channel *channel, *sc;
+ unsigned long flags;
+
+ while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
+ /*
+ * We wait here until the completion of any channel
+ * offers that are currently in progress.
+ */
+ msleep(1);
+ }
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (!is_hvsock_channel(channel))
+ continue;
+
+ vmbus_force_channel_rescinded(channel);
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ /*
+ * Wait until all the sub-channels and hv_sock channels have been
+ * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
+ * they would conflict with the new sub-channels that will be created
+ * in the resume path. hv_sock channels should also be destroyed, but
+ * an hv_sock channel of an established hv_sock connection cannot
+ * really be destroyed, since it may still be referenced by the
+ * userspace application, so we just force the hv_sock channel to be
+ * rescinded by vmbus_force_channel_rescinded(), and the userspace
+ * application will thoroughly destroy the channel after hibernation.
+ *
+ * Note: the counter nr_chan_close_on_suspend may never go above 0 if
+ * the VM has no sub-channels or hv_sock channels, e.g. a 1-vCPU VM.
+ */
+ if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
+ wait_for_completion(&vmbus_connection.ready_for_suspend_event);
+
+ WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ /*
+ * Invalidate the field. Upon resume, vmbus_onoffer() will fix
+ * it up, along with the other fields if necessary.
+ */
+ channel->offermsg.child_relid = INVALID_RELID;
+
+ if (is_hvsock_channel(channel)) {
+ if (!channel->rescind) {
+ pr_err("hv_sock channel not rescinded!\n");
+ WARN_ON_ONCE(1);
+ }
+ continue;
+ }
+
+ spin_lock_irqsave(&channel->lock, flags);
+ list_for_each_entry(sc, &channel->sc_list, sc_list) {
+ pr_err("Sub-channel not deleted!\n");
+ WARN_ON_ONCE(1);
+ }
+ spin_unlock_irqrestore(&channel->lock, flags);
+
+ atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
+ }
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ vmbus_initiate_unload(false);
+
+ vmbus_connection.conn_state = DISCONNECTED;
+
+ /* Reset the event for the next resume. */
+ reinit_completion(&vmbus_connection.ready_for_resume_event);
+
+ return 0;
+}
+
+static int vmbus_bus_resume(struct device *dev)
+{
+ struct vmbus_channel_msginfo *msginfo;
+ size_t msgsize;
+ int ret;
+
+ /*
+ * We only use the 'vmbus_proto_version', which was in use before
+ * hibernation, to re-negotiate with the host.
+ */
+ if (vmbus_proto_version == VERSION_INVAL ||
+ vmbus_proto_version == 0) {
+ pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
+ return -EINVAL;
+ }
+
+ msgsize = sizeof(*msginfo) +
+ sizeof(struct vmbus_channel_initiate_contact);
+
+ msginfo = kzalloc(msgsize, GFP_KERNEL);
+
+ if (msginfo == NULL)
+ return -ENOMEM;
+
+ ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
+
+ kfree(msginfo);
+
+ if (ret != 0)
+ return ret;
+
+ WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
+
+ vmbus_request_offers();
+
+ wait_for_completion(&vmbus_connection.ready_for_resume_event);
+
+ /* Reset the event for the next suspend. */
+ reinit_completion(&vmbus_connection.ready_for_suspend_event);
+
+ return 0;
+}
+
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
{"VMBUS", 0},
{"VMBus", 0},
@@ -2049,6 +2255,19 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
+/*
+ * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
+ * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the
+ * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the
+ * pci "noirq" restore callback runs before "non-noirq" callbacks (see
+ * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
+ * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
+ * resume callback must also run via the "noirq" callbacks.
+ */
+static const struct dev_pm_ops vmbus_bus_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume)
+};
+
static struct acpi_driver vmbus_acpi_driver = {
.name = "vmbus",
.ids = vmbus_acpi_device_ids,
@@ -2056,6 +2275,7 @@ static struct acpi_driver vmbus_acpi_driver = {
.add = vmbus_acpi_add,
.remove = vmbus_acpi_remove,
},
+ .drv.pm = &vmbus_bus_pm,
};
static void hv_kexec_handler(void)
@@ -2086,6 +2306,47 @@ static void hv_crash_handler(struct pt_regs *regs)
hyperv_cleanup();
};
+static int hv_synic_suspend(void)
+{
+ /*
+ * When we reach here, all the non-boot CPUs have been offlined, and
+ * the stimers on them have been unbound in hv_synic_cleanup() ->
+ * hv_stimer_cleanup() -> clockevents_unbind_device().
+ *
+ * hv_synic_suspend() only runs on CPU0 with interrupts disabled. Here
+ * we do not unbind the stimer on CPU0 because: 1) it's unnecessary,
+ * as the interrupts remain disabled between syscore_suspend()
+ * and syscore_resume(): see create_image() and resume_target_kernel();
+ * 2) the stimer on CPU0 is automatically disabled later by
+ * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
+ * -> clockevents_shutdown() -> ... -> hv_ce_shutdown(); 3) a warning
+ * would be triggered if we call clockevents_unbind_device(), which
+ * may sleep, in an interrupts-disabled context. So, we intentionally
+ * don't call hv_stimer_cleanup(0) here.
+ */
+
+ hv_synic_disable_regs(0);
+
+ return 0;
+}
+
+static void hv_synic_resume(void)
+{
+ hv_synic_enable_regs(0);
+
+ /*
+ * Note: we don't need to call hv_stimer_init(0), because the timer
+ * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
+ * automatically re-enabled in timekeeping_resume().
+ */
+}
+
+/* The callbacks run only on CPU0, with irqs_disabled. */
+static struct syscore_ops hv_synic_syscore_ops = {
+ .suspend = hv_synic_suspend,
+ .resume = hv_synic_resume,
+};
+
static int __init hv_acpi_init(void)
{
int ret, t;
@@ -2116,6 +2377,8 @@ static int __init hv_acpi_init(void)
hv_setup_kexec_handler(hv_kexec_handler);
hv_setup_crash_handler(hv_crash_handler);
+ register_syscore_ops(&hv_synic_syscore_ops);
+
return 0;
cleanup:
@@ -2128,6 +2391,8 @@ static void __exit vmbus_exit(void)
{
int cpu;
+ unregister_syscore_ops(&hv_synic_syscore_ops);
+
hv_remove_kexec_handler();
hv_remove_crash_handler();
vmbus_connection.conn_state = DISCONNECTED;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 5587215b8ddb..146ce40d8e0a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -429,6 +429,7 @@ config I2C_AXXIA
tristate "Axxia I2C controller"
depends on ARCH_AXXIA || COMPILE_TEST
default ARCH_AXXIA
+ select I2C_SLAVE
help
Say yes if you want to support the I2C bus on Axxia platforms.
@@ -977,7 +978,7 @@ config I2C_SIRF
will be called i2c-sirf.
config I2C_SPRD
- bool "Spreadtrum I2C interface"
+ tristate "Spreadtrum I2C interface"
depends on I2C=y && ARCH_SPRD
help
If you say yes to this option, support will be included for the
@@ -1309,6 +1310,20 @@ config I2C_ELEKTOR
This support is also available as a module. If so, the module
will be called i2c-elektor.
+config I2C_ICY
+ tristate "ICY Zorro card"
+ depends on ZORRO
+ select I2C_ALGOPCF
+ help
+ This supports the PCF8584 Zorro bus I2C adapter, known as ICY.
+ Say Y if you own such an adapter.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-icy.
+
+ If you have a 2019 edition board with an LTC2990 sensor at address
+ 0x4c, loading the module 'ltc2990' is sufficient to enable it.
+
config I2C_MLXCPLD
tristate "Mellanox I2C driver"
depends on X86_64
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 80c23895eaaf..3ab8aebc39c9 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -140,6 +140,7 @@ obj-$(CONFIG_I2C_BCM_KONA) += i2c-bcm-kona.o
obj-$(CONFIG_I2C_BRCMSTB) += i2c-brcmstb.o
obj-$(CONFIG_I2C_CROS_EC_TUNNEL) += i2c-cros-ec-tunnel.o
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
+obj-$(CONFIG_I2C_ICY) += i2c-icy.o
obj-$(CONFIG_I2C_MLXCPLD) += i2c-mlxcpld.o
obj-$(CONFIG_I2C_OPAL) += i2c-opal.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index ff3142b15cab..0214daa913ff 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -77,6 +77,40 @@
MST_STATUS_IP)
#define MST_TX_BYTES_XFRD 0x50
#define MST_RX_BYTES_XFRD 0x54
+#define SLV_ADDR_DEC_CTL 0x58
+#define SLV_ADDR_DEC_GCE BIT(0) /* ACK to General Call Address from own master (loopback) */
+#define SLV_ADDR_DEC_OGCE BIT(1) /* ACK to General Call Address from external masters */
+#define SLV_ADDR_DEC_SA1E BIT(2) /* ACK to addr_1 enabled */
+#define SLV_ADDR_DEC_SA1M BIT(3) /* 10-bit addressing for addr_1 enabled */
+#define SLV_ADDR_DEC_SA2E BIT(4) /* ACK to addr_2 enabled */
+#define SLV_ADDR_DEC_SA2M BIT(5) /* 10-bit addressing for addr_2 enabled */
+#define SLV_ADDR_1 0x5c
+#define SLV_ADDR_2 0x60
+#define SLV_RX_CTL 0x64
+#define SLV_RX_ACSA1 BIT(0) /* Generate ACK for writes to addr_1 */
+#define SLV_RX_ACSA2 BIT(1) /* Generate ACK for writes to addr_2 */
+#define SLV_RX_ACGCA BIT(2) /* ACK data phase transfers to General Call Address */
+#define SLV_DATA 0x68
+#define SLV_RX_FIFO 0x6c
+#define SLV_FIFO_DV1 BIT(0) /* Data Valid for addr_1 */
+#define SLV_FIFO_DV2 BIT(1) /* Data Valid for addr_2 */
+#define SLV_FIFO_AS BIT(2) /* (N)ACK Sent */
+#define SLV_FIFO_TNAK BIT(3) /* Timeout NACK */
+#define SLV_FIFO_STRC BIT(4) /* First byte after start condition received */
+#define SLV_FIFO_RSC BIT(5) /* Repeated Start Condition */
+#define SLV_FIFO_STPC BIT(6) /* Stop Condition */
+#define SLV_FIFO_DV (SLV_FIFO_DV1 | SLV_FIFO_DV2)
+#define SLV_INT_ENABLE 0x70
+#define SLV_INT_STATUS 0x74
+#define SLV_STATUS_RFH BIT(0) /* FIFO service */
+#define SLV_STATUS_WTC BIT(1) /* Write transfer complete */
+#define SLV_STATUS_SRS1 BIT(2) /* Slave read from addr 1 */
+#define SLV_STATUS_SRRS1 BIT(3) /* Repeated start from addr 1 */
+#define SLV_STATUS_SRND1 BIT(4) /* Read request not following start condition */
+#define SLV_STATUS_SRC1 BIT(5) /* Read canceled */
+#define SLV_STATUS_SRAT1 BIT(6) /* Slave Read timed out */
+#define SLV_STATUS_SRDRE1 BIT(7) /* Data written after timed out */
+#define SLV_READ_DUMMY 0x78
#define SCL_HIGH_PERIOD 0x80
#define SCL_LOW_PERIOD 0x84
#define SPIKE_FLTR_LEN 0x88
@@ -111,6 +145,8 @@ struct axxia_i2c_dev {
struct clk *i2c_clk;
u32 bus_clk_rate;
bool last;
+ struct i2c_client *slave;
+ int irq;
};
static void i2c_int_disable(struct axxia_i2c_dev *idev, u32 mask)
@@ -276,13 +312,65 @@ static int axxia_i2c_fill_tx_fifo(struct axxia_i2c_dev *idev)
return ret;
}
+static void axxia_i2c_slv_fifo_event(struct axxia_i2c_dev *idev)
+{
+ u32 fifo_status = readl(idev->base + SLV_RX_FIFO);
+ u8 val;
+
+ dev_dbg(idev->dev, "slave irq fifo_status=0x%x\n", fifo_status);
+
+ if (fifo_status & SLV_FIFO_DV1) {
+ if (fifo_status & SLV_FIFO_STRC)
+ i2c_slave_event(idev->slave,
+ I2C_SLAVE_WRITE_REQUESTED, &val);
+
+ val = readl(idev->base + SLV_DATA);
+ i2c_slave_event(idev->slave, I2C_SLAVE_WRITE_RECEIVED, &val);
+ }
+ if (fifo_status & SLV_FIFO_STPC) {
+ readl(idev->base + SLV_DATA); /* dummy read */
+ i2c_slave_event(idev->slave, I2C_SLAVE_STOP, &val);
+ }
+ if (fifo_status & SLV_FIFO_RSC)
+ readl(idev->base + SLV_DATA); /* dummy read */
+}
+
+static irqreturn_t axxia_i2c_slv_isr(struct axxia_i2c_dev *idev)
+{
+ u32 status = readl(idev->base + SLV_INT_STATUS);
+ u8 val;
+
+ dev_dbg(idev->dev, "slave irq status=0x%x\n", status);
+
+ if (status & SLV_STATUS_RFH)
+ axxia_i2c_slv_fifo_event(idev);
+ if (status & SLV_STATUS_SRS1) {
+ i2c_slave_event(idev->slave, I2C_SLAVE_READ_REQUESTED, &val);
+ writel(val, idev->base + SLV_DATA);
+ }
+ if (status & SLV_STATUS_SRND1) {
+ i2c_slave_event(idev->slave, I2C_SLAVE_READ_PROCESSED, &val);
+ writel(val, idev->base + SLV_DATA);
+ }
+ if (status & SLV_STATUS_SRC1)
+ i2c_slave_event(idev->slave, I2C_SLAVE_STOP, &val);
+
+ writel(INT_SLV, idev->base + INTERRUPT_STATUS);
+ return IRQ_HANDLED;
+}
+
static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
{
struct axxia_i2c_dev *idev = _dev;
+ irqreturn_t ret = IRQ_NONE;
u32 status;
- if (!(readl(idev->base + INTERRUPT_STATUS) & INT_MST))
- return IRQ_NONE;
+ status = readl(idev->base + INTERRUPT_STATUS);
+
+ if (status & INT_SLV)
+ ret = axxia_i2c_slv_isr(idev);
+ if (!(status & INT_MST))
+ return ret;
/* Read interrupt status bits */
status = readl(idev->base + MST_INT_STATUS);
@@ -583,9 +671,58 @@ static u32 axxia_i2c_func(struct i2c_adapter *adap)
return caps;
}
+static int axxia_i2c_reg_slave(struct i2c_client *slave)
+{
+ struct axxia_i2c_dev *idev = i2c_get_adapdata(slave->adapter);
+ u32 slv_int_mask = SLV_STATUS_RFH;
+ u32 dec_ctl;
+
+ if (idev->slave)
+ return -EBUSY;
+
+ idev->slave = slave;
+
+ /* Enable slave mode as well */
+ writel(GLOBAL_MST_EN | GLOBAL_SLV_EN, idev->base + GLOBAL_CONTROL);
+ writel(INT_MST | INT_SLV, idev->base + INTERRUPT_ENABLE);
+
+ /* Set slave address */
+ dec_ctl = SLV_ADDR_DEC_SA1E;
+ if (slave->flags & I2C_CLIENT_TEN)
+ dec_ctl |= SLV_ADDR_DEC_SA1M;
+
+ writel(SLV_RX_ACSA1, idev->base + SLV_RX_CTL);
+ writel(dec_ctl, idev->base + SLV_ADDR_DEC_CTL);
+ writel(slave->addr, idev->base + SLV_ADDR_1);
+
+ /* Enable interrupts */
+ slv_int_mask |= SLV_STATUS_SRS1 | SLV_STATUS_SRRS1 | SLV_STATUS_SRND1;
+ slv_int_mask |= SLV_STATUS_SRC1;
+ writel(slv_int_mask, idev->base + SLV_INT_ENABLE);
+
+ return 0;
+}
+
+static int axxia_i2c_unreg_slave(struct i2c_client *slave)
+{
+ struct axxia_i2c_dev *idev = i2c_get_adapdata(slave->adapter);
+
+ /* Disable slave mode */
+ writel(GLOBAL_MST_EN, idev->base + GLOBAL_CONTROL);
+ writel(INT_MST, idev->base + INTERRUPT_ENABLE);
+
+ synchronize_irq(idev->irq);
+
+ idev->slave = NULL;
+
+ return 0;
+}
+
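For context, the slave hooks above are reached through the generic I2C slave interface: a backend driver registers a callback with i2c_slave_register(), which dispatches to axxia_i2c_reg_slave() via adapter->algo->reg_slave, and the events raised by axxia_i2c_slv_isr() land in that callback. A minimal, illustrative backend callback (not part of this patch):

	#include <linux/i2c.h>

	static int example_slave_cb(struct i2c_client *client,
				    enum i2c_slave_event event, u8 *val)
	{
		switch (event) {
		case I2C_SLAVE_WRITE_RECEIVED:
			/* *val holds the byte just written by the remote master */
			break;
		case I2C_SLAVE_READ_REQUESTED:
		case I2C_SLAVE_READ_PROCESSED:
			*val = 0xff;	/* byte to return to the remote master */
			break;
		default:
			break;
		}
		return 0;
	}

	/* i2c_slave_register(client, example_slave_cb) would wire this up. */
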
static const struct i2c_algorithm axxia_i2c_algo = {
.master_xfer = axxia_i2c_xfer,
.functionality = axxia_i2c_func,
+ .reg_slave = axxia_i2c_reg_slave,
+ .unreg_slave = axxia_i2c_unreg_slave,
};
static const struct i2c_adapter_quirks axxia_i2c_quirks = {
@@ -599,7 +736,6 @@ static int axxia_i2c_probe(struct platform_device *pdev)
struct axxia_i2c_dev *idev = NULL;
struct resource *res;
void __iomem *base;
- int irq;
int ret = 0;
idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
@@ -611,10 +747,10 @@ static int axxia_i2c_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
+ idev->irq = platform_get_irq(pdev, 0);
+ if (idev->irq < 0) {
dev_err(&pdev->dev, "missing interrupt resource\n");
- return irq;
+ return idev->irq;
}
idev->i2c_clk = devm_clk_get(&pdev->dev, "i2c");
@@ -643,10 +779,10 @@ static int axxia_i2c_probe(struct platform_device *pdev)
goto error_disable_clk;
}
- ret = devm_request_irq(&pdev->dev, irq, axxia_i2c_isr, 0,
+ ret = devm_request_irq(&pdev->dev, idev->irq, axxia_i2c_isr, 0,
pdev->name, idev);
if (ret) {
- dev_err(&pdev->dev, "failed to claim IRQ%d\n", irq);
+ dev_err(&pdev->dev, "failed to claim IRQ%d\n", idev->irq);
goto error_disable_clk;
}
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 19ef2b0c682a..9ffdffaf6141 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -808,7 +808,7 @@ static struct i2c_algorithm bcm_iproc_algo = {
.unreg_slave = bcm_iproc_i2c_unreg_slave,
};
-static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
+static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
.max_read_len = M_RX_MAX_READ_LEN,
};
@@ -922,7 +922,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
adap = &iproc_i2c->adapter;
i2c_set_adapdata(adap, iproc_i2c);
- strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name));
+ snprintf(adap->name, sizeof(adap->name),
+ "Broadcom iProc (%s)",
+ of_node_full_name(iproc_i2c->device->of_node));
adap->algo = &bcm_iproc_algo;
adap->quirks = &bcm_iproc_i2c_quirks;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 67752f7b0371..e01b2b57e724 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -389,7 +390,7 @@ static const struct i2c_algorithm bcm2835_i2c_algo = {
};
/*
- * This HW was reported to have problems with clock stretching:
+ * The BCM2835 was reported to have problems with clock stretching:
* http://www.advamation.com/knowhow/raspberrypi/rpi-i2c-bug.html
* https://www.raspberrypi.org/forums/viewtopic.php?p=146272
*/
@@ -471,11 +472,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, i2c_dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
- strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name));
+ snprintf(adap->name, sizeof(adap->name), "bcm2835 (%s)",
+ of_node_full_name(pdev->dev.of_node));
adap->algo = &bcm2835_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
- adap->quirks = &bcm2835_i2c_quirks;
+ adap->quirks = of_device_get_match_data(&pdev->dev);
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
@@ -501,7 +503,8 @@ static int bcm2835_i2c_remove(struct platform_device *pdev)
}
static const struct of_device_id bcm2835_i2c_of_match[] = {
- { .compatible = "brcm,bcm2835-i2c" },
+ { .compatible = "brcm,bcm2711-i2c" },
+ { .compatible = "brcm,bcm2835-i2c", .data = &bcm2835_i2c_quirks },
{},
};
MODULE_DEVICE_TABLE(of, bcm2835_i2c_of_match);
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 66af44bfa67d..b8fde61bb5d8 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -178,6 +178,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = {
.smbus_xfer = cht_wc_i2c_adap_smbus_xfer,
};
+/*
+ * We are an i2c-adapter which itself is part of an i2c-client. This means that
+ * transfers done through us take adapter->bus_lock twice, once for our parent
+ * i2c-adapter and once to take our own bus_lock. Lockdep does not like this
+ * nested locking, to make lockdep happy in the case of busses with muxes, the
+ * i2c-core's i2c_adapter_lock_bus function calls:
+ * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
+ *
+ * But i2c_adapter_depth only works when the direct parent of the adapter is
+ * another adapter, as it is only meant for muxes. In our case there is an
+ * i2c-client and MFD instantiated platform_device in the parent->child chain
+ * between the 2 devices.
+ *
+ * So we override the default i2c_lock_operations and pass a hardcoded
+ * depth of 1 to rt_mutex_lock_nested, to make lockdep happy.
+ *
+ * Note that if there were to be a mux attached to our adapter, this would
+ * break things again since the i2c-mux code expects the root-adapter to have
+ * a locking depth of 0. But we always have only 1 client directly attached
+ * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC.
+ */
+static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ rt_mutex_lock_nested(&adapter->bus_lock, 1);
+}
+
+static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ return rt_mutex_trylock(&adapter->bus_lock);
+}
+
+static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ rt_mutex_unlock(&adapter->bus_lock);
+}
+
+static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = {
+ .lock_bus = cht_wc_i2c_adap_lock_bus,
+ .trylock_bus = cht_wc_i2c_adap_trylock_bus,
+ .unlock_bus = cht_wc_i2c_adap_unlock_bus,
+};
+
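For contrast, a simplified sketch of the default locking being overridden (the real code lives in the i2c core): i2c_adapter_depth() counts only parents that are themselves adapters, which is why it yields 0 here rather than the depth of 1 the driver hardcodes.

	static void default_lock_bus(struct i2c_adapter *adapter,
				     unsigned int flags)
	{
		/* Subclass 0 for a root adapter, N for an adapter N muxes deep. */
		rt_mutex_lock_nested(&adapter->bus_lock,
				     i2c_adapter_depth(adapter));
	}
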
/**** irqchip for the client connected to the extchgr i2c adapter ****/
static void cht_wc_i2c_irq_lock(struct irq_data *data)
{
@@ -286,6 +331,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
adap->adapter.owner = THIS_MODULE;
adap->adapter.class = I2C_CLASS_HWMON;
adap->adapter.algo = &cht_wc_i2c_adap_algo;
+ adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
strlcpy(adap->adapter.name, "PMIC I2C Adapter",
sizeof(adap->adapter.name));
adap->adapter.dev.parent = &pdev->dev;
@@ -363,8 +409,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
{
struct cht_wc_i2c_adap *adap = platform_get_drvdata(pdev);
- if (adap->client)
- i2c_unregister_device(adap->client);
+ i2c_unregister_device(adap->client);
i2c_del_adapter(&adap->adapter);
irq_domain_remove(adap->irq_domain);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index d464799e40a3..e8b328242256 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -655,15 +655,11 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
struct i2c_adapter *adap = &dev->adapter;
struct gpio_desc *gpio;
- int r;
-
- gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- if (r == -ENOENT || r == -ENOSYS)
- return 0;
- return r;
- }
+
+ gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
+ if (IS_ERR_OR_NULL(gpio))
+ return PTR_ERR_OR_ZERO(gpio);
+
rinfo->scl_gpiod = gpio;
gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 76810deb2de6..050adda7c1bd 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -33,6 +33,7 @@ enum dw_pci_ctl_id_t {
baytrail,
cherrytrail,
haswell,
+ elkhartlake,
};
struct dw_scl_sda_cfg {
@@ -168,13 +169,20 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.flags = MODEL_CHERRYTRAIL,
.scl_sda_cfg = &byt_config,
},
+ [elkhartlake] = {
+ .bus_num = -1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .functionality = I2C_FUNC_10BIT_ADDR,
+ .clk_khz = 100000,
+ },
};
#ifdef CONFIG_PM
static int i2c_dw_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_i2c_dev *i_dev = pci_get_drvdata(pdev);
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
i_dev->suspended = true;
i_dev->disable(i_dev);
@@ -184,8 +192,7 @@ static int i2c_dw_pci_suspend(struct device *dev)
static int i2c_dw_pci_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_i2c_dev *i_dev = pci_get_drvdata(pdev);
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
int ret;
ret = i_dev->init(i_dev);
@@ -227,6 +234,8 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
return r;
}
+ pci_set_master(pdev);
+
r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
if (r) {
dev_err(&pdev->dev, "I/O memory remapping failed\n");
@@ -237,18 +246,24 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
if (!dev)
return -ENOMEM;
+ r = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (r < 0)
+ return r;
+
dev->clk = NULL;
dev->controller = controller;
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
dev->base = pcim_iomap_table(pdev)[0];
dev->dev = &pdev->dev;
- dev->irq = pdev->irq;
+ dev->irq = pci_irq_vector(pdev, 0);
dev->flags |= controller->flags;
if (controller->setup) {
r = controller->setup(pdev, controller);
- if (r)
+ if (r) {
+ pci_free_irq_vectors(pdev);
return r;
+ }
}
dev->functionality = controller->functionality |
@@ -276,8 +291,10 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
adap->nr = controller->bus_num;
r = i2c_dw_probe(dev);
- if (r)
+ if (r) {
+ pci_free_irq_vectors(pdev);
return r;
+ }
pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -296,6 +313,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
pm_runtime_get_noresume(&pdev->dev);
i2c_del_adapter(&dev->adapter);
+ pci_free_irq_vectors(pdev);
}
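
The switch to the pci_alloc_irq_vectors() API follows the usual managed pattern; a minimal sketch under the single-vector assumption used by this driver (example_probe is illustrative, not part of this patch):

	static int example_probe(struct pci_dev *pdev)
	{
		int nvec;

		/* One vector; MSI-X, MSI, then legacy in order of preference. */
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
		if (nvec < 0)
			return nvec;

		/* Translate vector 0 into a Linux IRQ number:
		 * irq = pci_irq_vector(pdev, 0); then request_irq(irq, ...). */

		/* Every later error path must call pci_free_irq_vectors(pdev). */
		return 0;
	}
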
/* work with hotplug and coldplug */
@@ -331,6 +349,15 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x22C5), cherrytrail },
{ PCI_VDEVICE(INTEL, 0x22C6), cherrytrail },
{ PCI_VDEVICE(INTEL, 0x22C7), cherrytrail },
+ /* Elkhart Lake (PSE I2C) */
+ { PCI_VDEVICE(INTEL, 0x4bb9), elkhartlake },
+ { PCI_VDEVICE(INTEL, 0x4bba), elkhartlake },
+ { PCI_VDEVICE(INTEL, 0x4bbb), elkhartlake },
+ { PCI_VDEVICE(INTEL, 0x4bbc), elkhartlake },
+ { PCI_VDEVICE(INTEL, 0x4bbd), elkhartlake },
+ { PCI_VDEVICE(INTEL, 0x4bbe), elkhartlake },
+ { PCI_VDEVICE(INTEL, 0x4bbf), elkhartlake },
+ { PCI_VDEVICE(INTEL, 0x4bc0), elkhartlake },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index ddfb81872906..16dd338877d0 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -279,12 +279,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
- if (IS_ERR(dev->rst)) {
- if (PTR_ERR(dev->rst) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- } else {
- reset_control_deassert(dev->rst);
- }
+ if (IS_ERR(dev->rst))
+ return PTR_ERR(dev->rst);
+
+ reset_control_deassert(dev->rst);
t = &dev->timings;
if (pdata)
@@ -346,8 +344,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
/* Optional interface clock */
dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
- if (IS_ERR(dev->pclk))
- return PTR_ERR(dev->pclk);
+ if (IS_ERR(dev->pclk)) {
+ ret = PTR_ERR(dev->pclk);
+ goto exit_reset;
+ }
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (!i2c_dw_prepare_clk(dev, true)) {
@@ -400,8 +400,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
exit_probe:
dw_i2c_plat_pm_cleanup(dev);
exit_reset:
- if (!IS_ERR_OR_NULL(dev->rst))
- reset_control_assert(dev->rst);
+ reset_control_assert(dev->rst);
return ret;
}
@@ -419,8 +418,7 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
dw_i2c_plat_pm_cleanup(dev);
- if (!IS_ERR_OR_NULL(dev->rst))
- reset_control_assert(dev->rst);
+ reset_control_assert(dev->rst);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index e4e7932f7800..e7514c16b756 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -791,9 +791,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
}
ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
- dev_name(&pdev->dev), i2c);
-
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c);
if (ret != 0) {
dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq);
goto err_clk;
diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
index da5eb3960def..e0c256922d4f 100644
--- a/drivers/i2c/busses/i2c-fsi.c
+++ b/drivers/i2c/busses/i2c-fsi.c
@@ -707,8 +707,10 @@ static int fsi_i2c_probe(struct device *dev)
continue;
port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!port)
+ if (!port) {
+ of_node_put(np);
break;
+ }
port->master = i2c;
port->port = port_no;
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 4df1434b3597..8497c7a95dd4 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -445,8 +445,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
hix5hd2_i2c_init(priv);
ret = devm_request_irq(&pdev->dev, irq, hix5hd2_i2c_irq,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
- dev_name(&pdev->dev), priv);
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv);
if (ret != 0) {
dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq);
goto err_clk;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 36e9559f880c..c09791fb4929 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -292,7 +292,8 @@ struct i801_priv {
#define FEATURE_HOST_NOTIFY BIT(5)
/* Not really a feature, but it's convenient to handle it as such */
#define FEATURE_IDF BIT(15)
-#define FEATURE_TCO BIT(16)
+#define FEATURE_TCO_SPT BIT(16)
+#define FEATURE_TCO_CNL BIT(17)
static const char *i801_feature_names[] = {
"SMBus PEC",
@@ -1500,57 +1501,23 @@ static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
}
#endif
-static const struct itco_wdt_platform_data tco_platform_data = {
+static const struct itco_wdt_platform_data spt_tco_platform_data = {
.name = "Intel PCH",
.version = 4,
};
static DEFINE_SPINLOCK(p2sb_spinlock);
-static void i801_add_tco(struct i801_priv *priv)
+static struct platform_device *
+i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
+ struct resource *tco_res)
{
- struct pci_dev *pci_dev = priv->pci_dev;
- struct resource tco_res[3], *res;
- struct platform_device *pdev;
+ struct resource *res;
unsigned int devfn;
- u32 tco_base, tco_ctl;
- u32 base_addr, ctrl_val;
u64 base64_addr;
+ u32 base_addr;
u8 hidden;
- if (!(priv->features & FEATURE_TCO))
- return;
-
- pci_read_config_dword(pci_dev, TCOBASE, &tco_base);
- pci_read_config_dword(pci_dev, TCOCTL, &tco_ctl);
- if (!(tco_ctl & TCOCTL_EN))
- return;
-
- memset(tco_res, 0, sizeof(tco_res));
-
- res = &tco_res[ICH_RES_IO_TCO];
- res->start = tco_base & ~1;
- res->end = res->start + 32 - 1;
- res->flags = IORESOURCE_IO;
-
- /*
- * Power Management registers.
- */
- devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
- pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
-
- res = &tco_res[ICH_RES_IO_SMI];
- res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
- res->end = res->start + 3;
- res->flags = IORESOURCE_IO;
-
- /*
- * Enable the ACPI I/O space.
- */
- pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
- ctrl_val |= ACPICTRL_EN;
- pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
-
/*
* We must access the NO_REBOOT bit over the Primary to Sideband
* bridge (P2SB). The BIOS prevents the P2SB device from being
@@ -1586,15 +1553,76 @@ static void i801_add_tco(struct i801_priv *priv)
res->end = res->start + 3;
res->flags = IORESOURCE_MEM;
- pdev = platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
- tco_res, 3, &tco_platform_data,
- sizeof(tco_platform_data));
- if (IS_ERR(pdev)) {
- dev_warn(&pci_dev->dev, "failed to create iTCO device\n");
+ return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
+ tco_res, 3, &spt_tco_platform_data,
+ sizeof(spt_tco_platform_data));
+}
+
+static const struct itco_wdt_platform_data cnl_tco_platform_data = {
+ .name = "Intel PCH",
+ .version = 6,
+};
+
+static struct platform_device *
+i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev,
+ struct resource *tco_res)
+{
+ return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
+ tco_res, 2, &cnl_tco_platform_data,
+ sizeof(cnl_tco_platform_data));
+}
+
+static void i801_add_tco(struct i801_priv *priv)
+{
+ u32 base_addr, tco_base, tco_ctl, ctrl_val;
+ struct pci_dev *pci_dev = priv->pci_dev;
+ struct resource tco_res[3], *res;
+ unsigned int devfn;
+
+ /* If we have ACPI based watchdog use that instead */
+ if (acpi_has_watchdog())
return;
- }
- priv->tco_pdev = pdev;
+ if (!(priv->features & (FEATURE_TCO_SPT | FEATURE_TCO_CNL)))
+ return;
+
+ pci_read_config_dword(pci_dev, TCOBASE, &tco_base);
+ pci_read_config_dword(pci_dev, TCOCTL, &tco_ctl);
+ if (!(tco_ctl & TCOCTL_EN))
+ return;
+
+ memset(tco_res, 0, sizeof(tco_res));
+
+ res = &tco_res[ICH_RES_IO_TCO];
+ res->start = tco_base & ~1;
+ res->end = res->start + 32 - 1;
+ res->flags = IORESOURCE_IO;
+
+ /*
+ * Power Management registers.
+ */
+ devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
+ pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
+
+ res = &tco_res[ICH_RES_IO_SMI];
+ res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
+ res->end = res->start + 3;
+ res->flags = IORESOURCE_IO;
+
+ /*
+ * Enable the ACPI I/O space.
+ */
+ pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
+ ctrl_val |= ACPICTRL_EN;
+ pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
+
+ if (priv->features & FEATURE_TCO_CNL)
+ priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res);
+ else
+ priv->tco_pdev = i801_add_tco_spt(priv, pci_dev, tco_res);
+
+ if (IS_ERR(priv->tco_pdev))
+ dev_warn(&pci_dev->dev, "failed to create iTCO device\n");
}
#ifdef CONFIG_ACPI
@@ -1704,13 +1732,21 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
switch (dev->device) {
case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
- case PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
- case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
+ priv->features |= FEATURE_I2C_BLOCK_READ;
+ priv->features |= FEATURE_IRQ;
+ priv->features |= FEATURE_SMBUS_PEC;
+ priv->features |= FEATURE_BLOCK_BUFFER;
+ priv->features |= FEATURE_TCO_SPT;
+ priv->features |= FEATURE_HOST_NOTIFY;
+ break;
+
+ case PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS:
@@ -1720,9 +1756,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
priv->features |= FEATURE_IRQ;
priv->features |= FEATURE_SMBUS_PEC;
priv->features |= FEATURE_BLOCK_BUFFER;
- /* If we have ACPI based watchdog use that instead */
- if (!acpi_has_watchdog())
- priv->features |= FEATURE_TCO;
+ priv->features |= FEATURE_TCO_CNL;
priv->features |= FEATURE_HOST_NOTIFY;
break;
@@ -1921,8 +1955,7 @@ static int i801_suspend(struct device *dev)
static int i801_resume(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct i801_priv *priv = pci_get_drvdata(pci_dev);
+ struct i801_priv *priv = dev_get_drvdata(dev);
i801_enable_host_notify(&priv->adapter);
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
new file mode 100644
index 000000000000..8382eb64b424
--- /dev/null
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I2C driver for stand-alone PCF8584 style adapters on Zorro cards
+ *
+ * Original ICY documentation can be found on Aminet:
+ * https://aminet.net/package/docs/hard/icy
+ *
+ * There has been a modern community re-print of this design in 2019:
+ * https://www.a1k.org/forum/index.php?threads/70106/
+ *
+ * The card is basically a Philips PCF8584 connected straight to the
+ * beginning of the AutoConfig'd address space (register S1 on base+2),
+ * with /INT on /INT2 on the Zorro bus.
+ *
+ * Copyright (c) 2019 Max Staudt <[email protected]>
+ *
+ * This started as a fork of i2c-elektor.c and has evolved since.
+ * Thanks go to its authors for providing a base to grow on.
+ *
+ *
+ * IRQ support is currently not implemented.
+ *
+ * As it turns out, i2c-algo-pcf is really written with i2c-elektor's
+ * edge-triggered ISA interrupts in mind, while the Amiga's Zorro bus has
+ * level-triggered interrupts. This means that once an interrupt occurs, we
+ * have to tell the PCF8584 to shut up immediately, or it will keep the
+ * interrupt line busy and cause an IRQ storm.
+ *
+ * However, because of the PCF8584's host-side protocol, there is no good
+ * way to just quieten it without side effects. Rather, we have to perform
+ * the next read/write operation straight away, which will reset the /INT
+ * pin. Fixing this properly will require re-designing the core of i2c-algo-pcf.
+ * For now, we never request an IRQ from the PCF8584, and poll it instead.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-pcf.h>
+
+#include <asm/amigaints.h>
+#include <linux/zorro.h>
+
+#include "../algos/i2c-algo-pcf.h"
+
+struct icy_i2c {
+ struct i2c_adapter adapter;
+
+ void __iomem *reg_s0;
+ void __iomem *reg_s1;
+ struct fwnode_handle *ltc2990_fwnode;
+ struct i2c_client *ltc2990_client;
+};
+
+/*
+ * Functions called by i2c-algo-pcf
+ */
+static void icy_pcf_setpcf(void *data, int ctl, int val)
+{
+ struct icy_i2c *i2c = (struct icy_i2c *)data;
+
+ u8 __iomem *address = ctl ? i2c->reg_s1 : i2c->reg_s0;
+
+ z_writeb(val, address);
+}
+
+static int icy_pcf_getpcf(void *data, int ctl)
+{
+ struct icy_i2c *i2c = (struct icy_i2c *)data;
+
+ u8 __iomem *address = ctl ? i2c->reg_s1 : i2c->reg_s0;
+
+ return z_readb(address);
+}
+
+static int icy_pcf_getown(void *data)
+{
+ return 0x55;
+}
+
+static int icy_pcf_getclock(void *data)
+{
+ return 0x1c;
+}
+
+static void icy_pcf_waitforpin(void *data)
+{
+ usleep_range(50, 150);
+}
+
+/*
+ * Main i2c-icy part
+ */
+static unsigned short const icy_ltc2990_addresses[] = {
+ 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END
+};
+
+/*
+ * Additional sensors exposed once this property is applied:
+ *
+ * in1 will be the voltage of the 5V rail, divided by 2.
+ * in2 will be the voltage of the 12V rail, divided by 4.
+ * temp3 will be measured using a PCB loop next to the chip.
+ */
+static const u32 icy_ltc2990_meas_mode[] = {0, 3};
+
+static const struct property_entry icy_ltc2990_props[] = {
+ PROPERTY_ENTRY_U32_ARRAY("lltc,meas-mode", icy_ltc2990_meas_mode),
+ { }
+};
+
+static int icy_probe(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct icy_i2c *i2c;
+ struct i2c_algo_pcf_data *algo_data;
+ struct fwnode_handle *new_fwnode;
+ struct i2c_board_info ltc2990_info = {
+ .type = "ltc2990",
+ .addr = 0x4c,
+ };
+
+ i2c = devm_kzalloc(&z->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ algo_data = devm_kzalloc(&z->dev, sizeof(*algo_data), GFP_KERNEL);
+ if (!algo_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(&z->dev, i2c);
+ i2c->adapter.dev.parent = &z->dev;
+ i2c->adapter.owner = THIS_MODULE;
+ /* i2c->adapter.algo assigned by i2c_pcf_add_bus() */
+ i2c->adapter.algo_data = algo_data;
+ strlcpy(i2c->adapter.name, "ICY I2C Zorro adapter",
+ sizeof(i2c->adapter.name));
+
+ if (!devm_request_mem_region(&z->dev,
+ z->resource.start,
+ 4, i2c->adapter.name))
+ return -ENXIO;
+
+ /* Driver private data */
+ i2c->reg_s0 = ZTWO_VADDR(z->resource.start);
+ i2c->reg_s1 = ZTWO_VADDR(z->resource.start + 2);
+
+ algo_data->data = i2c;
+ algo_data->setpcf = icy_pcf_setpcf;
+ algo_data->getpcf = icy_pcf_getpcf;
+ algo_data->getown = icy_pcf_getown;
+ algo_data->getclock = icy_pcf_getclock;
+ algo_data->waitforpin = icy_pcf_waitforpin;
+
+ if (i2c_pcf_add_bus(&i2c->adapter)) {
+ dev_err(&z->dev, "i2c_pcf_add_bus() failed\n");
+ return -ENXIO;
+ }
+
+ dev_info(&z->dev, "ICY I2C controller at %pa, IRQ not implemented\n",
+ &z->resource.start);
+
+ /*
+ * The 2019 a1k.org PCBs have an LTC2990 at 0x4c, so start
+ * it automatically once ltc2990 is modprobed.
+ *
+ * in0 is the voltage of the internal 5V power supply.
+ * temp1 is the temperature inside the chip.
+ *
+ * See property_entry above for in1, in2, temp3.
+ */
+ new_fwnode = fwnode_create_software_node(icy_ltc2990_props, NULL);
+ if (IS_ERR(new_fwnode)) {
+ dev_info(&z->dev, "Failed to create fwnode for LTC2990, error: %ld\n",
+ PTR_ERR(new_fwnode));
+ } else {
+ /*
+ * Store the fwnode so we can destroy it on .remove().
+ * Only store it on success, as fwnode_remove_software_node()
+ * is NULL safe, but not PTR_ERR safe.
+ */
+ i2c->ltc2990_fwnode = new_fwnode;
+ ltc2990_info.fwnode = new_fwnode;
+
+ i2c->ltc2990_client =
+ i2c_new_probed_device(&i2c->adapter,
+ &ltc2990_info,
+ icy_ltc2990_addresses,
+ NULL);
+ }
+
+ return 0;
+}
+
+static void icy_remove(struct zorro_dev *z)
+{
+ struct icy_i2c *i2c = dev_get_drvdata(&z->dev);
+
+ i2c_unregister_device(i2c->ltc2990_client);
+ fwnode_remove_software_node(i2c->ltc2990_fwnode);
+
+ i2c_del_adapter(&i2c->adapter);
+}
+
+static const struct zorro_device_id icy_zorro_tbl[] = {
+ { ZORRO_ID(VMC, 15, 0), },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(zorro, icy_zorro_tbl);
+
+static struct zorro_driver icy_driver = {
+ .name = "i2c-icy",
+ .id_table = icy_zorro_tbl,
+ .probe = icy_probe,
+ .remove = icy_remove,
+};
+
+module_driver(icy_driver,
+ zorro_register_driver,
+ zorro_unregister_driver);
+
+MODULE_AUTHOR("Max Staudt <[email protected]>");
+MODULE_DESCRIPTION("I2C bus via PCF8584 on ICY Zorro card");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index dc00fabc919a..c92b56485fa6 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -545,7 +545,6 @@ MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
static int lpi2c_imx_probe(struct platform_device *pdev)
{
struct lpi2c_imx_struct *lpi2c_imx;
- struct resource *res;
unsigned int temp;
int irq, ret;
@@ -553,8 +552,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (!lpi2c_imx)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpi2c_imx->base = devm_ioremap_resource(&pdev->dev, res);
+ lpi2c_imx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpi2c_imx->base))
return PTR_ERR(lpi2c_imx->base);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 15f6cde6452f..a3b61336fe55 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -20,6 +20,7 @@
*
*/
+#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -255,6 +256,12 @@ static const struct of_device_id i2c_imx_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, i2c_imx_dt_ids);
+static const struct acpi_device_id i2c_imx_acpi_ids[] = {
+ {"NXP0001", .driver_data = (kernel_ulong_t)&vf610_i2c_hwdata},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, i2c_imx_acpi_ids);
+
static inline int is_imx1_i2c(struct imx_i2c_struct *i2c_imx)
{
return i2c_imx->hwdata->devtype == IMX1_I2C;
@@ -1048,14 +1055,13 @@ static const struct i2c_algorithm i2c_imx_algo = {
static int i2c_imx_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids,
- &pdev->dev);
struct imx_i2c_struct *i2c_imx;
struct resource *res;
struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
int irq, ret;
dma_addr_t phy_addr;
+ const struct imx_i2c_hwdata *match;
dev_dbg(&pdev->dev, "<%s>\n", __func__);
@@ -1075,8 +1081,9 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (!i2c_imx)
return -ENOMEM;
- if (of_id)
- i2c_imx->hwdata = of_id->data;
+ match = device_get_match_data(&pdev->dev);
+ if (match)
+ i2c_imx->hwdata = match;
else
i2c_imx->hwdata = (struct imx_i2c_hwdata *)
platform_get_device_id(pdev)->driver_data;
@@ -1089,6 +1096,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx->adapter.nr = pdev->id;
i2c_imx->adapter.dev.of_node = pdev->dev.of_node;
i2c_imx->base = base;
+ ACPI_COMPANION_SET(&i2c_imx->adapter.dev, ACPI_COMPANION(&pdev->dev));
/* Get I2C clock */
i2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
@@ -1247,6 +1255,7 @@ static struct platform_driver i2c_imx_driver = {
.name = DRIVER_NAME,
.pm = &i2c_imx_pm_ops,
.of_match_table = i2c_imx_dt_ids,
+ .acpi_match_table = i2c_imx_acpi_ids,
},
.id_table = imx_i2c_devtype,
};
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 02d23edb2fb1..2f95e25a10f7 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -781,8 +781,6 @@ static int ismt_dev_init(struct ismt_priv *priv)
if (!priv->hw)
return -ENOMEM;
- memset(priv->hw, 0, (ISMT_DESC_ENTRIES * sizeof(struct ismt_desc)));
-
priv->head = 0;
init_completion(&priv->cmp);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 7d79317a1046..89224913f578 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -802,7 +802,6 @@ static int mxs_i2c_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mxs_i2c_dev *i2c;
struct i2c_adapter *adap;
- struct resource *res;
int err, irq;
i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
@@ -814,8 +813,7 @@ static int mxs_i2c_probe(struct platform_device *pdev)
i2c->dev_type = device_id->driver_data;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c->regs = devm_ioremap_resource(&pdev->dev, res);
+ i2c->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(i2c->regs))
return PTR_ERR(i2c->regs);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 4117f1abc7c6..ca8b3ecfa93d 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -703,8 +703,9 @@ static int ocores_i2c_probe(struct platform_device *pdev)
}
if (ocores_algorithm.master_xfer != ocores_xfer_polling) {
- ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
- pdev->name, i2c);
+ ret = devm_request_any_context_irq(&pdev->dev, irq,
+ ocores_isr, 0,
+ pdev->name, i2c);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
goto err_clk;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index cba325eb852f..30ded6422e7b 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -72,7 +72,8 @@
#define PIIX4_BLOCK_DATA 0x14
/* Multi-port constants */
-#define PIIX4_MAX_ADAPTERS 4
+#define PIIX4_MAX_ADAPTERS 4
+#define HUDSON2_MAIN_PORTS 2 /* HUDSON2, KERNCZ reserves ports 3, 4 */
/* SB800 constants */
#define SB800_PIIX4_SMB_IDX 0xcd6
@@ -806,10 +807,12 @@ MODULE_DEVICE_TABLE (pci, piix4_ids);
static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
static struct i2c_adapter *piix4_aux_adapter;
+static int piix4_adapter_count;
static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
bool sb800_main, u8 port, bool notify_imc,
- const char *name, struct i2c_adapter **padap)
+ u8 hw_port_nr, const char *name,
+ struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
struct i2c_piix4_adapdata *adapdata;
@@ -841,6 +844,12 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
/* set up the sysfs linkage to our parent device */
adap->dev.parent = &dev->dev;
+ if (has_acpi_companion(&dev->dev)) {
+ acpi_preset_companion(&adap->dev,
+ ACPI_COMPANION(&dev->dev),
+ hw_port_nr);
+ }
+
snprintf(adap->name, sizeof(adap->name),
"SMBus PIIX4 adapter%s at %04x", name, smba);
@@ -865,8 +874,19 @@ static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba,
int port;
int retval;
- for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
+ if (dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
+ (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ dev->revision >= 0x1F)) {
+ piix4_adapter_count = HUDSON2_MAIN_PORTS;
+ } else {
+ piix4_adapter_count = PIIX4_MAX_ADAPTERS;
+ }
+
+ for (port = 0; port < piix4_adapter_count; port++) {
+ u8 hw_port_nr = port == 0 ? 0 : port + 1;
+
retval = piix4_add_adapter(dev, smba, true, port, notify_imc,
+ hw_port_nr,
piix4_main_port_names_sb800[port],
&piix4_main_adapters[port]);
if (retval < 0)
@@ -937,8 +957,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
return retval;
/* Try to register main SMBus adapter, give up if we can't */
- retval = piix4_add_adapter(dev, retval, false, 0, false, "",
- &piix4_main_adapters[0]);
+ retval = piix4_add_adapter(dev, retval, false, 0, false, 0,
+ "", &piix4_main_adapters[0]);
if (retval < 0)
return retval;
}
@@ -964,7 +984,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (retval > 0) {
/* Try to add the aux adapter if it exists,
* piix4_add_adapter will clean up if this fails */
- piix4_add_adapter(dev, retval, false, 0, false,
+ piix4_add_adapter(dev, retval, false, 0, false, 1,
is_sb800 ? piix4_aux_port_name_sb800 : "",
&piix4_aux_adapter);
}
@@ -987,7 +1007,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
static void piix4_remove(struct pci_dev *dev)
{
- int port = PIIX4_MAX_ADAPTERS;
+ int port = piix4_adapter_count;
while (--port >= 0) {
if (piix4_main_adapters[port]) {
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 961123529678..b432e7580458 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -465,9 +466,9 @@ static int sprd_i2c_clk_init(struct sprd_i2c *i2c_dev)
i2c_dev->clk = devm_clk_get(i2c_dev->dev, "enable");
if (IS_ERR(i2c_dev->clk)) {
- dev_warn(i2c_dev->dev, "i2c%d can't get the enable clock\n",
- i2c_dev->adap.nr);
- i2c_dev->clk = NULL;
+ dev_err(i2c_dev->dev, "i2c%d can't get the enable clock\n",
+ i2c_dev->adap.nr);
+ return PTR_ERR(i2c_dev->clk);
}
return 0;
@@ -477,7 +478,6 @@ static int sprd_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sprd_i2c *i2c_dev;
- struct resource *res;
u32 prop;
int ret;
@@ -487,8 +487,7 @@ static int sprd_i2c_probe(struct platform_device *pdev)
if (!i2c_dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c_dev->base = devm_ioremap_resource(dev, res);
+ i2c_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(i2c_dev->base))
return PTR_ERR(i2c_dev->base);
@@ -520,7 +519,10 @@ static int sprd_i2c_probe(struct platform_device *pdev)
if (i2c_dev->bus_freq != 100000 && i2c_dev->bus_freq != 400000)
return -EINVAL;
- sprd_i2c_clk_init(i2c_dev);
+ ret = sprd_i2c_clk_init(i2c_dev);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, i2c_dev);
ret = clk_prepare_enable(i2c_dev->clk);
@@ -644,8 +646,7 @@ static struct platform_driver sprd_i2c_driver = {
},
};
-static int sprd_i2c_init(void)
-{
- return platform_driver_register(&sprd_i2c_driver);
-}
-arch_initcall_sync(sprd_i2c_init);
+module_platform_driver(sprd_i2c_driver);
+
+MODULE_DESCRIPTION("Spreadtrum I2C master controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 266d1c269b83..d36cf08461f7 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1809,7 +1809,7 @@ static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
I2C_FUNC_SMBUS_I2C_BLOCK;
}
-static struct i2c_algorithm stm32f7_i2c_algo = {
+static const struct i2c_algorithm stm32f7_i2c_algo = {
.master_xfer = stm32f7_i2c_xfer,
.smbus_xfer = stm32f7_i2c_smbus_xfer,
.functionality = stm32f7_i2c_func,
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index f724c8e6b360..39762f0611b1 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -526,7 +526,7 @@ static const struct i2c_algorithm synquacer_i2c_algo = {
.functionality = synquacer_i2c_functionality,
};
-static struct i2c_adapter synquacer_i2c_ops = {
+static const struct i2c_adapter synquacer_i2c_ops = {
.owner = THIS_MODULE,
.name = "synquacer_i2c-adapter",
.algo = &synquacer_i2c_algo,
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 37347c93e8e0..0bff3f3a8779 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -39,7 +39,7 @@ struct taos_data {
};
/* TAOS TSL2550 EVM */
-static struct i2c_board_info tsl2550_info = {
+static const struct i2c_board_info tsl2550_info = {
I2C_BOARD_INFO("tsl2550", 0x39),
};
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 9fcb13beeb8f..c1683f9338b4 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -636,7 +636,7 @@ static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
}
-static int tegra_i2c_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
@@ -665,7 +665,7 @@ static int tegra_i2c_runtime_resume(struct device *dev)
return 0;
}
-static int tegra_i2c_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
@@ -713,12 +713,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
u32 tsu_thd;
u8 tlow, thigh;
- err = pm_runtime_get_sync(i2c_dev->dev);
- if (err < 0) {
- dev_err(i2c_dev->dev, "runtime resume failed %d\n", err);
- return err;
- }
-
reset_control_assert(i2c_dev->rst);
udelay(2);
reset_control_deassert(i2c_dev->rst);
@@ -772,7 +766,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
if (err) {
dev_err(i2c_dev->dev,
"failed changing clock rate: %d\n", err);
- goto err;
+ return err;
}
}
@@ -787,23 +781,21 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
err = tegra_i2c_flush_fifos(i2c_dev);
if (err)
- goto err;
+ return err;
if (i2c_dev->is_multimaster_mode && i2c_dev->hw->has_slcg_override_reg)
i2c_writel(i2c_dev, I2C_MST_CORE_CLKEN_OVR, I2C_CLKEN_OVERRIDE);
err = tegra_i2c_wait_for_config_load(i2c_dev);
if (err)
- goto err;
+ return err;
if (i2c_dev->irq_disabled) {
i2c_dev->irq_disabled = false;
enable_irq(i2c_dev->irq);
}
-err:
- pm_runtime_put(i2c_dev->dev);
- return err;
+ return 0;
}
static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
@@ -1616,12 +1608,14 @@ static int tegra_i2c_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
+ if (!pm_runtime_enabled(&pdev->dev))
ret = tegra_i2c_runtime_resume(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "runtime resume failed\n");
- goto unprepare_div_clk;
- }
+ else
+ ret = pm_runtime_get_sync(i2c_dev->dev);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto unprepare_div_clk;
}
if (i2c_dev->is_multimaster_mode) {
@@ -1666,6 +1660,8 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (ret)
goto release_dma;
+ pm_runtime_put(&pdev->dev);
+
return 0;
release_dma:
@@ -1711,8 +1707,7 @@ static int tegra_i2c_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_i2c_suspend(struct device *dev)
+static int __maybe_unused tegra_i2c_suspend(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
@@ -1721,38 +1716,41 @@ static int tegra_i2c_suspend(struct device *dev)
return 0;
}
-static int tegra_i2c_resume(struct device *dev)
+static int __maybe_unused tegra_i2c_resume(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int err;
+ err = tegra_i2c_runtime_resume(dev);
+ if (err)
+ return err;
+
err = tegra_i2c_init(i2c_dev, false);
if (err)
return err;
+ err = tegra_i2c_runtime_suspend(dev);
+ if (err)
+ return err;
+
i2c_mark_adapter_resumed(&i2c_dev->adapter);
return 0;
}
static const struct dev_pm_ops tegra_i2c_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume)
SET_RUNTIME_PM_OPS(tegra_i2c_runtime_suspend, tegra_i2c_runtime_resume,
NULL)
};
-#define TEGRA_I2C_PM (&tegra_i2c_pm)
-#else
-#define TEGRA_I2C_PM NULL
-#endif
-
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
.remove = tegra_i2c_remove,
.driver = {
.name = "tegra-i2c",
.of_match_table = tegra_i2c_of_match,
- .pm = TEGRA_I2C_PM,
+ .pm = &tegra_i2c_pm,
},
};
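The tegra conversion trades the CONFIG_PM_SLEEP #ifdef for __maybe_unused annotations. A minimal sketch of the resulting pattern (hypothetical foo_* names):

static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* a real callback would quiesce the device */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return 0;	/* a real callback would reprogram the device */
}

static const struct dev_pm_ops foo_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

With CONFIG_PM_SLEEP disabled, SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() expands to nothing, the callbacks become unreferenced, and __maybe_unused silences the warning while the compiler discards them -- so the TEGRA_I2C_PM NULL-pointer indirection is no longer needed.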
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index 7acca2599f04..4241aac79e7e 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -108,7 +108,6 @@ static void uniphier_fi2c_fill_txfifo(struct uniphier_fi2c_priv *priv,
if (fifo_space-- <= 0)
break;
- dev_dbg(&priv->adap.dev, "write data: %02x\n", *priv->buf);
writel(*priv->buf++, priv->membase + UNIPHIER_FI2C_DTTX);
priv->len--;
}
@@ -124,7 +123,6 @@ static void uniphier_fi2c_drain_rxfifo(struct uniphier_fi2c_priv *priv)
break;
*priv->buf++ = readl(priv->membase + UNIPHIER_FI2C_DTRX);
- dev_dbg(&priv->adap.dev, "read data: %02x\n", priv->buf[-1]);
priv->len--;
}
}
@@ -142,8 +140,6 @@ static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv,
static void uniphier_fi2c_stop(struct uniphier_fi2c_priv *priv)
{
- dev_dbg(&priv->adap.dev, "stop condition\n");
-
priv->enabled_irqs |= UNIPHIER_FI2C_INT_STOP;
uniphier_fi2c_set_irqs(priv);
writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STO,
@@ -160,21 +156,15 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
irq_status = readl(priv->membase + UNIPHIER_FI2C_INT);
irq_status &= priv->enabled_irqs;
- dev_dbg(&priv->adap.dev,
- "interrupt: enabled_irqs=%04x, irq_status=%04x\n",
- priv->enabled_irqs, irq_status);
-
if (irq_status & UNIPHIER_FI2C_INT_STOP)
goto complete;
if (unlikely(irq_status & UNIPHIER_FI2C_INT_AL)) {
- dev_dbg(&priv->adap.dev, "arbitration lost\n");
priv->error = -EAGAIN;
goto complete;
}
if (unlikely(irq_status & UNIPHIER_FI2C_INT_NA)) {
- dev_dbg(&priv->adap.dev, "could not get ACK\n");
priv->error = -ENXIO;
if (priv->flags & UNIPHIER_FI2C_RD) {
/*
@@ -215,18 +205,14 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) {
if (priv->len <= UNIPHIER_FI2C_FIFO_SIZE &&
!(priv->flags & UNIPHIER_FI2C_BYTE_WISE)) {
- dev_dbg(&priv->adap.dev,
- "enable read byte count IRQ\n");
priv->enabled_irqs |= UNIPHIER_FI2C_INT_RB;
uniphier_fi2c_set_irqs(priv);
priv->flags |= UNIPHIER_FI2C_BYTE_WISE;
}
- if (priv->len <= 1) {
- dev_dbg(&priv->adap.dev, "set NACK\n");
+ if (priv->len <= 1)
writel(UNIPHIER_FI2C_CR_MST |
UNIPHIER_FI2C_CR_NACK,
priv->membase + UNIPHIER_FI2C_CR);
- }
}
goto handled;
@@ -334,10 +320,6 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
bool is_read = msg->flags & I2C_M_RD;
unsigned long time_left, flags;
- dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, repeat=%d, stop=%d\n",
- is_read ? "receive" : "transmit", msg->addr, msg->len,
- repeat, stop);
-
priv->len = msg->len;
priv->buf = msg->buf;
priv->enabled_irqs = UNIPHIER_FI2C_INT_FAULTS;
@@ -359,7 +341,6 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
else
uniphier_fi2c_tx_init(priv, msg->addr, repeat);
- dev_dbg(&adap->dev, "start condition\n");
/*
* For a repeated START condition, writing a slave address to the FIFO
* kicks the controller. So, the UNIPHIER_FI2C_CR register should be
@@ -383,7 +364,6 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
uniphier_fi2c_recover(priv);
return -ETIMEDOUT;
}
- dev_dbg(&adap->dev, "complete\n");
if (unlikely(priv->flags & UNIPHIER_FI2C_DEFER_STOP_COMP)) {
u32 status;
@@ -538,7 +518,6 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_fi2c_priv *priv;
- struct resource *regs;
u32 bus_speed;
unsigned long clk_rate;
int irq, ret;
@@ -547,8 +526,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->membase = devm_ioremap_resource(dev, regs);
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->membase))
return PTR_ERR(priv->membase);
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 0173840c32af..0270090c0360 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -71,7 +71,6 @@ static int uniphier_i2c_xfer_byte(struct i2c_adapter *adap, u32 txdata,
reinit_completion(&priv->comp);
txdata |= UNIPHIER_I2C_DTRM_IRQEN;
- dev_dbg(&adap->dev, "write data: 0x%04x\n", txdata);
writel(txdata, priv->membase + UNIPHIER_I2C_DTRM);
time_left = wait_for_completion_timeout(&priv->comp, adap->timeout);
@@ -81,8 +80,6 @@ static int uniphier_i2c_xfer_byte(struct i2c_adapter *adap, u32 txdata,
}
rxdata = readl(priv->membase + UNIPHIER_I2C_DREC);
- dev_dbg(&adap->dev, "read data: 0x%04x\n", rxdata);
-
if (rxdatap)
*rxdatap = rxdata;
@@ -98,14 +95,11 @@ static int uniphier_i2c_send_byte(struct i2c_adapter *adap, u32 txdata)
if (ret)
return ret;
- if (unlikely(rxdata & UNIPHIER_I2C_DREC_LAB)) {
- dev_dbg(&adap->dev, "arbitration lost\n");
+ if (unlikely(rxdata & UNIPHIER_I2C_DREC_LAB))
return -EAGAIN;
- }
- if (unlikely(rxdata & UNIPHIER_I2C_DREC_LRB)) {
- dev_dbg(&adap->dev, "could not get ACK\n");
+
+ if (unlikely(rxdata & UNIPHIER_I2C_DREC_LRB))
return -ENXIO;
- }
return 0;
}
@@ -115,7 +109,6 @@ static int uniphier_i2c_tx(struct i2c_adapter *adap, u16 addr, u16 len,
{
int ret;
- dev_dbg(&adap->dev, "start condition\n");
ret = uniphier_i2c_send_byte(adap, addr << 1 |
UNIPHIER_I2C_DTRM_STA |
UNIPHIER_I2C_DTRM_NACK);
@@ -137,7 +130,6 @@ static int uniphier_i2c_rx(struct i2c_adapter *adap, u16 addr, u16 len,
{
int ret;
- dev_dbg(&adap->dev, "start condition\n");
ret = uniphier_i2c_send_byte(adap, addr << 1 |
UNIPHIER_I2C_DTRM_STA |
UNIPHIER_I2C_DTRM_NACK |
@@ -161,7 +153,6 @@ static int uniphier_i2c_rx(struct i2c_adapter *adap, u16 addr, u16 len,
static int uniphier_i2c_stop(struct i2c_adapter *adap)
{
- dev_dbg(&adap->dev, "stop condition\n");
return uniphier_i2c_send_byte(adap, UNIPHIER_I2C_DTRM_STO |
UNIPHIER_I2C_DTRM_NACK);
}
@@ -173,9 +164,6 @@ static int uniphier_i2c_master_xfer_one(struct i2c_adapter *adap,
bool recovery = false;
int ret;
- dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, stop=%d\n",
- is_read ? "receive" : "transmit", msg->addr, msg->len, stop);
-
if (is_read)
ret = uniphier_i2c_rx(adap, msg->addr, msg->len, msg->buf);
else
@@ -326,7 +314,6 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_i2c_priv *priv;
- struct resource *regs;
u32 bus_speed;
unsigned long clk_rate;
int irq, ret;
@@ -335,8 +322,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->membase = devm_ioremap_resource(dev, regs);
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->membase))
return PTR_ERR(priv->membase);
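Both uniphier probes collapse the platform_get_resource()/devm_ioremap_resource() pair into one call. A sketch of what the helper does internally (this mirrors the drivers/base/platform.c implementation of the time):

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	/* look up the index'th MEM resource, then map it devres-managed */
	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}

Callers keep the same error handling: a missing or unmappable resource still comes back as an ERR_PTR.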
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 72b300174cb8..5f6a4985f2bc 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -350,13 +350,11 @@ static int i2c_device_probe(struct device *dev)
return -ENODEV;
if (client->flags & I2C_CLIENT_WAKE) {
- int wakeirq = -ENOENT;
+ int wakeirq;
- if (dev->of_node) {
- wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
- if (wakeirq == -EPROBE_DEFER)
- return wakeirq;
- }
+ wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ if (wakeirq == -EPROBE_DEFER)
+ return wakeirq;
device_init_wakeup(&client->dev, true);
@@ -966,7 +964,7 @@ struct i2c_client *devm_i2c_new_dummy_device(struct device *dev,
EXPORT_SYMBOL_GPL(devm_i2c_new_dummy_device);
/**
- * i2c_new_secondary_device - Helper to get the instantiated secondary address
+ * i2c_new_ancillary_device - Helper to get the instantiated secondary address
* and create the associated device
* @client: Handle to the primary client
* @name: Handle to specify which secondary address to get
@@ -985,9 +983,9 @@ EXPORT_SYMBOL_GPL(devm_i2c_new_dummy_device);
* cell whose "reg-names" value matches the slave name.
*
* This returns the new i2c client, which should be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
+ * i2c_unregister_device(); or an ERR_PTR to describe the error.
*/
-struct i2c_client *i2c_new_secondary_device(struct i2c_client *client,
+struct i2c_client *i2c_new_ancillary_device(struct i2c_client *client,
const char *name,
u16 default_addr)
{
@@ -1002,9 +1000,9 @@ struct i2c_client *i2c_new_secondary_device(struct i2c_client *client,
}
dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr);
- return i2c_new_dummy(client->adapter, addr);
+ return i2c_new_dummy_device(client->adapter, addr);
}
-EXPORT_SYMBOL_GPL(i2c_new_secondary_device);
+EXPORT_SYMBOL_GPL(i2c_new_ancillary_device);
/* ------------------------------------------------------------------------- */
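Besides the rename, the error convention changes: i2c_new_ancillary_device() now returns an ERR_PTR rather than NULL, matching i2c_new_dummy_device(). A caller-side sketch (the "vdd" name and 0x39 address are invented for illustration):

struct i2c_client *ancillary;

ancillary = i2c_new_ancillary_device(client, "vdd", 0x39);
if (IS_ERR(ancillary))
	return PTR_ERR(ancillary);	/* a NULL check would miss errors now */

The adv748x and adv7604 hunks below are exactly this caller-side conversion.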
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index be65d3842878..92ff9991bae8 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -11,6 +11,13 @@
* pointer, yet implementation is deferred until the need actually arises.
*/
+/*
+ * FIXME: What to do if only 8 bits of a 16-bit address are sent?
+ * The ST-M24C64 sends only 0xff then. Needs verification with other
+ * EEPROMs, though. We currently use the 8 bits as a valid address.
+ */
+
+#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -21,12 +28,18 @@
struct eeprom_data {
struct bin_attribute bin;
- bool first_write;
spinlock_t buffer_lock;
- u8 buffer_idx;
+ u16 buffer_idx;
+ u16 address_mask;
+ u8 num_address_bytes;
+ u8 idx_write_cnt;
u8 buffer[];
};
+#define I2C_SLAVE_BYTELEN GENMASK(15, 0)
+#define I2C_SLAVE_FLAG_ADDR16 BIT(16)
+#define I2C_SLAVE_DEVICE_MAGIC(_len, _flags) ((_flags) | (_len))
+
static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
enum i2c_slave_event event, u8 *val)
{
@@ -34,12 +47,14 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
switch (event) {
case I2C_SLAVE_WRITE_RECEIVED:
- if (eeprom->first_write) {
- eeprom->buffer_idx = *val;
- eeprom->first_write = false;
+ if (eeprom->idx_write_cnt < eeprom->num_address_bytes) {
+ if (eeprom->idx_write_cnt == 0)
+ eeprom->buffer_idx = 0;
+ eeprom->buffer_idx = *val | (eeprom->buffer_idx << 8);
+ eeprom->idx_write_cnt++;
} else {
spin_lock(&eeprom->buffer_lock);
- eeprom->buffer[eeprom->buffer_idx++] = *val;
+ eeprom->buffer[eeprom->buffer_idx++ & eeprom->address_mask] = *val;
spin_unlock(&eeprom->buffer_lock);
}
break;
@@ -50,7 +65,7 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
/* fallthrough */
case I2C_SLAVE_READ_REQUESTED:
spin_lock(&eeprom->buffer_lock);
- *val = eeprom->buffer[eeprom->buffer_idx];
+ *val = eeprom->buffer[eeprom->buffer_idx & eeprom->address_mask];
spin_unlock(&eeprom->buffer_lock);
/*
* Do not increment buffer_idx here, because we don't know if
@@ -61,7 +76,7 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
case I2C_SLAVE_STOP:
case I2C_SLAVE_WRITE_REQUESTED:
- eeprom->first_write = true;
+ eeprom->idx_write_cnt = 0;
break;
default:
@@ -105,13 +120,16 @@ static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_de
{
struct eeprom_data *eeprom;
int ret;
- unsigned size = id->driver_data;
+ unsigned int size = FIELD_GET(I2C_SLAVE_BYTELEN, id->driver_data);
+ unsigned int flag_addr16 = FIELD_GET(I2C_SLAVE_FLAG_ADDR16, id->driver_data);
eeprom = devm_kzalloc(&client->dev, sizeof(struct eeprom_data) + size, GFP_KERNEL);
if (!eeprom)
return -ENOMEM;
- eeprom->first_write = true;
+ eeprom->idx_write_cnt = 0;
+ eeprom->num_address_bytes = flag_addr16 ? 2 : 1;
+ eeprom->address_mask = size - 1;
spin_lock_init(&eeprom->buffer_lock);
i2c_set_clientdata(client, eeprom);
@@ -146,7 +164,9 @@ static int i2c_slave_eeprom_remove(struct i2c_client *client)
}
static const struct i2c_device_id i2c_slave_eeprom_id[] = {
- { "slave-24c02", 2048 / 8 },
+ { "slave-24c02", I2C_SLAVE_DEVICE_MAGIC(2048 / 8, 0) },
+ { "slave-24c32", I2C_SLAVE_DEVICE_MAGIC(32768 / 8, I2C_SLAVE_FLAG_ADDR16) },
+ { "slave-24c64", I2C_SLAVE_DEVICE_MAGIC(65536 / 8, I2C_SLAVE_FLAG_ADDR16) },
{ }
};
MODULE_DEVICE_TABLE(i2c, i2c_slave_eeprom_id);
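A worked example of the driver_data encoding, using the "slave-24c64" entry (values follow directly from the macros in this hunk):

unsigned long magic = I2C_SLAVE_DEVICE_MAGIC(65536 / 8, I2C_SLAVE_FLAG_ADDR16);
/* magic == BIT(16) | 8192 == 0x12000 */

unsigned int size = FIELD_GET(I2C_SLAVE_BYTELEN, magic);	/* 8192 bytes */
unsigned int addr16 = FIELD_GET(I2C_SLAVE_FLAG_ADDR16, magic);	/* 1 */

/* addr16 == 1 gives num_address_bytes == 2, so two received bytes are
 * combined in the callback: buffer_idx = second | (first << 8) */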
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 41f9e268e3fb..24244a2f68cc 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -54,10 +54,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
page = sg_page_iter_page(&sg_iter);
- if (umem->writable && dirty)
- put_user_pages_dirty_lock(&page, 1);
- else
- put_user_page(page);
+ put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
}
sg_free_table(&umem->sg_head);
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index b89a9b9aef7a..469acb961fbd 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -118,10 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
size_t npages, bool dirty)
{
- if (dirty)
- put_user_pages_dirty_lock(p, npages);
- else
- put_user_pages(p, npages);
+ put_user_pages_dirty_lock(p, npages, dirty);
if (mm) { /* during close after signal, mm can be NULL */
atomic64_sub(npages, &mm->pinned_vm);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 9f53f63b1453..7bff0a1e713d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1041,7 +1041,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (cb)
iowait_pio_inc(&priv->s_iowait);
pbuf = sc_buffer_alloc(sc, plen, cb, qp);
- if (unlikely(IS_ERR_OR_NULL(pbuf))) {
+ if (IS_ERR_OR_NULL(pbuf)) {
if (cb)
verbs_pio_complete(qp, 0);
if (IS_ERR(pbuf)) {
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index bfbfbb7e0ff4..6bf764e41891 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -40,10 +40,7 @@
static void __qib_release_user_pages(struct page **p, size_t num_pages,
int dirty)
{
- if (dirty)
- put_user_pages_dirty_lock(p, num_pages);
- else
- put_user_pages(p, num_pages);
+ put_user_pages_dirty_lock(p, num_pages, dirty);
}
/**
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 0b0237d41613..62e6ffa9ad78 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -75,10 +75,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
for_each_sg(chunk->page_list, sg, chunk->nents, i) {
page = sg_page(sg);
pa = sg_phys(sg);
- if (dirty)
- put_user_pages_dirty_lock(&page, 1);
- else
- put_user_page(page);
+ put_user_pages_dirty_lock(&page, 1, dirty);
usnic_dbg("pa: %pa\n", &pa);
}
kfree(chunk);
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 87a56039f0ef..e99983f07663 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -63,15 +63,7 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
bool dirty)
{
- struct page **p = chunk->plist;
-
- while (num_pages--) {
- if (!PageDirty(*p) && dirty)
- put_user_pages_dirty_lock(p, 1);
- else
- put_user_page(*p);
- p++;
- }
+ put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
}
void siw_umem_release(struct siw_umem *umem, bool dirty)
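All of the infiniband call sites above now funnel through one helper whose third argument carries the dirty decision. The consolidated signature, as merged in mm/gup.c for this series:

void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
			       bool make_dirty);

/* usage sketch -- replaces the old open-coded branch: */
put_user_pages_dirty_lock(pages, npages, dirty);
/* make_dirty == false behaves like plain put_user_pages() */

The siw version also loses its per-page PageDirty test; the helper performs that skip-if-already-dirty optimization internally.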
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 9118ab85cb3a..dab4446fe7d8 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -345,6 +345,14 @@ static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
#endif
+static void dm_integrity_prepare(struct request *rq)
+{
+}
+
+static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
+{
+}
+
/*
* DM Integrity profile, protection is performed layer above (dm-crypt)
*/
@@ -352,6 +360,8 @@ static const struct blk_integrity_profile dm_integrity_profile = {
.name = "DM-DIF-EXT-TAG",
.generate_fn = NULL,
.verify_fn = NULL,
+ .prepare_fn = dm_integrity_prepare,
+ .complete_fn = dm_integrity_complete,
};
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c
index 0a47d474e97a..23e02ff27b17 100644
--- a/drivers/media/i2c/adv748x/adv748x-core.c
+++ b/drivers/media/i2c/adv748x/adv748x-core.c
@@ -183,14 +183,14 @@ static int adv748x_initialise_clients(struct adv748x_state *state)
int ret;
for (i = ADV748X_PAGE_DPLL; i < ADV748X_PAGE_MAX; ++i) {
- state->i2c_clients[i] = i2c_new_secondary_device(
+ state->i2c_clients[i] = i2c_new_ancillary_device(
state->client,
adv748x_default_addresses[i].name,
adv748x_default_addresses[i].default_addr);
- if (state->i2c_clients[i] == NULL) {
+ if (IS_ERR(state->i2c_clients[i])) {
adv_err(state, "failed to create i2c client %u\n", i);
- return -ENOMEM;
+ return PTR_ERR(state->i2c_clients[i]);
}
ret = adv748x_configure_regmap(state, i);
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 28a84bf9f8a9..2dedd6ebb236 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2862,10 +2862,8 @@ static void adv76xx_unregister_clients(struct adv76xx_state *state)
{
unsigned int i;
- for (i = 1; i < ARRAY_SIZE(state->i2c_clients); ++i) {
- if (state->i2c_clients[i])
- i2c_unregister_device(state->i2c_clients[i]);
- }
+ for (i = 1; i < ARRAY_SIZE(state->i2c_clients); ++i)
+ i2c_unregister_device(state->i2c_clients[i]);
}
static struct i2c_client *adv76xx_dummy_client(struct v4l2_subdev *sd,
@@ -2878,14 +2876,14 @@ static struct i2c_client *adv76xx_dummy_client(struct v4l2_subdev *sd,
struct i2c_client *new_client;
if (pdata && pdata->i2c_addresses[page])
- new_client = i2c_new_dummy(client->adapter,
+ new_client = i2c_new_dummy_device(client->adapter,
pdata->i2c_addresses[page]);
else
- new_client = i2c_new_secondary_device(client,
+ new_client = i2c_new_ancillary_device(client,
adv76xx_default_addresses[page].name,
adv76xx_default_addresses[page].default_addr);
- if (new_client)
+ if (!IS_ERR(new_client))
io_write(sd, io_reg, new_client->addr << 1);
return new_client;
@@ -3516,15 +3514,19 @@ static int adv76xx_probe(struct i2c_client *client,
}
for (i = 1; i < ADV76XX_PAGE_MAX; ++i) {
+ struct i2c_client *dummy_client;
+
if (!(BIT(i) & state->info->page_mask))
continue;
- state->i2c_clients[i] = adv76xx_dummy_client(sd, i);
- if (!state->i2c_clients[i]) {
- err = -EINVAL;
+ dummy_client = adv76xx_dummy_client(sd, i);
+ if (IS_ERR(dummy_client)) {
+ err = PTR_ERR(dummy_client);
v4l2_err(sd, "failed to create i2c client %u\n", i);
goto err_i2c;
}
+
+ state->i2c_clients[i] = dummy_client;
}
INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 76b4ac7b1678..aeb2f497c683 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -157,6 +157,7 @@ static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
struct videobuf_buffer *vb)
{
+ unsigned long untagged_baddr = untagged_addr(vb->baddr);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long prev_pfn, this_pfn;
@@ -164,22 +165,22 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
unsigned int offset;
int ret;
- offset = vb->baddr & ~PAGE_MASK;
+ offset = untagged_baddr & ~PAGE_MASK;
mem->size = PAGE_ALIGN(vb->size + offset);
ret = -EINVAL;
down_read(&mm->mmap_sem);
- vma = find_vma(mm, vb->baddr);
+ vma = find_vma(mm, untagged_baddr);
if (!vma)
goto out_up;
- if ((vb->baddr + mem->size) > vma->vm_end)
+ if ((untagged_baddr + mem->size) > vma->vm_end)
goto out_up;
pages_done = 0;
prev_pfn = 0; /* kill warning */
- user_address = vb->baddr;
+ user_address = untagged_baddr;
while (pages_done < (mem->size >> PAGE_SHIFT)) {
ret = follow_pfn(vma, user_address, &this_pfn);
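The videobuf change untags the user address once and uses that value for every VMA lookup and range check. A short sketch of why this matters (assumption: ARM64 top-byte-ignore, where bits 63:56 of a user pointer may hold a tag):

unsigned long untagged = untagged_addr(vb->baddr);

/* find_vma() and the vm_end comparison expect canonical addresses;
 * a still-tagged pointer would compare above every VMA: */
vma = find_vma(mm, untagged);
if (vma && untagged + mem->size <= vma->vm_end)
	/* ...safe to walk pfns starting from 'untagged'... */;

The page offset (baddr & ~PAGE_MASK) only involves low bits and is tag-independent, but deriving it from the untagged value keeps all the arithmetic on a single representation.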
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 518945b2f737..2cccd82a3106 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -14,7 +14,6 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
-#include <linux/log2.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/property.h>
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index ebd64e083726..1255302e251e 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -654,8 +654,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
padding = aligned_size - tx_msg_moved->size;
if (padding > 0) {
pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
- if (unlikely(WARN_ON(pad_buf == NULL
- || pad_buf == TAIL_FULL))) {
+ if (WARN_ON(pad_buf == NULL || pad_buf == TAIL_FULL)) {
/* This should not happen -- append should verify
* there is always space left at least to append
* tx_block_size */
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 2859cc99b73e..156c2a18a239 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -78,7 +78,7 @@ static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx)
if (idx < 0 || idx > ndev->mw_count)
return -EINVAL;
- return 1 << idx;
+ return ndev->dev_data->mw_idx << idx;
}
static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
@@ -909,7 +909,7 @@ static int amd_init_ntb(struct amd_ntb_dev *ndev)
{
void __iomem *mmio = ndev->self_mmio;
- ndev->mw_count = AMD_MW_CNT;
+ ndev->mw_count = ndev->dev_data->mw_count;
ndev->spad_count = AMD_SPADS_CNT;
ndev->db_count = AMD_DB_CNT;
@@ -1069,6 +1069,8 @@ static int amd_ntb_pci_probe(struct pci_dev *pdev,
goto err_ndev;
}
+ ndev->dev_data = (struct ntb_dev_data *)id->driver_data;
+
ndev_init_struct(ndev, pdev);
rc = amd_ntb_init_pci(ndev, pdev);
@@ -1123,9 +1125,21 @@ static const struct file_operations amd_ntb_debugfs_info = {
.read = ndev_debugfs_read,
};
+static const struct ntb_dev_data dev_data[] = {
+ { /* for device 145b */
+ .mw_count = 3,
+ .mw_idx = 1,
+ },
+ { /* for device 148b */
+ .mw_count = 2,
+ .mw_idx = 2,
+ },
+};
+
static const struct pci_device_id amd_ntb_pci_tbl[] = {
- {PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NTB)},
- {0}
+ { PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
+ { PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
+ { 0, }
};
MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h
index 8f3617a46292..139a307147bc 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.h
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.h
@@ -52,7 +52,6 @@
#include <linux/ntb.h>
#include <linux/pci.h>
-#define PCI_DEVICE_ID_AMD_NTB 0x145B
#define AMD_LINK_HB_TIMEOUT msecs_to_jiffies(1000)
#define AMD_LINK_STATUS_OFFSET 0x68
#define NTB_LIN_STA_ACTIVE_BIT 0x00000002
@@ -93,7 +92,6 @@ static inline void _write64(u64 val, void __iomem *mmio)
enum {
/* AMD NTB Capability */
- AMD_MW_CNT = 3,
AMD_DB_CNT = 16,
AMD_MSIX_VECTOR_CNT = 24,
AMD_SPADS_CNT = 16,
@@ -170,6 +168,11 @@ enum {
AMD_PEER_OFFSET = 0x400,
};
+struct ntb_dev_data {
+ const unsigned char mw_count;
+ const unsigned int mw_idx;
+};
+
struct amd_ntb_dev;
struct amd_ntb_vec {
@@ -185,6 +188,7 @@ struct amd_ntb_dev {
u32 cntl_sta;
u32 peer_sta;
+ struct ntb_dev_data *dev_data;
unsigned char mw_count;
unsigned char spad_count;
unsigned char db_count;
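A worked example of the reworked ndev_mw_to_bar() (values from the dev_data table in the .c hunk above):

/* device 0x145b: mw_count == 3, mw_idx == 1 -> BARs 1, 2, 4 for idx 0..2 */
/* device 0x148b: mw_count == 2, mw_idx == 2 -> BARs 2, 4    for idx 0..1 */
bar = ndev->dev_data->mw_idx << idx;

The old "1 << idx" hard-coded the 0x145b layout; carrying a per-device struct through pci_device_id.driver_data lets 0x148b start its memory windows at BAR 2 without touching the translation logic.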
diff --git a/drivers/ntb/hw/idt/Kconfig b/drivers/ntb/hw/idt/Kconfig
index bfc7cac94102..c79b54c1747d 100644
--- a/drivers/ntb/hw/idt/Kconfig
+++ b/drivers/ntb/hw/idt/Kconfig
@@ -4,11 +4,11 @@ config NTB_IDT
depends on PCI
select HWMON
help
- This driver supports NTB of cappable IDT PCIe-switches.
+ This driver supports the NTB function of capable IDT PCIe-switches.
Some of the pre-initializations must be made before IDT PCIe-switch
- exposes it NT-functions correctly. It should be done by either proper
- initialisation of EEPROM connected to master smbus of the switch or
+ exposes its NT-functions correctly. This should be done either by proper
+ initialization of the EEPROM connected to the master SMBus of the switch or
by BIOS using slave-SMBus interface changing corresponding registers
value. Evidently it must be done before PCI bus enumeration is
finished in Linux kernel.
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index f4959458d909..86ffa716eaf2 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -306,7 +306,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
if (rc)
return rc;
- if (addr == 0 || size == 0) {
+ if (size == 0) {
if (widx < nr_direct_mw)
switchtec_ntb_mw_clr_direct(sndev, widx);
else
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 40c90ca10729..00a5d5764993 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -292,7 +292,7 @@ static int ntb_transport_bus_match(struct device *dev,
static int ntb_transport_bus_probe(struct device *dev)
{
const struct ntb_transport_client *client;
- int rc = -EINVAL;
+ int rc;
get_device(dev);
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index d028331558ea..e9b7c2dfc730 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -1378,7 +1378,7 @@ static int perf_setup_peer_mw(struct perf_peer *peer)
int ret;
/* Get outbound MW parameters and map it */
- ret = ntb_peer_mw_get_addr(perf->ntb, peer->gidx, &phys_addr,
+ ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
&peer->outbuf_size);
if (ret)
return ret;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1ede1763a5ee..108f60b46804 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -666,8 +666,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
return BLK_STS_NOTSUPP;
control |= NVME_RW_PRINFO_PRACT;
- } else if (req_op(req) == REQ_OP_WRITE) {
- t10_pi_prepare(req, ns->pi_type);
}
switch (ns->pi_type) {
@@ -690,13 +688,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
void nvme_cleanup_cmd(struct request *req)
{
- if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
- nvme_req(req)->status == 0) {
- struct nvme_ns *ns = req->rq_disk->private_data;
-
- t10_pi_complete(req, ns->pi_type,
- blk_rq_bytes(req) >> ns->lba_shift);
- }
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
struct nvme_ns *ns = req->rq_disk->private_data;
struct page *page = req->special_vec.bv_page;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 1b67bb578f9f..ae21d08c65e8 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -674,6 +674,7 @@ config EEEPC_LAPTOP
config ASUS_WMI
tristate "ASUS WMI Driver"
depends on ACPI_WMI
+ depends on ACPI_BATTERY
depends on INPUT
depends on HWMON
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index 61fe341a85aa..ea68f6ed66ae 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -90,7 +90,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
for (i = 0; i < multi->num_clients && inst_data[i].type; i++) {
memset(&board_info, 0, sizeof(board_info));
strlcpy(board_info.type, inst_data[i].type, I2C_NAME_SIZE);
- snprintf(name, sizeof(name), "%s-%s.%d", match->id,
+ snprintf(name, sizeof(name), "%s-%s.%d", dev_name(dev),
inst_data[i].type, i);
board_info.dev_name = name;
switch (inst_data[i].flags & IRQ_RESOURCE_TYPE) {
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 9aca5e7ce6d0..07d1b911e72f 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -422,6 +422,13 @@ static const struct dmi_system_id critclk_systems[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "6ES7647-8B"),
},
},
+ {
+ .ident = "SIMATIC IPC277E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "6AV7882-0"),
+ },
+ },
{ /*sentinel*/ }
};
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index b0e632ba8590..e3a2518503ed 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -44,7 +44,7 @@ config PWM_AB8500
config PWM_ATMEL
tristate "Atmel PWM support"
- depends on ARCH_AT91
+ depends on ARCH_AT91 && OF
help
Generic PWM framework driver for Atmel SoC.
@@ -423,6 +423,17 @@ config PWM_SPEAR
To compile this driver as a module, choose M here: the module
will be called pwm-spear.
+config PWM_SPRD
+ tristate "Spreadtrum PWM support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Generic PWM framework driver for the PWM controller on
+ Spreadtrum SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-sprd.
+
config PWM_STI
tristate "STiH4xx PWM support"
depends on ARCH_STI
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 76b555b51887..26326adf71d7 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o
obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
+obj-$(CONFIG_PWM_SPRD) += pwm-sprd.o
obj-$(CONFIG_PWM_STI) += pwm-sti.o
obj-$(CONFIG_PWM_STM32) += pwm-stm32.o
obj-$(CONFIG_PWM_STM32_LP) += pwm-stm32-lp.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 8edfac17364e..6ad51aa60c03 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -448,36 +448,44 @@ EXPORT_SYMBOL_GPL(pwm_free);
/**
* pwm_apply_state() - atomically apply a new state to a PWM device
* @pwm: PWM device
- * @state: new state to apply. This can be adjusted by the PWM driver
- * if the requested config is not achievable, for example,
- * ->duty_cycle and ->period might be approximated.
+ * @state: new state to apply
*/
-int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
+int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
{
+ struct pwm_chip *chip;
int err;
if (!pwm || !state || !state->period ||
state->duty_cycle > state->period)
return -EINVAL;
+ chip = pwm->chip;
+
if (state->period == pwm->state.period &&
state->duty_cycle == pwm->state.duty_cycle &&
state->polarity == pwm->state.polarity &&
state->enabled == pwm->state.enabled)
return 0;
- if (pwm->chip->ops->apply) {
- err = pwm->chip->ops->apply(pwm->chip, pwm, state);
+ if (chip->ops->apply) {
+ err = chip->ops->apply(chip, pwm, state);
if (err)
return err;
- pwm->state = *state;
+ /*
+ * .apply might have to round some values in *state; if possible,
+ * read back the values actually implemented.
+ */
+ if (chip->ops->get_state)
+ chip->ops->get_state(chip, pwm, &pwm->state);
+ else
+ pwm->state = *state;
} else {
/*
* FIXME: restore the initial state in case of error.
*/
if (state->polarity != pwm->state.polarity) {
- if (!pwm->chip->ops->set_polarity)
+ if (!chip->ops->set_polarity)
return -ENOTSUPP;
/*
@@ -486,12 +494,12 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
* ->apply().
*/
if (pwm->state.enabled) {
- pwm->chip->ops->disable(pwm->chip, pwm);
+ chip->ops->disable(chip, pwm);
pwm->state.enabled = false;
}
- err = pwm->chip->ops->set_polarity(pwm->chip, pwm,
- state->polarity);
+ err = chip->ops->set_polarity(chip, pwm,
+ state->polarity);
if (err)
return err;
@@ -500,9 +508,9 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
if (state->period != pwm->state.period ||
state->duty_cycle != pwm->state.duty_cycle) {
- err = pwm->chip->ops->config(pwm->chip, pwm,
- state->duty_cycle,
- state->period);
+ err = chip->ops->config(pwm->chip, pwm,
+ state->duty_cycle,
+ state->period);
if (err)
return err;
@@ -512,11 +520,11 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
if (state->enabled != pwm->state.enabled) {
if (state->enabled) {
- err = pwm->chip->ops->enable(pwm->chip, pwm);
+ err = chip->ops->enable(chip, pwm);
if (err)
return err;
} else {
- pwm->chip->ops->disable(pwm->chip, pwm);
+ chip->ops->disable(chip, pwm);
}
pwm->state.enabled = state->enabled;
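A consumer-side sketch of the constified API (the period/duty numbers are arbitrary examples):

struct pwm_state state;
int ret;

pwm_init_state(pwm, &state);	/* seed from the PWM's reference args */
state.period = 1000000;		/* 1 ms */
state.duty_cycle = 250000;	/* 25 % duty */
state.enabled = true;

ret = pwm_apply_state(pwm, &state);
/* 'state' is never adjusted in place anymore; if the hardware had to
 * round, the implemented values land in pwm->state via ->get_state()
 * and can be fetched with pwm_get_state(pwm, &state). */

This is also why the pwm-fsl-ftm hunk below drops its manual newstate->period/duty_cycle write-back.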
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index d13a83f430ac..dcbc0489dfd4 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -39,7 +39,7 @@ static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
}
static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
struct atmel_hlcdc *hlcdc = chip->hlcdc;
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index e5e1eaf372fa..9ba733467e26 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -209,7 +209,7 @@ static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
}
static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
struct pwm_state cstate;
@@ -318,19 +318,6 @@ static const struct atmel_pwm_data mchp_sam9x60_pwm_data = {
},
};
-static const struct platform_device_id atmel_pwm_devtypes[] = {
- {
- .name = "at91sam9rl-pwm",
- .driver_data = (kernel_ulong_t)&atmel_sam9rl_pwm_data,
- }, {
- .name = "sama5d3-pwm",
- .driver_data = (kernel_ulong_t)&atmel_sama5_pwm_data,
- }, {
- /* sentinel */
- },
-};
-MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes);
-
static const struct of_device_id atmel_pwm_dt_ids[] = {
{
.compatible = "atmel,at91sam9rl-pwm",
@@ -350,34 +337,20 @@ static const struct of_device_id atmel_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
-static inline const struct atmel_pwm_data *
-atmel_pwm_get_driver_data(struct platform_device *pdev)
-{
- const struct platform_device_id *id;
-
- if (pdev->dev.of_node)
- return of_device_get_match_data(&pdev->dev);
-
- id = platform_get_device_id(pdev);
-
- return (struct atmel_pwm_data *)id->driver_data;
-}
-
static int atmel_pwm_probe(struct platform_device *pdev)
{
- const struct atmel_pwm_data *data;
struct atmel_pwm_chip *atmel_pwm;
struct resource *res;
int ret;
- data = atmel_pwm_get_driver_data(pdev);
- if (!data)
- return -ENODEV;
-
atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
if (!atmel_pwm)
return -ENOMEM;
+ mutex_init(&atmel_pwm->isr_lock);
+ atmel_pwm->data = of_device_get_match_data(&pdev->dev);
+ atmel_pwm->updated_pwms = 0;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
atmel_pwm->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(atmel_pwm->base))
@@ -395,17 +368,10 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->chip.dev = &pdev->dev;
atmel_pwm->chip.ops = &atmel_pwm_ops;
-
- if (pdev->dev.of_node) {
- atmel_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
- atmel_pwm->chip.of_pwm_n_cells = 3;
- }
-
+ atmel_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
+ atmel_pwm->chip.of_pwm_n_cells = 3;
atmel_pwm->chip.base = -1;
atmel_pwm->chip.npwm = 4;
- atmel_pwm->data = data;
- atmel_pwm->updated_pwms = 0;
- mutex_init(&atmel_pwm->isr_lock);
ret = pwmchip_add(&atmel_pwm->chip);
if (ret < 0) {
@@ -437,7 +403,6 @@ static struct platform_driver atmel_pwm_driver = {
.name = "atmel-pwm",
.of_match_table = of_match_ptr(atmel_pwm_dt_ids),
},
- .id_table = atmel_pwm_devtypes,
.probe = atmel_pwm_probe,
.remove = atmel_pwm_remove,
};
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index d961a8207b1c..56c38cfae92c 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -115,7 +115,7 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
}
static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
unsigned long prescale = IPROC_PWM_PRESCALE_MIN;
struct iproc_pwmc *ip = to_iproc_pwmc(chip);
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index f6fe0b922e1e..91e24f01b54e 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -21,7 +21,7 @@
#define PERIOD(x) (((x) * 0x10) + 0x10)
#define DUTY(x) (((x) * 0x10) + 0x14)
-#define MIN_PERIOD 108 /* 9.2 MHz max. PWM clock */
+#define PERIOD_MIN 0x2
struct bcm2835_pwm {
struct pwm_chip chip;
@@ -64,6 +64,7 @@ static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
unsigned long rate = clk_get_rate(pc->clk);
unsigned long scaler;
+ u32 period;
if (!rate) {
dev_err(pc->dev, "failed to get clock rate\n");
@@ -71,17 +72,14 @@ static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
scaler = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate);
+ period = DIV_ROUND_CLOSEST(period_ns, scaler);
- if (period_ns <= MIN_PERIOD) {
- dev_err(pc->dev, "period %d not supported, minimum %d\n",
- period_ns, MIN_PERIOD);
+ if (period < PERIOD_MIN)
return -EINVAL;
- }
writel(DIV_ROUND_CLOSEST(duty_ns, scaler),
pc->base + DUTY(pwm->hwpwm));
- writel(DIV_ROUND_CLOSEST(period_ns, scaler),
- pc->base + PERIOD(pwm->hwpwm));
+ writel(period, pc->base + PERIOD(pwm->hwpwm));
return 0;
}
@@ -155,8 +153,11 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pc->clk)) {
- dev_err(&pdev->dev, "clock not found: %ld\n", PTR_ERR(pc->clk));
- return PTR_ERR(pc->clk);
+ ret = PTR_ERR(pc->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "clock not found: %d\n", ret);
+
+ return ret;
}
ret = clk_prepare_enable(pc->clk);
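A worked example of the new minimum-period check (the 100 MHz clock rate is an assumed example):

/* scaler = DIV_ROUND_CLOSEST(NSEC_PER_SEC, 100000000) = 10 ns per tick */
/* period_ns = 15 -> period = DIV_ROUND_CLOSEST(15, 10) = 2 == PERIOD_MIN, ok */
/* period_ns = 14 -> period = 1 <  PERIOD_MIN            -> -EINVAL      */

Validating the computed register value against the hardware minimum of 2 ticks replaces the old fixed 108 ns bound, which was only correct at the 9.2 MHz maximum PWM clock.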
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 85bea2d40b7d..89497448d217 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -93,7 +93,7 @@ static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index)
}
static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
int duty_cycle;
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 9d31a217111d..59272a920479 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -227,7 +227,7 @@ static bool fsl_pwm_is_other_pwm_enabled(struct fsl_pwm_chip *fpc,
static int fsl_pwm_apply_config(struct fsl_pwm_chip *fpc,
struct pwm_device *pwm,
- struct pwm_state *newstate)
+ const struct pwm_state *newstate)
{
unsigned int duty;
u32 reg_polarity;
@@ -292,17 +292,13 @@ static int fsl_pwm_apply_config(struct fsl_pwm_chip *fpc,
regmap_update_bits(fpc->regmap, FTM_POL, BIT(pwm->hwpwm), reg_polarity);
- newstate->period = fsl_pwm_ticks_to_ns(fpc,
- fpc->period.mod_period + 1);
- newstate->duty_cycle = fsl_pwm_ticks_to_ns(fpc, duty);
-
ftm_set_write_protection(fpc);
return 0;
}
static int fsl_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *newstate)
+ const struct pwm_state *newstate)
{
struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
struct pwm_state *oldstate = &pwm->state;
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index 753bd58111e4..ad205fdad372 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -149,7 +149,7 @@ static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
}
static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index e8385c1cf342..9145f6160649 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -89,7 +89,7 @@ to_imx_tpm_pwm_chip(struct pwm_chip *chip)
static int pwm_imx_tpm_round_state(struct pwm_chip *chip,
struct imx_tpm_pwm_param *p,
struct pwm_state *real_state,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
u32 rate, prescale, period_count, clock_unit;
@@ -289,7 +289,7 @@ static int pwm_imx_tpm_apply_hw(struct pwm_chip *chip,
static int pwm_imx_tpm_apply(struct pwm_chip *chip,
struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct imx_tpm_pwm_chip *tpm = to_imx_tpm_pwm_chip(chip);
struct imx_tpm_pwm_param param;
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 434a351fb626..ae11d8577f18 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -3,6 +3,10 @@
* simple driver for PWM (Pulse Width Modulator) controller
*
* Derived from pxa PWM driver by eric miao <[email protected]>
+ *
+ * Limitations:
+ * - When disabled the output is driven to 0 independent of the configured
+ * polarity.
*/
#include <linux/bitfield.h>
@@ -205,7 +209,7 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
}
static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
unsigned long period_cycles, duty_cycles, prescale;
struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index f901e8a0d33d..9d78cc21cb12 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -2,6 +2,11 @@
/*
* Copyright (C) 2010, Lars-Peter Clausen <[email protected]>
* JZ4740 platform PWM support
+ *
+ * Limitations:
+ * - The .apply callback doesn't complete the currently running period before
+ * reconfiguring the hardware.
+ * - Each period starts with the inactive part.
*/
#include <linux/clk.h>
@@ -83,7 +88,7 @@ static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
}
static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip);
unsigned long long tmp;
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 4098a4601691..75bbfe5f3bc2 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -122,7 +122,7 @@ static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
}
static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct pwm_lpss_chip *lpwm = to_lpwm(chip);
int ret;
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index eb6674ce995f..b94e0d09c300 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Mediatek Pulse Width Modulator driver
+ * MediaTek Pulse Width Modulator driver
*
* Copyright (C) 2015 John Crispin <[email protected]>
* Copyright (C) 2017 Zhi Mao <[email protected]>
*
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/err.h>
@@ -35,125 +33,107 @@
#define PWM_CLK_DIV_MAX 7
-enum {
- MTK_CLK_MAIN = 0,
- MTK_CLK_TOP,
- MTK_CLK_PWM1,
- MTK_CLK_PWM2,
- MTK_CLK_PWM3,
- MTK_CLK_PWM4,
- MTK_CLK_PWM5,
- MTK_CLK_PWM6,
- MTK_CLK_PWM7,
- MTK_CLK_PWM8,
- MTK_CLK_MAX,
-};
-
-static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
- "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5", "pwm6", "pwm7",
- "pwm8"
-};
-
-struct mtk_pwm_platform_data {
+struct pwm_mediatek_of_data {
unsigned int num_pwms;
bool pwm45_fixup;
- bool has_clks;
};
/**
- * struct mtk_pwm_chip - struct representing PWM chip
+ * struct pwm_mediatek_chip - struct representing PWM chip
* @chip: linux PWM chip representation
* @regs: base address of PWM chip
- * @clks: list of clocks
+ * @clk_top: the top clock generator
+ * @clk_main: the clock used by PWM core
+ * @clk_pwms: the clock used by each PWM channel
+ * @clk_freq: the fixed clock frequency of legacy MIPS SoCs
*/
-struct mtk_pwm_chip {
+struct pwm_mediatek_chip {
struct pwm_chip chip;
void __iomem *regs;
- struct clk *clks[MTK_CLK_MAX];
- const struct mtk_pwm_platform_data *soc;
+ struct clk *clk_top;
+ struct clk *clk_main;
+ struct clk **clk_pwms;
+ const struct pwm_mediatek_of_data *soc;
};
-static const unsigned int mtk_pwm_reg_offset[] = {
+static const unsigned int pwm_mediatek_reg_offset[] = {
0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
};
-static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip)
+static inline struct pwm_mediatek_chip *
+to_pwm_mediatek_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct mtk_pwm_chip, chip);
+ return container_of(chip, struct pwm_mediatek_chip, chip);
}
-static int mtk_pwm_clk_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int pwm_mediatek_clk_enable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
{
- struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
+ struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
int ret;
- if (!pc->soc->has_clks)
- return 0;
-
- ret = clk_prepare_enable(pc->clks[MTK_CLK_TOP]);
+ ret = clk_prepare_enable(pc->clk_top);
if (ret < 0)
return ret;
- ret = clk_prepare_enable(pc->clks[MTK_CLK_MAIN]);
+ ret = clk_prepare_enable(pc->clk_main);
if (ret < 0)
goto disable_clk_top;
- ret = clk_prepare_enable(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]);
+ ret = clk_prepare_enable(pc->clk_pwms[pwm->hwpwm]);
if (ret < 0)
goto disable_clk_main;
return 0;
disable_clk_main:
- clk_disable_unprepare(pc->clks[MTK_CLK_MAIN]);
+ clk_disable_unprepare(pc->clk_main);
disable_clk_top:
- clk_disable_unprepare(pc->clks[MTK_CLK_TOP]);
+ clk_disable_unprepare(pc->clk_top);
return ret;
}
-static void mtk_pwm_clk_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void pwm_mediatek_clk_disable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
{
- struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
-
- if (!pc->soc->has_clks)
- return;
+ struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
- clk_disable_unprepare(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]);
- clk_disable_unprepare(pc->clks[MTK_CLK_MAIN]);
- clk_disable_unprepare(pc->clks[MTK_CLK_TOP]);
+ clk_disable_unprepare(pc->clk_pwms[pwm->hwpwm]);
+ clk_disable_unprepare(pc->clk_main);
+ clk_disable_unprepare(pc->clk_top);
}
-static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num,
- unsigned int offset)
+static inline u32 pwm_mediatek_readl(struct pwm_mediatek_chip *chip,
+ unsigned int num, unsigned int offset)
{
- return readl(chip->regs + mtk_pwm_reg_offset[num] + offset);
+ return readl(chip->regs + pwm_mediatek_reg_offset[num] + offset);
}
-static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip,
- unsigned int num, unsigned int offset,
- u32 value)
+static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip,
+ unsigned int num, unsigned int offset,
+ u32 value)
{
- writel(value, chip->regs + mtk_pwm_reg_offset[num] + offset);
+ writel(value, chip->regs + pwm_mediatek_reg_offset[num] + offset);
}
-static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
{
- struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
- struct clk *clk = pc->clks[MTK_CLK_PWM1 + pwm->hwpwm];
+ struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH,
reg_thres = PWMTHRES;
u64 resolution;
int ret;
- ret = mtk_pwm_clk_enable(chip, pwm);
+ ret = pwm_mediatek_clk_enable(chip, pwm);
+
if (ret < 0)
return ret;
/* Using picosecond resolution gives higher accuracy */
resolution = (u64)NSEC_PER_SEC * 1000;
- do_div(resolution, clk_get_rate(clk));
+ do_div(resolution, clk_get_rate(pc->clk_pwms[pwm->hwpwm]));
cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
while (cnt_period > 8191) {
@@ -164,7 +144,7 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
if (clkdiv > PWM_CLK_DIV_MAX) {
- mtk_pwm_clk_disable(chip, pwm);
+ pwm_mediatek_clk_disable(chip, pwm);
dev_err(chip->dev, "period %d not supported\n", period_ns);
return -EINVAL;
}
@@ -179,22 +159,22 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
cnt_duty = DIV_ROUND_CLOSEST_ULL((u64)duty_ns * 1000, resolution);
- mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv);
- mtk_pwm_writel(pc, pwm->hwpwm, reg_width, cnt_period);
- mtk_pwm_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
+ pwm_mediatek_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv);
+ pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period);
+ pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
- mtk_pwm_clk_disable(chip, pwm);
+ pwm_mediatek_clk_disable(chip, pwm);
return 0;
}
-static int mtk_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
+ struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
u32 value;
int ret;
- ret = mtk_pwm_clk_enable(chip, pwm);
+ ret = pwm_mediatek_clk_enable(chip, pwm);
if (ret < 0)
return ret;
@@ -205,29 +185,28 @@ static int mtk_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return 0;
}
-static void mtk_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
+ struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
u32 value;
value = readl(pc->regs);
value &= ~BIT(pwm->hwpwm);
writel(value, pc->regs);
- mtk_pwm_clk_disable(chip, pwm);
+ pwm_mediatek_clk_disable(chip, pwm);
}
-static const struct pwm_ops mtk_pwm_ops = {
- .config = mtk_pwm_config,
- .enable = mtk_pwm_enable,
- .disable = mtk_pwm_disable,
+static const struct pwm_ops pwm_mediatek_ops = {
+ .config = pwm_mediatek_config,
+ .enable = pwm_mediatek_enable,
+ .disable = pwm_mediatek_disable,
.owner = THIS_MODULE,
};
-static int mtk_pwm_probe(struct platform_device *pdev)
+static int pwm_mediatek_probe(struct platform_device *pdev)
{
- const struct mtk_pwm_platform_data *data;
- struct mtk_pwm_chip *pc;
+ struct pwm_mediatek_chip *pc;
struct resource *res;
unsigned int i;
int ret;
@@ -236,31 +215,51 @@ static int mtk_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
- data = of_device_get_match_data(&pdev->dev);
- if (data == NULL)
- return -EINVAL;
- pc->soc = data;
+ pc->soc = of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pc->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- for (i = 0; i < data->num_pwms + 2 && pc->soc->has_clks; i++) {
- pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]);
- if (IS_ERR(pc->clks[i])) {
+ pc->clk_pwms = devm_kcalloc(&pdev->dev, pc->soc->num_pwms,
+ sizeof(*pc->clk_pwms), GFP_KERNEL);
+ if (!pc->clk_pwms)
+ return -ENOMEM;
+
+ pc->clk_top = devm_clk_get(&pdev->dev, "top");
+ if (IS_ERR(pc->clk_top)) {
+ dev_err(&pdev->dev, "clock: top fail: %ld\n",
+ PTR_ERR(pc->clk_top));
+ return PTR_ERR(pc->clk_top);
+ }
+
+ pc->clk_main = devm_clk_get(&pdev->dev, "main");
+ if (IS_ERR(pc->clk_main)) {
+ dev_err(&pdev->dev, "clock: main fail: %ld\n",
+ PTR_ERR(pc->clk_main));
+ return PTR_ERR(pc->clk_main);
+ }
+
+ for (i = 0; i < pc->soc->num_pwms; i++) {
+ char name[8];
+
+ snprintf(name, sizeof(name), "pwm%d", i + 1);
+
+ pc->clk_pwms[i] = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(pc->clk_pwms[i])) {
dev_err(&pdev->dev, "clock: %s fail: %ld\n",
- mtk_pwm_clk_name[i], PTR_ERR(pc->clks[i]));
- return PTR_ERR(pc->clks[i]);
+ name, PTR_ERR(pc->clk_pwms[i]));
+ return PTR_ERR(pc->clk_pwms[i]);
}
}
platform_set_drvdata(pdev, pc);
pc->chip.dev = &pdev->dev;
- pc->chip.ops = &mtk_pwm_ops;
+ pc->chip.ops = &pwm_mediatek_ops;
pc->chip.base = -1;
- pc->chip.npwm = data->num_pwms;
+ pc->chip.npwm = pc->soc->num_pwms;
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
@@ -271,55 +270,63 @@ static int mtk_pwm_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_pwm_remove(struct platform_device *pdev)
+static int pwm_mediatek_remove(struct platform_device *pdev)
{
- struct mtk_pwm_chip *pc = platform_get_drvdata(pdev);
+ struct pwm_mediatek_chip *pc = platform_get_drvdata(pdev);
return pwmchip_remove(&pc->chip);
}
-static const struct mtk_pwm_platform_data mt2712_pwm_data = {
+static const struct pwm_mediatek_of_data mt2712_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
- .has_clks = true,
};
-static const struct mtk_pwm_platform_data mt7622_pwm_data = {
+static const struct pwm_mediatek_of_data mt7622_pwm_data = {
.num_pwms = 6,
.pwm45_fixup = false,
- .has_clks = true,
};
-static const struct mtk_pwm_platform_data mt7623_pwm_data = {
+static const struct pwm_mediatek_of_data mt7623_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = true,
- .has_clks = true,
};
-static const struct mtk_pwm_platform_data mt7628_pwm_data = {
+static const struct pwm_mediatek_of_data mt7628_pwm_data = {
.num_pwms = 4,
.pwm45_fixup = true,
- .has_clks = false,
};
-static const struct of_device_id mtk_pwm_of_match[] = {
+static const struct pwm_mediatek_of_data mt7629_pwm_data = {
+ .num_pwms = 1,
+ .pwm45_fixup = false,
+};
+
+static const struct pwm_mediatek_of_data mt8516_pwm_data = {
+ .num_pwms = 5,
+ .pwm45_fixup = false,
+};
+
+static const struct of_device_id pwm_mediatek_of_match[] = {
{ .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data },
{ .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
{ .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
{ .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
+ { .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data },
+ { .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data },
{ },
};
-MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
+MODULE_DEVICE_TABLE(of, pwm_mediatek_of_match);
-static struct platform_driver mtk_pwm_driver = {
+static struct platform_driver pwm_mediatek_driver = {
.driver = {
- .name = "mtk-pwm",
- .of_match_table = mtk_pwm_of_match,
+ .name = "pwm-mediatek",
+ .of_match_table = pwm_mediatek_of_match,
},
- .probe = mtk_pwm_probe,
- .remove = mtk_pwm_remove,
+ .probe = pwm_mediatek_probe,
+ .remove = pwm_mediatek_remove,
};
-module_platform_driver(mtk_pwm_driver);
+module_platform_driver(pwm_mediatek_driver);
MODULE_AUTHOR("John Crispin <[email protected]>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
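For readers following the clock rework above: probe now looks up the shared "top" and "main" clocks plus one "pwmN" clock per channel, replacing the old mtk_pwm_clk_name[] table. A minimal sketch of the enable path this implies follows; the real pwm_mediatek_clk_enable() is outside this hunk, so the ordering and error unwinding here are assumptions for illustration only:

/* Illustrative sketch, not the driver's verbatim code: enable the shared
 * clocks first, then the per-channel clock that probe looked up via
 * snprintf(name, sizeof(name), "pwm%d", i + 1).
 */
static int pwm_mediatek_clk_enable_sketch(struct pwm_mediatek_chip *pc,
					  struct pwm_device *pwm)
{
	int ret;

	ret = clk_prepare_enable(pc->clk_top);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(pc->clk_main);
	if (ret < 0)
		goto disable_clk_top;

	ret = clk_prepare_enable(pc->clk_pwms[pwm->hwpwm]);
	if (ret < 0)
		goto disable_clk_main;

	return 0;

disable_clk_main:
	clk_disable_unprepare(pc->clk_main);
disable_clk_top:
	clk_disable_unprepare(pc->clk_top);

	return ret;
}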
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 3cbff5cbb789..6245bbdb6e6c 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -159,7 +159,7 @@ static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
}
static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
unsigned int duty, period, pre_div, cnt, duty_cnt;
@@ -265,7 +265,7 @@ static void meson_pwm_disable(struct meson_pwm *meson, struct pwm_device *pwm)
}
static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
struct meson_pwm *meson = to_meson_pwm(chip);
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 04c0f6b95c1a..b14376b47ac8 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -126,15 +126,13 @@ static int mxs_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mxs_pwm_chip *mxs;
- struct resource *res;
int ret;
mxs = devm_kzalloc(&pdev->dev, sizeof(*mxs), GFP_KERNEL);
if (!mxs)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxs->base = devm_ioremap_resource(&pdev->dev, res);
+ mxs->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxs->base))
return PTR_ERR(mxs->base);
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 5b2b8ecc354c..852eb2347954 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -158,7 +158,7 @@ static void rcar_pwm_disable(struct rcar_pwm_chip *rp)
}
static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
struct pwm_state cur_state;
@@ -187,7 +187,7 @@ static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
/* The SYNC should be set to 0 even if rcar_pwm_set_counter failed */
rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR);
- if (!ret && state->enabled)
+ if (!ret)
ret = rcar_pwm_enable(rp);
return ret;
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 51b96cb7dd25..73352e6fbccb 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -90,16 +90,16 @@ static void rockchip_pwm_get_state(struct pwm_chip *chip,
state->enabled = ((val & enable_conf) == enable_conf) ?
true : false;
- if (pc->data->supports_polarity) {
- if (!(val & PWM_DUTY_POSITIVE))
- state->polarity = PWM_POLARITY_INVERSED;
- }
+ if (pc->data->supports_polarity && !(val & PWM_DUTY_POSITIVE))
+ state->polarity = PWM_POLARITY_INVERSED;
+ else
+ state->polarity = PWM_POLARITY_NORMAL;
clk_disable(pc->pclk);
}
static void rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
unsigned long period, duty;
@@ -183,7 +183,7 @@ static int rockchip_pwm_enable(struct pwm_chip *chip,
}
static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
struct pwm_state curstate;
@@ -212,12 +212,6 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
goto out;
}
- /*
- * Update the state with the real hardware, which can differ a bit
- * because of period/duty_cycle approximation.
- */
- rockchip_pwm_get_state(chip, pwm, state);
-
out:
clk_disable(pc->pclk);
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index a7c107f19e66..cc63f9baa481 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -147,7 +147,7 @@ static int pwm_sifive_enable(struct pwm_chip *chip, bool enable)
}
static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
struct pwm_state cur_state;
@@ -250,10 +250,8 @@ static int pwm_sifive_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ddata->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(ddata->regs)) {
- dev_err(dev, "Unable to map IO resources\n");
+ if (IS_ERR(ddata->regs))
return PTR_ERR(ddata->regs);
- }
ddata->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ddata->clk)) {
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
new file mode 100644
index 000000000000..be2394227423
--- /dev/null
+++ b/drivers/pwm/pwm-sprd.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Spreadtrum Communications Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define SPRD_PWM_PRESCALE 0x0
+#define SPRD_PWM_MOD 0x4
+#define SPRD_PWM_DUTY 0x8
+#define SPRD_PWM_ENABLE 0x18
+
+#define SPRD_PWM_MOD_MAX GENMASK(7, 0)
+#define SPRD_PWM_DUTY_MSK GENMASK(15, 0)
+#define SPRD_PWM_PRESCALE_MSK GENMASK(7, 0)
+#define SPRD_PWM_ENABLE_BIT BIT(0)
+
+#define SPRD_PWM_CHN_NUM 4
+#define SPRD_PWM_REGS_SHIFT 5
+#define SPRD_PWM_CHN_CLKS_NUM 2
+#define SPRD_PWM_CHN_OUTPUT_CLK 1
+
+struct sprd_pwm_chn {
+ struct clk_bulk_data clks[SPRD_PWM_CHN_CLKS_NUM];
+ u32 clk_rate;
+};
+
+struct sprd_pwm_chip {
+ void __iomem *base;
+ struct device *dev;
+ struct pwm_chip chip;
+ int num_pwms;
+ struct sprd_pwm_chn chn[SPRD_PWM_CHN_NUM];
+};
+
+/*
+ * The list of clocks required by the PWM channels. Each channel needs two
+ * clocks: an enable clock and a pwm clock.
+ */
+static const char * const sprd_pwm_clks[] = {
+ "enable0", "pwm0",
+ "enable1", "pwm1",
+ "enable2", "pwm2",
+ "enable3", "pwm3",
+};
+
+static u32 sprd_pwm_read(struct sprd_pwm_chip *spc, u32 hwid, u32 reg)
+{
+ u32 offset = reg + (hwid << SPRD_PWM_REGS_SHIFT);
+
+ return readl_relaxed(spc->base + offset);
+}
+
+static void sprd_pwm_write(struct sprd_pwm_chip *spc, u32 hwid,
+ u32 reg, u32 val)
+{
+ u32 offset = reg + (hwid << SPRD_PWM_REGS_SHIFT);
+
+ writel_relaxed(val, spc->base + offset);
+}
+
+static void sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct sprd_pwm_chip *spc =
+ container_of(chip, struct sprd_pwm_chip, chip);
+ struct sprd_pwm_chn *chn = &spc->chn[pwm->hwpwm];
+ u32 val, duty, prescale;
+ u64 tmp;
+ int ret;
+
+ /*
+ * The clocks to the PWM channel have to be enabled
+ * before reading the registers.
+ */
+ ret = clk_bulk_prepare_enable(SPRD_PWM_CHN_CLKS_NUM, chn->clks);
+ if (ret) {
+ dev_err(spc->dev, "failed to enable pwm%u clocks\n",
+ pwm->hwpwm);
+ return;
+ }
+
+ val = sprd_pwm_read(spc, pwm->hwpwm, SPRD_PWM_ENABLE);
+ if (val & SPRD_PWM_ENABLE_BIT)
+ state->enabled = true;
+ else
+ state->enabled = false;
+
+ /*
+ * The hardware provides a counter that is fed by the source clock.
+ * The period length is (PRESCALE + 1) * MOD counter steps.
+ * The duty cycle length is (PRESCALE + 1) * DUTY counter steps.
+ * Thus period_ns and duty_ns are calculated as:
+ * period_ns = NSEC_PER_SEC * (prescale + 1) * mod / clk_rate
+ * duty_ns = NSEC_PER_SEC * (prescale + 1) * duty / clk_rate
+ */
+ val = sprd_pwm_read(spc, pwm->hwpwm, SPRD_PWM_PRESCALE);
+ prescale = val & SPRD_PWM_PRESCALE_MSK;
+ tmp = (prescale + 1) * NSEC_PER_SEC * SPRD_PWM_MOD_MAX;
+ state->period = DIV_ROUND_CLOSEST_ULL(tmp, chn->clk_rate);
+
+ val = sprd_pwm_read(spc, pwm->hwpwm, SPRD_PWM_DUTY);
+ duty = val & SPRD_PWM_DUTY_MSK;
+ tmp = (prescale + 1) * NSEC_PER_SEC * duty;
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, chn->clk_rate);
+
+ /* Disable PWM clocks if the PWM channel is not in enable state. */
+ if (!state->enabled)
+ clk_bulk_disable_unprepare(SPRD_PWM_CHN_CLKS_NUM, chn->clks);
+}
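To make the readback formulas above concrete, here is a small standalone example with illustrative numbers (not taken from any particular SoC):

/* Worked example of the get_state() math; compiles with any C compiler. */
#include <stdio.h>

int main(void)
{
	unsigned long long clk_rate = 26000000;	/* assumed 26 MHz input */
	unsigned int prescale = 0, mod = 255, duty = 127;

	/* DIV_ROUND_CLOSEST_ULL(x, d) == (x + d / 2) / d */
	unsigned long long period_ns =
		((prescale + 1ULL) * 1000000000ULL * mod + clk_rate / 2) / clk_rate;
	unsigned long long duty_ns =
		((prescale + 1ULL) * 1000000000ULL * duty + clk_rate / 2) / clk_rate;

	/* prints: period_ns = 9808, duty_ns = 4885 */
	printf("period_ns = %llu, duty_ns = %llu\n", period_ns, duty_ns);
	return 0;
}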
+
+static int sprd_pwm_config(struct sprd_pwm_chip *spc, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct sprd_pwm_chn *chn = &spc->chn[pwm->hwpwm];
+ u32 prescale, duty;
+ u64 tmp;
+
+ /*
+ * The hardware provides a counter that is fed by the source clock.
+ * The period length is (PRESCALE + 1) * MOD counter steps.
+ * The duty cycle length is (PRESCALE + 1) * DUTY counter steps.
+ *
+ * To keep the maths simple we always use MOD = SPRD_PWM_MOD_MAX.
+ * The value for PRESCALE is selected such that the resulting period
+ * is as long as possible without exceeding the requested one, given
+ * these settings (MOD = SPRD_PWM_MOD_MAX and the input clock).
+ */
+ duty = duty_ns * SPRD_PWM_MOD_MAX / period_ns;
+
+ tmp = (u64)chn->clk_rate * period_ns;
+ do_div(tmp, NSEC_PER_SEC);
+ prescale = DIV_ROUND_CLOSEST_ULL(tmp, SPRD_PWM_MOD_MAX) - 1;
+ if (prescale > SPRD_PWM_PRESCALE_MSK)
+ prescale = SPRD_PWM_PRESCALE_MSK;
+
+ /*
+ * Note: Writing DUTY triggers the hardware to actually apply the
+ * values written to MOD and DUTY to the output, so DUTY must be
+ * written last.
+ *
+ * The hardware ensures that the currently running period completes
+ * before a new configuration is applied, to avoid mixed settings.
+ */
+ sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_PRESCALE, prescale);
+ sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_MOD, SPRD_PWM_MOD_MAX);
+ sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_DUTY, duty);
+
+ return 0;
+}
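And the opposite direction, mirroring the math sprd_pwm_config() performs, with the same illustrative clock rate:

/* Worked example of the PRESCALE/DUTY selection; values are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned long long clk_rate = 26000000;		/* assumed 26 MHz */
	unsigned long long period_ns = 9808, duty_ns = 4904;
	unsigned int mod_max = 255;			/* SPRD_PWM_MOD_MAX */

	unsigned long long tmp = clk_rate * period_ns / 1000000000ULL;
	unsigned int prescale = (unsigned int)((tmp + mod_max / 2) / mod_max) - 1;
	unsigned int duty = (unsigned int)(duty_ns * mod_max / period_ns);

	/* prints: prescale = 0, duty = 127 (a 50% duty cycle) */
	printf("prescale = %u, duty = %u\n", prescale, duty);
	return 0;
}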
+
+static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct sprd_pwm_chip *spc =
+ container_of(chip, struct sprd_pwm_chip, chip);
+ struct sprd_pwm_chn *chn = &spc->chn[pwm->hwpwm];
+ struct pwm_state *cstate = &pwm->state;
+ int ret;
+
+ if (state->enabled) {
+ if (!cstate->enabled) {
+ /*
+ * The clocks to the PWM channel have to be
+ * enabled before writing to the registers.
+ */
+ ret = clk_bulk_prepare_enable(SPRD_PWM_CHN_CLKS_NUM,
+ chn->clks);
+ if (ret) {
+ dev_err(spc->dev,
+ "failed to enable pwm%u clocks\n",
+ pwm->hwpwm);
+ return ret;
+ }
+ }
+
+ if (state->period != cstate->period ||
+ state->duty_cycle != cstate->duty_cycle) {
+ ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
+ state->period);
+ if (ret)
+ return ret;
+ }
+
+ sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1);
+ } else if (cstate->enabled) {
+ /*
+ * Note: After setting SPRD_PWM_ENABLE to zero, the controller
+ * will not wait for current period to be completed, instead it
+ * will stop the PWM channel immediately.
+ */
+ sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 0);
+
+ clk_bulk_disable_unprepare(SPRD_PWM_CHN_CLKS_NUM, chn->clks);
+ }
+
+ return 0;
+}
+
+static const struct pwm_ops sprd_pwm_ops = {
+ .apply = sprd_pwm_apply,
+ .get_state = sprd_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static int sprd_pwm_clk_init(struct sprd_pwm_chip *spc)
+{
+ struct clk *clk_pwm;
+ int ret, i;
+
+ for (i = 0; i < SPRD_PWM_CHN_NUM; i++) {
+ struct sprd_pwm_chn *chn = &spc->chn[i];
+ int j;
+
+ for (j = 0; j < SPRD_PWM_CHN_CLKS_NUM; ++j)
+ chn->clks[j].id =
+ sprd_pwm_clks[i * SPRD_PWM_CHN_CLKS_NUM + j];
+
+ ret = devm_clk_bulk_get(spc->dev, SPRD_PWM_CHN_CLKS_NUM,
+ chn->clks);
+ if (ret) {
+ if (ret == -ENOENT)
+ break;
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(spc->dev,
+ "failed to get channel clocks\n");
+
+ return ret;
+ }
+
+ clk_pwm = chn->clks[SPRD_PWM_CHN_OUTPUT_CLK].clk;
+ chn->clk_rate = clk_get_rate(clk_pwm);
+ }
+
+ if (!i) {
+ dev_err(spc->dev, "no available PWM channels\n");
+ return -ENODEV;
+ }
+
+ spc->num_pwms = i;
+
+ return 0;
+}
+
+static int sprd_pwm_probe(struct platform_device *pdev)
+{
+ struct sprd_pwm_chip *spc;
+ int ret;
+
+ spc = devm_kzalloc(&pdev->dev, sizeof(*spc), GFP_KERNEL);
+ if (!spc)
+ return -ENOMEM;
+
+ spc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spc->base))
+ return PTR_ERR(spc->base);
+
+ spc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, spc);
+
+ ret = sprd_pwm_clk_init(spc);
+ if (ret)
+ return ret;
+
+ spc->chip.dev = &pdev->dev;
+ spc->chip.ops = &sprd_pwm_ops;
+ spc->chip.base = -1;
+ spc->chip.npwm = spc->num_pwms;
+
+ ret = pwmchip_add(&spc->chip);
+ if (ret)
+ dev_err(&pdev->dev, "failed to add PWM chip\n");
+
+ return ret;
+}
+
+static int sprd_pwm_remove(struct platform_device *pdev)
+{
+ struct sprd_pwm_chip *spc = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&spc->chip);
+}
+
+static const struct of_device_id sprd_pwm_of_match[] = {
+ { .compatible = "sprd,ums512-pwm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_pwm_of_match);
+
+static struct platform_driver sprd_pwm_driver = {
+ .driver = {
+ .name = "sprd-pwm",
+ .of_match_table = sprd_pwm_of_match,
+ },
+ .probe = sprd_pwm_probe,
+ .remove = sprd_pwm_remove,
+};
+
+module_platform_driver(sprd_pwm_driver);
+
+MODULE_DESCRIPTION("Spreadtrum PWM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 20450e34ad57..1508616d794c 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -564,10 +564,8 @@ static int sti_pwm_probe(struct platform_device *pdev)
return PTR_ERR(pc->regmap);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to obtain IRQ\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, sti_pwm_interrupt, 0,
pdev->name, pc);
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 2211a642066d..67fca62524dc 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -32,7 +32,7 @@ static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip)
#define STM32_LPTIM_MAX_PRESCALER 128
static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip);
unsigned long long prd, div, dty;
@@ -59,6 +59,12 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
/* Calculate the period and prescaler value */
div = (unsigned long long)clk_get_rate(priv->clk) * state->period;
do_div(div, NSEC_PER_SEC);
+ if (!div) {
+ /* Clock is too slow to achieve requested period. */
+ dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period);
+ return -EINVAL;
+ }
+
prd = div;
while (div > STM32_LPTIM_MAX_ARR) {
presc++;
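A quick numeric illustration of the case the new check above rejects, assuming the LPTIM is fed from a slow clock:

/* Illustrative: a 32.768 kHz clock cannot produce a 20 us period. */
#include <stdio.h>

int main(void)
{
	unsigned long long clk = 32768;		/* assumed LSE-fed LPTIM */
	unsigned long long period_ns = 20000;	/* 20 us requested */
	unsigned long long div = clk * period_ns / 1000000000ULL;

	if (!div)	/* 32768 * 20000 / 1e9 truncates to 0 -> -EINVAL */
		printf("can't reach %llu ns at %llu Hz\n", period_ns, clk);
	return 0;
}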
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 740e2dec8313..359b08596d9e 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -440,7 +440,7 @@ static void stm32_pwm_disable(struct stm32_pwm *priv, int ch)
}
static int stm32_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
bool enabled;
struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
@@ -468,7 +468,7 @@ static int stm32_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
static int stm32_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
int ret;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index de78c824bbfd..6f5840a1a82d 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -145,7 +145,7 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
}
static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
- struct pwm_state *state,
+ const struct pwm_state *state,
u32 *dty, u32 *prd, unsigned int *prsclr)
{
u64 clk_rate, div = 0;
@@ -192,17 +192,11 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
*dty = div;
*prsclr = prescaler;
- div = (u64)pval * NSEC_PER_SEC * *prd;
- state->period = DIV_ROUND_CLOSEST_ULL(div, clk_rate);
-
- div = (u64)pval * NSEC_PER_SEC * *dty;
- state->duty_cycle = DIV_ROUND_CLOSEST_ULL(div, clk_rate);
-
return 0;
}
static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
struct pwm_state cstate;
diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c
index e24f4be35316..e2c21cc34a96 100644
--- a/drivers/pwm/pwm-zx.c
+++ b/drivers/pwm/pwm-zx.c
@@ -148,7 +148,7 @@ static int zx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
static int zx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
+ const struct pwm_state *state)
{
struct zx_pwm_chip *zpc = to_zx_pwm_chip(chip);
struct pwm_state cstate;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 0005ec9285aa..b42a93736668 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
goto error;
}
/* Check for trailing stuff. */
- if (i == num_devices && strlen(buf) > 0) {
+ if (i == num_devices && buf && strlen(buf) > 0) {
rc = -EINVAL;
goto error;
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 22c55816100b..1fbfb0a93f5f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1388,6 +1388,8 @@ device_initcall(cio_settle_init);
int sch_is_pseudo_sch(struct subchannel *sch)
{
+ if (!sch->dev.parent)
+ return 0;
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index d722458c5928..65841af15748 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -124,9 +124,7 @@ EXPORT_SYMBOL(ccw_device_is_multipath);
/**
* ccw_device_clear() - terminate I/O request processing
* @cdev: target ccw device
- * @intparm: interruption parameter; value is only used if no I/O is
- * outstanding, otherwise the intparm associated with the I/O request
- * is returned
+ * @intparm: interruption parameter to be returned upon conclusion of csch
*
* ccw_device_clear() calls csch on @cdev's subchannel.
* Returns:
@@ -179,6 +177,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
* completed during the time specified by @expires. If a timeout occurs, the
* channel program is terminated via xsch, hsch or csch, and the device's
* interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
@@ -256,6 +257,9 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
* Start a S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
@@ -287,6 +291,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
* Start a S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
@@ -322,6 +329,9 @@ int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
* completed during the time specified by @expires. If a timeout occurs, the
* channel program is terminated via xsch, hsch or csch, and the device's
* interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
@@ -343,11 +353,12 @@ int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
/**
* ccw_device_halt() - halt I/O request processing
* @cdev: target ccw device
- * @intparm: interruption parameter; value is only used if no I/O is
- * outstanding, otherwise the intparm associated with the I/O request
- * is returned
+ * @intparm: interruption parameter to be returned upon conclusion of hsch
*
* ccw_device_halt() calls hsch on @cdev's subchannel.
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_clear().
* Returns:
* %0 on success,
* -%ENODEV on device not operational,
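A hedged usage sketch of the intparm contract documented above; the request structure and helpers below are invented for illustration, and only ccw_device_start() and the cdev->handler callback signature are taken as given:

/* Hypothetical driver fragment: the intparm passed to ccw_device_start()
 * is echoed back as the second argument of the interrupt handler, unless
 * a later ccw_device_halt()/ccw_device_clear() supplied a new one.
 */
struct my_req {
	struct ccw1 ccw;
	struct completion done;
};

static void my_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct my_req *req = (struct my_req *)intparm;

	if (!IS_ERR(irb))	/* irb is ERR_PTR(-ETIMEDOUT) on timeout */
		complete(&req->done);
}

static int my_issue_io(struct ccw_device *cdev, struct my_req *req)
{
	cdev->handler = my_int_handler;
	/* hand the request back to ourselves via intparm */
	return ccw_device_start(cdev, &req->ccw, (unsigned long)req, 0, 0);
}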
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index a76b8a8bcbbb..a1915061932e 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1322,24 +1322,24 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
/* < CEX2A is not supported */
if (rawtype < AP_DEVICE_TYPE_CEX2A)
return 0;
- /* up to CEX6 known and fully supported */
- if (rawtype <= AP_DEVICE_TYPE_CEX6)
+ /* up to CEX7 known and fully supported */
+ if (rawtype <= AP_DEVICE_TYPE_CEX7)
return rawtype;
/*
- * unknown new type > CEX6, check for compatibility
+ * unknown new type > CEX7, check for compatibility
* to the highest known and supported type which is
- * currently CEX6 with the help of the QACT function.
+ * currently CEX7 with the help of the QACT function.
*/
if (ap_qact_available()) {
struct ap_queue_status status;
union ap_qact_ap_info apinfo = {0};
apinfo.mode = (func >> 26) & 0x07;
- apinfo.cat = AP_DEVICE_TYPE_CEX6;
+ apinfo.cat = AP_DEVICE_TYPE_CEX7;
status = ap_qact(qid, 0, &apinfo);
if (status.response_code == AP_RESPONSE_NORMAL
&& apinfo.cat >= AP_DEVICE_TYPE_CEX2A
- && apinfo.cat <= AP_DEVICE_TYPE_CEX6)
+ && apinfo.cat <= AP_DEVICE_TYPE_CEX7)
comp_type = apinfo.cat;
}
if (!comp_type)
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6f3cf37776ca..433b7b64368d 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright IBM Corp. 2006, 2012
+ * Copyright IBM Corp. 2006, 2019
* Author(s): Cornelia Huck <[email protected]>
* Martin Schwidefsky <[email protected]>
* Ralph Wuerthner <[email protected]>
@@ -63,6 +63,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
#define AP_DEVICE_TYPE_CEX6 12
+#define AP_DEVICE_TYPE_CEX7 13
/*
* Known function facilities
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index f76a1d0f54c4..9de3d46b3253 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -1363,9 +1363,122 @@ static struct attribute_group ccadata_attr_group = {
.bin_attrs = ccadata_attrs,
};
+#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
+
+/*
+ * Sysfs attribute read function for all secure key ccacipher binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of a partial read
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ size_t keysize;
+ int rc;
+
+ if (off != 0 || count < CCACIPHERTOKENSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * CCACIPHERTOKENSIZE)
+ return -EINVAL;
+
+ keysize = CCACIPHERTOKENSIZE;
+ rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize);
+ if (rc)
+ return rc;
+ memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize);
+
+ if (is_xts) {
+ keysize = CCACIPHERTOKENSIZE;
+ rc = cca_gencipherkey(-1, -1, keybits, 0,
+ buf + CCACIPHERTOKENSIZE, &keysize);
+ if (rc)
+ return rc;
+ memset(buf + CCACIPHERTOKENSIZE + keysize, 0,
+ CCACIPHERTOKENSIZE - keysize);
+
+ return 2 * CCACIPHERTOKENSIZE;
+ }
+
+ return CCACIPHERTOKENSIZE;
+}
+
+static ssize_t ccacipher_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+
+static struct bin_attribute *ccacipher_attrs[] = {
+ &bin_attr_ccacipher_aes_128,
+ &bin_attr_ccacipher_aes_192,
+ &bin_attr_ccacipher_aes_256,
+ &bin_attr_ccacipher_aes_128_xts,
+ &bin_attr_ccacipher_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ccacipher_attr_group = {
+ .name = "ccacipher",
+ .bin_attrs = ccacipher_attrs,
+};
+
static const struct attribute_group *pkey_attr_groups[] = {
&protkey_attr_group,
&ccadata_attr_group,
+ &ccacipher_attr_group,
NULL,
};
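A hypothetical userspace sketch of consuming the new ccacipher attributes: each read() generates a fresh random secure key blob and partial reads are rejected with -EINVAL, so the whole attribute must be read in one call. The sysfs path below is an assumption for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];		/* comfortably >= CCACIPHERTOKENSIZE */
	ssize_t n;
	int fd;

	fd = open("/sys/devices/virtual/misc/pkey/ccacipher/ccacipher_aes_256",
		  O_RDONLY);
	if (fd < 0)
		return 1;

	n = read(fd, buf, sizeof(buf));	/* one shot; no seeking or rereading */
	if (n > 0)
		printf("got %zd byte key blob\n", n);

	close(fd);
	return n > 0 ? 0 : 1;
}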
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 003662aa8060..be2520cc010b 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -36,6 +36,8 @@ static struct ap_device_id ap_queue_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of sibling */ },
};
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 2d3f2732344f..d464618cd84f 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright IBM Corp. 2001, 2018
+ * Copyright IBM Corp. 2001, 2019
* Author(s): Robert Burroughs
* Eric Rossman ([email protected])
* Cornelia Huck <[email protected]>
@@ -29,6 +29,7 @@
#define ZCRYPT_CEX4 10
#define ZCRYPT_CEX5 11
#define ZCRYPT_CEX6 12
+#define ZCRYPT_CEX7 13
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f58d8dec19dc..442e3d6162f7 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2019
* Author(s): Holger Dengler <[email protected]>
*/
@@ -38,8 +38,8 @@
#define CEX4_CLEANUP_TIME (900*HZ)
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX4/CEX5/CEX6 Cryptographic Card device driver, " \
- "Copyright IBM Corp. 2018");
+MODULE_DESCRIPTION("CEX4/CEX5/CEX6/CEX7 Cryptographic Card device driver, " \
+ "Copyright IBM Corp. 2019");
MODULE_LICENSE("GPL");
static struct ap_device_id zcrypt_cex4_card_ids[] = {
@@ -49,6 +49,8 @@ static struct ap_device_id zcrypt_cex4_card_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ /* end of list */ },
};
@@ -61,6 +63,8 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of list */ },
};
@@ -146,7 +150,7 @@ static const struct attribute_group cca_queue_attr_group = {
};
/**
- * Probe function for CEX4/CEX5/CEX6 card device. It always
+ * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
* @ap_dev: pointer to the AP device.
@@ -158,25 +162,31 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
static const int CEX4A_SPEED_IDX[] = {
- 14, 19, 249, 42, 228, 1458, 0, 0};
+ 14, 19, 249, 42, 228, 1458, 0, 0};
static const int CEX5A_SPEED_IDX[] = {
- 8, 9, 20, 18, 66, 458, 0, 0};
+ 8, 9, 20, 18, 66, 458, 0, 0};
static const int CEX6A_SPEED_IDX[] = {
- 6, 9, 20, 17, 65, 438, 0, 0};
+ 6, 9, 20, 17, 65, 438, 0, 0};
+ static const int CEX7A_SPEED_IDX[] = {
+ 6, 8, 17, 15, 54, 362, 0, 0};
static const int CEX4C_SPEED_IDX[] = {
59, 69, 308, 83, 278, 2204, 209, 40};
static const int CEX5C_SPEED_IDX[] = {
- 24, 31, 50, 37, 90, 479, 27, 10};
+ 24, 31, 50, 37, 90, 479, 27, 10};
static const int CEX6C_SPEED_IDX[] = {
- 16, 20, 32, 27, 77, 455, 23, 9};
+ 16, 20, 32, 27, 77, 455, 24, 9};
+ static const int CEX7C_SPEED_IDX[] = {
+ 14, 16, 26, 23, 64, 376, 23, 8};
static const int CEX4P_SPEED_IDX[] = {
- 224, 313, 3560, 359, 605, 2827, 0, 50};
+ 0, 0, 0, 0, 0, 0, 0, 50};
static const int CEX5P_SPEED_IDX[] = {
- 63, 84, 156, 83, 142, 533, 0, 10};
+ 0, 0, 0, 0, 0, 0, 0, 10};
static const int CEX6P_SPEED_IDX[] = {
- 55, 70, 121, 73, 129, 522, 0, 9};
+ 0, 0, 0, 0, 0, 0, 0, 9};
+ static const int CEX7P_SPEED_IDX[] = {
+ 0, 0, 0, 0, 0, 0, 0, 8};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
@@ -198,11 +208,19 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
sizeof(CEX5A_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6A";
zc->user_space_type = ZCRYPT_CEX6;
memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
sizeof(CEX6A_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX7A";
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX7A_SPEED_IDX,
+ sizeof(CEX7A_SPEED_IDX));
}
zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
@@ -232,7 +250,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
sizeof(CEX5C_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6C";
/* wrong user space type, must be CEX6
* just keep it for cca compatibility
@@ -240,6 +258,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
sizeof(CEX6C_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX7C";
+ /* wrong user space type, must be CEX7
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ memcpy(zc->speed_rating, CEX7C_SPEED_IDX,
+ sizeof(CEX7C_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -255,11 +281,19 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
sizeof(CEX5P_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6P";
zc->user_space_type = ZCRYPT_CEX6;
memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
sizeof(CEX6P_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX7P";
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX7P_SPEED_IDX,
+ sizeof(CEX7P_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -289,8 +323,8 @@ out:
}
/**
- * This is called to remove the CEX4/CEX5/CEX6 card driver information
- * if an AP card device is removed.
+ * This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver
+ * information if an AP card device is removed.
*/
static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
{
@@ -311,7 +345,7 @@ static struct ap_driver zcrypt_cex4_card_driver = {
};
/**
- * Probe function for CEX4/CEX5/CEX6 queue device. It always
+ * Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
* @ap_dev: pointer to the AP device.
@@ -369,7 +403,7 @@ out:
}
/**
- * This is called to remove the CEX4/CEX5/CEX6 queue driver
+ * This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver
* information if an AP queue device is removed.
*/
static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7623196de9e3..50928bc266eb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1211,9 +1211,6 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
dix = scsi_prot_sg_count(cmd);
dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
- if (write && dix)
- t10_pi_prepare(cmd->request, sdkp->protection_type);
-
if (dif || dix)
protect = sd_setup_protect_cmnd(cmd, dix, dif);
else
@@ -2055,11 +2052,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
"sd_done: completed %d of %d bytes\n",
good_bytes, scsi_bufflen(SCpnt)));
- if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) &&
- good_bytes)
- t10_pi_complete(SCpnt->request, sdkp->protection_type,
- good_bytes / scsi_prot_interval(SCpnt));
-
return good_bytes;
}
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index aa8d8425be25..b83a1d16bd89 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
if (!page)
goto free_pages;
list_add_tail(&page->lru, &pages);
- size_remaining -= PAGE_SIZE << compound_order(page);
+ size_remaining -= page_size(page);
max_order = compound_order(page);
i++;
}
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
sg = table->sgl;
list_for_each_entry_safe(page, tmp_page, &pages, lru) {
- sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+ sg_set_page(sg, page, page_size(page), 0);
sg = sg_next(sg);
list_del(&page->lru);
}
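For reference, page_size() was introduced alongside these conversions as a helper in include/linux/mm.h, equivalent to the expression it replaces here:

/* From include/linux/mm.h: bytes in this page, compound pages included. */
static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}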
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index a254792d882c..1354a157e9af 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -136,8 +136,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
page, off_in_page, tlen);
fr_len(fp) += tlen;
fp_skb(fp)->data_len += tlen;
- fp_skb(fp)->truesize +=
- PAGE_SIZE << compound_order(page);
+ fp_skb(fp)->truesize += page_size(page);
} else {
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 2da026fd12c9..09ddcd06c715 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -254,6 +254,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
shm->teedev = teedev;
shm->ctx = ctx;
shm->id = -1;
+ addr = untagged_addr(addr);
start = rounddown(addr, PAGE_SIZE);
shm->offset = addr - start;
shm->size = length;
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 8c07a393dc2e..709a22f455e9 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -53,7 +53,6 @@
#define CONTROL0_TSEN_MODE_EXTERNAL 0x2
#define CONTROL0_TSEN_MODE_MASK 0x3
-#define CONTROL1_TSEN_AVG_SHIFT 0
#define CONTROL1_TSEN_AVG_MASK 0x7
#define CONTROL1_EXT_TSEN_SW_RESET BIT(7)
#define CONTROL1_EXT_TSEN_HW_RESETn BIT(8)
@@ -267,8 +266,8 @@ static void armada_cp110_init(struct platform_device *pdev,
/* Average the output value over 2^1 = 2 samples */
regmap_read(priv->syscon, data->syscon_control1_off, &reg);
- reg &= ~CONTROL1_TSEN_AVG_MASK << CONTROL1_TSEN_AVG_SHIFT;
- reg |= 1 << CONTROL1_TSEN_AVG_SHIFT;
+ reg &= ~CONTROL1_TSEN_AVG_MASK;
+ reg |= 1;
regmap_write(priv->syscon, data->syscon_control1_off, reg);
}
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index 9716bc3abaf9..7130e90773ed 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -77,9 +77,6 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
struct acpi_buffer element = { 0, NULL };
struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
- if (!acpi_has_method(handle, "_TRT"))
- return -ENODEV;
-
status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -158,9 +155,6 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
struct acpi_buffer art_format = {
sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
- if (!acpi_has_method(handle, "_ART"))
- return -ENODEV;
-
status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index f5749d4418ae..a7bbd8584ae2 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -181,7 +181,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
p = buf.pointer;
if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
- printk(KERN_WARNING "Invalid PPSS data\n");
+ pr_warn("Invalid PPSS data\n");
kfree(buf.pointer);
return -EFAULT;
}
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index d3446acf9bbd..89a015387283 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -39,6 +39,9 @@
/* GeminiLake thermal reporting device */
#define PCI_DEVICE_ID_PROC_GLK_THERMAL 0x318C
+/* IceLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_ICL_THERMAL 0x8a03
+
#define DRV_NAME "proc_thermal"
struct power_config {
@@ -137,6 +140,72 @@ static const struct attribute_group power_limit_attribute_group = {
.name = "power_limits"
};
+static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val;
+ int err;
+
+ err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ if (err)
+ return err;
+
+ val = (val >> 24) & 0xff;
+ return sprintf(buf, "%d\n", (int)val);
+}
+
+static int tcc_offset_update(int tcc)
+{
+ u64 val;
+ int err;
+
+ if (!tcc)
+ return -EINVAL;
+
+ err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ if (err)
+ return err;
+
+ val &= ~GENMASK_ULL(31, 24);
+ val |= (u64)(tcc & 0xff) << 24;
+
+ err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tcc_offset_save;
+
+static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ u64 val;
+ int tcc, err;
+
+ err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+ if (err)
+ return err;
+
+ if (!(val & BIT(30)))
+ return -EACCES;
+
+ if (kstrtoint(buf, 0, &tcc))
+ return -EINVAL;
+
+ err = tcc_offset_update(tcc);
+ if (err)
+ return err;
+
+ tcc_offset_save = tcc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(tcc_offset_degree_celsius);
+
static int stored_tjmax; /* since it is fixed, we can have local storage */
static int get_tjmax(void)
@@ -332,6 +401,7 @@ static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
acpi_remove_notify_handler(proc_priv->adev->handle,
ACPI_DEVICE_NOTIFY, proc_thermal_notify);
int340x_thermal_zone_remove(proc_priv->int340x_zone);
+ sysfs_remove_file(&proc_priv->dev->kobj, &dev_attr_tcc_offset_degree_celsius.attr);
sysfs_remove_group(&proc_priv->dev->kobj,
&power_limit_attribute_group);
}
@@ -355,8 +425,15 @@ static int int3401_add(struct platform_device *pdev)
dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
- return sysfs_create_group(&pdev->dev.kobj,
- &power_limit_attribute_group);
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &power_limit_attribute_group);
+ if (ret)
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+
+ return ret;
}
static int int3401_remove(struct platform_device *pdev)
@@ -588,8 +665,15 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
- return sysfs_create_group(&pdev->dev.kobj,
- &power_limit_attribute_group);
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &power_limit_attribute_group);
+ if (ret)
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tcc_offset_degree_celsius.attr);
+
+ return ret;
}
static void proc_thermal_pci_remove(struct pci_dev *pdev)
@@ -615,6 +699,8 @@ static int proc_thermal_resume(struct device *dev)
proc_dev = dev_get_drvdata(dev);
proc_thermal_read_ppcc(proc_dev);
+ tcc_offset_update(tcc_offset_save);
+
return 0;
}
#else
@@ -636,6 +722,8 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CNL_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CFL_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_GLK_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_ICL_THERMAL),
+ .driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
{ 0, },
};
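The TCC offset stored above occupies bits 31:24 of MSR_IA32_TEMPERATURE_TARGET and is subtracted from TjMax (bits 23:16) to give the effective throttle temperature; a small illustrative computation:

/* Illustrative only: effective throttle point = TjMax - TCC offset. */
#include <stdio.h>

int main(void)
{
	unsigned long long msr = 0x640000ULL;	/* assume TjMax = 100 C */
	unsigned int tcc = 5;			/* degrees written via sysfs */

	msr &= ~(0xffULL << 24);
	msr |= (unsigned long long)(tcc & 0xff) << 24;

	/* prints: throttling starts at 95 C */
	printf("throttling starts at %llu C\n",
	       ((msr >> 16) & 0xff) - ((msr >> 24) & 0xff));
	return 0;
}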
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index 99f8b2540f18..4f0bb8f502e1 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -371,16 +371,14 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev)
static int intel_pch_thermal_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+ struct pch_thermal_device *ptd = dev_get_drvdata(device);
return ptd->ops->suspend(ptd);
}
static int intel_pch_thermal_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+ struct pch_thermal_device *ptd = dev_get_drvdata(device);
return ptd->ops->resume(ptd);
}
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index 8d9b721dadb6..e46a4e3f25c4 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -229,6 +229,8 @@ static int calibrate_8960(struct tsens_priv *priv)
for (i = 0; i < num_read; i++, s++)
s->offset = data[i];
+ kfree(data);
+
return 0;
}
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index 6f26fadf4c27..055647bcee67 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -145,8 +145,10 @@ static int calibrate_8916(struct tsens_priv *priv)
return PTR_ERR(qfprom_cdata);
qfprom_csel = (u32 *)qfprom_read(priv->dev, "calib_sel");
- if (IS_ERR(qfprom_csel))
+ if (IS_ERR(qfprom_csel)) {
+ kfree(qfprom_cdata);
return PTR_ERR(qfprom_csel);
+ }
mode = (qfprom_csel[0] & MSM8916_CAL_SEL_MASK) >> MSM8916_CAL_SEL_SHIFT;
dev_dbg(priv->dev, "calibration mode is %d\n", mode);
@@ -181,6 +183,8 @@ static int calibrate_8916(struct tsens_priv *priv)
}
compute_intercept_slope(priv, p1, p2, mode);
+ kfree(qfprom_cdata);
+ kfree(qfprom_csel);
return 0;
}
@@ -198,8 +202,10 @@ static int calibrate_8974(struct tsens_priv *priv)
return PTR_ERR(calib);
bkp = (u32 *)qfprom_read(priv->dev, "calib_backup");
- if (IS_ERR(bkp))
+ if (IS_ERR(bkp)) {
+ kfree(calib);
return PTR_ERR(bkp);
+ }
calib_redun_sel = bkp[1] & BKP_REDUN_SEL;
calib_redun_sel >>= BKP_REDUN_SHIFT;
@@ -313,6 +319,8 @@ static int calibrate_8974(struct tsens_priv *priv)
}
compute_intercept_slope(priv, p1, p2, mode);
+ kfree(calib);
+ kfree(bkp);
return 0;
}
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index 10b595d4f619..870f502f2cb6 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -138,6 +138,7 @@ static int calibrate_v1(struct tsens_priv *priv)
}
compute_intercept_slope(priv, p1, p2, mode);
+ kfree(qfprom_cdata);
return 0;
}
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 2fd94997245b..b89083b61c38 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -17,6 +17,7 @@
#include <linux/thermal.h>
#include <linux/regmap.h>
+#include <linux/slab.h>
struct tsens_priv;
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 7b364933bfb1..39542c670301 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -2,6 +2,7 @@
//
// Copyright 2016 Freescale Semiconductor, Inc.
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -72,6 +73,7 @@ struct qoriq_sensor {
struct qoriq_tmu_data {
struct qoriq_tmu_regs __iomem *regs;
+ struct clk *clk;
bool little_endian;
struct qoriq_sensor *sensor[SITES_MAX];
};
@@ -202,32 +204,39 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
data->little_endian = of_property_read_bool(np, "little-endian");
- data->regs = of_iomap(np, 0);
- if (!data->regs) {
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
dev_err(&pdev->dev, "Failed to get memory region\n");
- ret = -ENODEV;
- goto err_iomap;
+ return PTR_ERR(data->regs);
+ }
+
+ data->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
+
+ ret = clk_prepare_enable(data->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable clock\n");
+ return ret;
}
qoriq_tmu_init_device(data); /* TMU initialization */
ret = qoriq_tmu_calibration(pdev); /* TMU calibration */
if (ret < 0)
- goto err_tmu;
+ goto err;
ret = qoriq_tmu_register_tmu_zone(pdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register sensors\n");
ret = -ENODEV;
- goto err_iomap;
+ goto err;
}
return 0;
-err_tmu:
- iounmap(data->regs);
-
-err_iomap:
+err:
+ clk_disable_unprepare(data->clk);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -240,14 +249,14 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
/* Disable monitoring */
tmu_write(data, TMR_DISABLE, &data->regs->tmr);
- iounmap(data->regs);
+ clk_disable_unprepare(data->clk);
+
platform_set_drvdata(pdev, NULL);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int qoriq_tmu_suspend(struct device *dev)
+static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
{
u32 tmr;
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
@@ -257,14 +266,21 @@ static int qoriq_tmu_suspend(struct device *dev)
tmr &= ~TMR_ME;
tmu_write(data, tmr, &data->regs->tmr);
+ clk_disable_unprepare(data->clk);
+
return 0;
}
-static int qoriq_tmu_resume(struct device *dev)
+static int __maybe_unused qoriq_tmu_resume(struct device *dev)
{
u32 tmr;
+ int ret;
struct qoriq_tmu_data *data = dev_get_drvdata(dev);
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ return ret;
+
/* Enable monitoring */
tmr = tmu_read(data, &data->regs->tmr);
tmr |= TMR_ME;
@@ -272,7 +288,6 @@ static int qoriq_tmu_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
qoriq_tmu_suspend, qoriq_tmu_resume);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index a56463308694..755d2b5bd2c2 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -443,9 +443,8 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (ret)
goto error_unregister;
- ret = devm_add_action(dev, rcar_gen3_hwmon_action, zone);
+ ret = devm_add_action_or_reset(dev, rcar_gen3_hwmon_action, zone);
- if (ret) {
- rcar_gen3_hwmon_action(zone);
+ if (ret)
 goto error_unregister;
- }
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 43941eb734eb..5acaad3a594f 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -202,7 +202,7 @@
/* get dividend from the depth */
#define THROT_DEPTH_DIVIDEND(depth) ((256 * (100 - (depth)) / 100) - 1)
-/* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-sochterm.h
+/* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-soctherm.h
* level vector
* NONE 3'b000
* LOW 3'b001
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6bab66e84eb5..d4481cc8958f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -304,7 +304,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
&tz->poll_queue,
msecs_to_jiffies(delay));
else
- cancel_delayed_work(&tz->poll_queue);
+ cancel_delayed_work_sync(&tz->poll_queue);
}
static void monitor_thermal_zone(struct thermal_zone_device *tz)
@@ -985,7 +985,7 @@ __thermal_cooling_device_register(struct device_node *np,
result = device_register(&cdev->device);
if (result) {
ida_simple_remove(&thermal_cdev_ida, cdev->id);
- kfree(cdev);
+ put_device(&cdev->device);
return ERR_PTR(result);
}
@@ -1240,21 +1240,31 @@ thermal_zone_device_register(const char *type, int trips, int mask,
struct thermal_zone_device *tz;
enum thermal_trip_type trip_type;
int trip_temp;
+ int id;
int result;
int count;
struct thermal_governor *governor;
- if (!type || strlen(type) == 0)
+ if (!type || strlen(type) == 0) {
+ pr_err("Error: No thermal zone type defined\n");
return ERR_PTR(-EINVAL);
+ }
- if (type && strlen(type) >= THERMAL_NAME_LENGTH)
+ if (type && strlen(type) >= THERMAL_NAME_LENGTH) {
+ pr_err("Error: Thermal zone name (%s) too long, should be under %d chars\n",
+ type, THERMAL_NAME_LENGTH);
return ERR_PTR(-EINVAL);
+ }
- if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
+ if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips) {
+ pr_err("Error: Incorrect number of thermal trips\n");
return ERR_PTR(-EINVAL);
+ }
- if (!ops)
+ if (!ops) {
+ pr_err("Error: Thermal zone device ops not defined\n");
return ERR_PTR(-EINVAL);
+ }
if (trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp))
return ERR_PTR(-EINVAL);
@@ -1266,11 +1276,13 @@ thermal_zone_device_register(const char *type, int trips, int mask,
INIT_LIST_HEAD(&tz->thermal_instances);
ida_init(&tz->ida);
mutex_init(&tz->lock);
- result = ida_simple_get(&thermal_tz_ida, 0, 0, GFP_KERNEL);
- if (result < 0)
+ id = ida_simple_get(&thermal_tz_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ result = id;
goto free_tz;
+ }
- tz->id = result;
+ tz->id = id;
strlcpy(tz->type, type, sizeof(tz->type));
tz->ops = ops;
tz->tzp = tzp;
@@ -1292,7 +1304,7 @@ thermal_zone_device_register(const char *type, int trips, int mask,
dev_set_name(&tz->device, "thermal_zone%d", tz->id);
result = device_register(&tz->device);
if (result)
- goto remove_device_groups;
+ goto release_device;
for (count = 0; count < trips; count++) {
if (tz->ops->get_trip_type(tz, count, &trip_type))
@@ -1343,14 +1355,12 @@ thermal_zone_device_register(const char *type, int trips, int mask,
return tz;
unregister:
- ida_simple_remove(&thermal_tz_ida, tz->id);
- device_unregister(&tz->device);
- return ERR_PTR(result);
-
-remove_device_groups:
- thermal_zone_destroy_device_groups(tz);
+ device_del(&tz->device);
+release_device:
+ put_device(&tz->device);
+ tz = NULL;
remove_id:
- ida_simple_remove(&thermal_tz_ida, tz->id);
+ ida_simple_remove(&thermal_tz_ida, id);
free_tz:
kfree(tz);
return ERR_PTR(result);
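The reshuffled error path above follows the general driver-core rule: once device_register() has been called, the release callback owns the object, so failures must be unwound with put_device() rather than kfree(). In sketch form (not verbatim driver code):

/* Sketch of the pattern: */
result = device_register(&tz->device);
if (result) {
	put_device(&tz->device);	/* release() frees tz */
	tz = NULL;			/* so the later kfree() is a no-op */
	goto remove_id;
}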
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 40c69a533b24..dd5d8ee37928 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -87,13 +87,17 @@ static struct thermal_hwmon_device *
thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
{
struct thermal_hwmon_device *hwmon;
+ char type[THERMAL_NAME_LENGTH];
mutex_lock(&thermal_hwmon_list_lock);
- list_for_each_entry(hwmon, &thermal_hwmon_list, node)
- if (!strcmp(hwmon->type, tz->type)) {
+ list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
+ strcpy(type, tz->type);
+ strreplace(type, '-', '_');
+ if (!strcmp(hwmon->type, type)) {
mutex_unlock(&thermal_hwmon_list_lock);
return hwmon;
}
+ }
mutex_unlock(&thermal_hwmon_list_lock);
return NULL;
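Since hwmon rejects '-' in device names, the registration path stores thermal zone types with '-' replaced by '_'; the lookup above now applies the same normalization before comparing, for example:

/* Illustrative: both sides of the comparison use the hwmon-safe name. */
char type[THERMAL_NAME_LENGTH];

strcpy(type, "soc-thermal");	/* a typical DT thermal zone name */
strreplace(type, '-', '_');	/* now "soc_thermal", matching hwmon->type */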
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 213ff03c8a9f..59d9d512dcda 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -17,6 +17,7 @@
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/export.h>
+#include <linux/fs_parser.h>
#include <linux/hid.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -1451,9 +1452,9 @@ struct ffs_sb_fill_data {
struct ffs_data *ffs_data;
};
-static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
+static int ffs_sb_fill(struct super_block *sb, struct fs_context *fc)
{
- struct ffs_sb_fill_data *data = _data;
+ struct ffs_sb_fill_data *data = fc->fs_private;
struct inode *inode;
struct ffs_data *ffs = data->ffs_data;
@@ -1486,147 +1487,152 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
return 0;
}
-static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
-{
- ENTER();
+enum {
+ Opt_no_disconnect,
+ Opt_rmode,
+ Opt_fmode,
+ Opt_mode,
+ Opt_uid,
+ Opt_gid,
+};
- if (!opts || !*opts)
- return 0;
+static const struct fs_parameter_spec ffs_fs_param_specs[] = {
+ fsparam_bool ("no_disconnect", Opt_no_disconnect),
+ fsparam_u32 ("rmode", Opt_rmode),
+ fsparam_u32 ("fmode", Opt_fmode),
+ fsparam_u32 ("mode", Opt_mode),
+ fsparam_u32 ("uid", Opt_uid),
+ fsparam_u32 ("gid", Opt_gid),
+ {}
+};
- for (;;) {
- unsigned long value;
- char *eq, *comma;
-
- /* Option limit */
- comma = strchr(opts, ',');
- if (comma)
- *comma = 0;
-
- /* Value limit */
- eq = strchr(opts, '=');
- if (unlikely(!eq)) {
- pr_err("'=' missing in %s\n", opts);
- return -EINVAL;
- }
- *eq = 0;
+static const struct fs_parameter_description ffs_fs_fs_parameters = {
+ .name = "functionfs",
+ .specs = ffs_fs_param_specs,
+};
- /* Parse value */
- if (kstrtoul(eq + 1, 0, &value)) {
- pr_err("%s: invalid value: %s\n", opts, eq + 1);
- return -EINVAL;
- }
+static int ffs_fs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct ffs_sb_fill_data *data = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
- /* Interpret option */
- switch (eq - opts) {
- case 13:
- if (!memcmp(opts, "no_disconnect", 13))
- data->no_disconnect = !!value;
- else
- goto invalid;
- break;
- case 5:
- if (!memcmp(opts, "rmode", 5))
- data->root_mode = (value & 0555) | S_IFDIR;
- else if (!memcmp(opts, "fmode", 5))
- data->perms.mode = (value & 0666) | S_IFREG;
- else
- goto invalid;
- break;
+ ENTER();
- case 4:
- if (!memcmp(opts, "mode", 4)) {
- data->root_mode = (value & 0555) | S_IFDIR;
- data->perms.mode = (value & 0666) | S_IFREG;
- } else {
- goto invalid;
- }
- break;
+ opt = fs_parse(fc, &ffs_fs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
- case 3:
- if (!memcmp(opts, "uid", 3)) {
- data->perms.uid = make_kuid(current_user_ns(), value);
- if (!uid_valid(data->perms.uid)) {
- pr_err("%s: unmapped value: %lu\n", opts, value);
- return -EINVAL;
- }
- } else if (!memcmp(opts, "gid", 3)) {
- data->perms.gid = make_kgid(current_user_ns(), value);
- if (!gid_valid(data->perms.gid)) {
- pr_err("%s: unmapped value: %lu\n", opts, value);
- return -EINVAL;
- }
- } else {
- goto invalid;
- }
- break;
+ switch (opt) {
+ case Opt_no_disconnect:
+ data->no_disconnect = result.boolean;
+ break;
+ case Opt_rmode:
+ data->root_mode = (result.uint_32 & 0555) | S_IFDIR;
+ break;
+ case Opt_fmode:
+ data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
+ break;
+ case Opt_mode:
+ data->root_mode = (result.uint_32 & 0555) | S_IFDIR;
+ data->perms.mode = (result.uint_32 & 0666) | S_IFREG;
+ break;
- default:
-invalid:
- pr_err("%s: invalid option\n", opts);
- return -EINVAL;
- }
+ case Opt_uid:
+ data->perms.uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(data->perms.uid))
+ goto unmapped_value;
+ break;
+ case Opt_gid:
+ data->perms.gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(data->perms.gid))
+ goto unmapped_value;
+ break;
- /* Next iteration */
- if (!comma)
- break;
- opts = comma + 1;
+ default:
+ return -ENOPARAM;
}
return 0;
-}
-/* "mount -t functionfs dev_name /dev/function" ends up here */
+unmapped_value:
+ return invalf(fc, "%s: unmapped value: %u", param->key, result.uint_32);
+}
-static struct dentry *
-ffs_fs_mount(struct file_system_type *t, int flags,
- const char *dev_name, void *opts)
-{
- struct ffs_sb_fill_data data = {
- .perms = {
- .mode = S_IFREG | 0600,
- .uid = GLOBAL_ROOT_UID,
- .gid = GLOBAL_ROOT_GID,
- },
- .root_mode = S_IFDIR | 0500,
- .no_disconnect = false,
- };
- struct dentry *rv;
- int ret;
+/*
+ * Set up the superblock for a mount.
+ */
+static int ffs_fs_get_tree(struct fs_context *fc)
+{
+ struct ffs_sb_fill_data *ctx = fc->fs_private;
void *ffs_dev;
struct ffs_data *ffs;
ENTER();
- ret = ffs_fs_parse_opts(&data, opts);
- if (unlikely(ret < 0))
- return ERR_PTR(ret);
+ if (!fc->source)
+ return invalf(fc, "No source specified");
- ffs = ffs_data_new(dev_name);
+ ffs = ffs_data_new(fc->source);
if (unlikely(!ffs))
- return ERR_PTR(-ENOMEM);
- ffs->file_perms = data.perms;
- ffs->no_disconnect = data.no_disconnect;
+ return -ENOMEM;
+ ffs->file_perms = ctx->perms;
+ ffs->no_disconnect = ctx->no_disconnect;
- ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
+ ffs->dev_name = kstrdup(fc->source, GFP_KERNEL);
if (unlikely(!ffs->dev_name)) {
ffs_data_put(ffs);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
- ffs_dev = ffs_acquire_dev(dev_name);
+ ffs_dev = ffs_acquire_dev(ffs->dev_name);
if (IS_ERR(ffs_dev)) {
ffs_data_put(ffs);
- return ERR_CAST(ffs_dev);
+ return PTR_ERR(ffs_dev);
}
+
ffs->private_data = ffs_dev;
- data.ffs_data = ffs;
+ ctx->ffs_data = ffs;
+ return get_tree_nodev(fc, ffs_sb_fill);
+}
+
+static void ffs_fs_free_fc(struct fs_context *fc)
+{
+ struct ffs_sb_fill_data *ctx = fc->fs_private;
+
+ if (ctx) {
+ if (ctx->ffs_data) {
+ ffs_release_dev(ctx->ffs_data);
+ ffs_data_put(ctx->ffs_data);
+ }
- rv = mount_nodev(t, flags, &data, ffs_sb_fill);
- if (IS_ERR(rv) && data.ffs_data) {
- ffs_release_dev(data.ffs_data);
- ffs_data_put(data.ffs_data);
+ kfree(ctx);
}
- return rv;
+}
+
+static const struct fs_context_operations ffs_fs_context_ops = {
+ .free = ffs_fs_free_fc,
+ .parse_param = ffs_fs_parse_param,
+ .get_tree = ffs_fs_get_tree,
+};
+
+static int ffs_fs_init_fs_context(struct fs_context *fc)
+{
+ struct ffs_sb_fill_data *ctx;
+
+ ctx = kzalloc(sizeof(struct ffs_sb_fill_data), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->perms.mode = S_IFREG | 0600;
+ ctx->perms.uid = GLOBAL_ROOT_UID;
+ ctx->perms.gid = GLOBAL_ROOT_GID;
+ ctx->root_mode = S_IFDIR | 0500;
+ ctx->no_disconnect = false;
+
+ fc->fs_private = ctx;
+ fc->ops = &ffs_fs_context_ops;
+ return 0;
}
static void
@@ -1644,7 +1650,8 @@ ffs_fs_kill_sb(struct super_block *sb)
static struct file_system_type ffs_fs_type = {
.owner = THIS_MODULE,
.name = "functionfs",
- .mount = ffs_fs_mount,
+ .init_fs_context = ffs_fs_init_fs_context,
+ .parameters = &ffs_fs_fs_parameters,
.kill_sb = ffs_fs_kill_sb,
};
MODULE_ALIAS_FS("functionfs");
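
A note on the conversion above: the new mount API replaces the old
.mount()/options-string scheme with an init_fs_context() hook, a table of
typed parameters, and a per-parameter callback, so the core does the
tokenizing and type conversion that ffs_fs_parse_opts() used to hand-roll.
A stripped-down sketch of the same wiring for a hypothetical "myfs", using
the fs_parameter_description interface of this kernel generation:

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_mode };

static const struct fs_parameter_spec myfs_param_specs[] = {
        fsparam_u32("mode", Opt_mode),
        {}
};

static const struct fs_parameter_description myfs_parameters = {
        .name  = "myfs",
        .specs = myfs_param_specs,
};

static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct fs_parse_result result;
        int opt;

        opt = fs_parse(fc, &myfs_parameters, param, &result);
        if (opt < 0)
                return opt;     /* unknown key or malformed value */

        switch (opt) {
        case Opt_mode:
                /* result.uint_32 holds the converted value */
                break;
        }
        return 0;
}
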
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 3b18fa4d090a..26cef65b41e7 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -176,13 +176,13 @@ put_exit:
}
static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
- unsigned int page_shift)
+ unsigned int it_page_shift)
{
struct page *page;
unsigned long size = 0;
- if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
- return size == (1UL << page_shift);
+ if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
+ return size == (1UL << it_page_shift);
page = pfn_to_page(hpa >> PAGE_SHIFT);
/*
@@ -190,7 +190,7 @@ static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
* a page we just found. Otherwise the hardware can get access to
 * a bigger memory chunk than it should.
*/
- return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
+ return page_shift(compound_head(page)) >= it_page_shift;
}
static inline bool tce_groups_attached(struct tce_container *container)
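
A note on page_shift(): it is a helper that folds the open-coded computation
into one call, so the removed expression and its replacement are equivalent:

        struct page *head = compound_head(page);

        /* page_shift(head) == PAGE_SHIFT + compound_order(head);
         * for a non-compound page this is just PAGE_SHIFT */
        return page_shift(head) >= it_page_shift;
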
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 9a50b0558fa9..96fddc1dafc3 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -375,6 +375,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
down_read(&mm->mmap_sem);
+ vaddr = untagged_addr(vaddr);
+
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
if (vma && vma->vm_flags & VM_PFNMAP) {
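
A note on untagged_addr(): on architectures with pointer tagging (e.g. arm64
top-byte-ignore) user pointers may carry tag bits in their upper bits, while
the VMA tree is indexed by untagged addresses; the helper strips the tag and
is a no-op everywhere else. The guard therefore belongs before any
find_vma()-style lookup, as in the hunk above:

        vaddr = untagged_addr(vaddr);   /* strip tag bits, if any */
        vma = find_vma_intersection(mm, vaddr, vaddr + 1);
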
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index a45f9e3e442b..58e7c100b6ad 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -477,13 +477,6 @@ config IXP4XX_WATCHDOG
Say N if you are unsure.
-config KS8695_WATCHDOG
- tristate "KS8695 watchdog"
- depends on ARCH_KS8695
- help
- Watchdog timer embedded into KS8695 processor. This will reboot your
- system when the timeout is reached.
-
config HAVE_S3C2410_WATCHDOG
bool
help
@@ -662,15 +655,6 @@ config STMP3XXX_RTC_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called stmp3xxx_rtc_wdt.
-config NUC900_WATCHDOG
- tristate "Nuvoton NUC900 watchdog"
- depends on ARCH_W90X900 || COMPILE_TEST
- help
- Say Y here if to include support for the watchdog timer
- for the Nuvoton NUC900 series SoCs.
- To compile this driver as a module, choose M here: the
- module will be called nuc900_wdt.
-
config TS4800_WATCHDOG
tristate "TS-4800 Watchdog"
depends on HAS_IOMEM && OF
@@ -740,6 +724,19 @@ config IMX_SC_WDT
To compile this driver as a module, choose M here: the
module will be called imx_sc_wdt.
+config IMX7ULP_WDT
+ tristate "IMX7ULP Watchdog"
+ depends on ARCH_MXC || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This is the driver for the hardware watchdog on the Freescale
+ IMX7ULP and later processors. If you have one of these
+ processors and wish to have watchdog support enabled,
+ say Y, otherwise say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx7ulp_wdt.
+
config UX500_WATCHDOG
tristate "ST-Ericsson Ux500 watchdog"
depends on MFD_DB8500_PRCMU
@@ -1046,8 +1043,8 @@ config F71808E_WDT
depends on X86
help
This is the driver for the hardware watchdog on the Fintek F71808E,
- F71862FG, F71868, F71869, F71882FG, F71889FG, F81865 and F81866
- Super I/O controllers.
+ F71862FG, F71868, F71869, F71882FG, F71889FG, F81803, F81865, and
+ F81866 Super I/O controllers.
You can compile this driver directly into the kernel, or use
it as a module. The module will be called f71808e_wdt.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 7caa920e7e60..2ee352bf3372 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -49,7 +49,6 @@ obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
obj-$(CONFIG_977_WATCHDOG) += wdt977.o
obj-$(CONFIG_FTWDT010_WATCHDOG) += ftwdt010_wdt.o
obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
-obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o
obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
obj-$(CONFIG_SAMA5D4_WATCHDOG) += sama5d4_wdt.o
@@ -64,11 +63,11 @@ obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o
obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
obj-$(CONFIG_NPCM7XX_WATCHDOG) += npcm_wdt.o
obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o
-obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS4800_WATCHDOG) += ts4800_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
obj-$(CONFIG_IMX_SC_WDT) += imx_sc_wdt.o
+obj-$(CONFIG_IMX7ULP_WDT) += imx7ulp_wdt.o
obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index cc71861e033a..4ec0906bf12c 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -34,6 +34,7 @@ static const struct aspeed_wdt_config ast2500_config = {
static const struct of_device_id aspeed_wdt_of_table[] = {
{ .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
{ .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
+ { .compatible = "aspeed,ast2600-wdt", .data = &ast2500_config },
{ },
};
MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
@@ -53,6 +54,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
#define WDT_CTRL_ENABLE BIT(0)
#define WDT_TIMEOUT_STATUS 0x10
#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1)
+#define WDT_CLEAR_TIMEOUT_STATUS 0x14
+#define WDT_CLEAR_TIMEOUT_AND_BOOT_CODE_SELECTION BIT(0)
/*
* WDT_RESET_WIDTH controls the characteristics of the external pulse (if
@@ -165,6 +168,60 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
return 0;
}
+/* access_cs0 shows if cs0 is accessible, hence the inverted bit */
+static ssize_t access_cs0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct aspeed_wdt *wdt = dev_get_drvdata(dev);
+ u32 status = readl(wdt->base + WDT_TIMEOUT_STATUS);
+
+ return sprintf(buf, "%u\n",
+ !(status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY));
+}
+
+static ssize_t access_cs0_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct aspeed_wdt *wdt = dev_get_drvdata(dev);
+ unsigned long val;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val)
+ writel(WDT_CLEAR_TIMEOUT_AND_BOOT_CODE_SELECTION,
+ wdt->base + WDT_CLEAR_TIMEOUT_STATUS);
+
+ return size;
+}
+
+/*
+ * This attribute exists only if the system has booted from the alternate
+ * flash with 'alt-boot' option.
+ *
+ * At alternate flash the 'access_cs0' sysfs node provides:
+ * ast2400: a way to get access to the primary SPI flash chip at CS0
+ * after booting from the alternate chip at CS1.
+ * ast2500: a way to restore the normal address mapping from
+ * (CS0->CS1, CS1->CS0) to (CS0->CS0, CS1->CS1).
+ *
+ * Clearing the boot code selection and timeout counter also resets the
+ * chip select line mapping to its initial state. When the SoC is in the
+ * normal mapping state (i.e. booted from CS0), clearing those bits has no
+ * effect on either version of the SoC. In alternate boot mode (booted from
+ * CS1 after wdt2 expiration) the behavior differs as described above.
+ *
+ * This option can be used with wdt2 (watchdog1) only.
+ */
+static DEVICE_ATTR_RW(access_cs0);
+
+static struct attribute *bswitch_attrs[] = {
+ &dev_attr_access_cs0.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bswitch);
+
static const struct watchdog_ops aspeed_wdt_ops = {
.start = aspeed_wdt_start,
.stop = aspeed_wdt_stop,
@@ -259,7 +316,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
- if (of_device_is_compatible(np, "aspeed,ast2500-wdt")) {
+ if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
+ (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
reg &= config->ext_pulse_width_mask;
@@ -306,9 +364,16 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
}
status = readl(wdt->base + WDT_TIMEOUT_STATUS);
- if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
+ if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) {
wdt->wdd.bootstatus = WDIOF_CARDRESET;
+ if (of_device_is_compatible(np, "aspeed,ast2400-wdt") ||
+ of_device_is_compatible(np, "aspeed,ast2500-wdt"))
+ wdt->wdd.groups = bswitch_groups;
+ }
+
+ dev_set_drvdata(dev, wdt);
+
return devm_watchdog_register_device(dev, &wdt->wdd);
}
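
A note on the sysfs wiring above: ATTRIBUTE_GROUPS(bswitch) expands to a
NULL-terminated bswitch_groups array, and assigning it to wdd.groups before
registration lets the watchdog core create and remove the files together
with the device, so no explicit device_create_file() calls are needed. The
generic shape of the pattern, with hypothetical names:

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);             /* emits dev_attr_foo */

static struct attribute *foo_attrs[] = {
        &dev_attr_foo.attr,
        NULL
};
ATTRIBUTE_GROUPS(foo);                  /* emits foo_groups */

/* before registering: wdd->groups = foo_groups; */
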
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 2e09981fe978..75de664ef4b0 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -302,7 +302,7 @@ static int ath79_wdt_remove(struct platform_device *pdev)
return 0;
}
-static void ath97_wdt_shutdown(struct platform_device *pdev)
+static void ath79_wdt_shutdown(struct platform_device *pdev)
{
ath79_wdt_disable();
}
@@ -318,7 +318,7 @@ MODULE_DEVICE_TABLE(of, ath79_wdt_match);
static struct platform_driver ath79_wdt_driver = {
.probe = ath79_wdt_probe,
.remove = ath79_wdt_remove,
- .shutdown = ath97_wdt_shutdown,
+ .shutdown = ath79_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ath79_wdt_match),
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index b973b31179df..9393be584e72 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -473,29 +473,6 @@ static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
-static long cpwd_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int rval = -ENOIOCTLCMD;
-
- switch (cmd) {
- /* solaris ioctls are specific to this driver */
- case WIOCSTART:
- case WIOCSTOP:
- case WIOCGSTAT:
- mutex_lock(&cpwd_mutex);
- rval = cpwd_ioctl(file, cmd, arg);
- mutex_unlock(&cpwd_mutex);
- break;
-
- /* everything else is handled by the generic compat layer */
- default:
- break;
- }
-
- return rval;
-}
-
static ssize_t cpwd_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -520,7 +497,7 @@ static ssize_t cpwd_read(struct file *file, char __user *buffer,
static const struct file_operations cpwd_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = cpwd_ioctl,
- .compat_ioctl = cpwd_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = cpwd_open,
.write = cpwd_write,
.read = cpwd_read,
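
A note on compat_ptr_ioctl(): it is the generic helper for drivers whose
ioctl ABI is identical in 32-bit and 64-bit mode; it applies compat_ptr() to
the argument and forwards to ->unlocked_ioctl, which is all the hand-rolled
wrapper above did (minus the extra cpwd_mutex serialization, which the
native path never had). Usage is a one-liner:

static const struct file_operations my_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = my_ioctl,
        /* compat_ptr() conversion, then my_ioctl() */
        .compat_ioctl   = compat_ptr_ioctl,
};
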
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index 181440b7b4d0..aafc8d98bf9f 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -26,13 +26,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/suspend.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <linux/io.h>
-#include <linux/uaccess.h>
#define MAX_CMDLEN 240
#define DEFAULT_CMD "SYSTEM RESTART"
@@ -70,7 +68,6 @@ MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog is ac
module_param_named(nowayout, nowayout_info, bool, 0444);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = CONFIG_WATCHDOG_NOWAYOUT)");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("vmwatchdog");
static int __diag288(unsigned int func, unsigned int timeout,
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index ff5cf1b48a4d..e46104c2fd94 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -31,8 +31,10 @@
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
#define SIO_REG_DEVREV 0x22 /* Device revision */
#define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */
+#define SIO_REG_CLOCK_SEL 0x26 /* Clock select */
#define SIO_REG_ROM_ADDR_SEL 0x27 /* ROM address select */
#define SIO_F81866_REG_PORT_SEL 0x27 /* F81866 Multi-Function Register */
+#define SIO_REG_TSI_LEVEL_SEL 0x28 /* TSI Level select */
#define SIO_REG_MFUNCT1 0x29 /* Multi function select 1 */
#define SIO_REG_MFUNCT2 0x2a /* Multi function select 2 */
#define SIO_REG_MFUNCT3 0x2b /* Multi function select 3 */
@@ -49,6 +51,7 @@
#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
+#define SIO_F81803_ID 0x1210 /* Chipset ID */
#define SIO_F81865_ID 0x0704 /* Chipset ID */
#define SIO_F81866_ID 0x1010 /* Chipset ID */
@@ -108,7 +111,7 @@ MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
" given initial timeout. Zero (default) disables this feature.");
enum chips { f71808fg, f71858fg, f71862fg, f71868, f71869, f71882fg, f71889fg,
- f81865, f81866};
+ f81803, f81865, f81866};
static const char *f71808e_names[] = {
"f71808fg",
@@ -118,6 +121,7 @@ static const char *f71808e_names[] = {
"f71869",
"f71882fg",
"f71889fg",
+ "f81803",
"f81865",
"f81866",
};
@@ -370,6 +374,14 @@ static int watchdog_start(void)
superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf);
break;
+ case f81803:
+ /* Enable TSI Level register bank */
+ superio_clear_bit(watchdog.sioaddr, SIO_REG_CLOCK_SEL, 3);
+ /* Set pin 27 to WDTRST# */
+ superio_outb(watchdog.sioaddr, SIO_REG_TSI_LEVEL_SEL, 0x5f &
+ superio_inb(watchdog.sioaddr, SIO_REG_TSI_LEVEL_SEL));
+ break;
+
case f81865:
/* Set pin 70 to WDTRST# */
superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 5);
@@ -809,6 +821,9 @@ static int __init f71808e_find(int sioaddr)
/* Confirmed (by datasheet) not to have a watchdog. */
err = -ENODEV;
goto exit;
+ case SIO_F81803_ID:
+ watchdog.type = f81803;
+ break;
case SIO_F81865_ID:
watchdog.type = f81865;
break;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index c559f706ae7e..156360e37714 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -48,6 +48,7 @@
/* Includes */
#include <linux/acpi.h> /* For ACPI support */
+#include <linux/bits.h> /* For BIT() */
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
@@ -215,6 +216,23 @@ static int update_no_reboot_bit_mem(void *priv, bool set)
return 0;
}
+static int update_no_reboot_bit_cnt(void *priv, bool set)
+{
+ struct iTCO_wdt_private *p = priv;
+ u16 val, newval;
+
+ val = inw(TCO1_CNT(p));
+ if (set)
+ val |= BIT(0);
+ else
+ val &= ~BIT(0);
+ outw(val, TCO1_CNT(p));
+ newval = inw(TCO1_CNT(p));
+
+ /* make sure the update is successful */
+ return val != newval ? -EIO : 0;
+}
+
static void iTCO_wdt_no_reboot_bit_setup(struct iTCO_wdt_private *p,
struct itco_wdt_platform_data *pdata)
{
@@ -224,7 +242,9 @@ static void iTCO_wdt_no_reboot_bit_setup(struct iTCO_wdt_private *p,
return;
}
- if (p->iTCO_version >= 2)
+ if (p->iTCO_version >= 6)
+ p->update_no_reboot_bit = update_no_reboot_bit_cnt;
+ else if (p->iTCO_version >= 2)
p->update_no_reboot_bit = update_no_reboot_bit_mem;
else if (p->iTCO_version == 1)
p->update_no_reboot_bit = update_no_reboot_bit_pci;
@@ -452,7 +472,8 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
* Get the Memory-Mapped GCS or PMC register, we need it for the
* NO_REBOOT flag (TCO v2 and v3).
*/
- if (p->iTCO_version >= 2 && !pdata->update_no_reboot_bit) {
+ if (p->iTCO_version >= 2 && p->iTCO_version < 6 &&
+ !pdata->update_no_reboot_bit) {
p->gcs_pmc_res = platform_get_resource(pdev,
IORESOURCE_MEM,
ICH_RES_MEM_GCS_PMC);
@@ -502,6 +523,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
/* Clear out the (probably old) status */
switch (p->iTCO_version) {
+ case 6:
case 5:
case 4:
outw(0x0008, TCO1_STS(p)); /* Clear the Time Out Status bit */
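
A note on update_no_reboot_bit_cnt(): on TCO v6 the NO_REBOOT bit lives in
the TCO1_CNT I/O register rather than in a memory-mapped GCS/PMC register,
and a locked-down bit silently ignores writes, so the helper reads the
register back and reports -EIO when the update did not stick (assumption:
the probe path uses that status to detect an unusable watchdog). The
verify-after-write shape in isolation:

        val &= ~BIT(0);                 /* clear NO_REBOOT */
        outw(val, TCO1_CNT(p));
        if (inw(TCO1_CNT(p)) != val)    /* write did not stick */
                return -EIO;
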
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 32af3974e6bb..8d019a961ccc 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -55,7 +55,7 @@
#define IMX2_WDT_WMCR 0x08 /* Misc Register */
-#define IMX2_WDT_MAX_TIME 128
+#define IMX2_WDT_MAX_TIME 128U
#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
#define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8)
@@ -180,7 +180,7 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
{
unsigned int actual;
- actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
+ actual = min(new_timeout, IMX2_WDT_MAX_TIME);
__imx2_wdt_set_timeout(wdog, actual);
wdog->timeout = new_timeout;
return 0;
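
A note on the imx2_wdt hunk: new_timeout is in seconds, but it was being
clamped against max_hw_heartbeat_ms * 1000, i.e. a milliseconds value scaled
the wrong way, so the clamp never triggered. The replacement constant carries
a U suffix because the kernel's min() macro type-checks its arguments at
compile time:

        unsigned int new_timeout = 200;

        /* min(new_timeout, 128) warns about comparing distinct types;
         * 128U matches unsigned int: */
        unsigned int actual = min(new_timeout, 128U);   /* -> 128 */
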
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
new file mode 100644
index 000000000000..5ce51026989a
--- /dev/null
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define WDOG_CS 0x0
+#define WDOG_CS_CMD32EN BIT(13)
+#define WDOG_CS_ULK BIT(11)
+#define WDOG_CS_RCS BIT(10)
+#define WDOG_CS_EN BIT(7)
+#define WDOG_CS_UPDATE BIT(5)
+
+#define WDOG_CNT 0x4
+#define WDOG_TOVAL 0x8
+
+#define REFRESH_SEQ0 0xA602
+#define REFRESH_SEQ1 0xB480
+#define REFRESH ((REFRESH_SEQ1 << 16) | REFRESH_SEQ0)
+
+#define UNLOCK_SEQ0 0xC520
+#define UNLOCK_SEQ1 0xD928
+#define UNLOCK ((UNLOCK_SEQ1 << 16) | UNLOCK_SEQ0)
+
+#define DEFAULT_TIMEOUT 60
+#define MAX_TIMEOUT 128
+#define WDOG_CLOCK_RATE 1000
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0000);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct imx7ulp_wdt_device {
+ struct notifier_block restart_handler;
+ struct watchdog_device wdd;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static inline void imx7ulp_wdt_enable(void __iomem *base, bool enable)
+{
+ u32 val = readl(base + WDOG_CS);
+
+ writel(UNLOCK, base + WDOG_CNT);
+ if (enable)
+ writel(val | WDOG_CS_EN, base + WDOG_CS);
+ else
+ writel(val & ~WDOG_CS_EN, base + WDOG_CS);
+}
+
+static inline bool imx7ulp_wdt_is_enabled(void __iomem *base)
+{
+ u32 val = readl(base + WDOG_CS);
+
+ return val & WDOG_CS_EN;
+}
+
+static int imx7ulp_wdt_ping(struct watchdog_device *wdog)
+{
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+
+ writel(REFRESH, wdt->base + WDOG_CNT);
+
+ return 0;
+}
+
+static int imx7ulp_wdt_start(struct watchdog_device *wdog)
+{
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+
+ imx7ulp_wdt_enable(wdt->base, true);
+
+ return 0;
+}
+
+static int imx7ulp_wdt_stop(struct watchdog_device *wdog)
+{
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+
+ imx7ulp_wdt_enable(wdt->base, false);
+
+ return 0;
+}
+
+static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int timeout)
+{
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+ u32 val = WDOG_CLOCK_RATE * timeout;
+
+ writel(UNLOCK, wdt->base + WDOG_CNT);
+ writel(val, wdt->base + WDOG_TOVAL);
+
+ wdog->timeout = timeout;
+
+ return 0;
+}
+
+static const struct watchdog_ops imx7ulp_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = imx7ulp_wdt_start,
+ .stop = imx7ulp_wdt_stop,
+ .ping = imx7ulp_wdt_ping,
+ .set_timeout = imx7ulp_wdt_set_timeout,
+};
+
+static const struct watchdog_info imx7ulp_wdt_info = {
+ .identity = "i.MX7ULP watchdog timer",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+static inline void imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
+{
+ u32 val;
+
+ /* unlock the wdog for reconfiguration */
+ writel_relaxed(UNLOCK_SEQ0, base + WDOG_CNT);
+ writel_relaxed(UNLOCK_SEQ1, base + WDOG_CNT);
+
+ /* set an initial timeout value in TOVAL */
+ writel(timeout, base + WDOG_TOVAL);
+ /* Enable the 32-bit command sequence, select the 1 kHz LPO clock
+  * source (CLK = 01b) and allow later reconfiguration (UPDATE). */
+ val = WDOG_CS_CMD32EN | BIT(8) | WDOG_CS_UPDATE;
+ writel(val, base + WDOG_CS);
+}
+
+static void imx7ulp_wdt_action(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int imx7ulp_wdt_probe(struct platform_device *pdev)
+{
+ struct imx7ulp_wdt_device *imx7ulp_wdt;
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdog;
+ int ret;
+
+ imx7ulp_wdt = devm_kzalloc(dev, sizeof(*imx7ulp_wdt), GFP_KERNEL);
+ if (!imx7ulp_wdt)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, imx7ulp_wdt);
+
+ imx7ulp_wdt->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(imx7ulp_wdt->base))
+ return PTR_ERR(imx7ulp_wdt->base);
+
+ imx7ulp_wdt->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(imx7ulp_wdt->clk)) {
+ dev_err(dev, "Failed to get watchdog clock\n");
+ return PTR_ERR(imx7ulp_wdt->clk);
+ }
+
+ ret = clk_prepare_enable(imx7ulp_wdt->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, imx7ulp_wdt_action, imx7ulp_wdt->clk);
+ if (ret)
+ return ret;
+
+ wdog = &imx7ulp_wdt->wdd;
+ wdog->info = &imx7ulp_wdt_info;
+ wdog->ops = &imx7ulp_wdt_ops;
+ wdog->min_timeout = 1;
+ wdog->max_timeout = MAX_TIMEOUT;
+ wdog->parent = dev;
+ wdog->timeout = DEFAULT_TIMEOUT;
+
+ watchdog_init_timeout(wdog, 0, dev);
+ watchdog_stop_on_reboot(wdog);
+ watchdog_stop_on_unregister(wdog);
+ watchdog_set_drvdata(wdog, imx7ulp_wdt);
+ imx7ulp_wdt_init(imx7ulp_wdt->base, wdog->timeout * WDOG_CLOCK_RATE);
+
+ return devm_watchdog_register_device(dev, wdog);
+}
+
+static int __maybe_unused imx7ulp_wdt_suspend(struct device *dev)
+{
+ struct imx7ulp_wdt_device *imx7ulp_wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&imx7ulp_wdt->wdd))
+ imx7ulp_wdt_stop(&imx7ulp_wdt->wdd);
+
+ clk_disable_unprepare(imx7ulp_wdt->clk);
+
+ return 0;
+}
+
+static int __maybe_unused imx7ulp_wdt_resume(struct device *dev)
+{
+ struct imx7ulp_wdt_device *imx7ulp_wdt = dev_get_drvdata(dev);
+ u32 timeout = imx7ulp_wdt->wdd.timeout * WDOG_CLOCK_RATE;
+ int ret;
+
+ ret = clk_prepare_enable(imx7ulp_wdt->clk);
+ if (ret)
+ return ret;
+
+ if (imx7ulp_wdt_is_enabled(imx7ulp_wdt->base))
+ imx7ulp_wdt_init(imx7ulp_wdt->base, timeout);
+
+ if (watchdog_active(&imx7ulp_wdt->wdd))
+ imx7ulp_wdt_start(&imx7ulp_wdt->wdd);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(imx7ulp_wdt_pm_ops, imx7ulp_wdt_suspend,
+ imx7ulp_wdt_resume);
+
+static const struct of_device_id imx7ulp_wdt_dt_ids[] = {
+ { .compatible = "fsl,imx7ulp-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx7ulp_wdt_dt_ids);
+
+static struct platform_driver imx7ulp_wdt_driver = {
+ .probe = imx7ulp_wdt_probe,
+ .driver = {
+ .name = "imx7ulp-wdt",
+ .pm = &imx7ulp_wdt_pm_ops,
+ .of_match_table = imx7ulp_wdt_dt_ids,
+ },
+};
+module_platform_driver(imx7ulp_wdt_driver);
+
+MODULE_AUTHOR("Anson Huang <[email protected]>");
+MODULE_DESCRIPTION("Freescale i.MX7ULP watchdog driver");
+MODULE_LICENSE("GPL v2");
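
A note on exercising the new driver: it goes through the standard watchdog
chardev interface, nothing below is i.MX7ULP-specific. A minimal userspace
client (illustrative only; the device node name is an assumption):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
        int timeout = 60;
        int fd = open("/dev/watchdog0", O_WRONLY);

        if (fd < 0)
                return 1;

        /* rejected with -EINVAL if outside min_timeout..max_timeout */
        ioctl(fd, WDIOC_SETTIMEOUT, &timeout);

        for (;;) {
                ioctl(fd, WDIOC_KEEPALIVE, 0);  /* ping */
                sleep(timeout / 2);
        }
}
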
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index 78eaaf75a263..7ea5cf54e94a 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -175,12 +175,9 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_unregister(wdog);
ret = devm_watchdog_register_device(dev, wdog);
-
- if (ret) {
- dev_err(dev, "Failed to register watchdog device\n");
- return ret;
- }
-
+ if (ret)
+ return ret;
+
ret = imx_scu_irq_group_enable(SC_IRQ_GROUP_WDOG,
SC_IRQ_WDOG,
true);
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index d4a90916dd38..c6052ae54f32 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -162,7 +162,6 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct jz4740_wdt_drvdata *drvdata;
struct watchdog_device *jz4740_wdt;
- int ret;
drvdata = devm_kzalloc(dev, sizeof(struct jz4740_wdt_drvdata),
GFP_KERNEL);
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
deleted file mode 100644
index 1550ce3c5702..000000000000
--- a/drivers/watchdog/ks8695_wdt.c
+++ /dev/null
@@ -1,319 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Watchdog driver for Kendin/Micrel KS8695.
- *
- * (C) 2007 Andrew Victor
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/watchdog.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <mach/hardware.h>
-
-#define KS8695_TMR_OFFSET (0xF0000 + 0xE400)
-#define KS8695_TMR_VA (KS8695_IO_VA + KS8695_TMR_OFFSET)
-
-/*
- * Timer registers
- */
-#define KS8695_TMCON (0x00) /* Timer Control Register */
-#define KS8695_T0TC (0x08) /* Timer 0 Timeout Count Register */
-#define TMCON_T0EN (1 << 0) /* Timer 0 Enable */
-
-/* Timer0 Timeout Counter Register */
-#define T0TC_WATCHDOG (0xff) /* Enable watchdog mode */
-
-#define WDT_DEFAULT_TIME 5 /* seconds */
-#define WDT_MAX_TIME 171 /* seconds */
-
-static int wdt_time = WDT_DEFAULT_TIME;
-static bool nowayout = WATCHDOG_NOWAYOUT;
-
-module_param(wdt_time, int, 0);
-MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
- __MODULE_STRING(WDT_DEFAULT_TIME) ")");
-
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-#endif
-
-
-static unsigned long ks8695wdt_busy;
-static DEFINE_SPINLOCK(ks8695_lock);
-
-/* ......................................................................... */
-
-/*
- * Disable the watchdog.
- */
-static inline void ks8695_wdt_stop(void)
-{
- unsigned long tmcon;
-
- spin_lock(&ks8695_lock);
- /* disable timer0 */
- tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
- __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
- spin_unlock(&ks8695_lock);
-}
-
-/*
- * Enable and reset the watchdog.
- */
-static inline void ks8695_wdt_start(void)
-{
- unsigned long tmcon;
- unsigned long tval = wdt_time * KS8695_CLOCK_RATE;
-
- spin_lock(&ks8695_lock);
- /* disable timer0 */
- tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
- __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
-
- /* program timer0 */
- __raw_writel(tval | T0TC_WATCHDOG, KS8695_TMR_VA + KS8695_T0TC);
-
- /* re-enable timer0 */
- tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
- __raw_writel(tmcon | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
- spin_unlock(&ks8695_lock);
-}
-
-/*
- * Reload the watchdog timer. (ie, pat the watchdog)
- */
-static inline void ks8695_wdt_reload(void)
-{
- unsigned long tmcon;
-
- spin_lock(&ks8695_lock);
- /* disable, then re-enable timer0 */
- tmcon = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
- __raw_writel(tmcon & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
- __raw_writel(tmcon | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
- spin_unlock(&ks8695_lock);
-}
-
-/*
- * Change the watchdog time interval.
- */
-static int ks8695_wdt_settimeout(int new_time)
-{
- /*
- * All counting occurs at KS8695_CLOCK_RATE / 128 = 0.256 Hz
- *
- * Since WDV is a 16-bit counter, the maximum period is
- * 65536 / 0.256 = 256 seconds.
- */
- if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
- return -EINVAL;
-
- /* Set new watchdog time. It will be used when
- ks8695_wdt_start() is called. */
- wdt_time = new_time;
- return 0;
-}
-
-/* ......................................................................... */
-
-/*
- * Watchdog device is opened, and watchdog starts running.
- */
-static int ks8695_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(0, &ks8695wdt_busy))
- return -EBUSY;
-
- ks8695_wdt_start();
- return stream_open(inode, file);
-}
-
-/*
- * Close the watchdog device.
- * If CONFIG_WATCHDOG_NOWAYOUT is NOT defined then the watchdog is also
- * disabled.
- */
-static int ks8695_wdt_close(struct inode *inode, struct file *file)
-{
- /* Disable the watchdog when file is closed */
- if (!nowayout)
- ks8695_wdt_stop();
- clear_bit(0, &ks8695wdt_busy);
- return 0;
-}
-
-static const struct watchdog_info ks8695_wdt_info = {
- .identity = "ks8695 watchdog",
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-};
-
-/*
- * Handle commands from user-space.
- */
-static long ks8695_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_value;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &ks8695_wdt_info,
- sizeof(ks8695_wdt_info)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
- case WDIOC_SETOPTIONS:
- if (get_user(new_value, p))
- return -EFAULT;
- if (new_value & WDIOS_DISABLECARD)
- ks8695_wdt_stop();
- if (new_value & WDIOS_ENABLECARD)
- ks8695_wdt_start();
- return 0;
- case WDIOC_KEEPALIVE:
- ks8695_wdt_reload(); /* pat the watchdog */
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_value, p))
- return -EFAULT;
- if (ks8695_wdt_settimeout(new_value))
- return -EINVAL;
- /* Enable new time value */
- ks8695_wdt_start();
- /* Return current value */
- return put_user(wdt_time, p);
- case WDIOC_GETTIMEOUT:
- return put_user(wdt_time, p);
- default:
- return -ENOTTY;
- }
-}
-
-/*
- * Pat the watchdog whenever device is written to.
- */
-static ssize_t ks8695_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
-{
- ks8695_wdt_reload(); /* pat the watchdog */
- return len;
-}
-
-/* ......................................................................... */
-
-static const struct file_operations ks8695wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = ks8695_wdt_ioctl,
- .open = ks8695_wdt_open,
- .release = ks8695_wdt_close,
- .write = ks8695_wdt_write,
-};
-
-static struct miscdevice ks8695wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &ks8695wdt_fops,
-};
-
-static int ks8695wdt_probe(struct platform_device *pdev)
-{
- int res;
-
- if (ks8695wdt_miscdev.parent)
- return -EBUSY;
- ks8695wdt_miscdev.parent = &pdev->dev;
-
- res = misc_register(&ks8695wdt_miscdev);
- if (res)
- return res;
-
- pr_info("KS8695 Watchdog Timer enabled (%d seconds%s)\n",
- wdt_time, nowayout ? ", nowayout" : "");
- return 0;
-}
-
-static int ks8695wdt_remove(struct platform_device *pdev)
-{
- misc_deregister(&ks8695wdt_miscdev);
- ks8695wdt_miscdev.parent = NULL;
-
- return 0;
-}
-
-static void ks8695wdt_shutdown(struct platform_device *pdev)
-{
- ks8695_wdt_stop();
-}
-
-#ifdef CONFIG_PM
-
-static int ks8695wdt_suspend(struct platform_device *pdev, pm_message_t message)
-{
- ks8695_wdt_stop();
- return 0;
-}
-
-static int ks8695wdt_resume(struct platform_device *pdev)
-{
- if (ks8695wdt_busy)
- ks8695_wdt_start();
- return 0;
-}
-
-#else
-#define ks8695wdt_suspend NULL
-#define ks8695wdt_resume NULL
-#endif
-
-static struct platform_driver ks8695wdt_driver = {
- .probe = ks8695wdt_probe,
- .remove = ks8695wdt_remove,
- .shutdown = ks8695wdt_shutdown,
- .suspend = ks8695wdt_suspend,
- .resume = ks8695wdt_resume,
- .driver = {
- .name = "ks8695_wdt",
- },
-};
-
-static int __init ks8695_wdt_init(void)
-{
- /* Check that the heartbeat value is within range;
- if not reset to the default */
- if (ks8695_wdt_settimeout(wdt_time)) {
- ks8695_wdt_settimeout(WDT_DEFAULT_TIME);
- pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i"
- ", using %d\n", wdt_time, WDT_MAX_TIME);
- }
- return platform_driver_register(&ks8695wdt_driver);
-}
-
-static void __exit ks8695_wdt_exit(void)
-{
- platform_driver_unregister(&ks8695wdt_driver);
-}
-
-module_init(ks8695_wdt_init);
-module_exit(ks8695_wdt_exit);
-
-MODULE_AUTHOR("Andrew Victor");
-MODULE_DESCRIPTION("Watchdog driver for KS8695");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ks8695_wdt");
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
deleted file mode 100644
index db124cebe838..000000000000
--- a/drivers/watchdog/nuc900_wdt.c
+++ /dev/null
@@ -1,302 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2009 Nuvoton technology corporation.
- *
- * Wan ZongShun <[email protected]>
- */
-
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/watchdog.h>
-#include <linux/uaccess.h>
-
-#define REG_WTCR 0x1c
-#define WTCLK (0x01 << 10)
-#define WTE (0x01 << 7) /*wdt enable*/
-#define WTIS (0x03 << 4)
-#define WTIF (0x01 << 3)
-#define WTRF (0x01 << 2)
-#define WTRE (0x01 << 1)
-#define WTR (0x01 << 0)
-/*
- * The watchdog time interval can be calculated via following formula:
- * WTIS real time interval (formula)
- * 0x00 ((2^ 14 ) * ((external crystal freq) / 256))seconds
- * 0x01 ((2^ 16 ) * ((external crystal freq) / 256))seconds
- * 0x02 ((2^ 18 ) * ((external crystal freq) / 256))seconds
- * 0x03 ((2^ 20 ) * ((external crystal freq) / 256))seconds
- *
- * The external crystal freq is 15Mhz in the nuc900 evaluation board.
- * So 0x00 = +-0.28 seconds, 0x01 = +-1.12 seconds, 0x02 = +-4.48 seconds,
- * 0x03 = +- 16.92 seconds..
- */
-#define WDT_HW_TIMEOUT 0x02
-#define WDT_TIMEOUT (HZ/2)
-#define WDT_HEARTBEAT 15
-
-static int heartbeat = WDT_HEARTBEAT;
-module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
- "(default = " __MODULE_STRING(WDT_HEARTBEAT) ")");
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
- "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-struct nuc900_wdt {
- struct clk *wdt_clock;
- struct platform_device *pdev;
- void __iomem *wdt_base;
- char expect_close;
- struct timer_list timer;
- spinlock_t wdt_lock;
- unsigned long next_heartbeat;
-};
-
-static unsigned long nuc900wdt_busy;
-static struct nuc900_wdt *nuc900_wdt;
-
-static inline void nuc900_wdt_keepalive(void)
-{
- unsigned int val;
-
- spin_lock(&nuc900_wdt->wdt_lock);
-
- val = __raw_readl(nuc900_wdt->wdt_base + REG_WTCR);
- val |= (WTR | WTIF);
- __raw_writel(val, nuc900_wdt->wdt_base + REG_WTCR);
-
- spin_unlock(&nuc900_wdt->wdt_lock);
-}
-
-static inline void nuc900_wdt_start(void)
-{
- unsigned int val;
-
- spin_lock(&nuc900_wdt->wdt_lock);
-
- val = __raw_readl(nuc900_wdt->wdt_base + REG_WTCR);
- val |= (WTRE | WTE | WTR | WTCLK | WTIF);
- val &= ~WTIS;
- val |= (WDT_HW_TIMEOUT << 0x04);
- __raw_writel(val, nuc900_wdt->wdt_base + REG_WTCR);
-
- spin_unlock(&nuc900_wdt->wdt_lock);
-
- nuc900_wdt->next_heartbeat = jiffies + heartbeat * HZ;
- mod_timer(&nuc900_wdt->timer, jiffies + WDT_TIMEOUT);
-}
-
-static inline void nuc900_wdt_stop(void)
-{
- unsigned int val;
-
- del_timer(&nuc900_wdt->timer);
-
- spin_lock(&nuc900_wdt->wdt_lock);
-
- val = __raw_readl(nuc900_wdt->wdt_base + REG_WTCR);
- val &= ~WTE;
- __raw_writel(val, nuc900_wdt->wdt_base + REG_WTCR);
-
- spin_unlock(&nuc900_wdt->wdt_lock);
-}
-
-static inline void nuc900_wdt_ping(void)
-{
- nuc900_wdt->next_heartbeat = jiffies + heartbeat * HZ;
-}
-
-static int nuc900_wdt_open(struct inode *inode, struct file *file)
-{
-
- if (test_and_set_bit(0, &nuc900wdt_busy))
- return -EBUSY;
-
- nuc900_wdt_start();
-
- return stream_open(inode, file);
-}
-
-static int nuc900_wdt_close(struct inode *inode, struct file *file)
-{
- if (nuc900_wdt->expect_close == 42)
- nuc900_wdt_stop();
- else {
- dev_crit(&nuc900_wdt->pdev->dev,
- "Unexpected close, not stopping watchdog!\n");
- nuc900_wdt_ping();
- }
-
- nuc900_wdt->expect_close = 0;
- clear_bit(0, &nuc900wdt_busy);
- return 0;
-}
-
-static const struct watchdog_info nuc900_wdt_info = {
- .identity = "nuc900 watchdog",
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE,
-};
-
-static long nuc900_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_value;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &nuc900_wdt_info,
- sizeof(nuc900_wdt_info)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_KEEPALIVE:
- nuc900_wdt_ping();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_value, p))
- return -EFAULT;
-
- heartbeat = new_value;
- nuc900_wdt_ping();
-
- return put_user(new_value, p);
- case WDIOC_GETTIMEOUT:
- return put_user(heartbeat, p);
- default:
- return -ENOTTY;
- }
-}
-
-static ssize_t nuc900_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- if (!len)
- return 0;
-
- /* Scan for magic character */
- if (!nowayout) {
- size_t i;
-
- nuc900_wdt->expect_close = 0;
-
- for (i = 0; i < len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V') {
- nuc900_wdt->expect_close = 42;
- break;
- }
- }
- }
-
- nuc900_wdt_ping();
- return len;
-}
-
-static void nuc900_wdt_timer_ping(struct timer_list *unused)
-{
- if (time_before(jiffies, nuc900_wdt->next_heartbeat)) {
- nuc900_wdt_keepalive();
- mod_timer(&nuc900_wdt->timer, jiffies + WDT_TIMEOUT);
- } else
- dev_warn(&nuc900_wdt->pdev->dev, "Will reset the machine !\n");
-}
-
-static const struct file_operations nuc900wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = nuc900_wdt_ioctl,
- .open = nuc900_wdt_open,
- .release = nuc900_wdt_close,
- .write = nuc900_wdt_write,
-};
-
-static struct miscdevice nuc900wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &nuc900wdt_fops,
-};
-
-static int nuc900wdt_probe(struct platform_device *pdev)
-{
- int ret = 0;
-
- nuc900_wdt = devm_kzalloc(&pdev->dev, sizeof(*nuc900_wdt),
- GFP_KERNEL);
- if (!nuc900_wdt)
- return -ENOMEM;
-
- nuc900_wdt->pdev = pdev;
-
- spin_lock_init(&nuc900_wdt->wdt_lock);
-
- nuc900_wdt->wdt_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(nuc900_wdt->wdt_base))
- return PTR_ERR(nuc900_wdt->wdt_base);
-
- nuc900_wdt->wdt_clock = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(nuc900_wdt->wdt_clock)) {
- dev_err(&pdev->dev, "failed to find watchdog clock source\n");
- return PTR_ERR(nuc900_wdt->wdt_clock);
- }
-
- clk_enable(nuc900_wdt->wdt_clock);
-
- timer_setup(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
-
- ret = misc_register(&nuc900wdt_miscdev);
- if (ret) {
- dev_err(&pdev->dev, "err register miscdev on minor=%d (%d)\n",
- WATCHDOG_MINOR, ret);
- goto err_clk;
- }
-
- return 0;
-
-err_clk:
- clk_disable(nuc900_wdt->wdt_clock);
- return ret;
-}
-
-static int nuc900wdt_remove(struct platform_device *pdev)
-{
- misc_deregister(&nuc900wdt_miscdev);
-
- clk_disable(nuc900_wdt->wdt_clock);
-
- return 0;
-}
-
-static struct platform_driver nuc900wdt_driver = {
- .probe = nuc900wdt_probe,
- .remove = nuc900wdt_remove,
- .driver = {
- .name = "nuc900-wdt",
- },
-};
-
-module_platform_driver(nuc900wdt_driver);
-
-MODULE_AUTHOR("Wan ZongShun <[email protected]>");
-MODULE_DESCRIPTION("Watchdog driver for NUC900");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:nuc900-wdt");
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index cdb0d174c5e2..1cccf8eb1c5d 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -35,7 +35,15 @@
* Watchdog timer block registers.
*/
#define TIMER_CTRL 0x0000
-#define TIMER_A370_STATUS 0x04
+#define TIMER1_FIXED_ENABLE_BIT BIT(12)
+#define WDT_AXP_FIXED_ENABLE_BIT BIT(10)
+#define TIMER1_ENABLE_BIT BIT(2)
+
+#define TIMER_A370_STATUS 0x0004
+#define WDT_A370_EXPIRED BIT(31)
+#define TIMER1_STATUS_BIT BIT(8)
+
+#define TIMER1_VAL_OFF 0x001c
#define WDT_MAX_CYCLE_COUNT 0xffffffff
@@ -43,9 +51,6 @@
#define WDT_A370_RATIO_SHIFT 5
#define WDT_A370_RATIO (1 << WDT_A370_RATIO_SHIFT)
-#define WDT_AXP_FIXED_ENABLE_BIT BIT(10)
-#define WDT_A370_EXPIRED BIT(31)
-
static bool nowayout = WATCHDOG_NOWAYOUT;
static int heartbeat = -1; /* module parameter (seconds) */
@@ -158,6 +163,7 @@ static int armadaxp_wdt_clock_init(struct platform_device *pdev,
struct orion_watchdog *dev)
{
int ret;
+ u32 val;
dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed");
if (IS_ERR(dev->clk))
@@ -168,10 +174,9 @@ static int armadaxp_wdt_clock_init(struct platform_device *pdev,
return ret;
}
- /* Enable the fixed watchdog clock input */
- atomic_io_modify(dev->reg + TIMER_CTRL,
- WDT_AXP_FIXED_ENABLE_BIT,
- WDT_AXP_FIXED_ENABLE_BIT);
+ /* Fix the wdt and timer1 clock freqency to 25MHz */
+ val = WDT_AXP_FIXED_ENABLE_BIT | TIMER1_FIXED_ENABLE_BIT;
+ atomic_io_modify(dev->reg + TIMER_CTRL, val, val);
dev->clk_rate = clk_get_rate(dev->clk);
return 0;
@@ -183,6 +188,10 @@ static int orion_wdt_ping(struct watchdog_device *wdt_dev)
/* Reload watchdog duration */
writel(dev->clk_rate * wdt_dev->timeout,
dev->reg + dev->data->wdt_counter_offset);
+ if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
+ writel(dev->clk_rate * (wdt_dev->timeout - wdt_dev->pretimeout),
+ dev->reg + TIMER1_VAL_OFF);
+
return 0;
}
@@ -194,13 +203,18 @@ static int armada375_start(struct watchdog_device *wdt_dev)
/* Set watchdog duration */
writel(dev->clk_rate * wdt_dev->timeout,
dev->reg + dev->data->wdt_counter_offset);
+ if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
+ writel(dev->clk_rate * (wdt_dev->timeout - wdt_dev->pretimeout),
+ dev->reg + TIMER1_VAL_OFF);
/* Clear the watchdog expiration bit */
atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
/* Enable watchdog timer */
- atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
- dev->data->wdt_enable_bit);
+ reg = dev->data->wdt_enable_bit;
+ if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
+ reg |= TIMER1_ENABLE_BIT;
+ atomic_io_modify(dev->reg + TIMER_CTRL, reg, reg);
/* Enable reset on watchdog */
reg = readl(dev->rstout);
@@ -277,7 +291,7 @@ static int orion_stop(struct watchdog_device *wdt_dev)
static int armada375_stop(struct watchdog_device *wdt_dev)
{
struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
- u32 reg;
+ u32 reg, mask;
/* Disable reset on watchdog */
atomic_io_modify(dev->rstout_mask, dev->data->rstout_mask_bit,
@@ -287,7 +301,10 @@ static int armada375_stop(struct watchdog_device *wdt_dev)
writel(reg, dev->rstout);
/* Disable watchdog timer */
- atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
+ mask = dev->data->wdt_enable_bit;
+ if (wdt_dev->info->options & WDIOF_PRETIMEOUT)
+ mask |= TIMER1_ENABLE_BIT;
+ atomic_io_modify(dev->reg + TIMER_CTRL, mask, 0);
return 0;
}
@@ -349,7 +366,7 @@ static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
return readl(dev->reg + dev->data->wdt_counter_offset) / dev->clk_rate;
}
-static const struct watchdog_info orion_wdt_info = {
+static struct watchdog_info orion_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "Orion Watchdog",
};
@@ -368,6 +385,16 @@ static irqreturn_t orion_wdt_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static irqreturn_t orion_wdt_pre_irq(int irq, void *devid)
+{
+ struct orion_watchdog *dev = devid;
+
+ atomic_io_modify(dev->reg + TIMER_A370_STATUS,
+ TIMER1_STATUS_BIT, 0);
+ watchdog_notify_pretimeout(&dev->wdt);
+ return IRQ_HANDLED;
+}
+
/*
* The original devicetree binding for this driver specified only
* one memory resource, so in order to keep DT backwards compatibility
@@ -589,6 +616,19 @@ static int orion_wdt_probe(struct platform_device *pdev)
}
}
+ /* Optional 2nd interrupt for pretimeout */
+ irq = platform_get_irq(pdev, 1);
+ if (irq > 0) {
+ orion_wdt_info.options |= WDIOF_PRETIMEOUT;
+ ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
+ 0, pdev->name, dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto disable_clk;
+ }
+ }
+
watchdog_set_nowayout(&dev->wdt, nowayout);
ret = watchdog_register_device(&dev->wdt);
if (ret)
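
A note on the pretimeout plumbing above: timer1 is loaded with
(timeout - pretimeout), so it fires before the watchdog bites, and the
handler forwards the event with watchdog_notify_pretimeout(), which hands it
to the selected pretimeout governor (noop, panic, ...). Userspace sets the
margin through the standard ioctls; a short sketch (assuming an already-open
watchdog fd):

        int pre = 3;    /* seconds before the hard reset */

        /* only accepted when the driver advertises WDIOF_PRETIMEOUT */
        if (ioctl(fd, WDIOC_SETPRETIMEOUT, &pre) == 0)
                ioctl(fd, WDIOC_GETPRETIMEOUT, &pre);   /* read back */
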
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 7be7f87be28f..a494543d3ae1 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -19,6 +21,9 @@ enum wdt_reg {
WDT_BITE_TIME,
};
+#define QCOM_WDT_ENABLE BIT(0)
+#define QCOM_WDT_ENABLE_IRQ BIT(1)
+
static const u32 reg_offset_data_apcs_tmr[] = {
[WDT_RST] = 0x38,
[WDT_EN] = 0x40,
@@ -37,7 +42,6 @@ static const u32 reg_offset_data_kpss[] = {
struct qcom_wdt {
struct watchdog_device wdd;
- struct clk *clk;
unsigned long rate;
void __iomem *base;
const u32 *layout;
@@ -54,15 +58,35 @@ struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
return container_of(wdd, struct qcom_wdt, wdd);
}
+static inline int qcom_get_enable(struct watchdog_device *wdd)
+{
+ int enable = QCOM_WDT_ENABLE;
+
+ if (wdd->pretimeout)
+ enable |= QCOM_WDT_ENABLE_IRQ;
+
+ return enable;
+}
+
+static irqreturn_t qcom_wdt_isr(int irq, void *arg)
+{
+ struct watchdog_device *wdd = arg;
+
+ watchdog_notify_pretimeout(wdd);
+
+ return IRQ_HANDLED;
+}
+
static int qcom_wdt_start(struct watchdog_device *wdd)
{
struct qcom_wdt *wdt = to_qcom_wdt(wdd);
+ unsigned int bark = wdd->timeout - wdd->pretimeout;
writel(0, wdt_addr(wdt, WDT_EN));
writel(1, wdt_addr(wdt, WDT_RST));
- writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));
+ writel(bark * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));
writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME));
- writel(1, wdt_addr(wdt, WDT_EN));
+ writel(qcom_get_enable(wdd), wdt_addr(wdt, WDT_EN));
return 0;
}
@@ -89,6 +113,13 @@ static int qcom_wdt_set_timeout(struct watchdog_device *wdd,
return qcom_wdt_start(wdd);
}
+static int qcom_wdt_set_pretimeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->pretimeout = timeout;
+ return qcom_wdt_start(wdd);
+}
+
static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
void *data)
{
@@ -105,7 +136,7 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
writel(1, wdt_addr(wdt, WDT_RST));
writel(timeout, wdt_addr(wdt, WDT_BARK_TIME));
writel(timeout, wdt_addr(wdt, WDT_BITE_TIME));
- writel(1, wdt_addr(wdt, WDT_EN));
+ writel(QCOM_WDT_ENABLE, wdt_addr(wdt, WDT_EN));
/*
* Actually make sure the above sequence hits hardware before sleeping.
@@ -121,6 +152,7 @@ static const struct watchdog_ops qcom_wdt_ops = {
.stop = qcom_wdt_stop,
.ping = qcom_wdt_ping,
.set_timeout = qcom_wdt_set_timeout,
+ .set_pretimeout = qcom_wdt_set_pretimeout,
.restart = qcom_wdt_restart,
.owner = THIS_MODULE,
};
@@ -133,6 +165,15 @@ static const struct watchdog_info qcom_wdt_info = {
.identity = KBUILD_MODNAME,
};
+static const struct watchdog_info qcom_wdt_pt_info = {
+ .options = WDIOF_KEEPALIVEPING
+ | WDIOF_MAGICCLOSE
+ | WDIOF_SETTIMEOUT
+ | WDIOF_PRETIMEOUT
+ | WDIOF_CARDRESET,
+ .identity = KBUILD_MODNAME,
+};
+
static void qcom_clk_disable_unprepare(void *data)
{
clk_disable_unprepare(data);
@@ -146,7 +187,8 @@ static int qcom_wdt_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
const u32 *regs;
u32 percpu_offset;
- int ret;
+ int irq, ret;
+ struct clk *clk;
regs = of_device_get_match_data(dev);
if (!regs) {
@@ -173,19 +215,18 @@ static int qcom_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- wdt->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(wdt->clk)) {
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
dev_err(dev, "failed to get input clock\n");
- return PTR_ERR(wdt->clk);
+ return PTR_ERR(clk);
}
- ret = clk_prepare_enable(wdt->clk);
+ ret = clk_prepare_enable(clk);
if (ret) {
dev_err(dev, "failed to setup clock\n");
return ret;
}
- ret = devm_add_action_or_reset(dev, qcom_clk_disable_unprepare,
- wdt->clk);
+ ret = devm_add_action_or_reset(dev, qcom_clk_disable_unprepare, clk);
if (ret)
return ret;
@@ -197,14 +238,31 @@ static int qcom_wdt_probe(struct platform_device *pdev)
 * that it would bite before a second elapses, its usefulness is
* limited. Bail if this is the case.
*/
- wdt->rate = clk_get_rate(wdt->clk);
+ wdt->rate = clk_get_rate(clk);
if (wdt->rate == 0 ||
wdt->rate > 0x10000000U) {
dev_err(dev, "invalid clock rate\n");
return -EINVAL;
}
- wdt->wdd.info = &qcom_wdt_info;
+ /* check if there is pretimeout support */
+ irq = platform_get_irq(pdev, 0);
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, qcom_wdt_isr,
+ IRQF_TRIGGER_RISING,
+ "wdt_bark", &wdt->wdd);
+ if (ret)
+ return ret;
+
+ wdt->wdd.info = &qcom_wdt_pt_info;
+ wdt->wdd.pretimeout = 1;
+ } else {
+ if (irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ wdt->wdd.info = &qcom_wdt_info;
+ }
+
wdt->wdd.ops = &qcom_wdt_ops;
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
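
A note on the bark/bite split above: the bark timer raises the IRQ (the
pretimeout notification), the bite timer performs the reset. With the default
pretimeout of 1 and, say, a 10-second timeout:

        unsigned int bark = wdd->timeout - wdd->pretimeout;      /* 9 s */

        writel(bark * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));  /* IRQ at 9 s */
        writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME));
                                                                 /* reset at 10 s */
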
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index edba4e278685..0bb17b046140 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -284,10 +284,8 @@ static int sprd_wdt_probe(struct platform_device *pdev)
}
wdt->irq = platform_get_irq(pdev, 0);
- if (wdt->irq < 0) {
- dev_err(dev, "failed to get IRQ resource\n");
+ if (wdt->irq < 0)
return wdt->irq;
- }
ret = devm_request_irq(dev, wdt->irq, sprd_wdt_isr, IRQF_NO_SUSPEND,
"sprd-wdt", (void *)wdt);
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index dec660c509b3..4a363a8b2d20 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -21,8 +21,11 @@
#include <linux/version.h>
#include <linux/watchdog.h>
+#include <asm/unaligned.h>
+
#define ZIIRAVE_TIMEOUT_MIN 3
#define ZIIRAVE_TIMEOUT_MAX 255
+#define ZIIRAVE_TIMEOUT_DEFAULT 30
#define ZIIRAVE_PING_VALUE 0x0
@@ -48,16 +51,12 @@ static char *ziirave_reasons[] = {"power cycle", "hw watchdog", NULL, NULL,
#define ZIIRAVE_FIRM_PKT_TOTAL_SIZE 20
#define ZIIRAVE_FIRM_PKT_DATA_SIZE 16
-#define ZIIRAVE_FIRM_FLASH_MEMORY_START 0x1600
-#define ZIIRAVE_FIRM_FLASH_MEMORY_END 0x2bbf
+#define ZIIRAVE_FIRM_FLASH_MEMORY_START (2 * 0x1600)
+#define ZIIRAVE_FIRM_FLASH_MEMORY_END (2 * 0x2bbf)
+#define ZIIRAVE_FIRM_PAGE_SIZE 128
/* Received and ready for next Download packet. */
#define ZIIRAVE_FIRM_DOWNLOAD_ACK 1
-/* Currently writing to flash. Retry Download status in a moment! */
-#define ZIIRAVE_FIRM_DOWNLOAD_BUSY 2
-
-/* Wait for ACK timeout in ms */
-#define ZIIRAVE_FIRM_WAIT_FOR_ACK_TIMEOUT 50
/* Firmware commands */
#define ZIIRAVE_CMD_DOWNLOAD_START 0x10
@@ -68,6 +67,12 @@ static char *ziirave_reasons[] = {"power cycle", "hw watchdog", NULL, NULL,
#define ZIIRAVE_CMD_JUMP_TO_BOOTLOADER 0x0c
#define ZIIRAVE_CMD_DOWNLOAD_PACKET 0x0e
+#define ZIIRAVE_CMD_JUMP_TO_BOOTLOADER_MAGIC 1
+#define ZIIRAVE_CMD_RESET_PROCESSOR_MAGIC 1
+
+#define ZIIRAVE_FW_VERSION_FMT "02.%02u.%02u"
+#define ZIIRAVE_BL_VERSION_FMT "01.%02u.%02u"
+
struct ziirave_wdt_rev {
unsigned char major;
unsigned char minor;
@@ -165,67 +170,37 @@ static unsigned int ziirave_wdt_get_timeleft(struct watchdog_device *wdd)
return ret;
}
-static int ziirave_firm_wait_for_ack(struct watchdog_device *wdd)
+static int ziirave_firm_read_ack(struct watchdog_device *wdd)
{
struct i2c_client *client = to_i2c_client(wdd->parent);
int ret;
- unsigned long timeout;
- timeout = jiffies + msecs_to_jiffies(ZIIRAVE_FIRM_WAIT_FOR_ACK_TIMEOUT);
- do {
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
-
- usleep_range(5000, 10000);
-
- ret = i2c_smbus_read_byte(client);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to read byte\n");
- return ret;
- }
- } while (ret == ZIIRAVE_FIRM_DOWNLOAD_BUSY);
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read status byte\n");
+ return ret;
+ }
return ret == ZIIRAVE_FIRM_DOWNLOAD_ACK ? 0 : -EIO;
}
-static int ziirave_firm_set_read_addr(struct watchdog_device *wdd, u16 addr)
+static int ziirave_firm_set_read_addr(struct watchdog_device *wdd, u32 addr)
{
struct i2c_client *client = to_i2c_client(wdd->parent);
+ const u16 addr16 = (u16)addr / 2;
u8 address[2];
- address[0] = addr & 0xff;
- address[1] = (addr >> 8) & 0xff;
+ put_unaligned_le16(addr16, address);
return i2c_smbus_write_block_data(client,
ZIIRAVE_CMD_DOWNLOAD_SET_READ_ADDR,
- ARRAY_SIZE(address), address);
-}
-
-static int ziirave_firm_write_block_data(struct watchdog_device *wdd,
- u8 command, u8 length, const u8 *data,
- bool wait_for_ack)
-{
- struct i2c_client *client = to_i2c_client(wdd->parent);
- int ret;
-
- ret = i2c_smbus_write_block_data(client, command, length, data);
- if (ret) {
- dev_err(&client->dev,
- "Failed to send command 0x%02x: %d\n", command, ret);
- return ret;
- }
-
- if (wait_for_ack)
- ret = ziirave_firm_wait_for_ack(wdd);
-
- return ret;
+ sizeof(address), address);
}
-static int ziirave_firm_write_byte(struct watchdog_device *wdd, u8 command,
- u8 byte, bool wait_for_ack)
+static bool ziirave_firm_addr_readonly(u32 addr)
{
- return ziirave_firm_write_block_data(wdd, command, 1, &byte,
- wait_for_ack);
+ return addr < ZIIRAVE_FIRM_FLASH_MEMORY_START ||
+ addr > ZIIRAVE_FIRM_FLASH_MEMORY_END;
}
/*
@@ -240,35 +215,53 @@ static int ziirave_firm_write_byte(struct watchdog_device *wdd, u8 command,
* Data0 .. Data15: Array of 16 bytes of data.
* Checksum: Checksum byte to verify data integrity.
*/
-static int ziirave_firm_write_pkt(struct watchdog_device *wdd,
- const struct ihex_binrec *rec)
+static int __ziirave_firm_write_pkt(struct watchdog_device *wdd,
+ u32 addr, const u8 *data, u8 len)
{
+ const u16 addr16 = (u16)addr / 2;
struct i2c_client *client = to_i2c_client(wdd->parent);
u8 i, checksum = 0, packet[ZIIRAVE_FIRM_PKT_TOTAL_SIZE];
int ret;
- u16 addr;
- memset(packet, 0, ARRAY_SIZE(packet));
+ /* Check max data size */
+ if (len > ZIIRAVE_FIRM_PKT_DATA_SIZE) {
+ dev_err(&client->dev, "Firmware packet too long (%d)\n",
+ len);
+ return -EMSGSIZE;
+ }
+
+ /*
+ * Ignore packets that target program memory outside of the
+ * app partition, since the bootloader will ignore them. At
+ * the same time, make sure we still allow the zero-length
+ * packet that is sent as the last step of the firmware
+ * update.
+ */
+ if (len && ziirave_firm_addr_readonly(addr))
+ return 0;
/* Packet length */
- packet[0] = (u8)be16_to_cpu(rec->len);
+ packet[0] = len;
/* Packet address */
- addr = (be32_to_cpu(rec->addr) & 0xffff) >> 1;
- packet[1] = addr & 0xff;
- packet[2] = (addr & 0xff00) >> 8;
+ put_unaligned_le16(addr16, packet + 1);
- /* Packet data */
- if (be16_to_cpu(rec->len) > ZIIRAVE_FIRM_PKT_DATA_SIZE)
- return -EMSGSIZE;
- memcpy(packet + 3, rec->data, be16_to_cpu(rec->len));
+ memcpy(packet + 3, data, len);
+ memset(packet + 3 + len, 0, ZIIRAVE_FIRM_PKT_DATA_SIZE - len);
/* Packet checksum */
- for (i = 0; i < ZIIRAVE_FIRM_PKT_TOTAL_SIZE - 1; i++)
+ for (i = 0; i < len + 3; i++)
checksum += packet[i];
packet[ZIIRAVE_FIRM_PKT_TOTAL_SIZE - 1] = checksum;
- ret = ziirave_firm_write_block_data(wdd, ZIIRAVE_CMD_DOWNLOAD_PACKET,
- ARRAY_SIZE(packet), packet, true);
+ ret = i2c_smbus_write_block_data(client, ZIIRAVE_CMD_DOWNLOAD_PACKET,
+ sizeof(packet), packet);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to send DOWNLOAD_PACKET: %d\n", ret);
+ return ret;
+ }
+
+ ret = ziirave_firm_read_ack(wdd);
if (ret)
dev_err(&client->dev,
"Failed to write firmware packet at address 0x%04x: %d\n",
@@ -277,6 +270,30 @@ static int ziirave_firm_write_pkt(struct watchdog_device *wdd,
return ret;
}
+static int ziirave_firm_write_pkt(struct watchdog_device *wdd,
+ u32 addr, const u8 *data, u8 len)
+{
+ const u8 max_write_len = ZIIRAVE_FIRM_PAGE_SIZE -
+ (addr - ALIGN_DOWN(addr, ZIIRAVE_FIRM_PAGE_SIZE));
+ int ret;
+
+ if (len > max_write_len) {
+ /*
+ * If the data crosses a page boundary, we need to split
+ * this write in two.
+ */
+ ret = __ziirave_firm_write_pkt(wdd, addr, data, max_write_len);
+ if (ret)
+ return ret;
+
+ addr += max_write_len;
+ data += max_write_len;
+ len -= max_write_len;
+ }
+
+ return __ziirave_firm_write_pkt(wdd, addr, data, len);
+}
+
static int ziirave_firm_verify(struct watchdog_device *wdd,
const struct firmware *fw)
{
@@ -284,16 +301,12 @@ static int ziirave_firm_verify(struct watchdog_device *wdd,
const struct ihex_binrec *rec;
int i, ret;
u8 data[ZIIRAVE_FIRM_PKT_DATA_SIZE];
- u16 addr;
for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
- /* Zero length marks end of records */
- if (!be16_to_cpu(rec->len))
- break;
+ const u16 len = be16_to_cpu(rec->len);
+ const u32 addr = be32_to_cpu(rec->addr);
- addr = (be32_to_cpu(rec->addr) & 0xffff) >> 1;
- if (addr < ZIIRAVE_FIRM_FLASH_MEMORY_START ||
- addr > ZIIRAVE_FIRM_FLASH_MEMORY_END)
+ if (ziirave_firm_addr_readonly(addr))
continue;
ret = ziirave_firm_set_read_addr(wdd, addr);
@@ -304,7 +317,7 @@ static int ziirave_firm_verify(struct watchdog_device *wdd,
return ret;
}
- for (i = 0; i < ARRAY_SIZE(data); i++) {
+ for (i = 0; i < len; i++) {
ret = i2c_smbus_read_byte_data(client,
ZIIRAVE_CMD_DOWNLOAD_READ_BYTE);
if (ret < 0) {
@@ -315,7 +328,7 @@ static int ziirave_firm_verify(struct watchdog_device *wdd,
data[i] = ret;
}
- if (memcmp(data, rec->data, be16_to_cpu(rec->len))) {
+ if (memcmp(data, rec->data, len)) {
dev_err(&client->dev,
"Firmware mismatch at address 0x%04x\n", addr);
return -EINVAL;
@@ -329,97 +342,45 @@ static int ziirave_firm_upload(struct watchdog_device *wdd,
const struct firmware *fw)
{
struct i2c_client *client = to_i2c_client(wdd->parent);
- int ret, words_till_page_break;
const struct ihex_binrec *rec;
- struct ihex_binrec *rec_new;
+ int ret;
- ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_JUMP_TO_BOOTLOADER, 1,
- false);
- if (ret)
+ ret = i2c_smbus_write_byte_data(client,
+ ZIIRAVE_CMD_JUMP_TO_BOOTLOADER,
+ ZIIRAVE_CMD_JUMP_TO_BOOTLOADER_MAGIC);
+ if (ret) {
+ dev_err(&client->dev, "Failed to jump to bootloader\n");
return ret;
+ }
msleep(500);
- ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_DOWNLOAD_START, 1, true);
- if (ret)
+ ret = i2c_smbus_write_byte(client, ZIIRAVE_CMD_DOWNLOAD_START);
+ if (ret) {
+ dev_err(&client->dev, "Failed to start download\n");
return ret;
+ }
+
+ ret = ziirave_firm_read_ack(wdd);
+ if (ret) {
+ dev_err(&client->dev, "No ACK for start download\n");
+ return ret;
+ }
msleep(500);
for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
- /* Zero length marks end of records */
- if (!be16_to_cpu(rec->len))
- break;
-
- /* Check max data size */
- if (be16_to_cpu(rec->len) > ZIIRAVE_FIRM_PKT_DATA_SIZE) {
- dev_err(&client->dev, "Firmware packet too long (%d)\n",
- be16_to_cpu(rec->len));
- return -EMSGSIZE;
- }
-
- /* Calculate words till page break */
- words_till_page_break = (64 - ((be32_to_cpu(rec->addr) >> 1) &
- 0x3f));
- if ((be16_to_cpu(rec->len) >> 1) > words_till_page_break) {
- /*
- * Data in passes page boundary, so we need to split in
- * two blocks of data. Create a packet with the first
- * block of data.
- */
- rec_new = kzalloc(sizeof(struct ihex_binrec) +
- (words_till_page_break << 1),
- GFP_KERNEL);
- if (!rec_new)
- return -ENOMEM;
-
- rec_new->len = cpu_to_be16(words_till_page_break << 1);
- rec_new->addr = rec->addr;
- memcpy(rec_new->data, rec->data,
- be16_to_cpu(rec_new->len));
-
- ret = ziirave_firm_write_pkt(wdd, rec_new);
- kfree(rec_new);
- if (ret)
- return ret;
-
- /* Create a packet with the second block of data */
- rec_new = kzalloc(sizeof(struct ihex_binrec) +
- be16_to_cpu(rec->len) -
- (words_till_page_break << 1),
- GFP_KERNEL);
- if (!rec_new)
- return -ENOMEM;
-
- /* Remaining bytes */
- rec_new->len = rec->len -
- cpu_to_be16(words_till_page_break << 1);
-
- rec_new->addr = cpu_to_be32(be32_to_cpu(rec->addr) +
- (words_till_page_break << 1));
-
- memcpy(rec_new->data,
- rec->data + (words_till_page_break << 1),
- be16_to_cpu(rec_new->len));
-
- ret = ziirave_firm_write_pkt(wdd, rec_new);
- kfree(rec_new);
- if (ret)
- return ret;
- } else {
- ret = ziirave_firm_write_pkt(wdd, rec);
- if (ret)
- return ret;
- }
+ ret = ziirave_firm_write_pkt(wdd, be32_to_cpu(rec->addr),
+ rec->data, be16_to_cpu(rec->len));
+ if (ret)
+ return ret;
}
- /* For end of download, the length field will be set to 0 */
- rec_new = kzalloc(sizeof(struct ihex_binrec) + 1, GFP_KERNEL);
- if (!rec_new)
- return -ENOMEM;
-
- ret = ziirave_firm_write_pkt(wdd, rec_new);
- kfree(rec_new);
+ /*
+ * Finish the firmware download process by sending a
+ * zero-length payload.
+ */
+ ret = ziirave_firm_write_pkt(wdd, 0, NULL, 0);
if (ret) {
dev_err(&client->dev, "Failed to send EMPTY packet: %d\n", ret);
return ret;
@@ -437,15 +398,22 @@ static int ziirave_firm_upload(struct watchdog_device *wdd,
}
/* End download operation */
- ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_DOWNLOAD_END, 1, false);
- if (ret)
+ ret = i2c_smbus_write_byte(client, ZIIRAVE_CMD_DOWNLOAD_END);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to end firmware download: %d\n", ret);
return ret;
+ }
/* Reset the processor */
- ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_RESET_PROCESSOR, 1,
- false);
- if (ret)
+ ret = i2c_smbus_write_byte_data(client,
+ ZIIRAVE_CMD_RESET_PROCESSOR,
+ ZIIRAVE_CMD_RESET_PROCESSOR_MAGIC);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to reset the watchdog: %d\n", ret);
return ret;
+ }
msleep(500);
@@ -478,7 +446,7 @@ static ssize_t ziirave_wdt_sysfs_show_firm(struct device *dev,
if (ret)
return ret;
- ret = sprintf(buf, "02.%02u.%02u", w_priv->firmware_rev.major,
+ ret = sprintf(buf, ZIIRAVE_FW_VERSION_FMT, w_priv->firmware_rev.major,
w_priv->firmware_rev.minor);
mutex_unlock(&w_priv->sysfs_mutex);
@@ -501,7 +469,7 @@ static ssize_t ziirave_wdt_sysfs_show_boot(struct device *dev,
if (ret)
return ret;
- ret = sprintf(buf, "01.%02u.%02u", w_priv->bootloader_rev.major,
+ ret = sprintf(buf, ZIIRAVE_BL_VERSION_FMT, w_priv->bootloader_rev.major,
w_priv->bootloader_rev.minor);
mutex_unlock(&w_priv->sysfs_mutex);
@@ -568,7 +536,8 @@ static ssize_t ziirave_wdt_sysfs_store_firm(struct device *dev,
goto unlock_mutex;
}
- dev_info(&client->dev, "Firmware updated to version 02.%02u.%02u\n",
+ dev_info(&client->dev,
+ "Firmware updated to version " ZIIRAVE_FW_VERSION_FMT "\n",
w_priv->firmware_rev.major, w_priv->firmware_rev.minor);
/* Restore the watchdog timeout */
@@ -611,7 +580,7 @@ static int ziirave_wdt_init_duration(struct i2c_client *client)
&reset_duration);
if (ret) {
dev_info(&client->dev,
- "Unable to set reset pulse duration, using default\n");
+ "No reset pulse duration specified, using default\n");
return 0;
}
}
@@ -633,7 +602,10 @@ static int ziirave_wdt_probe(struct i2c_client *client,
struct ziirave_wdt_data *w_priv;
int val;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA))
return -ENODEV;
w_priv = devm_kzalloc(&client->dev, sizeof(*w_priv), GFP_KERNEL);
@@ -658,57 +630,80 @@ static int ziirave_wdt_probe(struct i2c_client *client,
*/
if (w_priv->wdd.timeout == 0) {
val = i2c_smbus_read_byte_data(client, ZIIRAVE_WDT_TIMEOUT);
- if (val < 0)
+ if (val < 0) {
+ dev_err(&client->dev, "Failed to read timeout\n");
return val;
+ }
- if (val < ZIIRAVE_TIMEOUT_MIN)
- return -ENODEV;
+ if (val > ZIIRAVE_TIMEOUT_MAX ||
+ val < ZIIRAVE_TIMEOUT_MIN)
+ val = ZIIRAVE_TIMEOUT_DEFAULT;
w_priv->wdd.timeout = val;
- } else {
- ret = ziirave_wdt_set_timeout(&w_priv->wdd,
- w_priv->wdd.timeout);
- if (ret)
- return ret;
+ }
- dev_info(&client->dev, "Timeout set to %ds.",
- w_priv->wdd.timeout);
+ ret = ziirave_wdt_set_timeout(&w_priv->wdd, w_priv->wdd.timeout);
+ if (ret) {
+ dev_err(&client->dev, "Failed to set timeout\n");
+ return ret;
}
+ dev_info(&client->dev, "Timeout set to %ds\n", w_priv->wdd.timeout);
+
watchdog_set_nowayout(&w_priv->wdd, nowayout);
i2c_set_clientdata(client, w_priv);
/* If in unconfigured state, set to stopped */
val = i2c_smbus_read_byte_data(client, ZIIRAVE_WDT_STATE);
- if (val < 0)
+ if (val < 0) {
+ dev_err(&client->dev, "Failed to read state\n");
return val;
+ }
if (val == ZIIRAVE_STATE_INITIAL)
ziirave_wdt_stop(&w_priv->wdd);
ret = ziirave_wdt_init_duration(client);
- if (ret)
+ if (ret) {
+ dev_err(&client->dev, "Failed to init duration\n");
return ret;
+ }
ret = ziirave_wdt_revision(client, &w_priv->firmware_rev,
ZIIRAVE_WDT_FIRM_VER_MAJOR);
- if (ret)
+ if (ret) {
+ dev_err(&client->dev, "Failed to read firmware version\n");
return ret;
+ }
+
+ dev_info(&client->dev,
+ "Firmware version: " ZIIRAVE_FW_VERSION_FMT "\n",
+ w_priv->firmware_rev.major, w_priv->firmware_rev.minor);
ret = ziirave_wdt_revision(client, &w_priv->bootloader_rev,
ZIIRAVE_WDT_BOOT_VER_MAJOR);
- if (ret)
+ if (ret) {
+ dev_err(&client->dev, "Failed to read bootloader version\n");
return ret;
+ }
+
+ dev_info(&client->dev,
+ "Bootloader version: " ZIIRAVE_BL_VERSION_FMT "\n",
+ w_priv->bootloader_rev.major, w_priv->bootloader_rev.minor);
w_priv->reset_reason = i2c_smbus_read_byte_data(client,
ZIIRAVE_WDT_RESET_REASON);
- if (w_priv->reset_reason < 0)
+ if (w_priv->reset_reason < 0) {
+ dev_err(&client->dev, "Failed to read reset reason\n");
return w_priv->reset_reason;
+ }
if (w_priv->reset_reason >= ARRAY_SIZE(ziirave_reasons) ||
- !ziirave_reasons[w_priv->reset_reason])
+ !ziirave_reasons[w_priv->reset_reason]) {
+ dev_err(&client->dev, "Invalid reset reason\n");
return -ENODEV;
+ }
ret = watchdog_register_device(&w_priv->wdd);
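The rewritten download path above replaces the old open-coded 64-word page math with byte-based ALIGN_DOWN() arithmetic: __ziirave_firm_write_pkt() never crosses a 128-byte flash page, and the ziirave_firm_write_pkt() wrapper splits a record in two when it would. The boundary computation is easy to sanity-check in isolation; the following stand-alone sketch mirrors it (ALIGN_DOWN simplified for power-of-two sizes):

#include <stdio.h>

#define FW_PAGE_SIZE	128u
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* 'a' must be a power of two */

/* Bytes that fit before 'addr' hits the next flash-page boundary. */
static unsigned int bytes_to_page_end(unsigned int addr)
{
	return FW_PAGE_SIZE - (addr - ALIGN_DOWN(addr, FW_PAGE_SIZE));
}

int main(void)
{
	/* A 16-byte record starting 4 bytes before a boundary: split 4 + 12. */
	unsigned int addr = 2 * 0x1600 + FW_PAGE_SIZE - 4;
	unsigned int len = 16;
	unsigned int first = bytes_to_page_end(addr);

	if (len > first)
		printf("split: %u bytes @0x%x, then %u bytes @0x%x\n",
		       first, addr, len - first, addr + first);
	return 0;
}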
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 2e8570c09789..6c8843968a52 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -247,7 +247,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
*/
unsigned int evtchn_from_irq(unsigned irq)
{
- if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
+ if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))
return 0;
return info_for_irq(irq)->evtchn;
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 3eeb9bea7630..224df03ce42e 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -17,6 +17,8 @@
#include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>
+
+static int xen_mcfg_late(void);
#endif
static bool __read_mostly pci_seg_supported = true;
@@ -28,7 +30,18 @@ static int xen_add_device(struct device *dev)
#ifdef CONFIG_PCI_IOV
struct pci_dev *physfn = pci_dev->physfn;
#endif
-
+#ifdef CONFIG_PCI_MMCONFIG
+ static bool pci_mcfg_reserved = false;
+ /*
+ * Reserve MCFG areas in Xen on the first invocation, since this
+ * may be called from inside acpi_init, immediately after the
+ * MCFG table has been parsed.
+ */
+ if (!pci_mcfg_reserved) {
+ xen_mcfg_late();
+ pci_mcfg_reserved = true;
+ }
+#endif
if (pci_seg_supported) {
struct {
struct physdev_pci_device_add add;
@@ -201,7 +214,7 @@ static int __init register_xen_pci_notifier(void)
arch_initcall(register_xen_pci_notifier);
#ifdef CONFIG_PCI_MMCONFIG
-static int __init xen_mcfg_late(void)
+static int xen_mcfg_late(void)
{
struct pci_mmcfg_region *cfg;
int rc;
@@ -240,8 +253,4 @@ static int __init xen_mcfg_late(void)
}
return 0;
}
-/*
- * Needs to be done after acpi_init which are subsys_initcall.
- */
-subsys_initcall_sync(xen_mcfg_late);
#endif
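With the subsys_initcall gone, xen_mcfg_late() is now invoked lazily from xen_add_device(): a function-local static guarantees the MCFG reservation runs exactly once, on the first device added after ACPI has parsed the table, instead of depending on fragile initcall ordering. The idiom in the abstract, with hypothetical names and a single-threaded caller assumed (as with the PCI bus notifier here):

static int my_one_time_setup(void);

static void my_add_device(struct device *dev)
{
	static bool setup_done;

	/* Lazily run setup on first use rather than via an initcall. */
	if (!setup_done) {
		my_one_time_setup();	/* result ignored, as in the patch */
		setup_done = true;
	}
	/* ... per-device work ... */
}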
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 58c9365fa217..bd3a10dfac15 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -39,6 +39,7 @@
#include <asm/xen/page-coherent.h>
#include <trace/events/swiotlb.h>
+#define MAX_DMA_BITS 32
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
@@ -115,8 +116,6 @@ static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
return 0;
}
-static int max_dma_bits = 32;
-
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
@@ -136,7 +135,7 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
p + (i << IO_TLB_SHIFT),
get_order(slabs << IO_TLB_SHIFT),
dma_bits, &dma_handle);
- } while (rc && dma_bits++ < max_dma_bits);
+ } while (rc && dma_bits++ < MAX_DMA_BITS);
if (rc)
return rc;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d4e11b2e04f6..ad4c6b1d5074 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -670,26 +670,6 @@ out:
* libraries. There is no binary dependent code anywhere else.
*/
-#ifndef STACK_RND_MASK
-#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
-#endif
-
-static unsigned long randomize_stack_top(unsigned long stack_top)
-{
- unsigned long random_variable = 0;
-
- if (current->flags & PF_RANDOMIZE) {
- random_variable = get_random_long();
- random_variable &= STACK_RND_MASK;
- random_variable <<= PAGE_SHIFT;
- }
-#ifdef CONFIG_STACK_GROWSUP
- return PAGE_ALIGN(stack_top) + random_variable;
-#else
- return PAGE_ALIGN(stack_top) - random_variable;
-#endif
-}
-
static int load_elf_binary(struct linux_binprm *bprm)
{
struct file *interpreter = NULL; /* to shut gcc up */
@@ -1141,7 +1121,8 @@ out_free_interp:
* (since it grows up, and may collide early with the stack
* growing down), and into the unused ELF_ET_DYN_BASE region.
*/
- if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter)
+ if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
+ loc->elf_ex.e_type == ET_DYN && !interpreter)
current->mm->brk = current->mm->start_brk =
ELF_ET_DYN_BASE;
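The deleted helper is not lost: this appears to be the binfmt_elf side of hoisting randomize_stack_top() into generic code (part of the arch mmap-layout consolidation visible elsewhere in this series), while the second hunk additionally restricts the brk relocation to ET_DYN executables. The randomization math merits a worked example: with PAGE_SHIFT = 12, STACK_RND_MASK is 0x7ff, so the offset is (random & 0x7ff) << 12, up to 8 MiB minus one page in page-sized steps, subtracted from the page-aligned stack top (added, when the stack grows up). A user-space sketch of the same arithmetic:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */

/* Same math as the (relocated) kernel helper, stack growing down. */
static unsigned long randomize_stack_top(unsigned long stack_top,
					 unsigned long rnd)
{
	unsigned long random_variable = (rnd & STACK_RND_MASK) << PAGE_SHIFT;

	return PAGE_ALIGN(stack_top) - random_variable;
}

int main(void)
{
	unsigned long top = 0x7ffffffff000UL;

	/* maximum offset: 0x7ff pages, i.e. 8 MiB - 4 KiB below the top */
	printf("%#lx\n", randomize_stack_top(top, 0x7ff));
	return 0;
}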
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index a699e320393f..c1da294418d1 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_CEPH_FS) += ceph.o
ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
- export.o caps.o snap.o xattr.o quota.o \
+ export.o caps.o snap.o xattr.o quota.o io.o \
mds_client.o mdsmap.o strings.o ceph_frag.o \
debugfs.o
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index b3c8b886bf64..7ab616601141 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -189,8 +189,7 @@ static int ceph_do_readpage(struct file *filp, struct page *page)
{
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc =
- &ceph_inode_to_client(inode)->client->osdc;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
int err = 0;
u64 off = page_offset(page);
u64 len = PAGE_SIZE;
@@ -219,8 +218,8 @@ static int ceph_do_readpage(struct file *filp, struct page *page)
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
- err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
- off, &len,
+ err = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
+ &ci->i_layout, off, &len,
ci->i_truncate_seq, ci->i_truncate_size,
&page, 1, 0);
if (err == -ENOENT)
@@ -228,6 +227,8 @@ static int ceph_do_readpage(struct file *filp, struct page *page)
if (err < 0) {
SetPageError(page);
ceph_fscache_readpage_cancel(inode, page);
+ if (err == -EBLACKLISTED)
+ fsc->blacklisted = true;
goto out;
}
if (err < PAGE_SIZE)
@@ -266,6 +267,8 @@ static void finish_read(struct ceph_osd_request *req)
int i;
dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
+ if (rc == -EBLACKLISTED)
+ ceph_inode_to_client(inode)->blacklisted = true;
/* unlock all pages, zeroing any data we didn't read */
osd_data = osd_req_op_extent_osd_data(req, 0);
@@ -323,7 +326,8 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
/* caller of readpages does not hold buffer and read caps
* (fadvise, madvise and readahead cases) */
int want = CEPH_CAP_FILE_CACHE;
- ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, true, &got);
+ ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want,
+ true, &got);
if (ret < 0) {
dout("start_read %p, error getting cap\n", inode);
} else if (!(got & want)) {
@@ -569,7 +573,7 @@ static u64 get_writepages_data_length(struct inode *inode,
/*
* Write a single page, but leave the page locked.
*
- * If we get a write error, set the page error bit, but still adjust the
+ * If we get a write error, mark the mapping for error, but still adjust the
* dirty page accounting (i.e., page is no longer dirty).
*/
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
@@ -640,9 +644,10 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
end_page_writeback(page);
return err;
}
+ if (err == -EBLACKLISTED)
+ fsc->blacklisted = true;
dout("writepage setting page/mapping error %d %p\n",
err, page);
- SetPageError(page);
mapping_set_error(&inode->i_data, err);
wbc->pages_skipped++;
} else {
@@ -680,23 +685,6 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
}
/*
- * lame release_pages helper. release_pages() isn't exported to
- * modules.
- */
-static void ceph_release_pages(struct page **pages, int num)
-{
- struct pagevec pvec;
- int i;
-
- pagevec_init(&pvec);
- for (i = 0; i < num; i++) {
- if (pagevec_add(&pvec, pages[i]) == 0)
- pagevec_release(&pvec);
- }
- pagevec_release(&pvec);
-}
-
-/*
* async writeback completion handler.
*
* If we get an error, set the mapping error bit, but not the individual
@@ -720,6 +708,8 @@ static void writepages_finish(struct ceph_osd_request *req)
if (rc < 0) {
mapping_set_error(mapping, rc);
ceph_set_error_write(ci);
+ if (rc == -EBLACKLISTED)
+ fsc->blacklisted = true;
} else {
ceph_clear_error_write(ci);
}
@@ -769,7 +759,7 @@ static void writepages_finish(struct ceph_osd_request *req)
dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
inode, osd_data->length, rc >= 0 ? num_pages : 0);
- ceph_release_pages(osd_data->pages, num_pages);
+ release_pages(osd_data->pages, num_pages);
}
ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);
@@ -1452,7 +1442,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
want = CEPH_CAP_FILE_CACHE;
got = 0;
- err = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
+ err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1,
+ &got, &pinned_page);
if (err < 0)
goto out_restore;
@@ -1540,6 +1531,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
if (!prealloc_cf)
return VM_FAULT_OOM;
+ sb_start_pagefault(inode->i_sb);
ceph_block_sigs(&oldset);
if (ci->i_inline_version != CEPH_INLINE_NONE) {
@@ -1568,7 +1560,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
want = CEPH_CAP_FILE_BUFFER;
got = 0;
- err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
+ err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len,
&got, NULL);
if (err < 0)
goto out_free;
@@ -1614,6 +1606,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
ceph_put_cap_refs(ci, got);
out_free:
ceph_restore_sigs(&oldset);
+ sb_end_pagefault(inode->i_sb);
ceph_free_cap_flush(prealloc_cf);
if (err < 0)
ret = vmf_error(err);
@@ -1946,12 +1939,17 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
if (err >= 0 || err == -ENOENT)
have |= POOL_READ;
- else if (err != -EPERM)
+ else if (err != -EPERM) {
+ if (err == -EBLACKLISTED)
+ fsc->blacklisted = true;
goto out_unlock;
+ }
if (err2 == 0 || err2 == -EEXIST)
have |= POOL_WRITE;
else if (err2 != -EPERM) {
+ if (err2 == -EBLACKLISTED)
+ fsc->blacklisted = true;
err = err2;
goto out_unlock;
}
@@ -1989,10 +1987,11 @@ out:
return err;
}
-int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
+int ceph_pool_perm_check(struct inode *inode, int need)
{
- s64 pool;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_string *pool_ns;
+ s64 pool;
int ret, flags;
if (ci->i_vino.snap != CEPH_NOSNAP) {
@@ -2004,7 +2003,7 @@ int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
return 0;
}
- if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
+ if (ceph_test_mount_opt(ceph_inode_to_client(inode),
NOPOOLPERM))
return 0;
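A pattern recurs throughout this file (and again in file.c below): every OSD completion path now checks for -EBLACKLISTED and latches fsc->blacklisted, so the superblock remembers that the cluster has blacklisted this client. Were one to factor out the repetition, a helper along these lines would capture it; this is hypothetical, the patch deliberately open-codes the check at each site:

/* Hypothetical convenience wrapper; not part of the patch. */
static inline int ceph_note_blacklisted(struct ceph_fs_client *fsc, int err)
{
	if (err == -EBLACKLISTED)
		fsc->blacklisted = true;	/* sticky until recovery */
	return err;
}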
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index bc90cf6ad7ed..b2ec29eeb4c4 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -6,6 +6,8 @@
* Written by Milosz Tanski ([email protected])
*/
+#include <linux/ceph/ceph_debug.h>
+
#include "super.h"
#include "cache.h"
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ce0f5658720a..d3b9c9d5c1bd 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -458,37 +458,6 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
}
/*
- * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
- */
-static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
-{
- struct ceph_cap *cap;
- int mds = -1;
- struct rb_node *p;
-
- /* prefer mds with WR|BUFFER|EXCL caps */
- for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
- cap = rb_entry(p, struct ceph_cap, ci_node);
- mds = cap->mds;
- if (cap->issued & (CEPH_CAP_FILE_WR |
- CEPH_CAP_FILE_BUFFER |
- CEPH_CAP_FILE_EXCL))
- break;
- }
- return mds;
-}
-
-int ceph_get_cap_mds(struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- int mds;
- spin_lock(&ci->i_ceph_lock);
- mds = __ceph_get_cap_mds(ceph_inode(inode));
- spin_unlock(&ci->i_ceph_lock);
- return mds;
-}
-
-/*
* Called under i_ceph_lock.
*/
static void __insert_cap_node(struct ceph_inode_info *ci,
@@ -628,7 +597,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
/*
* Add a capability under the given MDS session.
*
- * Caller should hold session snap_rwsem (read) and s_mutex.
+ * Caller should hold session snap_rwsem (read) and ci->i_ceph_lock
*
* @fmode is the open file mode, if we are opening a file, otherwise
* it is < 0. (This is so we can atomically add the cap and add an
@@ -645,6 +614,9 @@ void ceph_add_cap(struct inode *inode,
struct ceph_cap *cap;
int mds = session->s_mds;
int actual_wanted;
+ u32 gen;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
session->s_mds, cap_id, ceph_cap_string(issued), seq);
@@ -656,6 +628,10 @@ void ceph_add_cap(struct inode *inode,
if (fmode >= 0)
wanted |= ceph_caps_for_mode(fmode);
+ spin_lock(&session->s_gen_ttl_lock);
+ gen = session->s_cap_gen;
+ spin_unlock(&session->s_gen_ttl_lock);
+
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
cap = *new_cap;
@@ -681,7 +657,7 @@ void ceph_add_cap(struct inode *inode,
list_move_tail(&cap->session_caps, &session->s_caps);
spin_unlock(&session->s_cap_lock);
- if (cap->cap_gen < session->s_cap_gen)
+ if (cap->cap_gen < gen)
cap->issued = cap->implemented = CEPH_CAP_PIN;
/*
@@ -775,7 +751,7 @@ void ceph_add_cap(struct inode *inode,
cap->seq = seq;
cap->issue_seq = seq;
cap->mseq = mseq;
- cap->cap_gen = session->s_cap_gen;
+ cap->cap_gen = gen;
if (fmode >= 0)
__ceph_get_fmode(ci, fmode);
@@ -1284,10 +1260,6 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
* Make note of max_size reported/requested from mds, revoked caps
* that have now been implemented.
*
- * Make half-hearted attempt ot to invalidate page cache if we are
- * dropping RDCACHE. Note that this will leave behind locked pages
- * that we'll then need to deal with elsewhere.
- *
* Return non-zero if delayed release, or we experienced an error
* such that the caller should requeue + retry later.
*
@@ -1746,11 +1718,11 @@ static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
* Add dirty inode to the flushing list. Assigned a seq number so we
* can wait for caps to flush without starving.
*
- * Called under i_ceph_lock.
+ * Called under i_ceph_lock. Returns the flush tid.
*/
-static int __mark_caps_flushing(struct inode *inode,
+static u64 __mark_caps_flushing(struct inode *inode,
struct ceph_mds_session *session, bool wake,
- u64 *flush_tid, u64 *oldest_flush_tid)
+ u64 *oldest_flush_tid)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -1789,8 +1761,7 @@ static int __mark_caps_flushing(struct inode *inode,
list_add_tail(&cf->i_list, &ci->i_cap_flush_list);
- *flush_tid = cf->tid;
- return flushing;
+ return cf->tid;
}
/*
@@ -2028,11 +1999,6 @@ retry_locked:
}
ack:
- if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
- dout(" skipping %p I_NOFLUSH set\n", inode);
- continue;
- }
-
if (session && session != cap->session) {
dout("oops, wrong session %p mutex\n", session);
mutex_unlock(&session->s_mutex);
@@ -2080,9 +2046,9 @@ ack:
}
if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
- flushing = __mark_caps_flushing(inode, session, false,
- &flush_tid,
- &oldest_flush_tid);
+ flushing = ci->i_dirty_caps;
+ flush_tid = __mark_caps_flushing(inode, session, false,
+ &oldest_flush_tid);
} else {
flushing = 0;
flush_tid = 0;
@@ -2130,16 +2096,11 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
retry:
spin_lock(&ci->i_ceph_lock);
retry_locked:
- if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
- spin_unlock(&ci->i_ceph_lock);
- dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
- goto out;
- }
if (ci->i_dirty_caps && ci->i_auth_cap) {
struct ceph_cap *cap = ci->i_auth_cap;
int delayed;
- if (!session || session != cap->session) {
+ if (session != cap->session) {
spin_unlock(&ci->i_ceph_lock);
if (session)
mutex_unlock(&session->s_mutex);
@@ -2161,8 +2122,9 @@ retry_locked:
goto retry_locked;
}
- flushing = __mark_caps_flushing(inode, session, true,
- &flush_tid, &oldest_flush_tid);
+ flushing = ci->i_dirty_caps;
+ flush_tid = __mark_caps_flushing(inode, session, true,
+ &oldest_flush_tid);
/* __send_cap drops i_ceph_lock */
delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
@@ -2261,35 +2223,45 @@ static int unsafe_request_wait(struct inode *inode)
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
+ struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 flush_tid;
- int ret;
+ int ret, err;
int dirty;
dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
ret = file_write_and_wait_range(file, start, end);
- if (ret < 0)
- goto out;
-
if (datasync)
goto out;
dirty = try_flush_caps(inode, &flush_tid);
dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
- ret = unsafe_request_wait(inode);
+ err = unsafe_request_wait(inode);
/*
* only wait on non-file metadata writeback (the mds
* can recover size and mtime, so we don't need to
* wait for that)
*/
- if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
- ret = wait_event_interruptible(ci->i_cap_wq,
+ if (!err && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
+ err = wait_event_interruptible(ci->i_cap_wq,
caps_are_flushed(inode, flush_tid));
}
+
+ if (err < 0)
+ ret = err;
+
+ if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
+ spin_lock(&file->f_lock);
+ err = errseq_check_and_advance(&ci->i_meta_err,
+ &fi->meta_err);
+ spin_unlock(&file->f_lock);
+ if (err < 0)
+ ret = err;
+ }
out:
dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
return ret;
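ceph_fsync() now surfaces metadata writeback errors through the errseq machinery from <linux/errseq.h>: the inode carries an errseq_t (ci->i_meta_err), each open file samples it at open time into fi->meta_err (see the ceph_init_file_info() change in file.c below), error paths record failures with errseq_set() (see the mds_client.c hunk at the end of this section), and fsync reports each error exactly once per file description. The lifecycle, as a hedged sketch with ci/fi standing in for the real structures:

#include <linux/errseq.h>

/* 1) open: remember where this file's error cursor starts */
fi->meta_err = errseq_sample(&ci->i_meta_err);

/* 2) failure (e.g. a dropped unsafe MDS request): record it */
errseq_set(&ci->i_meta_err, -EIO);

/* 3) fsync: report the error once, then advance the cursor */
if (errseq_check(&ci->i_meta_err, fi->meta_err))
	err = errseq_check_and_advance(&ci->i_meta_err, &fi->meta_err);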
@@ -2560,10 +2532,15 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got,
*
* FIXME: how does a 0 return differ from -EAGAIN?
*/
-static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
- loff_t endoff, bool nonblock, int *got)
+enum {
+ NON_BLOCKING = 1,
+ CHECK_FILELOCK = 2,
+};
+
+static int try_get_cap_refs(struct inode *inode, int need, int want,
+ loff_t endoff, int flags, int *got)
{
- struct inode *inode = &ci->vfs_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
int ret = 0;
int have, implemented;
@@ -2576,6 +2553,13 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
again:
spin_lock(&ci->i_ceph_lock);
+ if ((flags & CHECK_FILELOCK) &&
+ (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) {
+ dout("try_get_cap_refs %p error filelock\n", inode);
+ ret = -EIO;
+ goto out_unlock;
+ }
+
/* make sure file is actually open */
file_wanted = __ceph_caps_file_wanted(ci);
if ((file_wanted & need) != need) {
@@ -2637,7 +2621,7 @@ again:
* we can not call down_read() when
* task isn't in TASK_RUNNING state
*/
- if (nonblock) {
+ if (flags & NON_BLOCKING) {
ret = -EAGAIN;
goto out_unlock;
}
@@ -2731,18 +2715,19 @@ static void check_max_size(struct inode *inode, loff_t endoff)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}
-int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want,
+int ceph_try_get_caps(struct inode *inode, int need, int want,
bool nonblock, int *got)
{
int ret;
BUG_ON(need & ~CEPH_CAP_FILE_RD);
BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO|CEPH_CAP_FILE_SHARED));
- ret = ceph_pool_perm_check(ci, need);
+ ret = ceph_pool_perm_check(inode, need);
if (ret < 0)
return ret;
- ret = try_get_cap_refs(ci, need, want, 0, nonblock, got);
+ ret = try_get_cap_refs(inode, need, want, 0,
+ (nonblock ? NON_BLOCKING : 0), got);
return ret == -EAGAIN ? 0 : ret;
}
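try_get_cap_refs() trades its bool nonblock parameter for a small flags word, which lets ceph_get_caps() below combine behaviours independently: it ORs in NON_BLOCKING only for the retries inside its wait loop, while CHECK_FILELOCK is decided per call from fi->num_locks. The usual shape of such a bool-to-flags conversion, sketched with hypothetical names:

enum {
	NON_BLOCKING	= 1,
	CHECK_FILELOCK	= 2,
};

/* was: static int try_get(struct inode *inode, ..., bool nonblock) */
static int try_get(struct inode *inode, int flags)
{
	if (flags & CHECK_FILELOCK) {
		/* fail fast if a file-lock error was recorded */
	}
	if (flags & NON_BLOCKING) {
		/* return -EAGAIN rather than sleeping */
	}
	return 0;
}

/* existing callers translate the old bool: (nonblock ? NON_BLOCKING : 0) */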
@@ -2751,30 +2736,40 @@ int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want,
* due to a small max_size, make sure we check_max_size (and possibly
* ask the mds) so we don't get hung up indefinitely.
*/
-int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
+int ceph_get_caps(struct file *filp, int need, int want,
loff_t endoff, int *got, struct page **pinned_page)
{
- int _got, ret;
+ struct ceph_file_info *fi = filp->private_data;
+ struct inode *inode = file_inode(filp);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ int ret, _got, flags;
- ret = ceph_pool_perm_check(ci, need);
+ ret = ceph_pool_perm_check(inode, need);
if (ret < 0)
return ret;
+ if ((fi->fmode & CEPH_FILE_MODE_WR) &&
+ fi->filp_gen != READ_ONCE(fsc->filp_gen))
+ return -EBADF;
+
while (true) {
if (endoff > 0)
- check_max_size(&ci->vfs_inode, endoff);
+ check_max_size(inode, endoff);
+ flags = atomic_read(&fi->num_locks) ? CHECK_FILELOCK : 0;
_got = 0;
- ret = try_get_cap_refs(ci, need, want, endoff,
- false, &_got);
+ ret = try_get_cap_refs(inode, need, want, endoff,
+ flags, &_got);
if (ret == -EAGAIN)
continue;
if (!ret) {
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&ci->i_cap_wq, &wait);
- while (!(ret = try_get_cap_refs(ci, need, want, endoff,
- true, &_got))) {
+ flags |= NON_BLOCKING;
+ while (!(ret = try_get_cap_refs(inode, need, want,
+ endoff, flags, &_got))) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -2786,10 +2781,18 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
if (ret == -EAGAIN)
continue;
}
+
+ if ((fi->fmode & CEPH_FILE_MODE_WR) &&
+ fi->filp_gen != READ_ONCE(fsc->filp_gen)) {
+ if (ret >= 0 && _got)
+ ceph_put_cap_refs(ci, _got);
+ return -EBADF;
+ }
+
if (ret < 0) {
if (ret == -ESTALE) {
/* session was killed, try renew caps */
- ret = ceph_renew_caps(&ci->vfs_inode);
+ ret = ceph_renew_caps(inode);
if (ret == 0)
continue;
}
@@ -2798,9 +2801,9 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
if (ci->i_inline_version != CEPH_INLINE_NONE &&
(_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
- i_size_read(&ci->vfs_inode) > 0) {
+ i_size_read(inode) > 0) {
struct page *page =
- find_get_page(ci->vfs_inode.i_mapping, 0);
+ find_get_page(inode->i_mapping, 0);
if (page) {
if (PageUptodate(page)) {
*pinned_page = page;
@@ -2819,7 +2822,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
* getattr request will bring inline data into
* page cache
*/
- ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
+ ret = __ceph_do_getattr(inode, NULL,
CEPH_STAT_CAP_INLINE_DATA,
true);
if (ret < 0)
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 2eb88ed22993..facb387c2735 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -294,7 +294,6 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
- return 0;
}
void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 15ff1b09cfa2..b6bfa94332c3 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -35,7 +35,7 @@ struct ceph_nfs_snapfh {
static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
- const static int snap_handle_length =
+ static const int snap_handle_length =
sizeof(struct ceph_nfs_snapfh) >> 2;
struct ceph_nfs_snapfh *sfh = (void *)rawfh;
u64 snapid = ceph_snap(inode);
@@ -85,9 +85,9 @@ out:
static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
- const static int handle_length =
+ static const int handle_length =
sizeof(struct ceph_nfs_fh) >> 2;
- const static int connected_handle_length =
+ static const int connected_handle_length =
sizeof(struct ceph_nfs_confh) >> 2;
int type;
@@ -458,33 +458,33 @@ static int __get_snap_name(struct dentry *parent, char *name,
if (err < 0)
goto out;
- rinfo = &req->r_reply_info;
- for (i = 0; i < rinfo->dir_nr; i++) {
- rde = rinfo->dir_entries + i;
- BUG_ON(!rde->inode.in);
- if (ceph_snap(inode) ==
- le64_to_cpu(rde->inode.in->snapid)) {
- memcpy(name, rde->name, rde->name_len);
- name[rde->name_len] = '\0';
- err = 0;
- goto out;
- }
- }
-
- if (rinfo->dir_end)
- break;
-
- BUG_ON(rinfo->dir_nr <= 0);
- rde = rinfo->dir_entries + (rinfo->dir_nr - 1);
- next_offset += rinfo->dir_nr;
- last_name = kstrndup(rde->name, rde->name_len, GFP_KERNEL);
- if (!last_name) {
- err = -ENOMEM;
- goto out;
- }
-
- ceph_mdsc_put_request(req);
- req = NULL;
+ rinfo = &req->r_reply_info;
+ for (i = 0; i < rinfo->dir_nr; i++) {
+ rde = rinfo->dir_entries + i;
+ BUG_ON(!rde->inode.in);
+ if (ceph_snap(inode) ==
+ le64_to_cpu(rde->inode.in->snapid)) {
+ memcpy(name, rde->name, rde->name_len);
+ name[rde->name_len] = '\0';
+ err = 0;
+ goto out;
+ }
+ }
+
+ if (rinfo->dir_end)
+ break;
+
+ BUG_ON(rinfo->dir_nr <= 0);
+ rde = rinfo->dir_entries + (rinfo->dir_nr - 1);
+ next_offset += rinfo->dir_nr;
+ last_name = kstrndup(rde->name, rde->name_len, GFP_KERNEL);
+ if (!last_name) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ceph_mdsc_put_request(req);
+ req = NULL;
}
err = -ENOENT;
out:
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 685a03cc4b77..d277f71abe0b 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -15,6 +15,7 @@
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "io.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
@@ -201,6 +202,7 @@ out:
static int ceph_init_file_info(struct inode *inode, struct file *file,
int fmode, bool isdir)
{
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi;
dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
@@ -211,7 +213,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
struct ceph_dir_file_info *dfi =
kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
if (!dfi) {
- ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
+ ceph_put_fmode(ci, fmode); /* clean up */
return -ENOMEM;
}
@@ -222,7 +224,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
} else {
fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
if (!fi) {
- ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
+ ceph_put_fmode(ci, fmode); /* clean up */
return -ENOMEM;
}
@@ -232,6 +234,8 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
fi->fmode = fmode;
spin_lock_init(&fi->rw_contexts_lock);
INIT_LIST_HEAD(&fi->rw_contexts);
+ fi->meta_err = errseq_sample(&ci->i_meta_err);
+ fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
return 0;
}
@@ -695,7 +699,13 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
ceph_release_page_vector(pages, num_pages);
}
- if (ret <= 0 || off >= i_size || !more)
+ if (ret < 0) {
+ if (ret == -EBLACKLISTED)
+ fsc->blacklisted = true;
+ break;
+ }
+
+ if (off >= i_size || !more)
break;
}
@@ -921,7 +931,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
struct ceph_aio_request *aio_req = NULL;
int num_pages = 0;
int flags;
- int ret;
+ int ret = 0;
struct timespec64 mtime = current_time(inode);
size_t count = iov_iter_count(iter);
loff_t pos = iocb->ki_pos;
@@ -935,11 +945,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
(write ? "write" : "read"), file, pos, (unsigned)count,
snapc, snapc ? snapc->seq : 0);
- ret = filemap_write_and_wait_range(inode->i_mapping,
- pos, pos + count - 1);
- if (ret < 0)
- return ret;
-
if (write) {
int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_SHIFT,
@@ -1260,7 +1265,8 @@ again:
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
want = CEPH_CAP_FILE_CACHE;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
+ ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1,
+ &got, &pinned_page);
if (ret < 0)
return ret;
@@ -1274,12 +1280,16 @@ again:
if (ci->i_inline_version == CEPH_INLINE_NONE) {
if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
+ ceph_start_io_direct(inode);
ret = ceph_direct_read_write(iocb, to,
NULL, NULL);
+ ceph_end_io_direct(inode);
if (ret >= 0 && ret < len)
retry_op = CHECK_EOF;
} else {
+ ceph_start_io_read(inode);
ret = ceph_sync_read(iocb, to, &retry_op);
+ ceph_end_io_read(inode);
}
} else {
retry_op = READ_INLINE;
@@ -1290,7 +1300,9 @@ again:
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
ceph_add_rw_context(fi, &rw_ctx);
+ ceph_start_io_read(inode);
ret = generic_file_read_iter(iocb, to);
+ ceph_end_io_read(inode);
ceph_del_rw_context(fi, &rw_ctx);
}
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
@@ -1399,7 +1411,10 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
return -ENOMEM;
retry_snap:
- inode_lock(inode);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_start_io_direct(inode);
+ else
+ ceph_start_io_write(inode);
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
@@ -1457,7 +1472,7 @@ retry_snap:
else
want = CEPH_CAP_FILE_BUFFER;
got = 0;
- err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
+ err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count,
&got, NULL);
if (err < 0)
goto out;
@@ -1470,7 +1485,6 @@ retry_snap:
(ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
struct ceph_snap_context *snapc;
struct iov_iter data;
- inode_unlock(inode);
spin_lock(&ci->i_ceph_lock);
if (__ceph_have_pending_cap_snap(ci)) {
@@ -1487,11 +1501,14 @@ retry_snap:
/* we might need to revert back to that point */
data = *from;
- if (iocb->ki_flags & IOCB_DIRECT)
+ if (iocb->ki_flags & IOCB_DIRECT) {
written = ceph_direct_read_write(iocb, &data, snapc,
&prealloc_cf);
- else
+ ceph_end_io_direct(inode);
+ } else {
written = ceph_sync_write(iocb, &data, pos, snapc);
+ ceph_end_io_write(inode);
+ }
if (written > 0)
iov_iter_advance(from, written);
ceph_put_snap_context(snapc);
@@ -1506,7 +1523,7 @@ retry_snap:
written = generic_perform_write(file, from, pos);
if (likely(written >= 0))
iocb->ki_pos = pos + written;
- inode_unlock(inode);
+ ceph_end_io_write(inode);
}
if (written >= 0) {
@@ -1541,9 +1558,11 @@ retry_snap:
}
goto out_unlocked;
-
out:
- inode_unlock(inode);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_write(inode);
out_unlocked:
ceph_free_cap_flush(prealloc_cf);
current->backing_dev_info = NULL;
@@ -1781,7 +1800,7 @@ static long ceph_fallocate(struct file *file, int mode,
else
want = CEPH_CAP_FILE_BUFFER;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
+ ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
if (ret < 0)
goto unlock;
@@ -1810,16 +1829,15 @@ unlock:
* src_ci. Two attempts are made to obtain both caps, and an error is returned if
* this fails; zero is returned on success.
*/
-static int get_rd_wr_caps(struct ceph_inode_info *src_ci,
- loff_t src_endoff, int *src_got,
- struct ceph_inode_info *dst_ci,
+static int get_rd_wr_caps(struct file *src_filp, int *src_got,
+ struct file *dst_filp,
loff_t dst_endoff, int *dst_got)
{
int ret = 0;
bool retrying = false;
retry_caps:
- ret = ceph_get_caps(dst_ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
+ ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
dst_endoff, dst_got, NULL);
if (ret < 0)
return ret;
@@ -1829,24 +1847,24 @@ retry_caps:
* we would risk a deadlock by using ceph_get_caps. Thus, we'll do some
* retry dance instead to try to get both capabilities.
*/
- ret = ceph_try_get_caps(src_ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
+ ret = ceph_try_get_caps(file_inode(src_filp),
+ CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
false, src_got);
if (ret <= 0) {
/* Start by dropping dst_ci caps and getting src_ci caps */
- ceph_put_cap_refs(dst_ci, *dst_got);
+ ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
if (retrying) {
if (!ret)
/* ceph_try_get_caps masks EAGAIN */
ret = -EAGAIN;
return ret;
}
- ret = ceph_get_caps(src_ci, CEPH_CAP_FILE_RD,
- CEPH_CAP_FILE_SHARED, src_endoff,
- src_got, NULL);
+ ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
+ CEPH_CAP_FILE_SHARED, -1, src_got, NULL);
if (ret < 0)
return ret;
/*... drop src_ci caps too, and retry */
- ceph_put_cap_refs(src_ci, *src_got);
+ ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
retrying = true;
goto retry_caps;
}
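get_rd_wr_caps() wants caps on two inodes at once; blocking for the second while holding the first risks an ABBA deadlock against another copy running in the opposite direction. The fix is the classic trylock-and-back-off dance: block for the destination caps, try the source caps non-blocking, and on failure drop everything, block on the one that was missed, release it, and retry the pair. A sketch of the pattern with plain mutexes standing in for cap acquisition (not the literal ceph code):

#include <linux/mutex.h>

static int lock_pair(struct mutex *dst, struct mutex *src)
{
	bool retrying = false;

retry:
	mutex_lock(dst);		/* blocking, like ceph_get_caps(dst) */
	if (mutex_trylock(src))		/* like ceph_try_get_caps(src) */
		return 0;		/* got both */

	mutex_unlock(dst);		/* back off completely */
	if (retrying)
		return -EAGAIN;		/* second failure: give up */

	mutex_lock(src);		/* block on the one we missed ... */
	mutex_unlock(src);		/* ... then release and retry the pair */
	retrying = true;
	goto retry;
}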
@@ -1904,6 +1922,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
struct ceph_inode_info *src_ci = ceph_inode(src_inode);
struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
struct ceph_cap_flush *prealloc_cf;
+ struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
struct ceph_object_locator src_oloc, dst_oloc;
struct ceph_object_id src_oid, dst_oid;
loff_t endoff = 0, size;
@@ -1913,10 +1932,16 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
int src_got = 0, dst_got = 0, err, dirty;
bool do_final_copy = false;
- if (src_inode == dst_inode)
- return -EINVAL;
- if (src_inode->i_sb != dst_inode->i_sb)
- return -EXDEV;
+ if (src_inode->i_sb != dst_inode->i_sb) {
+ struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
+
+ if (ceph_fsid_compare(&src_fsc->client->fsid,
+ &dst_fsc->client->fsid)) {
+ dout("Copying files across clusters: src: %pU dst: %pU\n",
+ &src_fsc->client->fsid, &dst_fsc->client->fsid);
+ return -EXDEV;
+ }
+ }
if (ceph_snap(dst_inode) != CEPH_NOSNAP)
return -EROFS;
@@ -1928,7 +1953,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
* efficient).
*/
- if (ceph_test_mount_opt(ceph_inode_to_client(src_inode), NOCOPYFROM))
+ if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
return -EOPNOTSUPP;
if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
@@ -1960,8 +1985,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
* clients may have dirty data in their caches. And OSDs know nothing
* about caps, so they can't safely do the remote object copies.
*/
- err = get_rd_wr_caps(src_ci, (src_off + len), &src_got,
- dst_ci, (dst_off + len), &dst_got);
+ err = get_rd_wr_caps(src_file, &src_got,
+ dst_file, (dst_off + len), &dst_got);
if (err < 0) {
dout("get_rd_wr_caps returned %d\n", err);
ret = -EOPNOTSUPP;
@@ -2018,9 +2043,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
goto out;
}
len -= ret;
- err = get_rd_wr_caps(src_ci, (src_off + len),
- &src_got, dst_ci,
- (dst_off + len), &dst_got);
+ err = get_rd_wr_caps(src_file, &src_got,
+ dst_file, (dst_off + len), &dst_got);
if (err < 0)
goto out;
err = is_file_size_ok(src_inode, dst_inode,
@@ -2044,7 +2068,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
dst_ci->i_vino.ino, dst_objnum);
/* Do an object remote copy */
err = ceph_osdc_copy_from(
- &ceph_inode_to_client(src_inode)->client->osdc,
+ &src_fsc->client->osdc,
src_ci->i_vino.snap, 0,
&src_oid, &src_oloc,
CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 18500edefc56..9f135624ae47 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -515,6 +515,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ceph_fscache_inode_init(ci);
+ ci->i_meta_err = 0;
+
return &ci->vfs_inode;
}
@@ -801,7 +803,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
/* update inode */
inode->i_rdev = le32_to_cpu(info->rdev);
- inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+ /* directories have fl_stripe_unit set to zero */
+ if (le32_to_cpu(info->layout.fl_stripe_unit))
+ inode->i_blkbits =
+ fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+ else
+ inode->i_blkbits = CEPH_BLOCK_SHIFT;
__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
@@ -1982,7 +1989,7 @@ static const struct inode_operations ceph_symlink_iops = {
int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- const unsigned int ia_valid = attr->ia_valid;
+ unsigned int ia_valid = attr->ia_valid;
struct ceph_mds_request *req;
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf;
@@ -2087,6 +2094,26 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
}
}
+ if (ia_valid & ATTR_SIZE) {
+ dout("setattr %p size %lld -> %lld\n", inode,
+ inode->i_size, attr->ia_size);
+ if ((issued & CEPH_CAP_FILE_EXCL) &&
+ attr->ia_size > inode->i_size) {
+ i_size_write(inode, attr->ia_size);
+ inode->i_blocks = calc_inode_blocks(attr->ia_size);
+ ci->i_reported_size = attr->ia_size;
+ dirtied |= CEPH_CAP_FILE_EXCL;
+ ia_valid |= ATTR_MTIME;
+ } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
+ attr->ia_size != inode->i_size) {
+ req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
+ req->r_args.setattr.old_size =
+ cpu_to_le64(inode->i_size);
+ mask |= CEPH_SETATTR_SIZE;
+ release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
+ }
+ }
if (ia_valid & ATTR_MTIME) {
dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
@@ -2109,25 +2136,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
}
}
- if (ia_valid & ATTR_SIZE) {
- dout("setattr %p size %lld -> %lld\n", inode,
- inode->i_size, attr->ia_size);
- if ((issued & CEPH_CAP_FILE_EXCL) &&
- attr->ia_size > inode->i_size) {
- i_size_write(inode, attr->ia_size);
- inode->i_blocks = calc_inode_blocks(attr->ia_size);
- ci->i_reported_size = attr->ia_size;
- dirtied |= CEPH_CAP_FILE_EXCL;
- } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
- attr->ia_size != inode->i_size) {
- req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
- req->r_args.setattr.old_size =
- cpu_to_le64(inode->i_size);
- mask |= CEPH_SETATTR_SIZE;
- release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
- CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
- }
- }
/* these do nothing */
if (ia_valid & ATTR_CTIME) {
diff --git a/fs/ceph/io.c b/fs/ceph/io.c
new file mode 100644
index 000000000000..97602ea92ff4
--- /dev/null
+++ b/fs/ceph/io.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 Trond Myklebust
+ * Copyright (c) 2019 Jeff Layton
+ *
+ * I/O and data path helper functionality.
+ *
+ * Heavily borrowed from equivalent code in fs/nfs/io.c
+ */
+
+#include <linux/ceph/ceph_debug.h>
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/rwsem.h>
+#include <linux/fs.h>
+
+#include "super.h"
+#include "io.h"
+
+/* Call with exclusively locked inode->i_rwsem */
+static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
+{
+ lockdep_assert_held_write(&inode->i_rwsem);
+
+ if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT) {
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags &= ~CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ inode_dio_wait(inode);
+ }
+}
+
+/**
+ * ceph_start_io_read - declare the file is being used for buffered reads
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is about to start, and ensure
+ * that we block all direct I/O.
+ * On exit, the function ensures that the CEPH_I_ODIRECT flag is unset,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that buffered read operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas direct I/O
+ * operations need to wait to grab an exclusive lock in order to set
+ * CEPH_I_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
+ */
+void
+ceph_start_io_read(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ /* Be an optimist! */
+ down_read(&inode->i_rwsem);
+ if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT))
+ return;
+ up_read(&inode->i_rwsem);
+ /* Slow path.... */
+ down_write(&inode->i_rwsem);
+ ceph_block_o_direct(ci, inode);
+ downgrade_write(&inode->i_rwsem);
+}
+
+/**
+ * ceph_end_io_read - declare that the buffered read operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void
+ceph_end_io_read(struct inode *inode)
+{
+ up_read(&inode->i_rwsem);
+}
+
+/**
+ * ceph_start_io_write - declare the file is being used for buffered writes
+ * @inode: file inode
+ *
+ * Declare that a buffered write operation is about to start, and ensure
+ * that we block all direct I/O.
+ */
+void
+ceph_start_io_write(struct inode *inode)
+{
+ down_write(&inode->i_rwsem);
+ ceph_block_o_direct(ceph_inode(inode), inode);
+}
+
+/**
+ * ceph_end_io_write - declare that the buffered write operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered write operation is done, and release the
+ * lock on inode->i_rwsem.
+ */
+void
+ceph_end_io_write(struct inode *inode)
+{
+ up_write(&inode->i_rwsem);
+}
+
+/* Call with exclusively locked inode->i_rwsem */
+static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
+{
+ lockdep_assert_held_write(&inode->i_rwsem);
+
+ if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)) {
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags |= CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ /* FIXME: unmap_mapping_range? */
+ filemap_write_and_wait(inode->i_mapping);
+ }
+}
+
+/**
+ * ceph_start_io_direct - declare the file is being used for direct i/o
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is about to start, and ensure
+ * that we block all buffered I/O.
+ * On exit, the function ensures that the CEPH_I_ODIRECT flag is set,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that direct I/O operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas buffered I/O
+ * operations need to wait to grab an exclusive lock in order to clear
+ * CEPH_I_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
+ */
+void
+ceph_start_io_direct(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ /* Be an optimist! */
+ down_read(&inode->i_rwsem);
+ if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)
+ return;
+ up_read(&inode->i_rwsem);
+ /* Slow path.... */
+ down_write(&inode->i_rwsem);
+ ceph_block_buffered(ci, inode);
+ downgrade_write(&inode->i_rwsem);
+}
+
+/**
+ * ceph_end_io_direct - declare that the direct i/o operation is done
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void
+ceph_end_io_direct(struct inode *inode)
+{
+ up_read(&inode->i_rwsem);
+}
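[Editor's note] A minimal caller-side sketch (not part of the patch) of how the
start/end helpers above pair up in a read path that dispatches on IOCB_DIRECT;
the function name and the generic_file_read_iter() stand-in are illustrative:

static ssize_t ceph_read_iter_sketch(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT)
		ceph_start_io_direct(inode);	/* shared i_rwsem, CEPH_I_ODIRECT set */
	else
		ceph_start_io_read(inode);	/* shared i_rwsem, CEPH_I_ODIRECT clear */

	ret = generic_file_read_iter(iocb, to);	/* stand-in for the actual I/O */

	if (iocb->ki_flags & IOCB_DIRECT)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);
	return ret;
}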
diff --git a/fs/ceph/io.h b/fs/ceph/io.h
new file mode 100644
index 000000000000..fa594cd77348
--- /dev/null
+++ b/fs/ceph/io.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FS_CEPH_IO_H
+#define _FS_CEPH_IO_H
+
+void ceph_start_io_read(struct inode *inode);
+void ceph_end_io_read(struct inode *inode);
+void ceph_start_io_write(struct inode *inode);
+void ceph_end_io_write(struct inode *inode);
+void ceph_start_io_direct(struct inode *inode);
+void ceph_end_io_direct(struct inode *inode);
+
+#endif /* _FS_CEPH_IO_H */
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 5083e238ad15..544e9e85b120 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -32,14 +32,18 @@ void __init ceph_flock_init(void)
static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
- struct inode *inode = file_inode(src->fl_file);
+ struct ceph_file_info *fi = dst->fl_file->private_data;
+ struct inode *inode = file_inode(dst->fl_file);
atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+ atomic_inc(&fi->num_locks);
}
static void ceph_fl_release_lock(struct file_lock *fl)
{
+ struct ceph_file_info *fi = fl->fl_file->private_data;
struct inode *inode = file_inode(fl->fl_file);
struct ceph_inode_info *ci = ceph_inode(inode);
+ atomic_dec(&fi->num_locks);
if (atomic_dec_and_test(&ci->i_filelock_ref)) {
/* clear error when all locks are released */
spin_lock(&ci->i_ceph_lock);
@@ -73,7 +77,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
* window. Caller function will decrease the counter.
*/
fl->fl_ops = &ceph_fl_lock_ops;
- atomic_inc(&ceph_inode(inode)->i_filelock_ref);
+ fl->fl_ops->fl_copy_lock(fl, NULL);
}
if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 920e9f048bd8..a8a8f84f3bbf 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -639,7 +639,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
s->s_renew_seq = 0;
INIT_LIST_HEAD(&s->s_caps);
s->s_nr_caps = 0;
- s->s_trim_caps = 0;
refcount_set(&s->s_ref, 1);
INIT_LIST_HEAD(&s->s_waiting);
INIT_LIST_HEAD(&s->s_unsafe);
@@ -1270,6 +1269,7 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
{
struct ceph_mds_request *req;
struct rb_node *p;
+ struct ceph_inode_info *ci;
dout("cleanup_session_requests mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
@@ -1278,6 +1278,16 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
struct ceph_mds_request, r_unsafe_item);
pr_warn_ratelimited(" dropping unsafe request %llu\n",
req->r_tid);
+ if (req->r_target_inode) {
+ /* dropping unsafe change of inode's attributes */
+ ci = ceph_inode(req->r_target_inode);
+ errseq_set(&ci->i_meta_err, -EIO);
+ }
+ if (req->r_unsafe_dir) {
+ /* dropping unsafe directory operation */
+ ci = ceph_inode(req->r_unsafe_dir);
+ errseq_set(&ci->i_meta_err, -EIO);
+ }
__unregister_request(mdsc, req);
}
/* zero r_attempts, so kick_requests() will re-send requests */
@@ -1370,7 +1380,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
struct ceph_inode_info *ci = ceph_inode(inode);
LIST_HEAD(to_remove);
- bool drop = false;
+ bool dirty_dropped = false;
bool invalidate = false;
dout("removing cap %p, ci is %p, inode is %p\n",
@@ -1383,9 +1393,12 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_cap_flush *cf;
struct ceph_mds_client *mdsc = fsc->mdsc;
- if (ci->i_wrbuffer_ref > 0 &&
- READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
- invalidate = true;
+ if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+ if (inode->i_data.nrpages > 0)
+ invalidate = true;
+ if (ci->i_wrbuffer_ref > 0)
+ mapping_set_error(&inode->i_data, -EIO);
+ }
while (!list_empty(&ci->i_cap_flush_list)) {
cf = list_first_entry(&ci->i_cap_flush_list,
@@ -1405,7 +1418,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
inode, ceph_ino(inode));
ci->i_dirty_caps = 0;
list_del_init(&ci->i_dirty_item);
- drop = true;
+ dirty_dropped = true;
}
if (!list_empty(&ci->i_flushing_item)) {
pr_warn_ratelimited(
@@ -1415,10 +1428,22 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
ci->i_flushing_caps = 0;
list_del_init(&ci->i_flushing_item);
mdsc->num_cap_flushing--;
- drop = true;
+ dirty_dropped = true;
}
spin_unlock(&mdsc->cap_dirty_lock);
+ if (dirty_dropped) {
+ errseq_set(&ci->i_meta_err, -EIO);
+
+ if (ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ }
+ }
+
if (atomic_read(&ci->i_filelock_ref) > 0) {
/* make further file lock syscall return -EIO */
ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
@@ -1430,15 +1455,6 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
ci->i_prealloc_cap_flush = NULL;
}
-
- if (drop &&
- ci->i_wrbuffer_ref_head == 0 &&
- ci->i_wr_ref == 0 &&
- ci->i_dirty_caps == 0 &&
- ci->i_flushing_caps == 0) {
- ceph_put_snap_context(ci->i_head_snapc);
- ci->i_head_snapc = NULL;
- }
}
spin_unlock(&ci->i_ceph_lock);
while (!list_empty(&to_remove)) {
@@ -1452,7 +1468,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
wake_up_all(&ci->i_cap_wq);
if (invalidate)
ceph_queue_invalidate(inode);
- if (drop)
+ if (dirty_dropped)
iput(inode);
return 0;
}
@@ -1705,11 +1721,11 @@ out:
*/
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
- struct ceph_mds_session *session = arg;
+ int *remaining = arg;
struct ceph_inode_info *ci = ceph_inode(inode);
int used, wanted, oissued, mine;
- if (session->s_trim_caps <= 0)
+ if (*remaining <= 0)
return -1;
spin_lock(&ci->i_ceph_lock);
@@ -1746,7 +1762,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
if (oissued) {
/* we aren't the only cap.. just remove us */
__ceph_remove_cap(cap, true);
- session->s_trim_caps--;
+ (*remaining)--;
} else {
struct dentry *dentry;
/* try dropping referring dentries */
@@ -1758,7 +1774,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
d_prune_aliases(inode);
count = atomic_read(&inode->i_count);
if (count == 1)
- session->s_trim_caps--;
+ (*remaining)--;
dout("trim_caps_cb %p cap %p pruned, count now %d\n",
inode, cap, count);
} else {
@@ -1784,12 +1800,12 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
dout("trim_caps mds%d start: %d / %d, trim %d\n",
session->s_mds, session->s_nr_caps, max_caps, trim_caps);
if (trim_caps > 0) {
- session->s_trim_caps = trim_caps;
- ceph_iterate_session_caps(session, trim_caps_cb, session);
+ int remaining = trim_caps;
+
+ ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
session->s_mds, session->s_nr_caps, max_caps,
- trim_caps - session->s_trim_caps);
- session->s_trim_caps = 0;
+ trim_caps - remaining);
}
ceph_flush_cap_releases(mdsc, session);
@@ -3015,18 +3031,23 @@ bad:
pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
-static int __decode_and_drop_session_metadata(void **p, void *end)
+static int __decode_session_metadata(void **p, void *end,
+ bool *blacklisted)
{
/* map<string,string> */
u32 n;
+ bool err_str;
ceph_decode_32_safe(p, end, n, bad);
while (n-- > 0) {
u32 len;
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_need(p, end, len, bad);
+ err_str = !strncmp(*p, "error_string", len);
*p += len;
ceph_decode_32_safe(p, end, len, bad);
ceph_decode_need(p, end, len, bad);
+ if (err_str && strnstr(*p, "blacklisted", len))
+ *blacklisted = true;
*p += len;
}
return 0;
@@ -3050,6 +3071,7 @@ static void handle_session(struct ceph_mds_session *session,
u64 seq;
unsigned long features = 0;
int wake = 0;
+ bool blacklisted = false;
/* decode */
ceph_decode_need(&p, end, sizeof(*h), bad);
@@ -3062,7 +3084,7 @@ static void handle_session(struct ceph_mds_session *session,
if (msg_version >= 3) {
u32 len;
/* version >= 2, metadata */
- if (__decode_and_drop_session_metadata(&p, end) < 0)
+ if (__decode_session_metadata(&p, end, &blacklisted) < 0)
goto bad;
/* version >= 3, feature bits */
ceph_decode_32_safe(&p, end, len, bad);
@@ -3149,6 +3171,8 @@ static void handle_session(struct ceph_mds_session *session,
session->s_state = CEPH_MDS_SESSION_REJECTED;
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
+ if (blacklisted)
+ mdsc->fsc->blacklisted = true;
wake = 2; /* for good measure */
break;
@@ -3998,7 +4022,27 @@ static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
mutex_unlock(&mdsc->mutex);
}
+static void maybe_recover_session(struct ceph_mds_client *mdsc)
+{
+ struct ceph_fs_client *fsc = mdsc->fsc;
+
+ if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
+ return;
+
+ if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
+ return;
+
+ if (!READ_ONCE(fsc->blacklisted))
+ return;
+
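+ /* rate-limit automatic reconnects to one every 30 minutes (HZ * 60 * 30) */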
+ if (fsc->last_auto_reconnect &&
+ time_before(jiffies, fsc->last_auto_reconnect + HZ * 60 * 30))
+ return;
+ pr_info("auto reconnect after blacklisted\n");
+ fsc->last_auto_reconnect = jiffies;
+ ceph_force_reconnect(fsc->sb);
+}
/*
* delayed work -- periodically trim expired leases, renew caps with mds
@@ -4044,7 +4088,9 @@ static void delayed_work(struct work_struct *work)
pr_info("mds%d hung\n", s->s_mds);
}
}
- if (s->s_state < CEPH_MDS_SESSION_OPEN) {
+ if (s->s_state == CEPH_MDS_SESSION_NEW ||
+ s->s_state == CEPH_MDS_SESSION_RESTARTING ||
+ s->s_state == CEPH_MDS_SESSION_REJECTED) {
/* this mds is failed or recovering, just wait */
ceph_put_mds_session(s);
continue;
@@ -4072,6 +4118,8 @@ static void delayed_work(struct work_struct *work)
ceph_trim_snapid_map(mdsc);
+ maybe_recover_session(mdsc);
+
schedule_delayed(mdsc);
}
@@ -4355,7 +4403,12 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
session = __ceph_lookup_mds_session(mdsc, mds);
if (!session)
continue;
+
+ if (session->s_state == CEPH_MDS_SESSION_REJECTED)
+ __unregister_session(mdsc, session);
+ __wake_requests(mdsc, &session->s_waiting);
mutex_unlock(&mdsc->mutex);
+
mutex_lock(&session->s_mutex);
__close_session(mdsc, session);
if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
@@ -4364,6 +4417,7 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
}
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
+
mutex_lock(&mdsc->mutex);
kick_requests(mdsc, mds);
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index f7c8603484fe..5cd131b41d84 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -148,9 +148,9 @@ enum {
CEPH_MDS_SESSION_OPENING = 2,
CEPH_MDS_SESSION_OPEN = 3,
CEPH_MDS_SESSION_HUNG = 4,
- CEPH_MDS_SESSION_CLOSING = 5,
- CEPH_MDS_SESSION_RESTARTING = 6,
- CEPH_MDS_SESSION_RECONNECTING = 7,
+ CEPH_MDS_SESSION_RESTARTING = 5,
+ CEPH_MDS_SESSION_RECONNECTING = 6,
+ CEPH_MDS_SESSION_CLOSING = 7,
CEPH_MDS_SESSION_REJECTED = 8,
};
@@ -176,7 +176,7 @@ struct ceph_mds_session {
spinlock_t s_cap_lock;
struct list_head s_caps; /* all caps issued by this session */
struct ceph_cap *s_cap_iterator;
- int s_nr_caps, s_trim_caps;
+ int s_nr_caps;
int s_num_cap_releases;
int s_cap_reconnect;
int s_readonly;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 377fafc76f20..edfd643a8205 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -143,6 +143,7 @@ enum {
Opt_snapdirname,
Opt_mds_namespace,
Opt_fscache_uniq,
+ Opt_recover_session,
Opt_last_string,
/* string args above */
Opt_dirstat,
@@ -184,6 +185,7 @@ static match_table_t fsopt_tokens = {
/* int args above */
{Opt_snapdirname, "snapdirname=%s"},
{Opt_mds_namespace, "mds_namespace=%s"},
+ {Opt_recover_session, "recover_session=%s"},
{Opt_fscache_uniq, "fsc=%s"},
/* string args above */
{Opt_dirstat, "dirstat"},
@@ -254,6 +256,17 @@ static int parse_fsopt_token(char *c, void *private)
if (!fsopt->mds_namespace)
return -ENOMEM;
break;
+ case Opt_recover_session:
+ if (!strncmp(argstr[0].from, "no",
+ argstr[0].to - argstr[0].from)) {
+ fsopt->flags &= ~CEPH_MOUNT_OPT_CLEANRECOVER;
+ } else if (!strncmp(argstr[0].from, "clean",
+ argstr[0].to - argstr[0].from)) {
+ fsopt->flags |= CEPH_MOUNT_OPT_CLEANRECOVER;
+ } else {
+ return -EINVAL;
+ }
+ break;
case Opt_fscache_uniq:
kfree(fsopt->fscache_uniq);
fsopt->fscache_uniq = kstrndup(argstr[0].from,
@@ -576,6 +589,10 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
if (fsopt->mds_namespace)
seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
+
+ if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
+ seq_show_option(m, "recover_session", "clean");
+
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
seq_printf(m, ",wsize=%d", fsopt->wsize);
if (fsopt->rsize != CEPH_MAX_READ_SIZE)
@@ -664,6 +681,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
fsc->sb = NULL;
fsc->mount_state = CEPH_MOUNT_MOUNTING;
+ fsc->filp_gen = 1;
atomic_long_set(&fsc->writeback_count, 0);
@@ -713,6 +731,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
{
dout("destroy_fs_client %p\n", fsc);
+ ceph_mdsc_destroy(fsc);
destroy_workqueue(fsc->inode_wq);
destroy_workqueue(fsc->cap_wq);
@@ -829,7 +848,7 @@ static void ceph_umount_begin(struct super_block *sb)
fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
ceph_mdsc_force_umount(fsc->mdsc);
- return;
+ fsc->filp_gen++; /* invalidate open files */
}
static int ceph_remount(struct super_block *sb, int *flags, char *data)
@@ -1089,7 +1108,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
}
if (ceph_sb_to_client(sb) != fsc) {
- ceph_mdsc_destroy(fsc);
destroy_fs_client(fsc);
fsc = ceph_sb_to_client(sb);
dout("get_sb got existing client %p\n", fsc);
@@ -1115,7 +1133,6 @@ out_splat:
goto out_final;
out:
- ceph_mdsc_destroy(fsc);
destroy_fs_client(fsc);
out_final:
dout("ceph_mount fail %ld\n", PTR_ERR(res));
@@ -1139,8 +1156,6 @@ static void ceph_kill_sb(struct super_block *s)
ceph_fscache_unregister_fs(fsc);
- ceph_mdsc_destroy(fsc);
-
destroy_fs_client(fsc);
free_anon_bdev(dev);
}
@@ -1154,6 +1169,33 @@ static struct file_system_type ceph_fs_type = {
};
MODULE_ALIAS_FS("ceph");
+int ceph_force_reconnect(struct super_block *sb)
+{
+ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ int err = 0;
+
+ ceph_umount_begin(sb);
+
+ /* Make sure all page caches get invalidated;
+ * see remove_session_caps_cb() */
+ flush_workqueue(fsc->inode_wq);
+
+ /* In case we were blacklisted; this also resets
+ * all mon/osd connections */
+ ceph_reset_client_addr(fsc->client);
+
+ ceph_osdc_clear_abort_err(&fsc->client->osdc);
+
+ fsc->blacklisted = false;
+ fsc->mount_state = CEPH_MOUNT_MOUNTED;
+
+ if (sb->s_root) {
+ err = __ceph_do_getattr(d_inode(sb->s_root), NULL,
+ CEPH_STAT_CAP_INODE, true);
+ }
+ return err;
+}
+
static int __init init_ceph(void)
{
int ret = init_caches();
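[Editor's note] Mount-side usage of the option parsed above; recover_session=no
(the default when the option is absent) keeps the old fail-hard behaviour after
the client is blacklisted, while recover_session=clean enables the automatic
recovery driven by maybe_recover_session():

	mount -t ceph <monitor>:/ /mnt/ceph -o recover_session=clean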
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 6b9f1ee7de85..f98d9247f9cb 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/posix_acl.h>
#include <linux/refcount.h>
+#include <linux/security.h>
#include <linux/ceph/libceph.h>
@@ -31,6 +32,7 @@
#define CEPH_BLOCK_SHIFT 22 /* 4 MB */
#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
+#define CEPH_MOUNT_OPT_CLEANRECOVER (1<<1) /* auto reconnect (clean mode) after being blacklisted */
#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
#define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
@@ -101,6 +103,11 @@ struct ceph_fs_client {
struct ceph_client *client;
unsigned long mount_state;
+
+ unsigned long last_auto_reconnect;
+ bool blacklisted;
+
+ u32 filp_gen;
loff_t max_file_size;
struct ceph_mds_client *mdsc;
@@ -395,6 +402,8 @@ struct ceph_inode_info {
struct fscache_cookie *fscache;
u32 i_fscache_gen;
#endif
+ errseq_t i_meta_err;
+
struct inode vfs_inode; /* at end */
};
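[Editor's note] A sketch of how an errseq_t cursor such as i_meta_err is meant
to be consumed, using the standard <linux/errseq.h> helpers; the open/fsync
call sites shown here are illustrative, the patch only adds the fields:

	/* at open: remember the current error cursor in the file handle */
	fi->meta_err = errseq_sample(&ci->i_meta_err);

	/* at fsync: report, once per open file, any metadata error set since */
	err = errseq_check_and_advance(&ci->i_meta_err, &fi->meta_err);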
@@ -499,17 +508,16 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_DIR_ORDERED (1 << 0) /* dentries in dir are ordered */
#define CEPH_I_NODELAY (1 << 1) /* do not delay cap release */
#define CEPH_I_FLUSH (1 << 2) /* do not delay flush of dirty metadata */
-#define CEPH_I_NOFLUSH (1 << 3) /* do not flush dirty caps */
-#define CEPH_I_POOL_PERM (1 << 4) /* pool rd/wr bits are valid */
-#define CEPH_I_POOL_RD (1 << 5) /* can read from pool */
-#define CEPH_I_POOL_WR (1 << 6) /* can write to pool */
-#define CEPH_I_SEC_INITED (1 << 7) /* security initialized */
-#define CEPH_I_CAP_DROPPED (1 << 8) /* caps were forcibly dropped */
-#define CEPH_I_KICK_FLUSH (1 << 9) /* kick flushing caps */
-#define CEPH_I_FLUSH_SNAPS (1 << 10) /* need flush snapss */
-#define CEPH_I_ERROR_WRITE (1 << 11) /* have seen write errors */
-#define CEPH_I_ERROR_FILELOCK (1 << 12) /* have seen file lock errors */
-
+#define CEPH_I_POOL_PERM (1 << 3) /* pool rd/wr bits are valid */
+#define CEPH_I_POOL_RD (1 << 4) /* can read from pool */
+#define CEPH_I_POOL_WR (1 << 5) /* can write to pool */
+#define CEPH_I_SEC_INITED (1 << 6) /* security initialized */
+#define CEPH_I_CAP_DROPPED (1 << 7) /* caps were forcibly dropped */
+#define CEPH_I_KICK_FLUSH (1 << 8) /* kick flushing caps */
+#define CEPH_I_FLUSH_SNAPS (1 << 9) /* need to flush snaps */
+#define CEPH_I_ERROR_WRITE (1 << 10) /* have seen write errors */
+#define CEPH_I_ERROR_FILELOCK (1 << 11) /* have seen file lock errors */
+#define CEPH_I_ODIRECT (1 << 12) /* inode in direct I/O mode */
/*
* Masks of ceph inode work.
@@ -703,6 +711,10 @@ struct ceph_file_info {
spinlock_t rw_contexts_lock;
struct list_head rw_contexts;
+
+ errseq_t meta_err;
+ u32 filp_gen;
+ atomic_t num_locks;
};
struct ceph_dir_file_info {
@@ -842,7 +854,8 @@ static inline int default_congestion_kb(void)
}
-
+/* super.c */
+extern int ceph_force_reconnect(struct super_block *sb);
/* snap.c */
struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
u64 ino);
@@ -959,7 +972,10 @@ static inline bool ceph_security_xattr_wanted(struct inode *in)
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
extern int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
struct ceph_acl_sec_ctx *ctx);
-extern void ceph_security_invalidate_secctx(struct inode *inode);
+static inline void ceph_security_invalidate_secctx(struct inode *inode)
+{
+ security_inode_invalidate_secctx(inode);
+}
#else
static inline int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
struct ceph_acl_sec_ctx *ctx)
@@ -1039,7 +1055,6 @@ extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
int mds);
-extern int ceph_get_cap_mds(struct inode *inode);
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
@@ -1058,9 +1073,9 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
struct inode *dir,
int mds, int drop, int unless);
-extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
+extern int ceph_get_caps(struct file *filp, int need, int want,
loff_t endoff, int *got, struct page **pinned_page);
-extern int ceph_try_get_caps(struct ceph_inode_info *ci,
+extern int ceph_try_get_caps(struct inode *inode,
int need, int want, bool nonblock, int *got);
/* for counting open files by mode */
@@ -1071,7 +1086,7 @@ extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode);
extern const struct address_space_operations ceph_aops;
extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
-extern int ceph_pool_perm_check(struct ceph_inode_info *ci, int need);
+extern int ceph_pool_perm_check(struct inode *inode, int need);
extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
/* file.c */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 939eab7aa219..cb18ee637cb7 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -20,7 +20,8 @@ static int __remove_xattr(struct ceph_inode_info *ci,
static bool ceph_is_valid_xattr(const char *name)
{
- return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
+ return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) ||
+ !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
@@ -892,7 +893,8 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
memcpy(value, xattr->val, xattr->val_len);
if (current->journal_info &&
- !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
+ !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+ security_ismaclabel(name + XATTR_SECURITY_PREFIX_LEN))
ci->i_ceph_flags |= CEPH_I_SEC_INITED;
out:
spin_unlock(&ci->i_ceph_lock);
@@ -903,11 +905,9 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
struct inode *inode = d_inode(dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
bool len_only = (size == 0);
u32 namelen;
int err;
- int i;
spin_lock(&ci->i_ceph_lock);
dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
@@ -936,33 +936,6 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
names = __copy_xattr_names(ci, names);
size -= namelen;
}
-
-
- /* virtual xattr names, too */
- if (vxattrs) {
- for (i = 0; vxattrs[i].name; i++) {
- size_t this_len;
-
- if (vxattrs[i].flags & VXATTR_FLAG_HIDDEN)
- continue;
- if (vxattrs[i].exists_cb && !vxattrs[i].exists_cb(ci))
- continue;
-
- this_len = strlen(vxattrs[i].name) + 1;
- namelen += this_len;
- if (len_only)
- continue;
-
- if (this_len > size) {
- err = -ERANGE;
- goto out;
- }
-
- memcpy(names, vxattrs[i].name, this_len);
- names += this_len;
- size -= this_len;
- }
- }
err = namelen;
out:
spin_unlock(&ci->i_ceph_lock);
@@ -1293,42 +1266,8 @@ out:
ceph_pagelist_release(pagelist);
return err;
}
-
-void ceph_security_invalidate_secctx(struct inode *inode)
-{
- security_inode_invalidate_secctx(inode);
-}
-
-static int ceph_xattr_set_security_label(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *key, const void *buf,
- size_t buflen, int flags)
-{
- if (security_ismaclabel(key)) {
- const char *name = xattr_full_name(handler, key);
- return __ceph_setxattr(inode, name, buf, buflen, flags);
- }
- return -EOPNOTSUPP;
-}
-
-static int ceph_xattr_get_security_label(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *key, void *buf, size_t buflen)
-{
- if (security_ismaclabel(key)) {
- const char *name = xattr_full_name(handler, key);
- return __ceph_getxattr(inode, name, buf, buflen);
- }
- return -EOPNOTSUPP;
-}
-
-static const struct xattr_handler ceph_security_label_handler = {
- .prefix = XATTR_SECURITY_PREFIX,
- .get = ceph_xattr_get_security_label,
- .set = ceph_xattr_set_security_label,
-};
-#endif
-#endif
+#endif /* CONFIG_CEPH_FS_SECURITY_LABEL */
+#endif /* CONFIG_SECURITY */
void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
{
@@ -1352,9 +1291,6 @@ const struct xattr_handler *ceph_xattr_handlers[] = {
&posix_acl_access_xattr_handler,
&posix_acl_default_xattr_handler,
#endif
-#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
- &ceph_security_label_handler,
-#endif
&ceph_other_xattr_handler,
NULL,
};
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 1bda2ab6745b..054acd9fd033 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -88,9 +88,7 @@ static int fat__get_entry(struct inode *dir, loff_t *pos,
int err, offset;
next:
- if (*bh)
- brelse(*bh);
-
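+ /* brelse(NULL) is a no-op, so the NULL check is unnecessary */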
+ brelse(*bh);
*bh = NULL;
iblock = *pos >> sb->s_blocksize_bits;
err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0, false);
@@ -1100,8 +1098,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
err = -ENOMEM;
goto error;
}
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(bhs[n]);
memset(bhs[n]->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bhs[n]);
+ unlock_buffer(bhs[n]);
mark_buffer_dirty_inode(bhs[n], dir);
n++;
@@ -1158,6 +1159,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
de = (struct msdos_dir_entry *)bhs[0]->b_data;
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(bhs[0]);
/* filling the new directory slots ("." and ".." entries) */
memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
@@ -1180,6 +1183,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
de[0].size = de[1].size = 0;
memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
set_buffer_uptodate(bhs[0]);
+ unlock_buffer(bhs[0]);
mark_buffer_dirty_inode(bhs[0], dir);
err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
@@ -1237,11 +1241,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
/* fill the directory entry */
copy = min(size, sb->s_blocksize);
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(bhs[n]);
memcpy(bhs[n]->b_data, slots, copy);
- slots += copy;
- size -= copy;
set_buffer_uptodate(bhs[n]);
+ unlock_buffer(bhs[n]);
mark_buffer_dirty_inode(bhs[n], dir);
+ slots += copy;
+ size -= copy;
if (!size)
break;
n++;
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 265983635f2b..3647c65a0f48 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -388,8 +388,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
err = -ENOMEM;
goto error;
}
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(c_bh);
memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
set_buffer_uptodate(c_bh);
+ unlock_buffer(c_bh);
mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
if (sb->s_flags & SB_SYNCHRONOUS)
err = sync_dirty_buffer(c_bh);
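[Editor's note] The three fat hunks above all apply the same discipline; the
common shape, written as a hypothetical helper not added by the patch, is:

static void fat_fill_block(struct buffer_head *bh, struct inode *dir,
			   const void *src, size_t len)
{
	/* lock_buffer() keeps a concurrent read through the block device
	 * from observing a half-initialised block */
	lock_buffer(bh);
	memcpy(bh->b_data, src, len);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty_inode(bh, dir);
}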
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 87c2c9687d90..138b5b4d621d 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -504,7 +504,6 @@ void put_fs_context(struct fs_context *fc)
put_net(fc->net_ns);
put_user_ns(fc->user_ns);
put_cred(fc->cred);
- kfree(fc->subtype);
put_fc_log(fc);
put_filesystem(fc->fs_type);
kfree(fc->source);
@@ -571,17 +570,6 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
return 0;
}
- if ((fc->fs_type->fs_flags & FS_HAS_SUBTYPE) &&
- strcmp(param->key, "subtype") == 0) {
- if (param->type != fs_value_is_string)
- return invalf(fc, "VFS: Legacy: Non-string subtype");
- if (fc->subtype)
- return invalf(fc, "VFS: Legacy: Multiple subtype");
- fc->subtype = param->string;
- param->string = NULL;
- return 0;
- }
-
if (ctx->param_type == LEGACY_FS_MONOLITHIC_PARAMS)
return invalf(fc, "VFS: Legacy: Can't mix monolithic and individual options");
@@ -738,8 +726,6 @@ void vfs_clean_context(struct fs_context *fc)
fc->s_fs_info = NULL;
fc->sb_flags = 0;
security_free_mnt_opts(&fc->security);
- kfree(fc->subtype);
- fc->subtype = NULL;
kfree(fc->source);
fc->source = NULL;
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index bab7a0db81dd..00015d851382 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -142,11 +142,10 @@ static int cuse_open(struct inode *inode, struct file *file)
static int cuse_release(struct inode *inode, struct file *file)
{
- struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- fuse_sync_release(fi, ff, file->f_flags);
+ fuse_sync_release(NULL, ff, file->f_flags);
fuse_conn_put(fc);
return 0;
@@ -299,6 +298,14 @@ static void cuse_gendev_release(struct device *dev)
kfree(dev);
}
+struct cuse_init_args {
+ struct fuse_args_pages ap;
+ struct cuse_init_in in;
+ struct cuse_init_out out;
+ struct page *page;
+ struct fuse_page_desc desc;
+};
+
/**
* cuse_process_init_reply - finish initializing CUSE channel
*
@@ -306,21 +313,22 @@ static void cuse_gendev_release(struct device *dev)
* required data structures for it. Please read the comment at the
* top of this file for high level overview.
*/
-static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
+static void cuse_process_init_reply(struct fuse_conn *fc,
+ struct fuse_args *args, int error)
{
+ struct cuse_init_args *ia = container_of(args, typeof(*ia), ap.args);
+ struct fuse_args_pages *ap = &ia->ap;
struct cuse_conn *cc = fc_to_cc(fc), *pos;
- struct cuse_init_out *arg = req->out.args[0].value;
- struct page *page = req->pages[0];
+ struct cuse_init_out *arg = &ia->out;
+ struct page *page = ap->pages[0];
struct cuse_devinfo devinfo = { };
struct device *dev;
struct cdev *cdev;
dev_t devt;
int rc, i;
- if (req->out.h.error ||
- arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) {
+ if (error || arg->major != FUSE_KERNEL_VERSION || arg->minor < 11)
goto err;
- }
fc->minor = arg->minor;
fc->max_read = max_t(unsigned, arg->max_read, 4096);
@@ -329,7 +337,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
/* parse init reply */
cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL;
- rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size,
+ rc = cuse_parse_devinfo(page_address(page), ap->args.out_args[1].size,
&devinfo);
if (rc)
goto err;
@@ -396,7 +404,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
dev_set_uevent_suppress(dev, 0);
kobject_uevent(&dev->kobj, KOBJ_ADD);
out:
- kfree(arg);
+ kfree(ia);
__free_page(page);
return;
@@ -415,55 +423,49 @@ err:
static int cuse_send_init(struct cuse_conn *cc)
{
int rc;
- struct fuse_req *req;
struct page *page;
struct fuse_conn *fc = &cc->fc;
- struct cuse_init_in *arg;
- void *outarg;
+ struct cuse_init_args *ia;
+ struct fuse_args_pages *ap;
BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
- req = fuse_get_req_for_background(fc, 1);
- if (IS_ERR(req)) {
- rc = PTR_ERR(req);
- goto err;
- }
-
rc = -ENOMEM;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
- goto err_put_req;
+ goto err;
- outarg = kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL);
- if (!outarg)
+ ia = kzalloc(sizeof(*ia), GFP_KERNEL);
+ if (!ia)
goto err_free_page;
- arg = &req->misc.cuse_init_in;
- arg->major = FUSE_KERNEL_VERSION;
- arg->minor = FUSE_KERNEL_MINOR_VERSION;
- arg->flags |= CUSE_UNRESTRICTED_IOCTL;
- req->in.h.opcode = CUSE_INIT;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(struct cuse_init_in);
- req->in.args[0].value = arg;
- req->out.numargs = 2;
- req->out.args[0].size = sizeof(struct cuse_init_out);
- req->out.args[0].value = outarg;
- req->out.args[1].size = CUSE_INIT_INFO_MAX;
- req->out.argvar = 1;
- req->out.argpages = 1;
- req->pages[0] = page;
- req->page_descs[0].length = req->out.args[1].size;
- req->num_pages = 1;
- req->end = cuse_process_init_reply;
- fuse_request_send_background(fc, req);
-
- return 0;
-
+ ap = &ia->ap;
+ ia->in.major = FUSE_KERNEL_VERSION;
+ ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
+ ia->in.flags |= CUSE_UNRESTRICTED_IOCTL;
+ ap->args.opcode = CUSE_INIT;
+ ap->args.in_numargs = 1;
+ ap->args.in_args[0].size = sizeof(ia->in);
+ ap->args.in_args[0].value = &ia->in;
+ ap->args.out_numargs = 2;
+ ap->args.out_args[0].size = sizeof(ia->out);
+ ap->args.out_args[0].value = &ia->out;
+ ap->args.out_args[1].size = CUSE_INIT_INFO_MAX;
+ ap->args.out_argvar = 1;
+ ap->args.out_pages = 1;
+ ap->num_pages = 1;
+ ap->pages = &ia->page;
+ ap->descs = &ia->desc;
+ ia->page = page;
+ ia->desc.length = ap->args.out_args[1].size;
+ ap->args.end = cuse_process_init_reply;
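+ /* on success, ia and page are freed by cuse_process_init_reply() */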
+
+ rc = fuse_simple_background(fc, &ap->args, GFP_KERNEL);
+ if (rc) {
+ kfree(ia);
err_free_page:
- __free_page(page);
-err_put_req:
- fuse_put_request(fc, req);
+ __free_page(page);
+ }
err:
return rc;
}
@@ -504,9 +506,9 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
* Limit the cuse channel to requests that can
* be represented in file->f_cred->user_ns.
*/
- fuse_conn_init(&cc->fc, file->f_cred->user_ns);
+ fuse_conn_init(&cc->fc, file->f_cred->user_ns, &fuse_dev_fiq_ops, NULL);
- fud = fuse_dev_alloc(&cc->fc);
+ fud = fuse_dev_alloc_install(&cc->fc);
if (!fud) {
kfree(cc);
return -ENOMEM;
@@ -519,6 +521,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
rc = cuse_send_init(cc);
if (rc) {
fuse_dev_free(fud);
+ fuse_conn_put(&cc->fc);
return rc;
}
file->private_data = fud;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ea8237513dfa..dadd617d826c 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -40,107 +40,30 @@ static struct fuse_dev *fuse_get_dev(struct file *file)
return READ_ONCE(file->private_data);
}
-static void fuse_request_init(struct fuse_req *req, struct page **pages,
- struct fuse_page_desc *page_descs,
- unsigned npages)
+static void fuse_request_init(struct fuse_req *req)
{
INIT_LIST_HEAD(&req->list);
INIT_LIST_HEAD(&req->intr_entry);
init_waitqueue_head(&req->waitq);
refcount_set(&req->count, 1);
- req->pages = pages;
- req->page_descs = page_descs;
- req->max_pages = npages;
__set_bit(FR_PENDING, &req->flags);
}
-static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags,
- struct fuse_page_desc **desc)
-{
- struct page **pages;
-
- pages = kzalloc(npages * (sizeof(struct page *) +
- sizeof(struct fuse_page_desc)), flags);
- *desc = (void *) pages + npages * sizeof(struct page *);
-
- return pages;
-}
-
-static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
+static struct fuse_req *fuse_request_alloc(gfp_t flags)
{
struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
- if (req) {
- struct page **pages = NULL;
- struct fuse_page_desc *page_descs = NULL;
-
- WARN_ON(npages > FUSE_MAX_MAX_PAGES);
- if (npages > FUSE_REQ_INLINE_PAGES) {
- pages = fuse_req_pages_alloc(npages, flags,
- &page_descs);
- if (!pages) {
- kmem_cache_free(fuse_req_cachep, req);
- return NULL;
- }
- } else if (npages) {
- pages = req->inline_pages;
- page_descs = req->inline_page_descs;
- }
+ if (req)
+ fuse_request_init(req);
- fuse_request_init(req, pages, page_descs, npages);
- }
return req;
}
-struct fuse_req *fuse_request_alloc(unsigned npages)
-{
- return __fuse_request_alloc(npages, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(fuse_request_alloc);
-
-struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
-{
- return __fuse_request_alloc(npages, GFP_NOFS);
-}
-
-static void fuse_req_pages_free(struct fuse_req *req)
-{
- if (req->pages != req->inline_pages)
- kfree(req->pages);
-}
-
-bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
- gfp_t flags)
-{
- struct page **pages;
- struct fuse_page_desc *page_descs;
- unsigned int npages = min_t(unsigned int,
- max_t(unsigned int, req->max_pages * 2,
- FUSE_DEFAULT_MAX_PAGES_PER_REQ),
- fc->max_pages);
- WARN_ON(npages <= req->max_pages);
-
- pages = fuse_req_pages_alloc(npages, flags, &page_descs);
- if (!pages)
- return false;
-
- memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages);
- memcpy(page_descs, req->page_descs,
- sizeof(struct fuse_page_desc) * req->max_pages);
- fuse_req_pages_free(req);
- req->pages = pages;
- req->page_descs = page_descs;
- req->max_pages = npages;
-
- return true;
-}
-
-void fuse_request_free(struct fuse_req *req)
+static void fuse_request_free(struct fuse_req *req)
{
- fuse_req_pages_free(req);
kmem_cache_free(fuse_req_cachep, req);
}
-void __fuse_get_request(struct fuse_req *req)
+static void __fuse_get_request(struct fuse_req *req)
{
refcount_inc(&req->count);
}
@@ -177,8 +100,9 @@ static void fuse_drop_waiting(struct fuse_conn *fc)
}
}
-static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
- bool for_background)
+static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
+
+static struct fuse_req *fuse_get_req(struct fuse_conn *fc, bool for_background)
{
struct fuse_req *req;
int err;
@@ -201,7 +125,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
if (fc->conn_error)
goto out;
- req = fuse_request_alloc(npages);
+ req = fuse_request_alloc(GFP_KERNEL);
err = -ENOMEM;
if (!req) {
if (for_background)
@@ -229,101 +153,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
return ERR_PTR(err);
}
-struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
-{
- return __fuse_get_req(fc, npages, false);
-}
-EXPORT_SYMBOL_GPL(fuse_get_req);
-
-struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
- unsigned npages)
-{
- return __fuse_get_req(fc, npages, true);
-}
-EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
-
-/*
- * Return request in fuse_file->reserved_req. However that may
- * currently be in use. If that is the case, wait for it to become
- * available.
- */
-static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
- struct file *file)
-{
- struct fuse_req *req = NULL;
- struct fuse_inode *fi = get_fuse_inode(file_inode(file));
- struct fuse_file *ff = file->private_data;
-
- do {
- wait_event(fc->reserved_req_waitq, ff->reserved_req);
- spin_lock(&fi->lock);
- if (ff->reserved_req) {
- req = ff->reserved_req;
- ff->reserved_req = NULL;
- req->stolen_file = get_file(file);
- }
- spin_unlock(&fi->lock);
- } while (!req);
-
- return req;
-}
-
-/*
- * Put stolen request back into fuse_file->reserved_req
- */
-static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
-{
- struct file *file = req->stolen_file;
- struct fuse_inode *fi = get_fuse_inode(file_inode(file));
- struct fuse_file *ff = file->private_data;
-
- WARN_ON(req->max_pages);
- spin_lock(&fi->lock);
- memset(req, 0, sizeof(*req));
- fuse_request_init(req, NULL, NULL, 0);
- BUG_ON(ff->reserved_req);
- ff->reserved_req = req;
- wake_up_all(&fc->reserved_req_waitq);
- spin_unlock(&fi->lock);
- fput(file);
-}
-
-/*
- * Gets a requests for a file operation, always succeeds
- *
- * This is used for sending the FLUSH request, which must get to
- * userspace, due to POSIX locks which may need to be unlocked.
- *
- * If allocation fails due to OOM, use the reserved request in
- * fuse_file.
- *
- * This is very unlikely to deadlock accidentally, since the
- * filesystem should not have it's own file open. If deadlock is
- * intentional, it can still be broken by "aborting" the filesystem.
- */
-struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
- struct file *file)
-{
- struct fuse_req *req;
-
- atomic_inc(&fc->num_waiting);
- wait_event(fc->blocked_waitq, fc->initialized);
- /* Matches smp_wmb() in fuse_set_initialized() */
- smp_rmb();
- req = fuse_request_alloc(0);
- if (!req)
- req = get_reserved_req(fc, file);
-
- req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
- req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
- req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
-
- __set_bit(FR_WAITING, &req->flags);
- __clear_bit(FR_BACKGROUND, &req->flags);
- return req;
-}
-
-void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
if (refcount_dec_and_test(&req->count)) {
if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -342,15 +172,11 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
fuse_drop_waiting(fc);
}
- if (req->stolen_file)
- put_reserved_req(fc, req);
- else
- fuse_request_free(req);
+ fuse_request_free(req);
}
}
-EXPORT_SYMBOL_GPL(fuse_put_request);
-static unsigned len_args(unsigned numargs, struct fuse_arg *args)
+unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
{
unsigned nbytes = 0;
unsigned i;
@@ -360,25 +186,47 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
return nbytes;
}
+EXPORT_SYMBOL_GPL(fuse_len_args);
-static u64 fuse_get_unique(struct fuse_iqueue *fiq)
+u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
fiq->reqctr += FUSE_REQ_ID_STEP;
return fiq->reqctr;
}
+EXPORT_SYMBOL_GPL(fuse_get_unique);
static unsigned int fuse_req_hash(u64 unique)
{
return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
-static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
+/**
+ * A new request is available, wake fiq->waitq
+ */
+static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
+__releases(fiq->lock)
+{
+ wake_up(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ spin_unlock(&fiq->lock);
+}
+
+const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
+ .wake_forget_and_unlock = fuse_dev_wake_and_unlock,
+ .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
+ .wake_pending_and_unlock = fuse_dev_wake_and_unlock,
+};
+EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
+
+static void queue_request_and_unlock(struct fuse_iqueue *fiq,
+ struct fuse_req *req)
+__releases(fiq->lock)
{
req->in.h.len = sizeof(struct fuse_in_header) +
- len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+ fuse_len_args(req->args->in_numargs,
+ (struct fuse_arg *) req->args->in_args);
list_add_tail(&req->list, &fiq->pending);
- wake_up_locked(&fiq->waitq);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ fiq->ops->wake_pending_and_unlock(fiq);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -389,16 +237,15 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
if (fiq->connected) {
fiq->forget_list_tail->next = forget;
fiq->forget_list_tail = forget;
- wake_up_locked(&fiq->waitq);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ fiq->ops->wake_forget_and_unlock(fiq);
} else {
kfree(forget);
+ spin_unlock(&fiq->lock);
}
- spin_unlock(&fiq->waitq.lock);
}
static void flush_bg_queue(struct fuse_conn *fc)
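[Editor's note] The fuse_iqueue_ops indirection introduced above separates
"a request was queued" from "wake the /dev/fuse reader", so a transport other
than the character device can install its own callbacks via fuse_conn_init()
(as cuse_channel_open() now does with &fuse_dev_fiq_ops). A hedged sketch of
what an alternative backend might register; all names here are hypothetical:

static void example_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	/* e.g. kick a hardware or vring queue instead of waking fiq->waitq */
	spin_unlock(&fiq->lock);
}

static const struct fuse_iqueue_ops example_fiq_ops = {
	.wake_forget_and_unlock		= example_wake_and_unlock,
	.wake_interrupt_and_unlock	= example_wake_and_unlock,
	.wake_pending_and_unlock	= example_wake_and_unlock,
};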
@@ -412,10 +259,9 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
req->in.h.unique = fuse_get_unique(fiq);
- queue_request(fiq, req);
- spin_unlock(&fiq->waitq.lock);
+ queue_request_and_unlock(fiq, req);
}
}
@@ -427,9 +273,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
* the 'end' callback is called if given, else the reference to the
* request is released
*/
-static void request_end(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
{
struct fuse_iqueue *fiq = &fc->iq;
+ bool async = req->args->end;
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
@@ -439,9 +286,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
* smp_mb() from queue_interrupt().
*/
if (!list_empty(&req->intr_entry)) {
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
list_del_init(&req->intr_entry);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
}
WARN_ON(test_bit(FR_PENDING, &req->flags));
WARN_ON(test_bit(FR_SENT, &req->flags));
@@ -475,18 +322,19 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&req->waitq);
}
- if (req->end)
- req->end(fc, req);
+ if (async)
+ req->args->end(fc, req->args, req->out.h.error);
put_request:
fuse_put_request(fc, req);
}
+EXPORT_SYMBOL_GPL(fuse_request_end);
static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
/* Check for we've sent request to interrupt this req */
if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return -EINVAL;
}
@@ -499,13 +347,13 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
smp_mb();
if (test_bit(FR_FINISHED, &req->flags)) {
list_del_init(&req->intr_entry);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return 0;
}
- wake_up_locked(&fiq->waitq);
- kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ fiq->ops->wake_interrupt_and_unlock(fiq);
+ } else {
+ spin_unlock(&fiq->lock);
}
- spin_unlock(&fiq->waitq.lock);
return 0;
}
@@ -535,16 +383,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
if (!err)
return;
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
/* Request is not yet in userspace, bail out */
if (test_bit(FR_PENDING, &req->flags)) {
list_del(&req->list);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
__fuse_put_request(req);
req->out.h.error = -EINTR;
return;
}
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
}
/*
@@ -559,101 +407,110 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_iqueue *fiq = &fc->iq;
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
if (!fiq->connected) {
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
req->out.h.error = -ENOTCONN;
} else {
req->in.h.unique = fuse_get_unique(fiq);
- queue_request(fiq, req);
/* acquire extra reference, since request is still needed
- after request_end() */
+ after fuse_request_end() */
__fuse_get_request(req);
- spin_unlock(&fiq->waitq.lock);
+ queue_request_and_unlock(fiq, req);
request_wait_answer(fc, req);
- /* Pairs with smp_wmb() in request_end() */
+ /* Pairs with smp_wmb() in fuse_request_end() */
smp_rmb();
}
}
-void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
-{
- __set_bit(FR_ISREPLY, &req->flags);
- if (!test_bit(FR_WAITING, &req->flags)) {
- __set_bit(FR_WAITING, &req->flags);
- atomic_inc(&fc->num_waiting);
- }
- __fuse_request_send(fc, req);
-}
-EXPORT_SYMBOL_GPL(fuse_request_send);
-
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
- if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
- args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+ if (fc->minor < 4 && args->opcode == FUSE_STATFS)
+ args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;
if (fc->minor < 9) {
- switch (args->in.h.opcode) {
+ switch (args->opcode) {
case FUSE_LOOKUP:
case FUSE_CREATE:
case FUSE_MKNOD:
case FUSE_MKDIR:
case FUSE_SYMLINK:
case FUSE_LINK:
- args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
break;
case FUSE_GETATTR:
case FUSE_SETATTR:
- args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
break;
}
}
if (fc->minor < 12) {
- switch (args->in.h.opcode) {
+ switch (args->opcode) {
case FUSE_CREATE:
- args->in.args[0].size = sizeof(struct fuse_open_in);
+ args->in_args[0].size = sizeof(struct fuse_open_in);
break;
case FUSE_MKNOD:
- args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+ args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
break;
}
}
}
+static void fuse_force_creds(struct fuse_conn *fc, struct fuse_req *req)
+{
+ req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
+ req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
+ req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
+}
+
+static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
+{
+ req->in.h.opcode = args->opcode;
+ req->in.h.nodeid = args->nodeid;
+ req->args = args;
+}
+
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
struct fuse_req *req;
ssize_t ret;
- req = fuse_get_req(fc, 0);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ if (args->force) {
+ atomic_inc(&fc->num_waiting);
+ req = fuse_request_alloc(GFP_KERNEL | __GFP_NOFAIL);
+
+ if (!args->nocreds)
+ fuse_force_creds(fc, req);
+
+ __set_bit(FR_WAITING, &req->flags);
+ __set_bit(FR_FORCE, &req->flags);
+ } else {
+ WARN_ON(args->nocreds);
+ req = fuse_get_req(fc, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
/* Needs to be done after fuse_get_req() so that fc->minor is valid */
fuse_adjust_compat(fc, args);
+ fuse_args_to_req(req, args);
- req->in.h.opcode = args->in.h.opcode;
- req->in.h.nodeid = args->in.h.nodeid;
- req->in.numargs = args->in.numargs;
- memcpy(req->in.args, args->in.args,
- args->in.numargs * sizeof(struct fuse_in_arg));
- req->out.argvar = args->out.argvar;
- req->out.numargs = args->out.numargs;
- memcpy(req->out.args, args->out.args,
- args->out.numargs * sizeof(struct fuse_arg));
- fuse_request_send(fc, req);
+ if (!args->noreply)
+ __set_bit(FR_ISREPLY, &req->flags);
+ __fuse_request_send(fc, req);
ret = req->out.h.error;
- if (!ret && args->out.argvar) {
- BUG_ON(args->out.numargs != 1);
- ret = req->out.args[0].size;
+ if (!ret && args->out_argvar) {
+ BUG_ON(args->out_numargs == 0);
+ ret = args->out_args[args->out_numargs - 1].size;
}
fuse_put_request(fc, req);
return ret;
}
-bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
+static bool fuse_request_queue_background(struct fuse_conn *fc,
+ struct fuse_req *req)
{
bool queued = false;
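[Editor's note] After the conversion above, a synchronous caller no longer
builds a struct fuse_req by hand: it fills a stack-allocated struct fuse_args
and calls fuse_simple_request(). A sketch of the calling convention, with an
illustrative opcode and argument layout:

	struct fuse_args args = {};

	args.opcode = FUSE_GETATTR;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);	/* negative value = request error */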
@@ -681,56 +538,63 @@ bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
return queued;
}
-void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
+int fuse_simple_background(struct fuse_conn *fc, struct fuse_args *args,
+ gfp_t gfp_flags)
{
- WARN_ON(!req->end);
+ struct fuse_req *req;
+
+ if (args->force) {
+ WARN_ON(!args->nocreds);
+ req = fuse_request_alloc(gfp_flags);
+ if (!req)
+ return -ENOMEM;
+ __set_bit(FR_BACKGROUND, &req->flags);
+ } else {
+ WARN_ON(args->nocreds);
+ req = fuse_get_req(fc, true);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
+
+ fuse_args_to_req(req, args);
+
if (!fuse_request_queue_background(fc, req)) {
- req->out.h.error = -ENOTCONN;
- req->end(fc, req);
fuse_put_request(fc, req);
+ return -ENOTCONN;
}
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(fuse_request_send_background);
+EXPORT_SYMBOL_GPL(fuse_simple_background);
-static int fuse_request_send_notify_reply(struct fuse_conn *fc,
- struct fuse_req *req, u64 unique)
+static int fuse_simple_notify_reply(struct fuse_conn *fc,
+ struct fuse_args *args, u64 unique)
{
- int err = -ENODEV;
+ struct fuse_req *req;
struct fuse_iqueue *fiq = &fc->iq;
+ int err = 0;
+
+ req = fuse_get_req(fc, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
__clear_bit(FR_ISREPLY, &req->flags);
req->in.h.unique = unique;
- spin_lock(&fiq->waitq.lock);
+
+ fuse_args_to_req(req, args);
+
+ spin_lock(&fiq->lock);
if (fiq->connected) {
- queue_request(fiq, req);
- err = 0;
+ queue_request_and_unlock(fiq, req);
+ } else {
+ err = -ENODEV;
+ spin_unlock(&fiq->lock);
+ fuse_put_request(fc, req);
}
- spin_unlock(&fiq->waitq.lock);
return err;
}
-void fuse_force_forget(struct file *file, u64 nodeid)
-{
- struct inode *inode = file_inode(file);
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
- struct fuse_forget_in inarg;
-
- memset(&inarg, 0, sizeof(inarg));
- inarg.nlookup = 1;
- req = fuse_get_req_nofail_nopages(fc, file);
- req->in.h.opcode = FUSE_FORGET;
- req->in.h.nodeid = nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- __clear_bit(FR_ISREPLY, &req->flags);
- __fuse_request_send(fc, req);
- /* ignore errors */
- fuse_put_request(fc, req);
-}
-
/*
* Lock the request. Up to the next unlock_request() there mustn't be
* anything that could cause a page-fault. If the request was already
@@ -1084,14 +948,15 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
{
unsigned i;
struct fuse_req *req = cs->req;
+ struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
+
- for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
+ for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
int err;
- unsigned offset = req->page_descs[i].offset;
- unsigned count = min(nbytes, req->page_descs[i].length);
+ unsigned int offset = ap->descs[i].offset;
+ unsigned int count = min(nbytes, ap->descs[i].length);
- err = fuse_copy_page(cs, &req->pages[i], offset, count,
- zeroing);
+ err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
if (err)
return err;
@@ -1149,12 +1014,12 @@ static int request_pending(struct fuse_iqueue *fiq)
* Unlike other requests this is assembled on demand, without a need
* to allocate a separate fuse_req structure.
*
- * Called with fiq->waitq.lock held, releases it
+ * Called with fiq->lock held, releases it
*/
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes, struct fuse_req *req)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
struct fuse_in_header ih;
struct fuse_interrupt_in arg;
@@ -1169,7 +1034,7 @@ __releases(fiq->waitq.lock)
ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
arg.unique = req->in.h.unique;
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
if (nbytes < reqsize)
return -EINVAL;
@@ -1181,9 +1046,9 @@ __releases(fiq->waitq.lock)
return err ? err : reqsize;
}
-static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
- unsigned max,
- unsigned *countp)
+struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+ unsigned int max,
+ unsigned int *countp)
{
struct fuse_forget_link *head = fiq->forget_list_head.next;
struct fuse_forget_link **newhead = &head;
@@ -1202,14 +1067,15 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
return head;
}
+EXPORT_SYMBOL(fuse_dequeue_forget);
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
int err;
- struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
+ struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
struct fuse_forget_in arg = {
.nlookup = forget->forget_one.nlookup,
};
@@ -1220,7 +1086,7 @@ __releases(fiq->waitq.lock)
.len = sizeof(ih) + sizeof(arg),
};
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
kfree(forget);
if (nbytes < ih.len)
return -EINVAL;
@@ -1238,7 +1104,7 @@ __releases(fiq->waitq.lock)
static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
struct fuse_copy_state *cs, size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
int err;
unsigned max_forgets;
@@ -1252,13 +1118,13 @@ __releases(fiq->waitq.lock)
};
if (nbytes < ih.len) {
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return -EINVAL;
}
max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
- head = dequeue_forget(fiq, max_forgets, &count);
- spin_unlock(&fiq->waitq.lock);
+ head = fuse_dequeue_forget(fiq, max_forgets, &count);
+ spin_unlock(&fiq->lock);
arg.count = count;
ih.len += count * sizeof(struct fuse_forget_one);
@@ -1288,7 +1154,7 @@ __releases(fiq->waitq.lock)
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
struct fuse_copy_state *cs,
size_t nbytes)
-__releases(fiq->waitq.lock)
+__releases(fiq->lock)
{
if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
return fuse_read_single_forget(fiq, cs, nbytes);
@@ -1302,7 +1168,7 @@ __releases(fiq->waitq.lock)
* the pending list and copies request data to userspace buffer. If
* no reply is needed (FORGET) or request has been aborted or there
* was an error during the copying then it's finished by calling
- * request_end(). Otherwise add it to the processing list, and set
+ * fuse_request_end(). Otherwise add it to the processing list, and set
* the 'sent' flag.
*/
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
@@ -1313,21 +1179,42 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
struct fuse_iqueue *fiq = &fc->iq;
struct fuse_pqueue *fpq = &fud->pq;
struct fuse_req *req;
- struct fuse_in *in;
+ struct fuse_args *args;
unsigned reqsize;
unsigned int hash;
+	/*
+	 * Require a sane minimum read buffer - one with capacity for the
+	 * fixed part of any request header plus the negotiated max_write
+	 * room for data.
+	 *
+	 * Historically libfuse reserves 4K for fixed header room, but e.g.
+	 * GlusterFS reserves only 80 bytes
+	 *
+	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
+	 *
+	 * which is the absolute minimum any sane filesystem should be using
+	 * for header room.
+	 */
+ if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
+ sizeof(struct fuse_in_header) +
+ sizeof(struct fuse_write_in) +
+ fc->max_write))
+ return -EINVAL;
+
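
A worked example of the new capacity check, assuming the UAPI sizes
(fuse_in_header and fuse_write_in are 40 bytes each, FUSE_MIN_READ_BUFFER
is 8192) and a negotiated max_write of 128K:

	size_t min_nbytes = max_t(size_t, 8192,	/* FUSE_MIN_READ_BUFFER */
				  40 +		/* sizeof(fuse_in_header) */
				  40 +		/* sizeof(fuse_write_in) */
				  131072);	/* fc->max_write */
	/* min_nbytes == 131152; a read with a smaller buffer is -EINVAL */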
restart:
- spin_lock(&fiq->waitq.lock);
- err = -EAGAIN;
- if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
- !request_pending(fiq))
- goto err_unlock;
+ for (;;) {
+ spin_lock(&fiq->lock);
+ if (!fiq->connected || request_pending(fiq))
+ break;
+ spin_unlock(&fiq->lock);
- err = wait_event_interruptible_exclusive_locked(fiq->waitq,
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ err = wait_event_interruptible_exclusive(fiq->waitq,
!fiq->connected || request_pending(fiq));
- if (err)
- goto err_unlock;
+ if (err)
+ return err;
+ }
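
The open-coded loop replaces wait_event_interruptible_exclusive_locked()
because fiq->lock is now a spinlock separate from fiq->waitq's internal
lock: the code must drop it before sleeping and re-take it before
re-checking. The general shape of that conversion, as a sketch:

	for (;;) {
		spin_lock(&lock);
		if (condition)
			break;		/* leave the loop with lock held */
		spin_unlock(&lock);
		err = wait_event_interruptible_exclusive(wq, condition);
		if (err)
			return err;
	}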
if (!fiq->connected) {
err = fc->aborted ? -ECONNABORTED : -ENODEV;
@@ -1351,28 +1238,28 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
req = list_entry(fiq->pending.next, struct fuse_req, list);
clear_bit(FR_PENDING, &req->flags);
list_del_init(&req->list);
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
- in = &req->in;
- reqsize = in->h.len;
+ args = req->args;
+ reqsize = req->in.h.len;
/* If request is too large, reply with an error and restart the read */
if (nbytes < reqsize) {
req->out.h.error = -EIO;
/* SETXATTR is special, since it may contain too large data */
- if (in->h.opcode == FUSE_SETXATTR)
+ if (args->opcode == FUSE_SETXATTR)
req->out.h.error = -E2BIG;
- request_end(fc, req);
+ fuse_request_end(fc, req);
goto restart;
}
spin_lock(&fpq->lock);
list_add(&req->list, &fpq->io);
spin_unlock(&fpq->lock);
cs->req = req;
- err = fuse_copy_one(cs, &in->h, sizeof(in->h));
+ err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
if (!err)
- err = fuse_copy_args(cs, in->numargs, in->argpages,
- (struct fuse_arg *) in->args, 0);
+ err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
+ (struct fuse_arg *) args->in_args, 0);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
clear_bit(FR_LOCKED, &req->flags);
@@ -1405,11 +1292,11 @@ out_end:
if (!test_bit(FR_PRIVATE, &req->flags))
list_del_init(&req->list);
spin_unlock(&fpq->lock);
- request_end(fc, req);
+ fuse_request_end(fc, req);
return err;
err_unlock:
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return err;
}
@@ -1728,9 +1615,19 @@ out_finish:
return err;
}
-static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
+struct fuse_retrieve_args {
+ struct fuse_args_pages ap;
+ struct fuse_notify_retrieve_in inarg;
+};
+
+static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_args *args,
+ int error)
{
- release_pages(req->pages, req->num_pages);
+ struct fuse_retrieve_args *ra =
+ container_of(args, typeof(*ra), ap.args);
+
+ release_pages(ra->ap.pages, ra->ap.num_pages);
+ kfree(ra);
}
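
Two idioms recur throughout this conversion and are condensed below: the
per-request state now lives in a wrapper struct recovered via
container_of() in the ->end callback, and the retrieve path co-allocates
its page and descriptor arrays behind the wrapper in a single kzalloc()
(error handling elided in this sketch):

	struct fuse_retrieve_args *ra;

	/* one allocation: wrapper, then pages[], then descs[] */
	ra = kzalloc(sizeof(*ra) + num_pages * (sizeof(struct page *) +
		     sizeof(struct fuse_page_desc)), GFP_KERNEL);
	ra->ap.pages = (void *)(ra + 1);
	ra->ap.descs = (void *)(ra->ap.pages + num_pages);

	/* in the end callback, args points at the embedded ap.args: */
	ra = container_of(args, struct fuse_retrieve_args, ap.args);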
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
@@ -1738,13 +1635,16 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
{
int err;
struct address_space *mapping = inode->i_mapping;
- struct fuse_req *req;
pgoff_t index;
loff_t file_size;
unsigned int num;
unsigned int offset;
size_t total_len = 0;
unsigned int num_pages;
+ struct fuse_retrieve_args *ra;
+ size_t args_size = sizeof(*ra);
+ struct fuse_args_pages *ap;
+ struct fuse_args *args;
offset = outarg->offset & ~PAGE_MASK;
file_size = i_size_read(inode);
@@ -1758,19 +1658,26 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages = min(num_pages, fc->max_pages);
- req = fuse_get_req(fc, num_pages);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
- req->in.h.opcode = FUSE_NOTIFY_REPLY;
- req->in.h.nodeid = outarg->nodeid;
- req->in.numargs = 2;
- req->in.argpages = 1;
- req->end = fuse_retrieve_end;
+ ra = kzalloc(args_size, GFP_KERNEL);
+ if (!ra)
+ return -ENOMEM;
+
+ ap = &ra->ap;
+ ap->pages = (void *) (ra + 1);
+ ap->descs = (void *) (ap->pages + num_pages);
+
+ args = &ap->args;
+ args->nodeid = outarg->nodeid;
+ args->opcode = FUSE_NOTIFY_REPLY;
+ args->in_numargs = 2;
+ args->in_pages = true;
+ args->end = fuse_retrieve_end;
index = outarg->offset >> PAGE_SHIFT;
- while (num && req->num_pages < num_pages) {
+ while (num && ap->num_pages < num_pages) {
struct page *page;
unsigned int this_num;
@@ -1779,27 +1686,25 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
break;
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
- req->pages[req->num_pages] = page;
- req->page_descs[req->num_pages].offset = offset;
- req->page_descs[req->num_pages].length = this_num;
- req->num_pages++;
+ ap->pages[ap->num_pages] = page;
+ ap->descs[ap->num_pages].offset = offset;
+ ap->descs[ap->num_pages].length = this_num;
+ ap->num_pages++;
offset = 0;
num -= this_num;
total_len += this_num;
index++;
}
- req->misc.retrieve_in.offset = outarg->offset;
- req->misc.retrieve_in.size = total_len;
- req->in.args[0].size = sizeof(req->misc.retrieve_in);
- req->in.args[0].value = &req->misc.retrieve_in;
- req->in.args[1].size = total_len;
+ ra->inarg.offset = outarg->offset;
+ ra->inarg.size = total_len;
+ args->in_args[0].size = sizeof(ra->inarg);
+ args->in_args[0].value = &ra->inarg;
+ args->in_args[1].size = total_len;
- err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
- if (err) {
- fuse_retrieve_end(fc, req);
- fuse_put_request(fc, req);
- }
+ err = fuse_simple_notify_reply(fc, args, outarg->notify_unique);
+ if (err)
+ fuse_retrieve_end(fc, args, err);
return err;
}
@@ -1885,27 +1790,25 @@ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
return NULL;
}
-static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
+static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
unsigned nbytes)
{
unsigned reqsize = sizeof(struct fuse_out_header);
- if (out->h.error)
- return nbytes != reqsize ? -EINVAL : 0;
-
- reqsize += len_args(out->numargs, out->args);
+ reqsize += fuse_len_args(args->out_numargs, args->out_args);
- if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
+ if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
return -EINVAL;
else if (reqsize > nbytes) {
- struct fuse_arg *lastarg = &out->args[out->numargs-1];
+ struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
unsigned diffsize = reqsize - nbytes;
+
if (diffsize > lastarg->size)
return -EINVAL;
lastarg->size -= diffsize;
}
- return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
- out->page_zeroing);
+ return fuse_copy_args(cs, args->out_numargs, args->out_pages,
+ args->out_args, args->page_zeroing);
}
/*
@@ -1913,7 +1816,7 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
* the write buffer. The request is then searched on the processing
* list by the unique ID found in the header. If found, then remove
* it from the list and copy the rest of the buffer to the request.
- * The request is finished by calling request_end()
+ * The request is finished by calling fuse_request_end().
*/
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
struct fuse_copy_state *cs, size_t nbytes)
@@ -1984,10 +1887,13 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
set_bit(FR_LOCKED, &req->flags);
spin_unlock(&fpq->lock);
cs->req = req;
- if (!req->out.page_replace)
+ if (!req->args->page_replace)
cs->move_pages = 0;
- err = copy_out_args(cs, &req->out, nbytes);
+ if (oh.error)
+ err = nbytes != sizeof(oh) ? -EINVAL : 0;
+ else
+ err = copy_out_args(cs, req->args, nbytes);
fuse_copy_finish(cs);
spin_lock(&fpq->lock);
@@ -2000,7 +1906,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
list_del_init(&req->list);
spin_unlock(&fpq->lock);
- request_end(fc, req);
+ fuse_request_end(fc, req);
out:
return err ? err : nbytes;
@@ -2121,12 +2027,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
fiq = &fud->fc->iq;
poll_wait(file, &fiq->waitq, wait);
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
if (!fiq->connected)
mask = EPOLLERR;
else if (request_pending(fiq))
mask |= EPOLLIN | EPOLLRDNORM;
- spin_unlock(&fiq->waitq.lock);
+ spin_unlock(&fiq->lock);
return mask;
}
@@ -2140,7 +2046,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
req->out.h.error = -ECONNABORTED;
clear_bit(FR_SENT, &req->flags);
list_del_init(&req->list);
- request_end(fc, req);
+ fuse_request_end(fc, req);
}
}
@@ -2221,15 +2127,15 @@ void fuse_abort_conn(struct fuse_conn *fc)
flush_bg_queue(fc);
spin_unlock(&fc->bg_lock);
- spin_lock(&fiq->waitq.lock);
+ spin_lock(&fiq->lock);
fiq->connected = 0;
list_for_each_entry(req, &fiq->pending, list)
clear_bit(FR_PENDING, &req->flags);
list_splice_tail_init(&fiq->pending, &to_end);
while (forget_pending(fiq))
- kfree(dequeue_forget(fiq, 1, NULL));
- wake_up_all_locked(&fiq->waitq);
- spin_unlock(&fiq->waitq.lock);
+ kfree(fuse_dequeue_forget(fiq, 1, NULL));
+ wake_up_all(&fiq->waitq);
+ spin_unlock(&fiq->lock);
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
end_polls(fc);
wake_up_all(&fc->blocked_waitq);
@@ -2296,7 +2202,7 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
if (new->private_data)
return -EINVAL;
- fud = fuse_dev_alloc(fc);
+ fud = fuse_dev_alloc_install(fc);
if (!fud)
return -ENOMEM;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index dd0f64f7bc06..d572c900bb0f 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -24,20 +24,54 @@ static void fuse_advise_use_readdirplus(struct inode *dir)
set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
}
+#if BITS_PER_LONG >= 64
+static inline void __fuse_dentry_settime(struct dentry *entry, u64 time)
+{
+ entry->d_fsdata = (void *) time;
+}
+
+static inline u64 fuse_dentry_time(const struct dentry *entry)
+{
+ return (u64)entry->d_fsdata;
+}
+
+#else
union fuse_dentry {
u64 time;
struct rcu_head rcu;
};
-static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
+static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
{
- ((union fuse_dentry *) entry->d_fsdata)->time = time;
+ ((union fuse_dentry *) dentry->d_fsdata)->time = time;
}
-static inline u64 fuse_dentry_time(struct dentry *entry)
+static inline u64 fuse_dentry_time(const struct dentry *entry)
{
return ((union fuse_dentry *) entry->d_fsdata)->time;
}
+#endif
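
On 64-bit kernels the u64 timeout fits in the pointer-sized d_fsdata slot
itself, so the store/load is a pair of casts; the 32-bit path keeps the
union (and the kzalloc/kfree_rcu in d_init/d_release below), presumably
because the timeout may still be read under RCU-walk after the dentry is
killed. The 64-bit round trip, spelled out:

	entry->d_fsdata = (void *) time;	/* store the u64 in the slot */
	time = (u64) entry->d_fsdata;		/* and read it back */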
+
+static void fuse_dentry_settime(struct dentry *dentry, u64 time)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
+ bool delete = !time && fc->delete_stale;
+	/*
+	 * Mess with DCACHE_OP_DELETE because dput() will be faster without it.
+	 * Don't care about races; either way it's just an optimization.
+	 */
+ if ((!delete && (dentry->d_flags & DCACHE_OP_DELETE)) ||
+ (delete && !(dentry->d_flags & DCACHE_OP_DELETE))) {
+ spin_lock(&dentry->d_lock);
+ if (!delete)
+ dentry->d_flags &= ~DCACHE_OP_DELETE;
+ else
+ dentry->d_flags |= DCACHE_OP_DELETE;
+ spin_unlock(&dentry->d_lock);
+ }
+
+ __fuse_dentry_settime(dentry, time);
+}
/*
* FUSE caches dentries and attributes with separate timeout. The
@@ -139,14 +173,14 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
struct fuse_entry_out *outarg)
{
memset(outarg, 0, sizeof(struct fuse_entry_out));
- args->in.h.opcode = FUSE_LOOKUP;
- args->in.h.nodeid = nodeid;
- args->in.numargs = 1;
- args->in.args[0].size = name->len + 1;
- args->in.args[0].value = name->name;
- args->out.numargs = 1;
- args->out.args[0].size = sizeof(struct fuse_entry_out);
- args->out.args[0].value = outarg;
+ args->opcode = FUSE_LOOKUP;
+ args->nodeid = nodeid;
+ args->in_numargs = 1;
+ args->in_args[0].size = name->len + 1;
+ args->in_args[0].value = name->name;
+ args->out_numargs = 1;
+ args->out_args[0].size = sizeof(struct fuse_entry_out);
+ args->out_args[0].value = outarg;
}
/*
@@ -242,9 +276,11 @@ invalid:
goto out;
}
+#if BITS_PER_LONG < 64
static int fuse_dentry_init(struct dentry *dentry)
{
- dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry), GFP_KERNEL);
+ dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry),
+ GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
return dentry->d_fsdata ? 0 : -ENOMEM;
}
@@ -254,16 +290,27 @@ static void fuse_dentry_release(struct dentry *dentry)
kfree_rcu(fd, rcu);
}
+#endif
+
+static int fuse_dentry_delete(const struct dentry *dentry)
+{
+ return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
+}
const struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
+ .d_delete = fuse_dentry_delete,
+#if BITS_PER_LONG < 64
.d_init = fuse_dentry_init,
.d_release = fuse_dentry_release,
+#endif
};
const struct dentry_operations fuse_root_dentry_operations = {
+#if BITS_PER_LONG < 64
.d_init = fuse_dentry_init,
.d_release = fuse_dentry_release,
+#endif
};
int fuse_valid_type(int m)
@@ -410,18 +457,18 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
inarg.flags = flags;
inarg.mode = mode;
inarg.umask = current_umask();
- args.in.h.opcode = FUSE_CREATE;
- args.in.h.nodeid = get_node_id(dir);
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = entry->d_name.len + 1;
- args.in.args[1].value = entry->d_name.name;
- args.out.numargs = 2;
- args.out.args[0].size = sizeof(outentry);
- args.out.args[0].value = &outentry;
- args.out.args[1].size = sizeof(outopen);
- args.out.args[1].value = &outopen;
+ args.opcode = FUSE_CREATE;
+ args.nodeid = get_node_id(dir);
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
+ args.out_numargs = 2;
+ args.out_args[0].size = sizeof(outentry);
+ args.out_args[0].value = &outentry;
+ args.out_args[1].size = sizeof(outopen);
+ args.out_args[1].value = &outopen;
err = fuse_simple_request(fc, &args);
if (err)
goto out_free_ff;
@@ -526,10 +573,10 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
return -ENOMEM;
memset(&outarg, 0, sizeof(outarg));
- args->in.h.nodeid = get_node_id(dir);
- args->out.numargs = 1;
- args->out.args[0].size = sizeof(outarg);
- args->out.args[0].value = &outarg;
+ args->nodeid = get_node_id(dir);
+ args->out_numargs = 1;
+ args->out_args[0].size = sizeof(outarg);
+ args->out_args[0].value = &outarg;
err = fuse_simple_request(fc, args);
if (err)
goto out_put_forget_req;
@@ -582,12 +629,12 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
inarg.mode = mode;
inarg.rdev = new_encode_dev(rdev);
inarg.umask = current_umask();
- args.in.h.opcode = FUSE_MKNOD;
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = entry->d_name.len + 1;
- args.in.args[1].value = entry->d_name.name;
+ args.opcode = FUSE_MKNOD;
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
return create_new_entry(fc, &args, dir, entry, mode);
}
@@ -609,12 +656,12 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
inarg.umask = current_umask();
- args.in.h.opcode = FUSE_MKDIR;
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = entry->d_name.len + 1;
- args.in.args[1].value = entry->d_name.name;
+ args.opcode = FUSE_MKDIR;
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = entry->d_name.len + 1;
+ args.in_args[1].value = entry->d_name.name;
return create_new_entry(fc, &args, dir, entry, S_IFDIR);
}
@@ -625,12 +672,12 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
unsigned len = strlen(link) + 1;
FUSE_ARGS(args);
- args.in.h.opcode = FUSE_SYMLINK;
- args.in.numargs = 2;
- args.in.args[0].size = entry->d_name.len + 1;
- args.in.args[0].value = entry->d_name.name;
- args.in.args[1].size = len;
- args.in.args[1].value = link;
+ args.opcode = FUSE_SYMLINK;
+ args.in_numargs = 2;
+ args.in_args[0].size = entry->d_name.len + 1;
+ args.in_args[0].value = entry->d_name.name;
+ args.in_args[1].size = len;
+ args.in_args[1].value = link;
return create_new_entry(fc, &args, dir, entry, S_IFLNK);
}
@@ -648,11 +695,11 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
struct fuse_conn *fc = get_fuse_conn(dir);
FUSE_ARGS(args);
- args.in.h.opcode = FUSE_UNLINK;
- args.in.h.nodeid = get_node_id(dir);
- args.in.numargs = 1;
- args.in.args[0].size = entry->d_name.len + 1;
- args.in.args[0].value = entry->d_name.name;
+ args.opcode = FUSE_UNLINK;
+ args.nodeid = get_node_id(dir);
+ args.in_numargs = 1;
+ args.in_args[0].size = entry->d_name.len + 1;
+ args.in_args[0].value = entry->d_name.name;
err = fuse_simple_request(fc, &args);
if (!err) {
struct inode *inode = d_inode(entry);
@@ -684,11 +731,11 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
struct fuse_conn *fc = get_fuse_conn(dir);
FUSE_ARGS(args);
- args.in.h.opcode = FUSE_RMDIR;
- args.in.h.nodeid = get_node_id(dir);
- args.in.numargs = 1;
- args.in.args[0].size = entry->d_name.len + 1;
- args.in.args[0].value = entry->d_name.name;
+ args.opcode = FUSE_RMDIR;
+ args.nodeid = get_node_id(dir);
+ args.in_numargs = 1;
+ args.in_args[0].size = entry->d_name.len + 1;
+ args.in_args[0].value = entry->d_name.name;
err = fuse_simple_request(fc, &args);
if (!err) {
clear_nlink(d_inode(entry));
@@ -711,15 +758,15 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
memset(&inarg, 0, argsize);
inarg.newdir = get_node_id(newdir);
inarg.flags = flags;
- args.in.h.opcode = opcode;
- args.in.h.nodeid = get_node_id(olddir);
- args.in.numargs = 3;
- args.in.args[0].size = argsize;
- args.in.args[0].value = &inarg;
- args.in.args[1].size = oldent->d_name.len + 1;
- args.in.args[1].value = oldent->d_name.name;
- args.in.args[2].size = newent->d_name.len + 1;
- args.in.args[2].value = newent->d_name.name;
+ args.opcode = opcode;
+ args.nodeid = get_node_id(olddir);
+ args.in_numargs = 3;
+ args.in_args[0].size = argsize;
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = oldent->d_name.len + 1;
+ args.in_args[1].value = oldent->d_name.name;
+ args.in_args[2].size = newent->d_name.len + 1;
+ args.in_args[2].value = newent->d_name.name;
err = fuse_simple_request(fc, &args);
if (!err) {
/* ctime changes */
@@ -796,12 +843,12 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
memset(&inarg, 0, sizeof(inarg));
inarg.oldnodeid = get_node_id(inode);
- args.in.h.opcode = FUSE_LINK;
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = newent->d_name.len + 1;
- args.in.args[1].value = newent->d_name.name;
+ args.opcode = FUSE_LINK;
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = newent->d_name.len + 1;
+ args.in_args[1].value = newent->d_name.name;
err = create_new_entry(fc, &args, newdir, newent, inode->i_mode);
/* Contrary to "normal" filesystems it can happen that link
makes two "logical" inodes point to the same "physical"
@@ -884,14 +931,14 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
inarg.getattr_flags |= FUSE_GETATTR_FH;
inarg.fh = ff->fh;
}
- args.in.h.opcode = FUSE_GETATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_GETATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err) {
if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
@@ -1056,11 +1103,11 @@ static int fuse_access(struct inode *inode, int mask)
memset(&inarg, 0, sizeof(inarg));
inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
- args.in.h.opcode = FUSE_ACCESS;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
+ args.opcode = FUSE_ACCESS;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_access = 1;
@@ -1152,38 +1199,36 @@ static int fuse_permission(struct inode *inode, int mask)
static int fuse_readlink_page(struct inode *inode, struct page *page)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
- int err;
+ struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 };
+ struct fuse_args_pages ap = {
+ .num_pages = 1,
+ .pages = &page,
+ .descs = &desc,
+ };
+ char *link;
+ ssize_t res;
+
+ ap.args.opcode = FUSE_READLINK;
+ ap.args.nodeid = get_node_id(inode);
+ ap.args.out_pages = true;
+ ap.args.out_argvar = true;
+ ap.args.page_zeroing = true;
+ ap.args.out_numargs = 1;
+ ap.args.out_args[0].size = desc.length;
+ res = fuse_simple_request(fc, &ap.args);
- req = fuse_get_req(fc, 1);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->out.page_zeroing = 1;
- req->out.argpages = 1;
- req->num_pages = 1;
- req->pages[0] = page;
- req->page_descs[0].length = PAGE_SIZE - 1;
- req->in.h.opcode = FUSE_READLINK;
- req->in.h.nodeid = get_node_id(inode);
- req->out.argvar = 1;
- req->out.numargs = 1;
- req->out.args[0].size = PAGE_SIZE - 1;
- fuse_request_send(fc, req);
- err = req->out.h.error;
+ fuse_invalidate_atime(inode);
- if (!err) {
- char *link = page_address(page);
- size_t len = req->out.args[0].size;
+ if (res < 0)
+ return res;
- BUG_ON(len >= PAGE_SIZE);
- link[len] = '\0';
- }
+ if (WARN_ON(res >= PAGE_SIZE))
+ return -EIO;
- fuse_put_request(fc, req);
- fuse_invalidate_atime(inode);
+ link = page_address(page);
+ link[res] = '\0';
- return err;
+ return 0;
}
static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
@@ -1383,14 +1428,14 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
struct fuse_setattr_in *inarg_p,
struct fuse_attr_out *outarg_p)
{
- args->in.h.opcode = FUSE_SETATTR;
- args->in.h.nodeid = get_node_id(inode);
- args->in.numargs = 1;
- args->in.args[0].size = sizeof(*inarg_p);
- args->in.args[0].value = inarg_p;
- args->out.numargs = 1;
- args->out.args[0].size = sizeof(*outarg_p);
- args->out.args[0].value = outarg_p;
+ args->opcode = FUSE_SETATTR;
+ args->nodeid = get_node_id(inode);
+ args->in_numargs = 1;
+ args->in_args[0].size = sizeof(*inarg_p);
+ args->in_args[0].value = inarg_p;
+ args->out_numargs = 1;
+ args->out_args[0].size = sizeof(*outarg_p);
+ args->out_args[0].value = outarg_p;
}
/*
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 5ae2828beb00..0f0225686aee 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -19,6 +19,18 @@
#include <linux/falloc.h>
#include <linux/uio.h>
+static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
+ struct fuse_page_desc **desc)
+{
+ struct page **pages;
+
+ pages = kzalloc(npages * (sizeof(struct page *) +
+ sizeof(struct fuse_page_desc)), flags);
+ *desc = (void *) (pages + npages);
+
+ return pages;
+}
+
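
fuse_pages_alloc() packs both arrays into one allocation so that the
single kfree(ap->pages) seen later (in fuse_io_free() and
fuse_perform_write()) releases everything. A usage sketch with an assumed
npages:

	struct fuse_page_desc *descs;
	struct page **pages;

	pages = fuse_pages_alloc(npages, GFP_KERNEL, &descs);
	if (!pages)
		return -ENOMEM;
	/* pages[0..npages-1] and descs[0..npages-1] share the allocation */
	kfree(pages);	/* frees both arrays */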
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
int opcode, struct fuse_open_out *outargp)
{
@@ -29,29 +41,36 @@ static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
if (!fc->atomic_o_trunc)
inarg.flags &= ~O_TRUNC;
- args.in.h.opcode = opcode;
- args.in.h.nodeid = nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(*outargp);
- args.out.args[0].value = outargp;
+ args.opcode = opcode;
+ args.nodeid = nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(*outargp);
+ args.out_args[0].value = outargp;
return fuse_simple_request(fc, &args);
}
+struct fuse_release_args {
+ struct fuse_args args;
+ struct fuse_release_in inarg;
+ struct inode *inode;
+};
+
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
struct fuse_file *ff;
- ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
+ ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
if (unlikely(!ff))
return NULL;
ff->fc = fc;
- ff->reserved_req = fuse_request_alloc(0);
- if (unlikely(!ff->reserved_req)) {
+ ff->release_args = kzalloc(sizeof(*ff->release_args),
+ GFP_KERNEL_ACCOUNT);
+ if (!ff->release_args) {
kfree(ff);
return NULL;
}
@@ -69,7 +88,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
void fuse_file_free(struct fuse_file *ff)
{
- fuse_request_free(ff->reserved_req);
+ kfree(ff->release_args);
mutex_destroy(&ff->readdir.lock);
kfree(ff);
}
@@ -80,34 +99,31 @@ static struct fuse_file *fuse_file_get(struct fuse_file *ff)
return ff;
}
-static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_release_end(struct fuse_conn *fc, struct fuse_args *args,
+ int error)
{
- iput(req->misc.release.inode);
+ struct fuse_release_args *ra = container_of(args, typeof(*ra), args);
+
+ iput(ra->inode);
+ kfree(ra);
}
static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
{
if (refcount_dec_and_test(&ff->count)) {
- struct fuse_req *req = ff->reserved_req;
+ struct fuse_args *args = &ff->release_args->args;
if (isdir ? ff->fc->no_opendir : ff->fc->no_open) {
- /*
- * Drop the release request when client does not
- * implement 'open'
- */
- __clear_bit(FR_BACKGROUND, &req->flags);
- iput(req->misc.release.inode);
- fuse_put_request(ff->fc, req);
+ /* Do nothing when client does not implement 'open' */
+ fuse_release_end(ff->fc, args, 0);
} else if (sync) {
- __set_bit(FR_FORCE, &req->flags);
- __clear_bit(FR_BACKGROUND, &req->flags);
- fuse_request_send(ff->fc, req);
- iput(req->misc.release.inode);
- fuse_put_request(ff->fc, req);
+ fuse_simple_request(ff->fc, args);
+ fuse_release_end(ff->fc, args, 0);
} else {
- req->end = fuse_release_end;
- __set_bit(FR_BACKGROUND, &req->flags);
- fuse_request_send_background(ff->fc, req);
+ args->end = fuse_release_end;
+ if (fuse_simple_background(ff->fc, args,
+ GFP_KERNEL | __GFP_NOFAIL))
+ fuse_release_end(ff->fc, args, -ENOTCONN);
}
kfree(ff);
}
@@ -227,8 +243,7 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
int flags, int opcode)
{
struct fuse_conn *fc = ff->fc;
- struct fuse_req *req = ff->reserved_req;
- struct fuse_release_in *inarg = &req->misc.release.in;
+ struct fuse_release_args *ra = ff->release_args;
/* Inode is NULL on error path of fuse_create_open() */
if (likely(fi)) {
@@ -243,32 +258,33 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
wake_up_interruptible_all(&ff->poll_wait);
- inarg->fh = ff->fh;
- inarg->flags = flags;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(struct fuse_release_in);
- req->in.args[0].value = inarg;
+ ra->inarg.fh = ff->fh;
+ ra->inarg.flags = flags;
+ ra->args.in_numargs = 1;
+ ra->args.in_args[0].size = sizeof(struct fuse_release_in);
+ ra->args.in_args[0].value = &ra->inarg;
+ ra->args.opcode = opcode;
+ ra->args.nodeid = ff->nodeid;
+ ra->args.force = true;
+ ra->args.nocreds = true;
}
void fuse_release_common(struct file *file, bool isdir)
{
struct fuse_inode *fi = get_fuse_inode(file_inode(file));
struct fuse_file *ff = file->private_data;
- struct fuse_req *req = ff->reserved_req;
+ struct fuse_release_args *ra = ff->release_args;
int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
fuse_prepare_release(fi, ff, file->f_flags, opcode);
if (ff->flock) {
- struct fuse_release_in *inarg = &req->misc.release.in;
- inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
- inarg->lock_owner = fuse_lock_owner_id(ff->fc,
- (fl_owner_t) file);
+ ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
+ ra->inarg.lock_owner = fuse_lock_owner_id(ff->fc,
+ (fl_owner_t) file);
}
/* Hold inode until release is finished */
- req->misc.release.inode = igrab(file_inode(file));
+ ra->inode = igrab(file_inode(file));
/*
* Normally this will send the RELEASE request, however if
@@ -279,7 +295,7 @@ void fuse_release_common(struct file *file, bool isdir)
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
*/
- fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
+ fuse_file_put(ff, ff->fc->destroy, isdir);
}
static int fuse_open(struct inode *inode, struct file *file)
@@ -335,19 +351,27 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
return (u64) v0 + ((u64) v1 << 32);
}
-static struct fuse_req *fuse_find_writeback(struct fuse_inode *fi,
+struct fuse_writepage_args {
+ struct fuse_io_args ia;
+ struct list_head writepages_entry;
+ struct list_head queue_entry;
+ struct fuse_writepage_args *next;
+ struct inode *inode;
+};
+
+static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
pgoff_t idx_from, pgoff_t idx_to)
{
- struct fuse_req *req;
+ struct fuse_writepage_args *wpa;
- list_for_each_entry(req, &fi->writepages, writepages_entry) {
+ list_for_each_entry(wpa, &fi->writepages, writepages_entry) {
pgoff_t curr_index;
- WARN_ON(get_fuse_inode(req->inode) != fi);
- curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
- if (idx_from < curr_index + req->num_pages &&
+ WARN_ON(get_fuse_inode(wpa->inode) != fi);
+ curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
+ if (idx_from < curr_index + wpa->ia.ap.num_pages &&
curr_index <= idx_to) {
- return req;
+ return wpa;
}
}
return NULL;
@@ -383,12 +407,11 @@ static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
* Since fuse doesn't rely on the VM writeback tracking, this has to
* use some other means.
*/
-static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
+static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
struct fuse_inode *fi = get_fuse_inode(inode);
wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
- return 0;
}
/*
@@ -411,8 +434,8 @@ static int fuse_flush(struct file *file, fl_owner_t id)
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
- struct fuse_req *req;
struct fuse_flush_in inarg;
+ FUSE_ARGS(args);
int err;
if (is_bad_inode(inode))
@@ -433,19 +456,17 @@ static int fuse_flush(struct file *file, fl_owner_t id)
if (err)
return err;
- req = fuse_get_req_nofail_nopages(fc, file);
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.lock_owner = fuse_lock_owner_id(fc, id);
- req->in.h.opcode = FUSE_FLUSH;
- req->in.h.nodeid = get_node_id(inode);
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- __set_bit(FR_FORCE, &req->flags);
- fuse_request_send(fc, req);
- err = req->out.h.error;
- fuse_put_request(fc, req);
+ args.opcode = FUSE_FLUSH;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.force = true;
+
+ err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_flush = 1;
err = 0;
@@ -465,11 +486,11 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
- args.in.h.opcode = opcode;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
+ args.opcode = opcode;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
return fuse_simple_request(fc, &args);
}
@@ -523,35 +544,35 @@ out:
return err;
}
-void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
- size_t count, int opcode)
+void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
+ size_t count, int opcode)
{
- struct fuse_read_in *inarg = &req->misc.read.in;
struct fuse_file *ff = file->private_data;
+ struct fuse_args *args = &ia->ap.args;
- inarg->fh = ff->fh;
- inarg->offset = pos;
- inarg->size = count;
- inarg->flags = file->f_flags;
- req->in.h.opcode = opcode;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(struct fuse_read_in);
- req->in.args[0].value = inarg;
- req->out.argvar = 1;
- req->out.numargs = 1;
- req->out.args[0].size = count;
+ ia->read.in.fh = ff->fh;
+ ia->read.in.offset = pos;
+ ia->read.in.size = count;
+ ia->read.in.flags = file->f_flags;
+ args->opcode = opcode;
+ args->nodeid = ff->nodeid;
+ args->in_numargs = 1;
+ args->in_args[0].size = sizeof(ia->read.in);
+ args->in_args[0].value = &ia->read.in;
+ args->out_argvar = true;
+ args->out_numargs = 1;
+ args->out_args[0].size = count;
}
-static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
+static void fuse_release_user_pages(struct fuse_args_pages *ap,
+ bool should_dirty)
{
- unsigned i;
+ unsigned int i;
- for (i = 0; i < req->num_pages; i++) {
- struct page *page = req->pages[i];
+ for (i = 0; i < ap->num_pages; i++) {
if (should_dirty)
- set_page_dirty_lock(page);
- put_page(page);
+ set_page_dirty_lock(ap->pages[i]);
+ put_page(ap->pages[i]);
}
}
@@ -621,64 +642,94 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
kref_put(&io->refcnt, fuse_io_release);
}
-static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
+static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
+ unsigned int npages)
+{
+ struct fuse_io_args *ia;
+
+ ia = kzalloc(sizeof(*ia), GFP_KERNEL);
+ if (ia) {
+ ia->io = io;
+ ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
+ &ia->ap.descs);
+ if (!ia->ap.pages) {
+ kfree(ia);
+ ia = NULL;
+ }
+ }
+ return ia;
+}
+
+static void fuse_io_free(struct fuse_io_args *ia)
+{
+ kfree(ia->ap.pages);
+ kfree(ia);
+}
+
+static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_args *args,
+ int err)
{
- struct fuse_io_priv *io = req->io;
+ struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
+ struct fuse_io_priv *io = ia->io;
ssize_t pos = -1;
- fuse_release_user_pages(req, io->should_dirty);
+ fuse_release_user_pages(&ia->ap, io->should_dirty);
- if (io->write) {
- if (req->misc.write.in.size != req->misc.write.out.size)
- pos = req->misc.write.in.offset - io->offset +
- req->misc.write.out.size;
+ if (err) {
+ /* Nothing */
+ } else if (io->write) {
+ if (ia->write.out.size > ia->write.in.size) {
+ err = -EIO;
+ } else if (ia->write.in.size != ia->write.out.size) {
+ pos = ia->write.in.offset - io->offset +
+ ia->write.out.size;
+ }
} else {
- if (req->misc.read.in.size != req->out.args[0].size)
- pos = req->misc.read.in.offset - io->offset +
- req->out.args[0].size;
+ u32 outsize = args->out_args[0].size;
+
+ if (ia->read.in.size != outsize)
+ pos = ia->read.in.offset - io->offset + outsize;
}
- fuse_aio_complete(io, req->out.h.error, pos);
+ fuse_aio_complete(io, err, pos);
+ fuse_io_free(ia);
}
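
The pos bookkeeping above marks how far the combined async io remained
valid when one request transferred short. A worked example with assumed
numbers: io->offset = 0, a read submitted at offset 4096 for 8192 bytes,
server returns 5000 bytes:

	/* pos = ia->read.in.offset - io->offset + outsize
	 *     = 4096 - 0 + 5000 = 9100
	 * so fuse_aio_complete() treats the io as valid up to byte 9100.
	 */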
-static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
- size_t num_bytes, struct fuse_io_priv *io)
+static ssize_t fuse_async_req_send(struct fuse_conn *fc,
+ struct fuse_io_args *ia, size_t num_bytes)
{
+ ssize_t err;
+ struct fuse_io_priv *io = ia->io;
+
spin_lock(&io->lock);
kref_get(&io->refcnt);
io->size += num_bytes;
io->reqs++;
spin_unlock(&io->lock);
- req->io = io;
- req->end = fuse_aio_complete_req;
+ ia->ap.args.end = fuse_aio_complete_req;
+ err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL);
- __fuse_get_request(req);
- fuse_request_send_background(fc, req);
-
- return num_bytes;
+ return err ?: num_bytes;
}
-static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
- loff_t pos, size_t count, fl_owner_t owner)
+static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
+ fl_owner_t owner)
{
- struct file *file = io->iocb->ki_filp;
+ struct file *file = ia->io->iocb->ki_filp;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- fuse_read_fill(req, file, pos, count, FUSE_READ);
+ fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
if (owner != NULL) {
- struct fuse_read_in *inarg = &req->misc.read.in;
-
- inarg->read_flags |= FUSE_READ_LOCKOWNER;
- inarg->lock_owner = fuse_lock_owner_id(fc, owner);
+ ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
+ ia->read.in.lock_owner = fuse_lock_owner_id(fc, owner);
}
- if (io->async)
- return fuse_async_req_send(fc, req, count, io);
+ if (ia->io->async)
+ return fuse_async_req_send(fc, ia, count);
- fuse_request_send(fc, req);
- return req->out.args[0].size;
+ return fuse_simple_request(fc, &ia->ap.args);
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
@@ -696,10 +747,9 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
spin_unlock(&fi->lock);
}
-static void fuse_short_read(struct fuse_req *req, struct inode *inode,
- u64 attr_ver)
+static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
+ struct fuse_args_pages *ap)
{
- size_t num_read = req->out.args[0].size;
struct fuse_conn *fc = get_fuse_conn(inode);
if (fc->writeback_cache) {
@@ -712,28 +762,31 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
int start_idx = num_read >> PAGE_SHIFT;
size_t off = num_read & (PAGE_SIZE - 1);
- for (i = start_idx; i < req->num_pages; i++) {
- zero_user_segment(req->pages[i], off, PAGE_SIZE);
+ for (i = start_idx; i < ap->num_pages; i++) {
+ zero_user_segment(ap->pages[i], off, PAGE_SIZE);
off = 0;
}
} else {
- loff_t pos = page_offset(req->pages[0]) + num_read;
+ loff_t pos = page_offset(ap->pages[0]) + num_read;
fuse_read_update_size(inode, pos, attr_ver);
}
}
static int fuse_do_readpage(struct file *file, struct page *page)
{
- struct kiocb iocb;
- struct fuse_io_priv io;
struct inode *inode = page->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
- size_t num_read;
loff_t pos = page_offset(page);
- size_t count = PAGE_SIZE;
+ struct fuse_page_desc desc = { .length = PAGE_SIZE };
+ struct fuse_io_args ia = {
+ .ap.args.page_zeroing = true,
+ .ap.args.out_pages = true,
+ .ap.num_pages = 1,
+ .ap.pages = &page,
+ .ap.descs = &desc,
+ };
+ ssize_t res;
u64 attr_ver;
- int err;
/*
* Page writeback can extend beyond the lifetime of the
@@ -742,35 +795,21 @@ static int fuse_do_readpage(struct file *file, struct page *page)
*/
fuse_wait_on_page_writeback(inode, page->index);
- req = fuse_get_req(fc, 1);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
attr_ver = fuse_get_attr_version(fc);
- req->out.page_zeroing = 1;
- req->out.argpages = 1;
- req->num_pages = 1;
- req->pages[0] = page;
- req->page_descs[0].length = count;
- init_sync_kiocb(&iocb, file);
- io = (struct fuse_io_priv) FUSE_IO_PRIV_SYNC(&iocb);
- num_read = fuse_send_read(req, &io, pos, count, NULL);
- err = req->out.h.error;
-
- if (!err) {
- /*
- * Short read means EOF. If file size is larger, truncate it
- */
- if (num_read < count)
- fuse_short_read(req, inode, attr_ver);
-
- SetPageUptodate(page);
- }
+ fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
+ res = fuse_simple_request(fc, &ia.ap.args);
+ if (res < 0)
+ return res;
+ /*
+ * Short read means EOF. If file size is larger, truncate it
+ */
+ if (res < desc.length)
+ fuse_short_read(inode, attr_ver, res, &ia.ap);
- fuse_put_request(fc, req);
+ SetPageUptodate(page);
- return err;
+ return 0;
}
static int fuse_readpage(struct file *file, struct page *page)
@@ -789,15 +828,18 @@ static int fuse_readpage(struct file *file, struct page *page)
return err;
}
-static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_args *args,
+ int err)
{
int i;
- size_t count = req->misc.read.in.size;
- size_t num_read = req->out.args[0].size;
+ struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
+ struct fuse_args_pages *ap = &ia->ap;
+ size_t count = ia->read.in.size;
+ size_t num_read = args->out_args[0].size;
struct address_space *mapping = NULL;
- for (i = 0; mapping == NULL && i < req->num_pages; i++)
- mapping = req->pages[i]->mapping;
+ for (i = 0; mapping == NULL && i < ap->num_pages; i++)
+ mapping = ap->pages[i]->mapping;
if (mapping) {
struct inode *inode = mapping->host;
@@ -805,93 +847,97 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
/*
* Short read means EOF. If file size is larger, truncate it
*/
- if (!req->out.h.error && num_read < count)
- fuse_short_read(req, inode, req->misc.read.attr_ver);
+ if (!err && num_read < count)
+ fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
fuse_invalidate_atime(inode);
}
- for (i = 0; i < req->num_pages; i++) {
- struct page *page = req->pages[i];
- if (!req->out.h.error)
+ for (i = 0; i < ap->num_pages; i++) {
+ struct page *page = ap->pages[i];
+
+ if (!err)
SetPageUptodate(page);
else
SetPageError(page);
unlock_page(page);
put_page(page);
}
- if (req->ff)
- fuse_file_put(req->ff, false, false);
+ if (ia->ff)
+ fuse_file_put(ia->ff, false, false);
+
+ fuse_io_free(ia);
}
-static void fuse_send_readpages(struct fuse_req *req, struct file *file)
+static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- loff_t pos = page_offset(req->pages[0]);
- size_t count = req->num_pages << PAGE_SHIFT;
-
- req->out.argpages = 1;
- req->out.page_zeroing = 1;
- req->out.page_replace = 1;
- fuse_read_fill(req, file, pos, count, FUSE_READ);
- req->misc.read.attr_ver = fuse_get_attr_version(fc);
+ struct fuse_args_pages *ap = &ia->ap;
+ loff_t pos = page_offset(ap->pages[0]);
+ size_t count = ap->num_pages << PAGE_SHIFT;
+ int err;
+
+ ap->args.out_pages = true;
+ ap->args.page_zeroing = true;
+ ap->args.page_replace = true;
+ fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
+ ia->read.attr_ver = fuse_get_attr_version(fc);
if (fc->async_read) {
- req->ff = fuse_file_get(ff);
- req->end = fuse_readpages_end;
- fuse_request_send_background(fc, req);
+ ia->ff = fuse_file_get(ff);
+ ap->args.end = fuse_readpages_end;
+ err = fuse_simple_background(fc, &ap->args, GFP_KERNEL);
+ if (!err)
+ return;
} else {
- fuse_request_send(fc, req);
- fuse_readpages_end(fc, req);
- fuse_put_request(fc, req);
+ err = fuse_simple_request(fc, &ap->args);
}
+ fuse_readpages_end(fc, &ap->args, err);
}
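
Note the error contract every converted call site relies on: on a
successful submit, args->end runs when the reply (or abort) completes,
but if fuse_simple_background() itself fails the callback is not invoked
and the caller must run it by hand - exactly what fuse_send_readpages()
does above and fuse_file_put() does for RELEASE. As a sketch, with a
hypothetical callback:

	args->end = my_end;
	err = fuse_simple_background(fc, args, GFP_KERNEL);
	if (err)
		my_end(fc, args, err);	/* ->end skipped on failed submit */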
struct fuse_fill_data {
- struct fuse_req *req;
+ struct fuse_io_args *ia;
struct file *file;
struct inode *inode;
- unsigned nr_pages;
+ unsigned int nr_pages;
+ unsigned int max_pages;
};
static int fuse_readpages_fill(void *_data, struct page *page)
{
struct fuse_fill_data *data = _data;
- struct fuse_req *req = data->req;
+ struct fuse_io_args *ia = data->ia;
+ struct fuse_args_pages *ap = &ia->ap;
struct inode *inode = data->inode;
struct fuse_conn *fc = get_fuse_conn(inode);
fuse_wait_on_page_writeback(inode, page->index);
- if (req->num_pages &&
- (req->num_pages == fc->max_pages ||
- (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
- req->pages[req->num_pages - 1]->index + 1 != page->index)) {
- unsigned int nr_alloc = min_t(unsigned int, data->nr_pages,
- fc->max_pages);
- fuse_send_readpages(req, data->file);
- if (fc->async_read)
- req = fuse_get_req_for_background(fc, nr_alloc);
- else
- req = fuse_get_req(fc, nr_alloc);
-
- data->req = req;
- if (IS_ERR(req)) {
+ if (ap->num_pages &&
+ (ap->num_pages == fc->max_pages ||
+ (ap->num_pages + 1) * PAGE_SIZE > fc->max_read ||
+ ap->pages[ap->num_pages - 1]->index + 1 != page->index)) {
+ data->max_pages = min_t(unsigned int, data->nr_pages,
+ fc->max_pages);
+ fuse_send_readpages(ia, data->file);
+ data->ia = ia = fuse_io_alloc(NULL, data->max_pages);
+ if (!ia) {
unlock_page(page);
- return PTR_ERR(req);
+ return -ENOMEM;
}
+ ap = &ia->ap;
}
- if (WARN_ON(req->num_pages >= req->max_pages)) {
+ if (WARN_ON(ap->num_pages >= data->max_pages)) {
unlock_page(page);
- fuse_put_request(fc, req);
+ fuse_io_free(ia);
return -EIO;
}
get_page(page);
- req->pages[req->num_pages] = page;
- req->page_descs[req->num_pages].length = PAGE_SIZE;
- req->num_pages++;
+ ap->pages[ap->num_pages] = page;
+ ap->descs[ap->num_pages].length = PAGE_SIZE;
+ ap->num_pages++;
data->nr_pages--;
return 0;
}
@@ -903,7 +949,6 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_data data;
int err;
- unsigned int nr_alloc = min_t(unsigned int, nr_pages, fc->max_pages);
err = -EIO;
if (is_bad_inode(inode))
@@ -911,21 +956,20 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
data.file = file;
data.inode = inode;
- if (fc->async_read)
- data.req = fuse_get_req_for_background(fc, nr_alloc);
- else
- data.req = fuse_get_req(fc, nr_alloc);
data.nr_pages = nr_pages;
- err = PTR_ERR(data.req);
- if (IS_ERR(data.req))
+ data.max_pages = min_t(unsigned int, nr_pages, fc->max_pages);
+ data.ia = fuse_io_alloc(NULL, data.max_pages);
+ err = -ENOMEM;
+ if (!data.ia)
goto out;
err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
if (!err) {
- if (data.req->num_pages)
- fuse_send_readpages(data.req, file);
+ if (data.ia->ap.num_pages)
+ fuse_send_readpages(data.ia, file);
else
- fuse_put_request(fc, data.req);
+ fuse_io_free(data.ia);
}
out:
return err;
@@ -952,54 +996,65 @@ static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
return generic_file_read_iter(iocb, to);
}
-static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
- loff_t pos, size_t count)
+static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
+ loff_t pos, size_t count)
{
- struct fuse_write_in *inarg = &req->misc.write.in;
- struct fuse_write_out *outarg = &req->misc.write.out;
+ struct fuse_args *args = &ia->ap.args;
- inarg->fh = ff->fh;
- inarg->offset = pos;
- inarg->size = count;
- req->in.h.opcode = FUSE_WRITE;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 2;
+ ia->write.in.fh = ff->fh;
+ ia->write.in.offset = pos;
+ ia->write.in.size = count;
+ args->opcode = FUSE_WRITE;
+ args->nodeid = ff->nodeid;
+ args->in_numargs = 2;
if (ff->fc->minor < 9)
- req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
+ args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
else
- req->in.args[0].size = sizeof(struct fuse_write_in);
- req->in.args[0].value = inarg;
- req->in.args[1].size = count;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(struct fuse_write_out);
- req->out.args[0].value = outarg;
+ args->in_args[0].size = sizeof(ia->write.in);
+ args->in_args[0].value = &ia->write.in;
+ args->in_args[1].size = count;
+ args->out_numargs = 1;
+ args->out_args[0].size = sizeof(ia->write.out);
+ args->out_args[0].value = &ia->write.out;
}
-static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
- loff_t pos, size_t count, fl_owner_t owner)
+static unsigned int fuse_write_flags(struct kiocb *iocb)
{
- struct kiocb *iocb = io->iocb;
+ unsigned int flags = iocb->ki_filp->f_flags;
+
+ if (iocb->ki_flags & IOCB_DSYNC)
+ flags |= O_DSYNC;
+ if (iocb->ki_flags & IOCB_SYNC)
+ flags |= O_SYNC;
+
+ return flags;
+}
+
+static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
+ size_t count, fl_owner_t owner)
+{
+ struct kiocb *iocb = ia->io->iocb;
struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
- struct fuse_write_in *inarg = &req->misc.write.in;
+ struct fuse_write_in *inarg = &ia->write.in;
+ ssize_t err;
- fuse_write_fill(req, ff, pos, count);
- inarg->flags = file->f_flags;
- if (iocb->ki_flags & IOCB_DSYNC)
- inarg->flags |= O_DSYNC;
- if (iocb->ki_flags & IOCB_SYNC)
- inarg->flags |= O_SYNC;
+ fuse_write_args_fill(ia, ff, pos, count);
+ inarg->flags = fuse_write_flags(iocb);
if (owner != NULL) {
inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
inarg->lock_owner = fuse_lock_owner_id(fc, owner);
}
- if (io->async)
- return fuse_async_req_send(fc, req, count, io);
+ if (ia->io->async)
+ return fuse_async_req_send(fc, ia, count);
+
+ err = fuse_simple_request(fc, &ia->ap.args);
+ if (!err && ia->write.out.size > count)
+ err = -EIO;
- fuse_request_send(fc, req);
- return req->misc.write.out.size;
+ return err ?: ia->write.out.size;
}
bool fuse_write_update_size(struct inode *inode, loff_t pos)
@@ -1019,26 +1074,31 @@ bool fuse_write_update_size(struct inode *inode, loff_t pos)
return ret;
}
-static size_t fuse_send_write_pages(struct fuse_req *req, struct kiocb *iocb,
- struct inode *inode, loff_t pos,
- size_t count)
+static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
+ struct kiocb *iocb, struct inode *inode,
+ loff_t pos, size_t count)
{
- size_t res;
- unsigned offset;
- unsigned i;
- struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
+ struct fuse_args_pages *ap = &ia->ap;
+ struct file *file = iocb->ki_filp;
+ struct fuse_file *ff = file->private_data;
+ struct fuse_conn *fc = ff->fc;
+ unsigned int offset, i;
+ int err;
- for (i = 0; i < req->num_pages; i++)
- fuse_wait_on_page_writeback(inode, req->pages[i]->index);
+ for (i = 0; i < ap->num_pages; i++)
+ fuse_wait_on_page_writeback(inode, ap->pages[i]->index);
- res = fuse_send_write(req, &io, pos, count, NULL);
+ fuse_write_args_fill(ia, ff, pos, count);
+ ia->write.in.flags = fuse_write_flags(iocb);
- offset = req->page_descs[0].offset;
- count = res;
- for (i = 0; i < req->num_pages; i++) {
- struct page *page = req->pages[i];
+ err = fuse_simple_request(fc, &ap->args);
- if (!req->out.h.error && !offset && count >= PAGE_SIZE)
+ offset = ap->descs[0].offset;
+ count = ia->write.out.size;
+ for (i = 0; i < ap->num_pages; i++) {
+ struct page *page = ap->pages[i];
+
+ if (!err && !offset && count >= PAGE_SIZE)
SetPageUptodate(page);
if (count > PAGE_SIZE - offset)
@@ -1051,20 +1111,21 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct kiocb *iocb,
put_page(page);
}
- return res;
+ return err;
}
-static ssize_t fuse_fill_write_pages(struct fuse_req *req,
- struct address_space *mapping,
- struct iov_iter *ii, loff_t pos)
+static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
+ struct address_space *mapping,
+ struct iov_iter *ii, loff_t pos,
+ unsigned int max_pages)
{
struct fuse_conn *fc = get_fuse_conn(mapping->host);
unsigned offset = pos & (PAGE_SIZE - 1);
size_t count = 0;
int err;
- req->in.argpages = 1;
- req->page_descs[0].offset = offset;
+ ap->args.in_pages = true;
+ ap->descs[0].offset = offset;
do {
size_t tmp;
@@ -1100,9 +1161,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
}
err = 0;
- req->pages[req->num_pages] = page;
- req->page_descs[req->num_pages].length = tmp;
- req->num_pages++;
+ ap->pages[ap->num_pages] = page;
+ ap->descs[ap->num_pages].length = tmp;
+ ap->num_pages++;
count += tmp;
pos += tmp;
@@ -1113,7 +1174,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
if (!fc->big_writes)
break;
} while (iov_iter_count(ii) && count < fc->max_write &&
- req->num_pages < req->max_pages && offset == 0);
+ ap->num_pages < max_pages && offset == 0);
return count > 0 ? count : err;
}
@@ -1141,27 +1202,27 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
do {
- struct fuse_req *req;
ssize_t count;
+ struct fuse_io_args ia = {};
+ struct fuse_args_pages *ap = &ia.ap;
unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
fc->max_pages);
- req = fuse_get_req(fc, nr_pages);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
+ ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
+ if (!ap->pages) {
+ err = -ENOMEM;
break;
}
- count = fuse_fill_write_pages(req, mapping, ii, pos);
+ count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
if (count <= 0) {
err = count;
} else {
- size_t num_written;
-
- num_written = fuse_send_write_pages(req, iocb, inode,
- pos, count);
- err = req->out.h.error;
+ err = fuse_send_write_pages(&ia, iocb, inode,
+ pos, count);
if (!err) {
+ size_t num_written = ia.write.out.size;
+
res += num_written;
pos += num_written;
@@ -1170,7 +1231,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
err = -EIO;
}
}
- fuse_put_request(fc, req);
+ kfree(ap->pages);
} while (!err && iov_iter_count(ii));
if (res > 0)
@@ -1258,14 +1319,14 @@ out:
return written ? written : err;
}
-static inline void fuse_page_descs_length_init(struct fuse_req *req,
- unsigned index, unsigned nr_pages)
+static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
+ unsigned int index,
+ unsigned int nr_pages)
{
int i;
for (i = index; i < index + nr_pages; i++)
- req->page_descs[i].length = PAGE_SIZE -
- req->page_descs[i].offset;
+ descs[i].length = PAGE_SIZE - descs[i].offset;
}
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
@@ -1279,8 +1340,9 @@ static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
return min(iov_iter_single_seg_count(ii), max_size);
}
-static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
- size_t *nbytesp, int write)
+static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
+ size_t *nbytesp, int write,
+ unsigned int max_pages)
{
size_t nbytes = 0; /* # bytes already packed in req */
ssize_t ret = 0;
@@ -1291,21 +1353,21 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
if (write)
- req->in.args[1].value = (void *) user_addr;
+ ap->args.in_args[1].value = (void *) user_addr;
else
- req->out.args[0].value = (void *) user_addr;
+ ap->args.out_args[0].value = (void *) user_addr;
iov_iter_advance(ii, frag_size);
*nbytesp = frag_size;
return 0;
}
- while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
+ while (nbytes < *nbytesp && ap->num_pages < max_pages) {
unsigned npages;
size_t start;
- ret = iov_iter_get_pages(ii, &req->pages[req->num_pages],
+ ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
*nbytesp - nbytes,
- req->max_pages - req->num_pages,
+ max_pages - ap->num_pages,
&start);
if (ret < 0)
break;
@@ -1316,18 +1378,18 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
ret += start;
npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
- req->page_descs[req->num_pages].offset = start;
- fuse_page_descs_length_init(req, req->num_pages, npages);
+ ap->descs[ap->num_pages].offset = start;
+ fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
- req->num_pages += npages;
- req->page_descs[req->num_pages - 1].length -=
+ ap->num_pages += npages;
+ ap->descs[ap->num_pages - 1].length -=
(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
}
if (write)
- req->in.argpages = 1;
+ ap->args.in_pages = 1;
else
- req->out.argpages = 1;
+ ap->args.out_pages = 1;
*nbytesp = nbytes;
@@ -1349,17 +1411,16 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
pgoff_t idx_from = pos >> PAGE_SHIFT;
pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
ssize_t res = 0;
- struct fuse_req *req;
int err = 0;
+ struct fuse_io_args *ia;
+ unsigned int max_pages;
- if (io->async)
- req = fuse_get_req_for_background(fc, iov_iter_npages(iter,
- fc->max_pages));
- else
- req = fuse_get_req(fc, iov_iter_npages(iter, fc->max_pages));
- if (IS_ERR(req))
- return PTR_ERR(req);
+ max_pages = iov_iter_npages(iter, fc->max_pages);
+ ia = fuse_io_alloc(io, max_pages);
+ if (!ia)
+ return -ENOMEM;
+ ia->io = io;
if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
if (!write)
inode_lock(inode);
@@ -1370,54 +1431,49 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
io->should_dirty = !write && iter_is_iovec(iter);
while (count) {
- size_t nres;
+ ssize_t nres;
fl_owner_t owner = current->files;
size_t nbytes = min(count, nmax);
- err = fuse_get_user_pages(req, iter, &nbytes, write);
+
+ err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
+ max_pages);
if (err && !nbytes)
break;
if (write) {
- if (!capable(CAP_FSETID)) {
- struct fuse_write_in *inarg;
+ if (!capable(CAP_FSETID))
+ ia->write.in.write_flags |= FUSE_WRITE_KILL_PRIV;
- inarg = &req->misc.write.in;
- inarg->write_flags |= FUSE_WRITE_KILL_PRIV;
- }
- nres = fuse_send_write(req, io, pos, nbytes, owner);
+ nres = fuse_send_write(ia, pos, nbytes, owner);
} else {
- nres = fuse_send_read(req, io, pos, nbytes, owner);
+ nres = fuse_send_read(ia, pos, nbytes, owner);
}
- if (!io->async)
- fuse_release_user_pages(req, io->should_dirty);
- if (req->out.h.error) {
- err = req->out.h.error;
- break;
- } else if (nres > nbytes) {
- res = 0;
- err = -EIO;
+ if (!io->async || nres < 0) {
+ fuse_release_user_pages(&ia->ap, io->should_dirty);
+ fuse_io_free(ia);
+ }
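+ /* On async success the request now owns ia; the completion callback frees it */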
+ ia = NULL;
+ if (nres < 0) {
+ err = nres;
break;
}
+ WARN_ON(nres > nbytes);
+
count -= nres;
res += nres;
pos += nres;
if (nres != nbytes)
break;
if (count) {
- fuse_put_request(fc, req);
- if (io->async)
- req = fuse_get_req_for_background(fc,
- iov_iter_npages(iter, fc->max_pages));
- else
- req = fuse_get_req(fc, iov_iter_npages(iter,
- fc->max_pages));
- if (IS_ERR(req))
+ max_pages = iov_iter_npages(iter, fc->max_pages);
+ ia = fuse_io_alloc(io, max_pages);
+ if (!ia)
break;
}
}
- if (!IS_ERR(req))
- fuse_put_request(fc, req);
+ if (ia)
+ fuse_io_free(ia);
if (res > 0)
*ppos = pos;
@@ -1509,45 +1565,53 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
return fuse_direct_write_iter(iocb, from);
}
-static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_writepage_free(struct fuse_writepage_args *wpa)
{
+ struct fuse_args_pages *ap = &wpa->ia.ap;
int i;
- for (i = 0; i < req->num_pages; i++)
- __free_page(req->pages[i]);
+ for (i = 0; i < ap->num_pages; i++)
+ __free_page(ap->pages[i]);
+
+ if (wpa->ia.ff)
+ fuse_file_put(wpa->ia.ff, false, false);
- if (req->ff)
- fuse_file_put(req->ff, false, false);
+ kfree(ap->pages);
+ kfree(wpa);
}
-static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_writepage_finish(struct fuse_conn *fc,
+ struct fuse_writepage_args *wpa)
{
- struct inode *inode = req->inode;
+ struct fuse_args_pages *ap = &wpa->ia.ap;
+ struct inode *inode = wpa->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
- list_del(&req->writepages_entry);
- for (i = 0; i < req->num_pages; i++) {
+ list_del(&wpa->writepages_entry);
+ for (i = 0; i < ap->num_pages; i++) {
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(req->pages[i], NR_WRITEBACK_TEMP);
+ dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
}
wake_up(&fi->page_waitq);
}
/* Called under fi->lock, may release and reacquire it */
-static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
- loff_t size)
+static void fuse_send_writepage(struct fuse_conn *fc,
+ struct fuse_writepage_args *wpa, loff_t size)
__releases(fi->lock)
__acquires(fi->lock)
{
- struct fuse_req *aux, *next;
- struct fuse_inode *fi = get_fuse_inode(req->inode);
- struct fuse_write_in *inarg = &req->misc.write.in;
- __u64 data_size = req->num_pages * PAGE_SIZE;
- bool queued;
+ struct fuse_writepage_args *aux, *next;
+ struct fuse_inode *fi = get_fuse_inode(wpa->inode);
+ struct fuse_write_in *inarg = &wpa->ia.write.in;
+ struct fuse_args *args = &wpa->ia.ap.args;
+ __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
+ int err;
+ fi->writectr++;
if (inarg->offset + data_size <= size) {
inarg->size = data_size;
} else if (inarg->offset < size) {
@@ -1557,29 +1621,36 @@ __acquires(fi->lock)
goto out_free;
}
- req->in.args[1].size = inarg->size;
- queued = fuse_request_queue_background(fc, req);
+ args->in_args[1].size = inarg->size;
+ args->force = true;
+ args->nocreds = true;
+
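+ /*
+ * fi->lock is held here, so try an atomic allocation first; on -ENOMEM
+ * drop the lock and retry the queuing with __GFP_NOFAIL, which cannot fail.
+ */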
+ err = fuse_simple_background(fc, args, GFP_ATOMIC);
+ if (err == -ENOMEM) {
+ spin_unlock(&fi->lock);
+ err = fuse_simple_background(fc, args, GFP_NOFS | __GFP_NOFAIL);
+ spin_lock(&fi->lock);
+ }
+
/* Fails on broken connection only */
- if (unlikely(!queued))
+ if (unlikely(err))
goto out_free;
- fi->writectr++;
return;
out_free:
- fuse_writepage_finish(fc, req);
+ fi->writectr--;
+ fuse_writepage_finish(fc, wpa);
spin_unlock(&fi->lock);
/* After fuse_writepage_finish() aux request list is private */
- for (aux = req->misc.write.next; aux; aux = next) {
- next = aux->misc.write.next;
- aux->misc.write.next = NULL;
- fuse_writepage_free(fc, aux);
- fuse_put_request(fc, aux);
+ for (aux = wpa->next; aux; aux = next) {
+ next = aux->next;
+ aux->next = NULL;
+ fuse_writepage_free(aux);
}
- fuse_writepage_free(fc, req);
- fuse_put_request(fc, req);
+ fuse_writepage_free(wpa);
spin_lock(&fi->lock);
}
@@ -1596,29 +1667,34 @@ __acquires(fi->lock)
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
loff_t crop = i_size_read(inode);
- struct fuse_req *req;
+ struct fuse_writepage_args *wpa;
while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
- req = list_entry(fi->queued_writes.next, struct fuse_req, list);
- list_del_init(&req->list);
- fuse_send_writepage(fc, req, crop);
+ wpa = list_entry(fi->queued_writes.next,
+ struct fuse_writepage_args, queue_entry);
+ list_del_init(&wpa->queue_entry);
+ fuse_send_writepage(fc, wpa, crop);
}
}
-static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
+ int error)
{
- struct inode *inode = req->inode;
+ struct fuse_writepage_args *wpa =
+ container_of(args, typeof(*wpa), ia.ap.args);
+ struct inode *inode = wpa->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- mapping_set_error(inode->i_mapping, req->out.h.error);
+ mapping_set_error(inode->i_mapping, error);
spin_lock(&fi->lock);
- while (req->misc.write.next) {
+ while (wpa->next) {
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_write_in *inarg = &req->misc.write.in;
- struct fuse_req *next = req->misc.write.next;
- req->misc.write.next = next->misc.write.next;
- next->misc.write.next = NULL;
- next->ff = fuse_file_get(req->ff);
+ struct fuse_write_in *inarg = &wpa->ia.write.in;
+ struct fuse_writepage_args *next = wpa->next;
+
+ wpa->next = next->next;
+ next->next = NULL;
+ next->ia.ff = fuse_file_get(wpa->ia.ff);
list_add(&next->writepages_entry, &fi->writepages);
/*
@@ -1647,9 +1723,9 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
fuse_send_writepage(fc, next, inarg->offset + inarg->size);
}
fi->writectr--;
- fuse_writepage_finish(fc, req);
+ fuse_writepage_finish(fc, wpa);
spin_unlock(&fi->lock);
- fuse_writepage_free(fc, req);
+ fuse_writepage_free(wpa);
}
static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
@@ -1691,52 +1767,71 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
return err;
}
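+/* Allocate a writepage request with a one-page vector; the writepages path grows it via fuse_pages_realloc() */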
+static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
+{
+ struct fuse_writepage_args *wpa;
+ struct fuse_args_pages *ap;
+
+ wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
+ if (wpa) {
+ ap = &wpa->ia.ap;
+ ap->num_pages = 0;
+ ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
+ if (!ap->pages) {
+ kfree(wpa);
+ wpa = NULL;
+ }
+ }
+ return wpa;
+}
+
static int fuse_writepage_locked(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_req *req;
+ struct fuse_writepage_args *wpa;
+ struct fuse_args_pages *ap;
struct page *tmp_page;
int error = -ENOMEM;
set_page_writeback(page);
- req = fuse_request_alloc_nofs(1);
- if (!req)
+ wpa = fuse_writepage_args_alloc();
+ if (!wpa)
goto err;
+ ap = &wpa->ia.ap;
- /* writeback always goes to bg_queue */
- __set_bit(FR_BACKGROUND, &req->flags);
tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!tmp_page)
goto err_free;
error = -EIO;
- req->ff = fuse_write_file_get(fc, fi);
- if (!req->ff)
+ wpa->ia.ff = fuse_write_file_get(fc, fi);
+ if (!wpa->ia.ff)
goto err_nofile;
- fuse_write_fill(req, req->ff, page_offset(page), 0);
+ fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);
copy_highpage(tmp_page, page);
- req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
- req->misc.write.next = NULL;
- req->in.argpages = 1;
- req->num_pages = 1;
- req->pages[0] = tmp_page;
- req->page_descs[0].offset = 0;
- req->page_descs[0].length = PAGE_SIZE;
- req->end = fuse_writepage_end;
- req->inode = inode;
+ wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
+ wpa->next = NULL;
+ ap->args.in_pages = true;
+ ap->num_pages = 1;
+ ap->pages[0] = tmp_page;
+ ap->descs[0].offset = 0;
+ ap->descs[0].length = PAGE_SIZE;
+ ap->args.end = fuse_writepage_end;
+ wpa->inode = inode;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
spin_lock(&fi->lock);
- list_add(&req->writepages_entry, &fi->writepages);
- list_add_tail(&req->list, &fi->queued_writes);
+ list_add(&wpa->writepages_entry, &fi->writepages);
+ list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fi->lock);
@@ -1747,7 +1842,7 @@ static int fuse_writepage_locked(struct page *page)
err_nofile:
__free_page(tmp_page);
err_free:
- fuse_request_free(req);
+ kfree(wpa);
err:
mapping_set_error(page->mapping, error);
end_page_writeback(page);
@@ -1767,6 +1862,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
redirty_page_for_writepage(wbc, page);
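+ /* ->writepage() must unlock the page when it returns without starting writeback */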
+ unlock_page(page);
return 0;
}
@@ -1777,23 +1873,50 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
}
struct fuse_fill_wb_data {
- struct fuse_req *req;
+ struct fuse_writepage_args *wpa;
struct fuse_file *ff;
struct inode *inode;
struct page **orig_pages;
+ unsigned int max_pages;
};
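+/* Double the capacity of the request's page vector, capped at fc->max_pages */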
+static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
+{
+ struct fuse_args_pages *ap = &data->wpa->ia.ap;
+ struct fuse_conn *fc = get_fuse_conn(data->inode);
+ struct page **pages;
+ struct fuse_page_desc *descs;
+ unsigned int npages = min_t(unsigned int,
+ max_t(unsigned int, data->max_pages * 2,
+ FUSE_DEFAULT_MAX_PAGES_PER_REQ),
+ fc->max_pages);
+ WARN_ON(npages <= data->max_pages);
+
+ pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
+ if (!pages)
+ return false;
+
+ memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
+ memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
+ kfree(ap->pages);
+ ap->pages = pages;
+ ap->descs = descs;
+ data->max_pages = npages;
+
+ return true;
+}
+
static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
- struct fuse_req *req = data->req;
+ struct fuse_writepage_args *wpa = data->wpa;
struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
- int num_pages = req->num_pages;
+ int num_pages = wpa->ia.ap.num_pages;
int i;
- req->ff = fuse_file_get(data->ff);
+ wpa->ia.ff = fuse_file_get(data->ff);
spin_lock(&fi->lock);
- list_add_tail(&req->list, &fi->queued_writes);
+ list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fi->lock);
@@ -1808,54 +1931,52 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
* this new request onto the auxiliary list, otherwise reuse the existing one by
* copying the new page contents over to the old temporary page.
*/
-static bool fuse_writepage_in_flight(struct fuse_req *new_req,
+static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
struct page *page)
{
- struct fuse_conn *fc = get_fuse_conn(new_req->inode);
- struct fuse_inode *fi = get_fuse_inode(new_req->inode);
- struct fuse_req *tmp;
- struct fuse_req *old_req;
+ struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
+ struct fuse_writepage_args *tmp;
+ struct fuse_writepage_args *old_wpa;
+ struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
- WARN_ON(new_req->num_pages != 0);
+ WARN_ON(new_ap->num_pages != 0);
spin_lock(&fi->lock);
- list_del(&new_req->writepages_entry);
- old_req = fuse_find_writeback(fi, page->index, page->index);
- if (!old_req) {
- list_add(&new_req->writepages_entry, &fi->writepages);
+ list_del(&new_wpa->writepages_entry);
+ old_wpa = fuse_find_writeback(fi, page->index, page->index);
+ if (!old_wpa) {
+ list_add(&new_wpa->writepages_entry, &fi->writepages);
spin_unlock(&fi->lock);
return false;
}
- new_req->num_pages = 1;
- for (tmp = old_req->misc.write.next; tmp; tmp = tmp->misc.write.next) {
+ new_ap->num_pages = 1;
+ for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
pgoff_t curr_index;
- WARN_ON(tmp->inode != new_req->inode);
- curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
+ WARN_ON(tmp->inode != new_wpa->inode);
+ curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
if (curr_index == page->index) {
- WARN_ON(tmp->num_pages != 1);
- WARN_ON(!test_bit(FR_PENDING, &tmp->flags));
- swap(tmp->pages[0], new_req->pages[0]);
+ WARN_ON(tmp->ia.ap.num_pages != 1);
+ swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
break;
}
}
if (!tmp) {
- new_req->misc.write.next = old_req->misc.write.next;
- old_req->misc.write.next = new_req;
+ new_wpa->next = old_wpa->next;
+ old_wpa->next = new_wpa;
}
spin_unlock(&fi->lock);
if (tmp) {
- struct backing_dev_info *bdi = inode_to_bdi(new_req->inode);
+ struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
+ dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
- fuse_writepage_free(fc, new_req);
- fuse_request_free(new_req);
+ fuse_writepage_free(new_wpa);
}
return true;
@@ -1865,7 +1986,8 @@ static int fuse_writepages_fill(struct page *page,
struct writeback_control *wbc, void *_data)
{
struct fuse_fill_wb_data *data = _data;
- struct fuse_req *req = data->req;
+ struct fuse_writepage_args *wpa = data->wpa;
+ struct fuse_args_pages *ap = &wpa->ia.ap;
struct inode *inode = data->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -1888,16 +2010,16 @@ static int fuse_writepages_fill(struct page *page,
*/
is_writeback = fuse_page_is_writeback(inode, page->index);
- if (req && req->num_pages &&
- (is_writeback || req->num_pages == fc->max_pages ||
- (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
- data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
+ if (wpa && ap->num_pages &&
+ (is_writeback || ap->num_pages == fc->max_pages ||
+ (ap->num_pages + 1) * PAGE_SIZE > fc->max_write ||
+ data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) {
fuse_writepages_send(data);
- data->req = NULL;
- } else if (req && req->num_pages == req->max_pages) {
- if (!fuse_req_realloc_pages(fc, req, GFP_NOFS)) {
+ data->wpa = NULL;
+ } else if (wpa && ap->num_pages == data->max_pages) {
+ if (!fuse_pages_realloc(data)) {
fuse_writepages_send(data);
- req = data->req = NULL;
+ data->wpa = NULL;
}
}
@@ -1915,59 +2037,60 @@ static int fuse_writepages_fill(struct page *page,
* This is ensured by holding the page lock in page_mkwrite() while
* checking fuse_page_is_writeback(). We already hold the page lock
* since clear_page_dirty_for_io() and keep it held until we add the
- * request to the fi->writepages list and increment req->num_pages.
+ * request to the fi->writepages list and increment ap->num_pages.
* After this fuse_page_is_writeback() will indicate that the page is
* under writeback, so we can release the page lock.
*/
- if (data->req == NULL) {
+ if (data->wpa == NULL) {
struct fuse_inode *fi = get_fuse_inode(inode);
err = -ENOMEM;
- req = fuse_request_alloc_nofs(FUSE_REQ_INLINE_PAGES);
- if (!req) {
+ wpa = fuse_writepage_args_alloc();
+ if (!wpa) {
__free_page(tmp_page);
goto out_unlock;
}
+ data->max_pages = 1;
- fuse_write_fill(req, data->ff, page_offset(page), 0);
- req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
- req->misc.write.next = NULL;
- req->in.argpages = 1;
- __set_bit(FR_BACKGROUND, &req->flags);
- req->num_pages = 0;
- req->end = fuse_writepage_end;
- req->inode = inode;
+ ap = &wpa->ia.ap;
+ fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
+ wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
+ wpa->next = NULL;
+ ap->args.in_pages = true;
+ ap->args.end = fuse_writepage_end;
+ ap->num_pages = 0;
+ wpa->inode = inode;
spin_lock(&fi->lock);
- list_add(&req->writepages_entry, &fi->writepages);
+ list_add(&wpa->writepages_entry, &fi->writepages);
spin_unlock(&fi->lock);
- data->req = req;
+ data->wpa = wpa;
}
set_page_writeback(page);
copy_highpage(tmp_page, page);
- req->pages[req->num_pages] = tmp_page;
- req->page_descs[req->num_pages].offset = 0;
- req->page_descs[req->num_pages].length = PAGE_SIZE;
+ ap->pages[ap->num_pages] = tmp_page;
+ ap->descs[ap->num_pages].offset = 0;
+ ap->descs[ap->num_pages].length = PAGE_SIZE;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
err = 0;
- if (is_writeback && fuse_writepage_in_flight(req, page)) {
+ if (is_writeback && fuse_writepage_in_flight(wpa, page)) {
end_page_writeback(page);
- data->req = NULL;
+ data->wpa = NULL;
goto out_unlock;
}
- data->orig_pages[req->num_pages] = page;
+ data->orig_pages[ap->num_pages] = page;
/*
* Protected by fi->lock against concurrent access by
* fuse_page_is_writeback().
*/
spin_lock(&fi->lock);
- req->num_pages++;
+ ap->num_pages++;
spin_unlock(&fi->lock);
out_unlock:
@@ -1989,7 +2112,7 @@ static int fuse_writepages(struct address_space *mapping,
goto out;
data.inode = inode;
- data.req = NULL;
+ data.wpa = NULL;
data.ff = NULL;
err = -ENOMEM;
@@ -2000,9 +2123,9 @@ static int fuse_writepages(struct address_space *mapping,
goto out;
err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
- if (data.req) {
+ if (data.wpa) {
/* Ignore errors if we can write at least one page */
- BUG_ON(!data.req->num_pages);
+ WARN_ON(!data.wpa->ia.ap.num_pages);
fuse_writepages_send(&data);
err = 0;
}
@@ -2222,11 +2345,11 @@ static void fuse_lk_fill(struct fuse_args *args, struct file *file,
inarg->lk.pid = pid;
if (flock)
inarg->lk_flags |= FUSE_LK_FLOCK;
- args->in.h.opcode = opcode;
- args->in.h.nodeid = get_node_id(inode);
- args->in.numargs = 1;
- args->in.args[0].size = sizeof(*inarg);
- args->in.args[0].value = inarg;
+ args->opcode = opcode;
+ args->nodeid = get_node_id(inode);
+ args->in_numargs = 1;
+ args->in_args[0].size = sizeof(*inarg);
+ args->in_args[0].value = inarg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
@@ -2239,9 +2362,9 @@ static int fuse_getlk(struct file *file, struct file_lock *fl)
int err;
fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
err = convert_fuse_file_lock(fc, &outarg.lk, fl);
@@ -2336,14 +2459,14 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
memset(&inarg, 0, sizeof(inarg));
inarg.block = block;
inarg.blocksize = inode->i_sb->s_blocksize;
- args.in.h.opcode = FUSE_BMAP;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_BMAP;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS)
fc->no_bmap = 1;
@@ -2368,14 +2491,14 @@ static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
if (fc->no_lseek)
goto fallback;
- args.in.h.opcode = FUSE_LSEEK;
- args.in.h.nodeid = ff->nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_LSEEK;
+ args.nodeid = ff->nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (err) {
if (err == -ENOSYS) {
@@ -2573,14 +2696,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
.flags = flags
};
struct fuse_ioctl_out outarg;
- struct fuse_req *req = NULL;
- struct page **pages = NULL;
struct iovec *iov_page = NULL;
struct iovec *in_iov = NULL, *out_iov = NULL;
- unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
- size_t in_size, out_size, transferred, c;
+ unsigned int in_iovs = 0, out_iovs = 0, max_pages;
+ size_t in_size, out_size, c;
+ ssize_t transferred;
int err, i;
struct iov_iter ii;
+ struct fuse_args_pages ap = {};
#if BITS_PER_LONG == 32
inarg.flags |= FUSE_IOCTL_32BIT;
@@ -2598,11 +2721,13 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
err = -ENOMEM;
- pages = kcalloc(fc->max_pages, sizeof(pages[0]), GFP_KERNEL);
+ ap.pages = fuse_pages_alloc(fc->max_pages, GFP_KERNEL, &ap.descs);
iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
- if (!pages || !iov_page)
+ if (!ap.pages || !iov_page)
goto out;
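+ /* descs are zero-initialized, so every page gets offset 0 and a full PAGE_SIZE length */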
+ fuse_page_descs_length_init(ap.descs, 0, fc->max_pages);
+
/*
* If restricted, initialize IO parameters as encoded in @cmd.
* RETRY from server is not allowed.
@@ -2639,56 +2764,44 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
err = -ENOMEM;
if (max_pages > fc->max_pages)
goto out;
- while (num_pages < max_pages) {
- pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (!pages[num_pages])
+ while (ap.num_pages < max_pages) {
+ ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!ap.pages[ap.num_pages])
goto out;
- num_pages++;
+ ap.num_pages++;
}
- req = fuse_get_req(fc, num_pages);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- req = NULL;
- goto out;
- }
- memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
- req->num_pages = num_pages;
- fuse_page_descs_length_init(req, 0, req->num_pages);
/* okay, let's send it to the client */
- req->in.h.opcode = FUSE_IOCTL;
- req->in.h.nodeid = ff->nodeid;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
+ ap.args.opcode = FUSE_IOCTL;
+ ap.args.nodeid = ff->nodeid;
+ ap.args.in_numargs = 1;
+ ap.args.in_args[0].size = sizeof(inarg);
+ ap.args.in_args[0].value = &inarg;
if (in_size) {
- req->in.numargs++;
- req->in.args[1].size = in_size;
- req->in.argpages = 1;
+ ap.args.in_numargs++;
+ ap.args.in_args[1].size = in_size;
+ ap.args.in_pages = true;
err = -EFAULT;
iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size);
- for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) {
- c = copy_page_from_iter(pages[i], 0, PAGE_SIZE, &ii);
+ for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
+ c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
if (c != PAGE_SIZE && iov_iter_count(&ii))
goto out;
}
}
- req->out.numargs = 2;
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- req->out.args[1].size = out_size;
- req->out.argpages = 1;
- req->out.argvar = 1;
+ ap.args.out_numargs = 2;
+ ap.args.out_args[0].size = sizeof(outarg);
+ ap.args.out_args[0].value = &outarg;
+ ap.args.out_args[1].size = out_size;
+ ap.args.out_pages = true;
+ ap.args.out_argvar = true;
- fuse_request_send(fc, req);
- err = req->out.h.error;
- transferred = req->out.args[1].size;
- fuse_put_request(fc, req);
- req = NULL;
- if (err)
+ transferred = fuse_simple_request(fc, &ap.args);
+ err = transferred;
+ if (transferred < 0)
goto out;
/* did it ask for retry? */
@@ -2713,7 +2826,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
goto out;
- vaddr = kmap_atomic(pages[0]);
+ vaddr = kmap_atomic(ap.pages[0]);
err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
transferred, in_iovs + out_iovs,
(flags & FUSE_IOCTL_COMPAT) != 0);
@@ -2741,19 +2854,17 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
err = -EFAULT;
iov_iter_init(&ii, READ, out_iov, out_iovs, transferred);
- for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) {
- c = copy_page_to_iter(pages[i], 0, PAGE_SIZE, &ii);
+ for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
+ c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
if (c != PAGE_SIZE && iov_iter_count(&ii))
goto out;
}
err = 0;
out:
- if (req)
- fuse_put_request(fc, req);
free_page((unsigned long) iov_page);
- while (num_pages)
- __free_page(pages[--num_pages]);
- kfree(pages);
+ while (ap.num_pages)
+ __free_page(ap.pages[--ap.num_pages]);
+ kfree(ap.pages);
return err ? err : outarg.result;
}
@@ -2861,14 +2972,14 @@ __poll_t fuse_file_poll(struct file *file, poll_table *wait)
fuse_register_polled_file(fc, ff);
}
- args.in.h.opcode = FUSE_POLL;
- args.in.h.nodeid = ff->nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_POLL;
+ args.nodeid = ff->nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
@@ -3076,11 +3187,11 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
if (!(mode & FALLOC_FL_KEEP_SIZE))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
- args.in.h.opcode = FUSE_FALLOCATE;
- args.in.h.nodeid = ff->nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
+ args.opcode = FUSE_FALLOCATE;
+ args.nodeid = ff->nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_fallocate = 1;
@@ -3168,14 +3279,14 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (is_unstable)
set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
- args.in.h.opcode = FUSE_COPY_FILE_RANGE;
- args.in.h.nodeid = ff_in->nodeid;
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.opcode = FUSE_COPY_FILE_RANGE;
+ args.nodeid = ff_in->nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_copy_file_range = 1;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 24dbca777775..fc89cb40e874 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -47,9 +47,6 @@
/** Number of dentries for each connection in the control filesystem */
#define FUSE_CTL_NUM_DENTRIES 5
-/** Number of page pointers embedded in fuse_req */
-#define FUSE_REQ_INLINE_PAGES 1
-
/** List of active connections */
extern struct list_head fuse_conn_list;
@@ -164,17 +161,15 @@ enum {
};
struct fuse_conn;
+struct fuse_release_args;
/** FUSE specific file data */
struct fuse_file {
/** Fuse connection for this file */
struct fuse_conn *fc;
- /*
- * Request reserved for flush and release.
- * Modified under relative fuse_inode::lock.
- */
- struct fuse_req *reserved_req;
+ /* Argument space reserved for release */
+ struct fuse_release_args *release_args;
/** Kernel file handle guaranteed to be unique */
u64 kh;
@@ -229,57 +224,12 @@ struct fuse_in_arg {
const void *value;
};
-/** The request input */
-struct fuse_in {
- /** The request header */
- struct fuse_in_header h;
-
- /** True if the data for the last argument is in req->pages */
- unsigned argpages:1;
-
- /** Number of arguments */
- unsigned numargs;
-
- /** Array of arguments */
- struct fuse_in_arg args[3];
-};
-
/** One output argument of a request */
struct fuse_arg {
unsigned size;
void *value;
};
-/** The request output */
-struct fuse_out {
- /** Header returned from userspace */
- struct fuse_out_header h;
-
- /*
- * The following bitfields are not changed during the request
- * processing
- */
-
- /** Last argument is variable length (can be shorter than
- arg->size) */
- unsigned argvar:1;
-
- /** Last argument is a list of pages to copy data to */
- unsigned argpages:1;
-
- /** Zero partially or not copied pages */
- unsigned page_zeroing:1;
-
- /** Pages may be replaced with new ones */
- unsigned page_replace:1;
-
- /** Number or arguments */
- unsigned numargs;
-
- /** Array of arguments */
- struct fuse_arg args[2];
-};
-
/** FUSE page descriptor */
struct fuse_page_desc {
unsigned int length;
@@ -287,20 +237,28 @@ struct fuse_page_desc {
};
struct fuse_args {
- struct {
- struct {
- uint32_t opcode;
- uint64_t nodeid;
- } h;
- unsigned numargs;
- struct fuse_in_arg args[3];
+ uint64_t nodeid;
+ uint32_t opcode;
+ unsigned short in_numargs;
+ unsigned short out_numargs;
+ bool force:1;
+ bool noreply:1;
+ bool nocreds:1;
+ bool in_pages:1;
+ bool out_pages:1;
+ bool out_argvar:1;
+ bool page_zeroing:1;
+ bool page_replace:1;
+ struct fuse_in_arg in_args[3];
+ struct fuse_arg out_args[2];
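+ /* Completion callback, receives the request error (0 on success) */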
+ void (*end)(struct fuse_conn *fc, struct fuse_args *args, int error);
+};
- } in;
- struct {
- unsigned argvar:1;
- unsigned numargs;
- struct fuse_arg args[2];
- } out;
+struct fuse_args_pages {
+ struct fuse_args args;
+ struct page **pages;
+ struct fuse_page_desc *descs;
+ unsigned int num_pages;
};
#define FUSE_ARGS(args) struct fuse_args args = {}
@@ -373,83 +331,70 @@ struct fuse_req {
/** Entry on the interrupts list */
struct list_head intr_entry;
+ /* Input/output arguments */
+ struct fuse_args *args;
+
/** refcount */
refcount_t count;
/* Request flags, updated with test/set/clear_bit() */
unsigned long flags;
- /** The request input */
- struct fuse_in in;
+ /* The request input header */
+ struct {
+ struct fuse_in_header h;
+ } in;
- /** The request output */
- struct fuse_out out;
+ /* The request output header */
+ struct {
+ struct fuse_out_header h;
+ } out;
/** Used to wake up the task waiting for completion of request*/
wait_queue_head_t waitq;
- /** Data for asynchronous requests */
- union {
- struct {
- struct fuse_release_in in;
- struct inode *inode;
- } release;
- struct fuse_init_in init_in;
- struct fuse_init_out init_out;
- struct cuse_init_in cuse_init_in;
- struct {
- struct fuse_read_in in;
- u64 attr_ver;
- } read;
- struct {
- struct fuse_write_in in;
- struct fuse_write_out out;
- struct fuse_req *next;
- } write;
- struct fuse_notify_retrieve_in retrieve_in;
- } misc;
-
- /** page vector */
- struct page **pages;
-
- /** page-descriptor vector */
- struct fuse_page_desc *page_descs;
-
- /** size of the 'pages' array */
- unsigned max_pages;
-
- /** inline page vector */
- struct page *inline_pages[FUSE_REQ_INLINE_PAGES];
-
- /** inline page-descriptor vector */
- struct fuse_page_desc inline_page_descs[FUSE_REQ_INLINE_PAGES];
-
- /** number of pages in vector */
- unsigned num_pages;
-
- /** File used in the request (or NULL) */
- struct fuse_file *ff;
-
- /** Inode used in the request or NULL */
- struct inode *inode;
+};
- /** AIO control block */
- struct fuse_io_priv *io;
+struct fuse_iqueue;
- /** Link on fi->writepages */
- struct list_head writepages_entry;
+/**
+ * Input queue callbacks
+ *
+ * Input queue signalling is device-specific. For example, the /dev/fuse file
+ * uses fiq->waitq and fasync to wake processes that are waiting on queue
+ * readiness. These callbacks allow other device types to respond to input
+ * queue activity.
+ */
+struct fuse_iqueue_ops {
+ /**
+ * Signal that a forget has been queued
+ */
+ void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
+ __releases(fiq->lock);
- /** Request completion callback */
- void (*end)(struct fuse_conn *, struct fuse_req *);
+ /**
+ * Signal that an INTERRUPT request has been queued
+ */
+ void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
+ __releases(fiq->lock);
- /** Request is stolen from fuse_file->reserved_req */
- struct file *stolen_file;
+ /**
+ * Signal that a request has been queued
+ */
+ void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
+ __releases(fiq->lock);
};
+/** /dev/fuse input queue operations */
+extern const struct fuse_iqueue_ops fuse_dev_fiq_ops;
+
struct fuse_iqueue {
/** Connection established */
unsigned connected;
+ /** Lock protecting accesses to members of this structure */
+ spinlock_t lock;
+
/** Readers of the connection are waiting on this */
wait_queue_head_t waitq;
@@ -471,6 +416,12 @@ struct fuse_iqueue {
/** O_ASYNC requests */
struct fasync_struct *fasync;
+
+ /** Device-specific callbacks */
+ const struct fuse_iqueue_ops *ops;
+
+ /** Device-specific state */
+ void *priv;
};
#define FUSE_PQ_HASH_BITS 8
@@ -504,6 +455,29 @@ struct fuse_dev {
struct list_head entry;
};
+struct fuse_fs_context {
+ int fd;
+ unsigned int rootmode;
+ kuid_t user_id;
+ kgid_t group_id;
+ bool is_bdev:1;
+ bool fd_present:1;
+ bool rootmode_present:1;
+ bool user_id_present:1;
+ bool group_id_present:1;
+ bool default_permissions:1;
+ bool allow_other:1;
+ bool destroy:1;
+ bool no_control:1;
+ bool no_force_umount:1;
+ unsigned int max_read;
+ unsigned int blksize;
+ const char *subtype;
+
+ /* fuse_dev pointer to fill in, should contain NULL on entry */
+ void **fudptr;
+};
+
/**
* A Fuse connection.
*
@@ -584,9 +558,6 @@ struct fuse_conn {
/** waitq for blocked connection */
wait_queue_head_t blocked_waitq;
- /** waitq for reserved requests */
- wait_queue_head_t reserved_req_waitq;
-
/** Connection established, cleared on umount, connection
abort and device release */
unsigned connected;
@@ -721,6 +692,18 @@ struct fuse_conn {
/** Does the filesystem support copy_file_range? */
unsigned no_copy_file_range:1;
+ /* Send DESTROY request */
+ unsigned int destroy:1;
+
+ /* Delete dentries that have gone stale */
+ unsigned int delete_stale:1;
+
+ /** Do not create entry in fusectl fs */
+ unsigned int no_control:1;
+
+ /** Do not allow MNT_FORCE umount */
+ unsigned int no_force_umount:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -742,9 +725,6 @@ struct fuse_conn {
/** Key for lock owner ID scrambling */
u32 scramble_key[4];
- /** Reserved request for the DESTROY message */
- struct fuse_req *destroy_req;
-
/** Version counter for attribute changes */
atomic64_t attr_version;
@@ -820,14 +800,32 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
struct fuse_forget_link *fuse_alloc_forget(void);
-/* Used by READDIRPLUS */
-void fuse_force_forget(struct file *file, u64 nodeid);
+struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+ unsigned int max,
+ unsigned int *countp);
-/**
+/*
* Initialize READ or READDIR request
*/
-void fuse_read_fill(struct fuse_req *req, struct file *file,
- loff_t pos, size_t count, int opcode);
+struct fuse_io_args {
+ union {
+ struct {
+ struct fuse_read_in in;
+ u64 attr_ver;
+ } read;
+ struct {
+ struct fuse_write_in in;
+ struct fuse_write_out out;
+ } write;
+ };
+ struct fuse_args_pages ap;
+ struct fuse_io_priv *io;
+ struct fuse_file *ff;
+};
+
+void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
+ size_t count, int opcode);
+
/**
* Send OPEN or OPENDIR request
@@ -900,61 +898,16 @@ int fuse_ctl_init(void);
void __exit fuse_ctl_cleanup(void);
/**
- * Allocate a request
- */
-struct fuse_req *fuse_request_alloc(unsigned npages);
-
-struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
-
-bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
- gfp_t flags);
-
-
-/**
- * Free a request
- */
-void fuse_request_free(struct fuse_req *req);
-
-/**
- * Get a request, may fail with -ENOMEM,
- * caller should specify # elements in req->pages[] explicitly
- */
-struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages);
-struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
- unsigned npages);
-
-/*
- * Increment reference count on request
- */
-void __fuse_get_request(struct fuse_req *req);
-
-/**
- * Gets a requests for a file operation, always succeeds
- */
-struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
- struct file *file);
-
-/**
- * Decrement reference count of a request. If count goes to zero free
- * the request.
- */
-void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
-
-/**
- * Send a request (synchronous)
- */
-void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req);
-
-/**
* Simple request sending that does request allocation and freeing
*/
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args);
+int fuse_simple_background(struct fuse_conn *fc, struct fuse_args *args,
+ gfp_t gfp_flags);
/**
- * Send a request in the background
+ * End a finished request
*/
-void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
-bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req);
+void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req);
/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc);
@@ -980,15 +933,33 @@ struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
/**
* Initialize fuse_conn
*/
-void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns);
+void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns,
+ const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv);
/**
* Release reference to fuse_conn
*/
void fuse_conn_put(struct fuse_conn *fc);
-struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc);
+struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc);
+struct fuse_dev *fuse_dev_alloc(void);
+void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc);
void fuse_dev_free(struct fuse_dev *fud);
+void fuse_send_init(struct fuse_conn *fc);
+
+/**
+ * Fill in superblock and initialize fuse connection
+ * @sb: partially-initialized superblock to fill in
+ * @ctx: mount context
+ */
+int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx);
+
+/**
+ * Disassociate fuse connection from superblock and kill the superblock
+ *
+ * Calls kill_anon_super(), do not use with bdev mounts.
+ */
+void fuse_kill_sb_anon(struct super_block *sb);
/**
* Add connection to control filesystem
@@ -1093,4 +1064,15 @@ int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type);
/* readdir.c */
int fuse_readdir(struct file *file, struct dir_context *ctx);
+/**
+ * Return the number of bytes in an argument list
+ */
+unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args);
+
+/**
+ * Get the next unique ID for a request
+ */
+u64 fuse_get_unique(struct fuse_iqueue *fiq);
+void fuse_free_conn(struct fuse_conn *fc);
+
#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4bb885b0f032..51cb471f4dc3 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -15,7 +15,8 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/statfs.h>
#include <linux/random.h>
#include <linux/sched.h>
@@ -59,24 +60,13 @@ MODULE_PARM_DESC(max_user_congthresh,
/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
-struct fuse_mount_data {
- int fd;
- unsigned rootmode;
- kuid_t user_id;
- kgid_t group_id;
- unsigned fd_present:1;
- unsigned rootmode_present:1;
- unsigned user_id_present:1;
- unsigned group_id_present:1;
- unsigned default_permissions:1;
- unsigned allow_other:1;
- unsigned max_read;
- unsigned blksize;
-};
+#ifdef CONFIG_BLOCK
+static struct file_system_type fuseblk_fs_type;
+#endif
struct fuse_forget_link *fuse_alloc_forget(void)
{
- return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);
+ return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
}
static struct inode *fuse_alloc_inode(struct super_block *sb)
@@ -374,19 +364,21 @@ void fuse_unlock_inode(struct inode *inode, bool locked)
static void fuse_umount_begin(struct super_block *sb)
{
- fuse_abort_conn(get_fuse_conn_super(sb));
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+ if (!fc->no_force_umount)
+ fuse_abort_conn(fc);
}
static void fuse_send_destroy(struct fuse_conn *fc)
{
- struct fuse_req *req = fc->destroy_req;
- if (req && fc->conn_init) {
- fc->destroy_req = NULL;
- req->in.h.opcode = FUSE_DESTROY;
- __set_bit(FR_FORCE, &req->flags);
- __clear_bit(FR_BACKGROUND, &req->flags);
- fuse_request_send(fc, req);
- fuse_put_request(fc, req);
+ if (fc->conn_init) {
+ FUSE_ARGS(args);
+
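+ /* force: send even if interrupted; nocreds: umount has no caller credentials to record */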
+ args.opcode = FUSE_DESTROY;
+ args.force = true;
+ args.nocreds = true;
+ fuse_simple_request(fc, &args);
}
}
@@ -430,12 +422,12 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
}
memset(&outarg, 0, sizeof(outarg));
- args.in.numargs = 0;
- args.in.h.opcode = FUSE_STATFS;
- args.in.h.nodeid = get_node_id(d_inode(dentry));
- args.out.numargs = 1;
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.in_numargs = 0;
+ args.opcode = FUSE_STATFS;
+ args.nodeid = get_node_id(d_inode(dentry));
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
convert_fuse_statfs(buf, &outarg.st);
@@ -443,6 +435,8 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
}
enum {
+ OPT_SOURCE,
+ OPT_SUBTYPE,
OPT_FD,
OPT_ROOTMODE,
OPT_USER_ID,
@@ -454,111 +448,109 @@ enum {
OPT_ERR
};
-static const match_table_t tokens = {
- {OPT_FD, "fd=%u"},
- {OPT_ROOTMODE, "rootmode=%o"},
- {OPT_USER_ID, "user_id=%u"},
- {OPT_GROUP_ID, "group_id=%u"},
- {OPT_DEFAULT_PERMISSIONS, "default_permissions"},
- {OPT_ALLOW_OTHER, "allow_other"},
- {OPT_MAX_READ, "max_read=%u"},
- {OPT_BLKSIZE, "blksize=%u"},
- {OPT_ERR, NULL}
+static const struct fs_parameter_spec fuse_param_specs[] = {
+ fsparam_string ("source", OPT_SOURCE),
+ fsparam_u32 ("fd", OPT_FD),
+ fsparam_u32oct ("rootmode", OPT_ROOTMODE),
+ fsparam_u32 ("user_id", OPT_USER_ID),
+ fsparam_u32 ("group_id", OPT_GROUP_ID),
+ fsparam_flag ("default_permissions", OPT_DEFAULT_PERMISSIONS),
+ fsparam_flag ("allow_other", OPT_ALLOW_OTHER),
+ fsparam_u32 ("max_read", OPT_MAX_READ),
+ fsparam_u32 ("blksize", OPT_BLKSIZE),
+ fsparam_string ("subtype", OPT_SUBTYPE),
+ {}
+};
+
+static const struct fs_parameter_description fuse_fs_parameters = {
+ .name = "fuse",
+ .specs = fuse_param_specs,
};
-static int fuse_match_uint(substring_t *s, unsigned int *res)
+static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- int err = -ENOMEM;
- char *buf = match_strdup(s);
- if (buf) {
- err = kstrtouint(buf, 10, res);
- kfree(buf);
+ struct fs_parse_result result;
+ struct fuse_fs_context *ctx = fc->fs_private;
+ int opt;
+
+ opt = fs_parse(fc, &fuse_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case OPT_SOURCE:
+ if (fc->source)
+ return invalf(fc, "fuse: Multiple sources specified");
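+ /* Transfer ownership of the string to fc->source */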
+ fc->source = param->string;
+ param->string = NULL;
+ break;
+
+ case OPT_SUBTYPE:
+ if (ctx->subtype)
+ return invalf(fc, "fuse: Multiple subtypes specified");
+ ctx->subtype = param->string;
+ param->string = NULL;
+ return 0;
+
+ case OPT_FD:
+ ctx->fd = result.uint_32;
+ ctx->fd_present = 1;
+ break;
+
+ case OPT_ROOTMODE:
+ if (!fuse_valid_type(result.uint_32))
+ return invalf(fc, "fuse: Invalid rootmode");
+ ctx->rootmode = result.uint_32;
+ ctx->rootmode_present = 1;
+ break;
+
+ case OPT_USER_ID:
+ ctx->user_id = make_kuid(fc->user_ns, result.uint_32);
+ if (!uid_valid(ctx->user_id))
+ return invalf(fc, "fuse: Invalid user_id");
+ ctx->user_id_present = 1;
+ break;
+
+ case OPT_GROUP_ID:
+ ctx->group_id = make_kgid(fc->user_ns, result.uint_32);
+ if (!gid_valid(ctx->group_id))
+ return invalf(fc, "fuse: Invalid group_id");
+ ctx->group_id_present = 1;
+ break;
+
+ case OPT_DEFAULT_PERMISSIONS:
+ ctx->default_permissions = 1;
+ break;
+
+ case OPT_ALLOW_OTHER:
+ ctx->allow_other = 1;
+ break;
+
+ case OPT_MAX_READ:
+ ctx->max_read = result.uint_32;
+ break;
+
+ case OPT_BLKSIZE:
+ if (!ctx->is_bdev)
+ return invalf(fc, "fuse: blksize only supported for fuseblk");
+ ctx->blksize = result.uint_32;
+ break;
+
+ default:
+ return -EINVAL;
}
- return err;
+
+ return 0;
}
-static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev,
- struct user_namespace *user_ns)
+static void fuse_free_fc(struct fs_context *fc)
{
- char *p;
- memset(d, 0, sizeof(struct fuse_mount_data));
- d->max_read = ~0;
- d->blksize = FUSE_DEFAULT_BLKSIZE;
-
- while ((p = strsep(&opt, ",")) != NULL) {
- int token;
- int value;
- unsigned uv;
- substring_t args[MAX_OPT_ARGS];
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case OPT_FD:
- if (match_int(&args[0], &value))
- return 0;
- d->fd = value;
- d->fd_present = 1;
- break;
-
- case OPT_ROOTMODE:
- if (match_octal(&args[0], &value))
- return 0;
- if (!fuse_valid_type(value))
- return 0;
- d->rootmode = value;
- d->rootmode_present = 1;
- break;
-
- case OPT_USER_ID:
- if (fuse_match_uint(&args[0], &uv))
- return 0;
- d->user_id = make_kuid(user_ns, uv);
- if (!uid_valid(d->user_id))
- return 0;
- d->user_id_present = 1;
- break;
-
- case OPT_GROUP_ID:
- if (fuse_match_uint(&args[0], &uv))
- return 0;
- d->group_id = make_kgid(user_ns, uv);
- if (!gid_valid(d->group_id))
- return 0;
- d->group_id_present = 1;
- break;
-
- case OPT_DEFAULT_PERMISSIONS:
- d->default_permissions = 1;
- break;
-
- case OPT_ALLOW_OTHER:
- d->allow_other = 1;
- break;
-
- case OPT_MAX_READ:
- if (match_int(&args[0], &value))
- return 0;
- d->max_read = value;
- break;
-
- case OPT_BLKSIZE:
- if (!is_bdev || match_int(&args[0], &value))
- return 0;
- d->blksize = value;
- break;
-
- default:
- return 0;
- }
- }
+ struct fuse_fs_context *ctx = fc->fs_private;
- if (!d->fd_present || !d->rootmode_present ||
- !d->user_id_present || !d->group_id_present)
- return 0;
-
- return 1;
+ if (ctx) {
+ kfree(ctx->subtype);
+ kfree(ctx);
+ }
}
static int fuse_show_options(struct seq_file *m, struct dentry *root)
@@ -579,14 +571,19 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
-static void fuse_iqueue_init(struct fuse_iqueue *fiq)
+static void fuse_iqueue_init(struct fuse_iqueue *fiq,
+ const struct fuse_iqueue_ops *ops,
+ void *priv)
{
memset(fiq, 0, sizeof(struct fuse_iqueue));
+ spin_lock_init(&fiq->lock);
init_waitqueue_head(&fiq->waitq);
INIT_LIST_HEAD(&fiq->pending);
INIT_LIST_HEAD(&fiq->interrupts);
fiq->forget_list_tail = &fiq->forget_list_head;
fiq->connected = 1;
+ fiq->ops = ops;
+ fiq->priv = priv;
}
static void fuse_pqueue_init(struct fuse_pqueue *fpq)
@@ -600,7 +597,8 @@ static void fuse_pqueue_init(struct fuse_pqueue *fpq)
fpq->connected = 1;
}
-void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
+void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns,
+ const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv)
{
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
@@ -609,8 +607,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
refcount_set(&fc->count, 1);
atomic_set(&fc->dev_count, 1);
init_waitqueue_head(&fc->blocked_waitq);
- init_waitqueue_head(&fc->reserved_req_waitq);
- fuse_iqueue_init(&fc->iq);
+ fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv);
INIT_LIST_HEAD(&fc->bg_queue);
INIT_LIST_HEAD(&fc->entry);
INIT_LIST_HEAD(&fc->devices);
@@ -633,8 +630,6 @@ EXPORT_SYMBOL_GPL(fuse_conn_init);
void fuse_conn_put(struct fuse_conn *fc)
{
if (refcount_dec_and_test(&fc->count)) {
- if (fc->destroy_req)
- fuse_request_free(fc->destroy_req);
put_pid_ns(fc->pid_ns);
put_user_ns(fc->user_ns);
fc->release(fc);
@@ -822,9 +817,12 @@ static const struct super_operations fuse_super_operations = {
static void sanitize_global_limit(unsigned *limit)
{
+ /*
+ * The default maximum number of async requests is calculated to consume
+ * 1/2^13 of the total memory, assuming 392 bytes per request.
+ */
if (*limit == 0)
- *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) /
- sizeof(struct fuse_req);
+ *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392;
if (*limit >= 1 << 16)
*limit = (1 << 16) - 1;
@@ -870,11 +868,19 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
spin_unlock(&fc->bg_lock);
}
-static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
+struct fuse_init_args {
+ struct fuse_args args;
+ struct fuse_init_in in;
+ struct fuse_init_out out;
+};
+
+static void process_init_reply(struct fuse_conn *fc, struct fuse_args *args,
+ int error)
{
- struct fuse_init_out *arg = &req->misc.init_out;
+ struct fuse_init_args *ia = container_of(args, typeof(*ia), args);
+ struct fuse_init_out *arg = &ia->out;
- if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
+ if (error || arg->major != FUSE_KERNEL_VERSION)
fc->conn_error = 1;
else {
unsigned long ra_pages;
@@ -951,18 +957,23 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->max_write = max_t(unsigned, 4096, fc->max_write);
fc->conn_init = 1;
}
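+ /* Matches the allocation in fuse_send_init() */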
+ kfree(ia);
+
fuse_set_initialized(fc);
wake_up_all(&fc->blocked_waitq);
}
-static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_send_init(struct fuse_conn *fc)
{
- struct fuse_init_in *arg = &req->misc.init_in;
+ struct fuse_init_args *ia;
+
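+ /* __GFP_NOFAIL: the allocation cannot fail, so no NULL check is needed */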
+ ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);
- arg->major = FUSE_KERNEL_VERSION;
- arg->minor = FUSE_KERNEL_MINOR_VERSION;
- arg->max_readahead = fc->sb->s_bdi->ra_pages * PAGE_SIZE;
- arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
+ ia->in.major = FUSE_KERNEL_VERSION;
+ ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
+ ia->in.max_readahead = fc->sb->s_bdi->ra_pages * PAGE_SIZE;
+ ia->in.flags |=
+ FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
@@ -971,26 +982,32 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA;
- req->in.h.opcode = FUSE_INIT;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(*arg);
- req->in.args[0].value = arg;
- req->out.numargs = 1;
+ ia->args.opcode = FUSE_INIT;
+ ia->args.in_numargs = 1;
+ ia->args.in_args[0].size = sizeof(ia->in);
+ ia->args.in_args[0].value = &ia->in;
+ ia->args.out_numargs = 1;
/* Variable length argument used for backward compatibility
with interface version < 7.5. Rest of init_out is zeroed
by do_get_request(), so a short reply is not a problem */
- req->out.argvar = 1;
- req->out.args[0].size = sizeof(struct fuse_init_out);
- req->out.args[0].value = &req->misc.init_out;
- req->end = process_init_reply;
- fuse_request_send_background(fc, req);
+ ia->args.out_argvar = 1;
+ ia->args.out_args[0].size = sizeof(ia->out);
+ ia->args.out_args[0].value = &ia->out;
+ ia->args.force = true;
+ ia->args.nocreds = true;
+ ia->args.end = process_init_reply;
+
+ if (fuse_simple_background(fc, &ia->args, GFP_KERNEL) != 0)
+ process_init_reply(fc, &ia->args, -ENOTCONN);
}
+EXPORT_SYMBOL_GPL(fuse_send_init);
-static void fuse_free_conn(struct fuse_conn *fc)
+void fuse_free_conn(struct fuse_conn *fc)
{
WARN_ON(!list_empty(&fc->devices));
kfree_rcu(fc, rcu);
}
+EXPORT_SYMBOL_GPL(fuse_free_conn);
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
@@ -1032,7 +1049,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
return 0;
}
-struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
+struct fuse_dev *fuse_dev_alloc(void)
{
struct fuse_dev *fud;
struct list_head *pq;
@@ -1048,16 +1065,33 @@ struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
}
fud->pq.processing = pq;
- fud->fc = fuse_conn_get(fc);
fuse_pqueue_init(&fud->pq);
+ return fud;
+}
+EXPORT_SYMBOL_GPL(fuse_dev_alloc);
+
+void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc)
+{
+ fud->fc = fuse_conn_get(fc);
spin_lock(&fc->lock);
list_add_tail(&fud->entry, &fc->devices);
spin_unlock(&fc->lock);
+}
+EXPORT_SYMBOL_GPL(fuse_dev_install);
+struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc)
+{
+ struct fuse_dev *fud;
+
+ fud = fuse_dev_alloc();
+ if (!fud)
+ return NULL;
+
+ fuse_dev_install(fud, fc);
return fud;
}
-EXPORT_SYMBOL_GPL(fuse_dev_alloc);
+EXPORT_SYMBOL_GPL(fuse_dev_alloc_install);
void fuse_dev_free(struct fuse_dev *fud)
{
@@ -1075,17 +1109,13 @@ void fuse_dev_free(struct fuse_dev *fud)
}
EXPORT_SYMBOL_GPL(fuse_dev_free);
-static int fuse_fill_super(struct super_block *sb, void *data, int silent)
+int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
struct fuse_dev *fud;
- struct fuse_conn *fc;
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
struct inode *root;
- struct fuse_mount_data d;
- struct file *file;
struct dentry *root_dentry;
- struct fuse_req *init_req;
int err;
- int is_bdev = sb->s_bdev != NULL;
err = -EINVAL;
if (sb->s_flags & SB_MANDLOCK)
@@ -1093,19 +1123,19 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
- if (!parse_fuse_opt(data, &d, is_bdev, sb->s_user_ns))
- goto err;
-
- if (is_bdev) {
+ if (ctx->is_bdev) {
#ifdef CONFIG_BLOCK
err = -EINVAL;
- if (!sb_set_blocksize(sb, d.blksize))
+ if (!sb_set_blocksize(sb, ctx->blksize))
goto err;
#endif
} else {
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
}
+
+ sb->s_subtype = ctx->subtype;
+ ctx->subtype = NULL;
sb->s_magic = FUSE_SUPER_MAGIC;
sb->s_op = &fuse_super_operations;
sb->s_xattr = fuse_xattr_handlers;
@@ -1116,19 +1146,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_user_ns != &init_user_ns)
sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
- file = fget(d.fd);
- err = -EINVAL;
- if (!file)
- goto err;
-
- /*
- * Require mount to happen from the same user namespace which
- * opened /dev/fuse to prevent potential attacks.
- */
- if (file->f_op != &fuse_dev_operations ||
- file->f_cred->user_ns != sb->s_user_ns)
- goto err_fput;
-
/*
* If we are not in the initial user namespace posix
* acls must be translated.
@@ -1136,17 +1153,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_user_ns != &init_user_ns)
sb->s_xattr = fuse_no_acl_xattr_handlers;
- fc = kmalloc(sizeof(*fc), GFP_KERNEL);
- err = -ENOMEM;
- if (!fc)
- goto err_fput;
-
- fuse_conn_init(fc, sb->s_user_ns);
- fc->release = fuse_free_conn;
-
- fud = fuse_dev_alloc(fc);
+ fud = fuse_dev_alloc_install(fc);
if (!fud)
- goto err_put_conn;
+ goto err;
fc->dev = sb->s_dev;
fc->sb = sb;
@@ -1159,17 +1168,17 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fc->dont_mask = 1;
sb->s_flags |= SB_POSIXACL;
- fc->default_permissions = d.default_permissions;
- fc->allow_other = d.allow_other;
- fc->user_id = d.user_id;
- fc->group_id = d.group_id;
- fc->max_read = max_t(unsigned, 4096, d.max_read);
-
- /* Used by get_root_inode() */
- sb->s_fs_info = fc;
+ fc->default_permissions = ctx->default_permissions;
+ fc->allow_other = ctx->allow_other;
+ fc->user_id = ctx->user_id;
+ fc->group_id = ctx->group_id;
+ fc->max_read = max_t(unsigned, 4096, ctx->max_read);
+ fc->destroy = ctx->destroy;
+ fc->no_control = ctx->no_control;
+ fc->no_force_umount = ctx->no_force_umount;
err = -ENOMEM;
- root = fuse_get_root_inode(sb, d.rootmode);
+ root = fuse_get_root_inode(sb, ctx->rootmode);
sb->s_d_op = &fuse_root_dentry_operations;
root_dentry = d_make_root(root);
if (!root_dentry)
@@ -1177,20 +1186,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
/* Root dentry doesn't have .d_revalidate */
sb->s_d_op = &fuse_dentry_operations;
- init_req = fuse_request_alloc(0);
- if (!init_req)
- goto err_put_root;
- __set_bit(FR_BACKGROUND, &init_req->flags);
-
- if (is_bdev) {
- fc->destroy_req = fuse_request_alloc(0);
- if (!fc->destroy_req)
- goto err_free_init_req;
- }
-
mutex_lock(&fuse_mutex);
err = -EINVAL;
- if (file->private_data)
+ if (*ctx->fudptr)
goto err_unlock;
err = fuse_ctl_add_conn(fc);
@@ -1199,27 +1197,62 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
list_add_tail(&fc->entry, &fuse_conn_list);
sb->s_root = root_dentry;
- file->private_data = fud;
+ *ctx->fudptr = fud;
mutex_unlock(&fuse_mutex);
+ return 0;
+
+ err_unlock:
+ mutex_unlock(&fuse_mutex);
+ dput(root_dentry);
+ err_dev_free:
+ fuse_dev_free(fud);
+ err:
+ return err;
+}
+EXPORT_SYMBOL_GPL(fuse_fill_super_common);
+
+static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
+{
+ struct fuse_fs_context *ctx = fsc->fs_private;
+ struct file *file;
+ int err;
+ struct fuse_conn *fc;
+
+ err = -EINVAL;
+ file = fget(ctx->fd);
+ if (!file)
+ goto err;
+
+ /*
+ * Require mount to happen from the same user namespace which
+ * opened /dev/fuse to prevent potential attacks.
+ */
+ if ((file->f_op != &fuse_dev_operations) ||
+ (file->f_cred->user_ns != sb->s_user_ns))
+ goto err_fput;
+ ctx->fudptr = &file->private_data;
+
+ fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!fc)
+ goto err_fput;
+
+ fuse_conn_init(fc, sb->s_user_ns, &fuse_dev_fiq_ops, NULL);
+ fc->release = fuse_free_conn;
+ sb->s_fs_info = fc;
+
+ err = fuse_fill_super_common(sb, ctx);
+ if (err)
+ goto err_put_conn;
/*
* atomic_dec_and_test() in fput() provides the necessary
* memory barrier for file->private_data to be visible on all
* CPUs after this
*/
fput(file);
-
- fuse_send_init(fc, init_req);
-
+ fuse_send_init(get_fuse_conn_super(sb));
return 0;
- err_unlock:
- mutex_unlock(&fuse_mutex);
- err_free_init_req:
- fuse_request_free(init_req);
- err_put_root:
- dput(root_dentry);
- err_dev_free:
- fuse_dev_free(fud);
err_put_conn:
fuse_conn_put(fc);
sb->s_fs_info = NULL;
@@ -1229,11 +1262,52 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
return err;
}
-static struct dentry *fuse_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *raw_data)
+static int fuse_get_tree(struct fs_context *fc)
+{
+ struct fuse_fs_context *ctx = fc->fs_private;
+
+ if (!ctx->fd_present || !ctx->rootmode_present ||
+ !ctx->user_id_present || !ctx->group_id_present)
+ return -EINVAL;
+
+#ifdef CONFIG_BLOCK
+ if (ctx->is_bdev)
+ return get_tree_bdev(fc, fuse_fill_super);
+#endif
+
+ return get_tree_nodev(fc, fuse_fill_super);
+}
+
+static const struct fs_context_operations fuse_context_ops = {
+ .free = fuse_free_fc,
+ .parse_param = fuse_parse_param,
+ .get_tree = fuse_get_tree,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int fuse_init_fs_context(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
+ struct fuse_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max_read = ~0;
+ ctx->blksize = FUSE_DEFAULT_BLKSIZE;
+
+#ifdef CONFIG_BLOCK
+ if (fc->fs_type == &fuseblk_fs_type) {
+ ctx->is_bdev = true;
+ ctx->destroy = true;
+ }
+#endif
+
+ fc->fs_private = ctx;
+ fc->ops = &fuse_context_ops;
+ return 0;
}
static void fuse_sb_destroy(struct super_block *sb)
@@ -1241,7 +1315,8 @@ static void fuse_sb_destroy(struct super_block *sb)
struct fuse_conn *fc = get_fuse_conn_super(sb);
if (fc) {
- fuse_send_destroy(fc);
+ if (fc->destroy)
+ fuse_send_destroy(fc);
fuse_abort_conn(fc);
fuse_wait_aborted(fc);
@@ -1252,29 +1327,24 @@ static void fuse_sb_destroy(struct super_block *sb)
}
}
-static void fuse_kill_sb_anon(struct super_block *sb)
+void fuse_kill_sb_anon(struct super_block *sb)
{
fuse_sb_destroy(sb);
kill_anon_super(sb);
}
+EXPORT_SYMBOL_GPL(fuse_kill_sb_anon);
static struct file_system_type fuse_fs_type = {
.owner = THIS_MODULE,
.name = "fuse",
.fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
- .mount = fuse_mount,
+ .init_fs_context = fuse_init_fs_context,
+ .parameters = &fuse_fs_parameters,
.kill_sb = fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
#ifdef CONFIG_BLOCK
-static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *raw_data)
-{
- return mount_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super);
-}
-
static void fuse_kill_sb_blk(struct super_block *sb)
{
fuse_sb_destroy(sb);
@@ -1284,7 +1354,8 @@ static void fuse_kill_sb_blk(struct super_block *sb)
static struct file_system_type fuseblk_fs_type = {
.owner = THIS_MODULE,
.name = "fuseblk",
- .mount = fuse_mount_blk,
+ .init_fs_context = fuse_init_fs_context,
+ .parameters = &fuse_fs_parameters,
.kill_sb = fuse_kill_sb_blk,
.fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
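A minimal sketch of the fs_context pattern the fuse hunks above adopt (not part of this patch; hypothetical "examplefs" names, assuming <linux/fs_context.h> and <linux/slab.h>): per-mount option state lives in fc->fs_private, ->parse_param() receives one option at a time, and ->get_tree() builds the superblock through a fill_super callback.

struct examplefs_ctx {
	unsigned int an_option;		/* filled in by ->parse_param() */
};

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct examplefs_ctx *ctx = fsc->fs_private;

	/* apply ctx->an_option, set sb->s_op, allocate the root dentry */
	return 0;
}

static int examplefs_get_tree(struct fs_context *fsc)
{
	return get_tree_nodev(fsc, examplefs_fill_super);
}

static void examplefs_free_fc(struct fs_context *fsc)
{
	kfree(fsc->fs_private);
}

static const struct fs_context_operations examplefs_context_ops = {
	.free		= examplefs_free_fc,
	.get_tree	= examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fsc)
{
	struct examplefs_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	fsc->fs_private = ctx;
	fsc->ops = &examplefs_context_ops;
	return 0;
}

The split into fuse_fill_super_common() plus a thin fuse_fill_super() follows the same shape, while also letting other transports reuse the common path through the exported symbol.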
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index 574d03f8a573..5c38b9d84c6e 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -249,6 +249,27 @@ retry:
return 0;
}
+static void fuse_force_forget(struct file *file, u64 nodeid)
+{
+ struct inode *inode = file_inode(file);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_forget_in inarg;
+ FUSE_ARGS(args);
+
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.nlookup = 1;
+ args.opcode = FUSE_FORGET;
+ args.nodeid = nodeid;
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.force = true;
+ args.noreply = true;
+
+ fuse_simple_request(fc, &args);
+ /* ignore errors */
+}
+
static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
struct dir_context *ctx, u64 attr_version)
{
@@ -295,62 +316,55 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
{
- int plus, err;
- size_t nbytes;
+ int plus;
+ ssize_t res;
struct page *page;
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
+ struct fuse_io_args ia = {};
+ struct fuse_args_pages *ap = &ia.ap;
+ struct fuse_page_desc desc = { .length = PAGE_SIZE };
u64 attr_version = 0;
bool locked;
- req = fuse_get_req(fc, 1);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
page = alloc_page(GFP_KERNEL);
- if (!page) {
- fuse_put_request(fc, req);
+ if (!page)
return -ENOMEM;
- }
plus = fuse_use_readdirplus(inode, ctx);
- req->out.argpages = 1;
- req->num_pages = 1;
- req->pages[0] = page;
- req->page_descs[0].length = PAGE_SIZE;
+ ap->args.out_pages = 1;
+ ap->num_pages = 1;
+ ap->pages = &page;
+ ap->descs = &desc;
if (plus) {
attr_version = fuse_get_attr_version(fc);
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIRPLUS);
+ fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE,
+ FUSE_READDIRPLUS);
} else {
- fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
- FUSE_READDIR);
+ fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE,
+ FUSE_READDIR);
}
locked = fuse_lock_inode(inode);
- fuse_request_send(fc, req);
+ res = fuse_simple_request(fc, &ap->args);
fuse_unlock_inode(inode, locked);
- nbytes = req->out.args[0].size;
- err = req->out.h.error;
- fuse_put_request(fc, req);
- if (!err) {
- if (!nbytes) {
+ if (res >= 0) {
+ if (!res) {
struct fuse_file *ff = file->private_data;
if (ff->open_flags & FOPEN_CACHE_DIR)
fuse_readdir_cache_end(file, ctx->pos);
} else if (plus) {
- err = parse_dirplusfile(page_address(page), nbytes,
+ res = parse_dirplusfile(page_address(page), res,
file, ctx, attr_version);
} else {
- err = parse_dirfile(page_address(page), nbytes, file,
+ res = parse_dirfile(page_address(page), res, file,
ctx);
}
}
__free_page(page);
fuse_invalidate_atime(inode);
- return err;
+ return res;
}
enum fuse_parse_result {
@@ -372,11 +386,13 @@ static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff,
for (;;) {
struct fuse_dirent *dirent = addr + offset;
unsigned int nbytes = size - offset;
- size_t reclen = FUSE_DIRENT_SIZE(dirent);
+ size_t reclen;
if (nbytes < FUSE_NAME_OFFSET || !dirent->namelen)
break;
+ reclen = FUSE_DIRENT_SIZE(dirent); /* derefs ->namelen */
+
if (WARN_ON(dirent->namelen > FUSE_NAME_MAX))
return FOUND_ERR;
if (WARN_ON(reclen > nbytes))
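The reordering in the last hunk matters because FUSE_DIRENT_SIZE() dereferences dirent->namelen (as the new comment notes), so evaluating it before the nbytes check could read past the valid bytes when a record header is truncated at the end of the buffer. A generic, hypothetical sketch of the same validate-before-use rule:

/* Hypothetical record check: never let a size macro read a field
 * that has not yet been proven to lie inside the buffer. */
struct record {
	unsigned int len;
	char data[];
};

static bool record_fits(const void *buf, size_t avail)
{
	const struct record *r = buf;

	if (avail < sizeof(*r))		/* fixed header truncated? */
		return false;
	/* only now is r->len safe to read */
	return sizeof(*r) + r->len <= avail;
}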
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index 433717640f78..20d052e08b3b 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -25,15 +25,15 @@ int fuse_setxattr(struct inode *inode, const char *name, const void *value,
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
inarg.flags = flags;
- args.in.h.opcode = FUSE_SETXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 3;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = strlen(name) + 1;
- args.in.args[1].value = name;
- args.in.args[2].size = size;
- args.in.args[2].value = value;
+ args.opcode = FUSE_SETXATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 3;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = strlen(name) + 1;
+ args.in_args[1].value = name;
+ args.in_args[2].size = size;
+ args.in_args[2].value = value;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_setxattr = 1;
@@ -60,22 +60,22 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
- args.in.h.opcode = FUSE_GETXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 2;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- args.in.args[1].size = strlen(name) + 1;
- args.in.args[1].value = name;
+ args.opcode = FUSE_GETXATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 2;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.in_args[1].size = strlen(name) + 1;
+ args.in_args[1].value = name;
/* This is really two different operations rolled into one */
- args.out.numargs = 1;
+ args.out_numargs = 1;
if (size) {
- args.out.argvar = 1;
- args.out.args[0].size = size;
- args.out.args[0].value = value;
+ args.out_argvar = true;
+ args.out_args[0].size = size;
+ args.out_args[0].value = value;
} else {
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
}
ret = fuse_simple_request(fc, &args);
if (!ret && !size)
@@ -121,20 +121,20 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
memset(&inarg, 0, sizeof(inarg));
inarg.size = size;
- args.in.h.opcode = FUSE_LISTXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
+ args.opcode = FUSE_LISTXATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
/* This is really two different operations rolled into one */
- args.out.numargs = 1;
+ args.out_numargs = 1;
if (size) {
- args.out.argvar = 1;
- args.out.args[0].size = size;
- args.out.args[0].value = list;
+ args.out_argvar = true;
+ args.out_args[0].size = size;
+ args.out_args[0].value = list;
} else {
- args.out.args[0].size = sizeof(outarg);
- args.out.args[0].value = &outarg;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
}
ret = fuse_simple_request(fc, &args);
if (!ret && !size)
@@ -157,11 +157,11 @@ int fuse_removexattr(struct inode *inode, const char *name)
if (fc->no_removexattr)
return -EOPNOTSUPP;
- args.in.h.opcode = FUSE_REMOVEXATTR;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = strlen(name) + 1;
- args.in.args[0].value = name;
+ args.opcode = FUSE_REMOVEXATTR;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = strlen(name) + 1;
+ args.in_args[0].value = name;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_removexattr = 1;
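These hunks only rename the argument fields, but the calling convention is worth spelling out: as with the VFS getxattr()/listxattr() interface, passing size == 0 turns the request into a pure length query answered through outarg.size. A hedged sketch of the usual probe-then-fetch caller (hypothetical code, not from this patch):

	ssize_t len = fuse_getxattr(inode, name, NULL, 0);	/* length probe */

	if (len > 0) {
		char *buf = kmalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		len = fuse_getxattr(inode, name, buf, len);	/* real fetch */
		/* ... consume buf, then kfree(buf) ... */
	}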
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6b450065b9d5..5f89c515f5bb 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -584,10 +584,10 @@ struct gfs2_args {
unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */
unsigned int ar_loccookie:1; /* use location based readdir
cookies */
- int ar_commit; /* Commit interval */
- int ar_statfs_quantum; /* The fast statfs interval */
- int ar_quota_quantum; /* The quota interval */
- int ar_statfs_percent; /* The % change to force sync */
+ s32 ar_commit; /* Commit interval */
+ s32 ar_statfs_quantum; /* The fast statfs interval */
+ s32 ar_quota_quantum; /* The quota interval */
+ s32 ar_statfs_percent; /* The % change to force sync */
};
struct gfs2_tune {
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index f3fd5cd9d43f..681b44682b0d 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -21,6 +21,7 @@
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
+#include <linux/fs_parser.h>
#include "gfs2.h"
#include "incore.h"
@@ -1031,16 +1032,17 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
}
/**
- * fill_super - Read in superblock
+ * gfs2_fill_super - Read in superblock
* @sb: The VFS superblock
- * @data: Mount options
+ * @args: Mount options
* @silent: Don't complain if it's not a GFS2 filesystem
*
- * Returns: errno
+ * Returns: -errno
*/
-
-static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
+static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct gfs2_args *args = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh;
int error;
@@ -1205,161 +1207,411 @@ fail_debug:
return error;
}
-static int set_gfs2_super(struct super_block *s, void *data)
+/**
+ * gfs2_get_tree - Get the GFS2 superblock and root directory
+ * @fc: The filesystem context
+ *
+ * Returns: 0 or -errno on error
+ */
+static int gfs2_get_tree(struct fs_context *fc)
{
- s->s_bdev = data;
- s->s_dev = s->s_bdev->bd_dev;
- s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
+ struct gfs2_args *args = fc->fs_private;
+ struct gfs2_sbd *sdp;
+ int error;
+
+ error = get_tree_bdev(fc, gfs2_fill_super);
+ if (error)
+ return error;
+
+ sdp = fc->root->d_sb->s_fs_info;
+ dput(fc->root);
+ if (args->ar_meta)
+ fc->root = dget(sdp->sd_master_dir);
+ else
+ fc->root = dget(sdp->sd_root_dir);
return 0;
}
-static int test_gfs2_super(struct super_block *s, void *ptr)
+static void gfs2_fc_free(struct fs_context *fc)
{
- struct block_device *bdev = ptr;
- return (bdev == s->s_bdev);
+ struct gfs2_args *args = fc->fs_private;
+
+ kfree(args);
}
-/**
- * gfs2_mount - Get the GFS2 superblock
- * @fs_type: The GFS2 filesystem type
- * @flags: Mount flags
- * @dev_name: The name of the device
- * @data: The mount arguments
- *
- * Q. Why not use get_sb_bdev() ?
- * A. We need to select one of two root directories to mount, independent
- * of whether this is the initial, or subsequent, mount of this sb
- *
- * Returns: 0 or -ve on error
- */
+enum gfs2_param {
+ Opt_lockproto,
+ Opt_locktable,
+ Opt_hostdata,
+ Opt_spectator,
+ Opt_ignore_local_fs,
+ Opt_localflocks,
+ Opt_localcaching,
+ Opt_debug,
+ Opt_upgrade,
+ Opt_acl,
+ Opt_quota,
+ Opt_suiddir,
+ Opt_data,
+ Opt_meta,
+ Opt_discard,
+ Opt_commit,
+ Opt_errors,
+ Opt_statfs_quantum,
+ Opt_statfs_percent,
+ Opt_quota_quantum,
+ Opt_barrier,
+ Opt_rgrplvb,
+ Opt_loccookie,
+};
-static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct block_device *bdev;
- struct super_block *s;
- fmode_t mode = FMODE_READ | FMODE_EXCL;
- int error;
- struct gfs2_args args;
- struct gfs2_sbd *sdp;
+enum opt_quota {
+ Opt_quota_unset = 0,
+ Opt_quota_off,
+ Opt_quota_account,
+ Opt_quota_on,
+};
+
+static const unsigned int opt_quota_values[] = {
+ [Opt_quota_off] = GFS2_QUOTA_OFF,
+ [Opt_quota_account] = GFS2_QUOTA_ACCOUNT,
+ [Opt_quota_on] = GFS2_QUOTA_ON,
+};
- if (!(flags & SB_RDONLY))
- mode |= FMODE_WRITE;
+enum opt_data {
+ Opt_data_writeback = GFS2_DATA_WRITEBACK,
+ Opt_data_ordered = GFS2_DATA_ORDERED,
+};
- bdev = blkdev_get_by_path(dev_name, mode, fs_type);
- if (IS_ERR(bdev))
- return ERR_CAST(bdev);
+enum opt_errors {
+ Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
+ Opt_errors_panic = GFS2_ERRORS_PANIC,
+};
- /*
- * once the super is inserted into the list by sget, s_umount
- * will protect the lockfs code from trying to start a snapshot
- * while we are mounting
- */
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (bdev->bd_fsfreeze_count > 0) {
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- error = -EBUSY;
- goto error_bdev;
- }
- s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- error = PTR_ERR(s);
- if (IS_ERR(s))
- goto error_bdev;
-
- if (s->s_root) {
- /*
- * s_umount nests inside bd_mutex during
- * __invalidate_device(). blkdev_put() acquires
- * bd_mutex and can't be called under s_umount. Drop
- * s_umount temporarily. This is safe as we're
- * holding an active reference.
- */
- up_write(&s->s_umount);
- blkdev_put(bdev, mode);
- down_write(&s->s_umount);
- } else {
- /* s_mode must be set before deactivate_locked_super calls */
- s->s_mode = mode;
- }
+static const struct fs_parameter_spec gfs2_param_specs[] = {
+ fsparam_string ("lockproto", Opt_lockproto),
+ fsparam_string ("locktable", Opt_locktable),
+ fsparam_string ("hostdata", Opt_hostdata),
+ fsparam_flag ("spectator", Opt_spectator),
+ fsparam_flag ("norecovery", Opt_spectator),
+ fsparam_flag ("ignore_local_fs", Opt_ignore_local_fs),
+ fsparam_flag ("localflocks", Opt_localflocks),
+ fsparam_flag ("localcaching", Opt_localcaching),
+ fsparam_flag_no("debug", Opt_debug),
+ fsparam_flag ("upgrade", Opt_upgrade),
+ fsparam_flag_no("acl", Opt_acl),
+ fsparam_flag_no("suiddir", Opt_suiddir),
+ fsparam_enum ("data", Opt_data),
+ fsparam_flag ("meta", Opt_meta),
+ fsparam_flag_no("discard", Opt_discard),
+ fsparam_s32 ("commit", Opt_commit),
+ fsparam_enum ("errors", Opt_errors),
+ fsparam_s32 ("statfs_quantum", Opt_statfs_quantum),
+ fsparam_s32 ("statfs_percent", Opt_statfs_percent),
+ fsparam_s32 ("quota_quantum", Opt_quota_quantum),
+ fsparam_flag_no("barrier", Opt_barrier),
+ fsparam_flag_no("rgrplvb", Opt_rgrplvb),
+ fsparam_flag_no("loccookie", Opt_loccookie),
+ /* quota can be a flag or an enum so it gets special treatment */
+ __fsparam(fs_param_is_enum, "quota", Opt_quota, fs_param_neg_with_no|fs_param_v_optional),
+ {}
+};
- memset(&args, 0, sizeof(args));
- args.ar_quota = GFS2_QUOTA_DEFAULT;
- args.ar_data = GFS2_DATA_DEFAULT;
- args.ar_commit = 30;
- args.ar_statfs_quantum = 30;
- args.ar_quota_quantum = 60;
- args.ar_errors = GFS2_ERRORS_DEFAULT;
+static const struct fs_parameter_enum gfs2_param_enums[] = {
+ { Opt_quota, "off", Opt_quota_off },
+ { Opt_quota, "account", Opt_quota_account },
+ { Opt_quota, "on", Opt_quota_on },
+ { Opt_data, "writeback", Opt_data_writeback },
+ { Opt_data, "ordered", Opt_data_ordered },
+ { Opt_errors, "withdraw", Opt_errors_withdraw },
+ { Opt_errors, "panic", Opt_errors_panic },
+ {}
+};
- error = gfs2_mount_args(&args, data);
- if (error) {
- pr_warn("can't parse mount arguments\n");
- goto error_super;
+const struct fs_parameter_description gfs2_fs_parameters = {
+ .name = "gfs2",
+ .specs = gfs2_param_specs,
+ .enums = gfs2_param_enums,
+};
+
+/* Parse a single mount parameter */
+static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct gfs2_args *args = fc->fs_private;
+ struct fs_parse_result result;
+ int o;
+
+ o = fs_parse(fc, &gfs2_fs_parameters, param, &result);
+ if (o < 0)
+ return o;
+
+ switch (o) {
+ case Opt_lockproto:
+ strlcpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN);
+ break;
+ case Opt_locktable:
+ strlcpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN);
+ break;
+ case Opt_hostdata:
+ strlcpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN);
+ break;
+ case Opt_spectator:
+ args->ar_spectator = 1;
+ break;
+ case Opt_ignore_local_fs:
+ /* Retained for backwards compat only */
+ break;
+ case Opt_localflocks:
+ args->ar_localflocks = 1;
+ break;
+ case Opt_localcaching:
+ /* Retained for backwards compat only */
+ break;
+ case Opt_debug:
+ if (result.boolean && args->ar_errors == GFS2_ERRORS_PANIC)
+ return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
+ args->ar_debug = result.boolean;
+ break;
+ case Opt_upgrade:
+ /* Retained for backwards compat only */
+ break;
+ case Opt_acl:
+ args->ar_posix_acl = result.boolean;
+ break;
+ case Opt_quota:
+ /* The quota option can be a flag or an enum. A non-zero int_32
+ result means that we have an enum index. Otherwise we have
+ to rely on the 'negated' flag to tell us whether 'quota' or
+ 'noquota' was specified. */
+ if (result.negated)
+ args->ar_quota = GFS2_QUOTA_OFF;
+ else if (result.int_32 > 0)
+ args->ar_quota = opt_quota_values[result.int_32];
+ else
+ args->ar_quota = GFS2_QUOTA_ON;
+ break;
+ case Opt_suiddir:
+ args->ar_suiddir = result.boolean;
+ break;
+ case Opt_data:
+ /* The uint_32 result maps directly to GFS2_DATA_* */
+ args->ar_data = result.uint_32;
+ break;
+ case Opt_meta:
+ args->ar_meta = 1;
+ break;
+ case Opt_discard:
+ args->ar_discard = result.boolean;
+ break;
+ case Opt_commit:
+ if (result.int_32 <= 0)
+ return invalf(fc, "gfs2: commit mount option requires a positive numeric argument");
+ args->ar_commit = result.int_32;
+ break;
+ case Opt_statfs_quantum:
+ if (result.int_32 < 0)
+ return invalf(fc, "gfs2: statfs_quantum mount option requires a non-negative numeric argument");
+ args->ar_statfs_quantum = result.int_32;
+ break;
+ case Opt_quota_quantum:
+ if (result.int_32 <= 0)
+ return invalf(fc, "gfs2: quota_quantum mount option requires a positive numeric argument");
+ args->ar_quota_quantum = result.int_32;
+ break;
+ case Opt_statfs_percent:
+ if (result.int_32 < 0 || result.int_32 > 100)
+ return invalf(fc, "gfs2: statfs_percent mount option requires a numeric argument between 0 and 100");
+ args->ar_statfs_percent = result.int_32;
+ break;
+ case Opt_errors:
+ if (args->ar_debug && result.uint_32 == GFS2_ERRORS_PANIC)
+ return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
+ args->ar_errors = result.uint_32;
+ break;
+ case Opt_barrier:
+ args->ar_nobarrier = result.boolean;
+ break;
+ case Opt_rgrplvb:
+ args->ar_rgrplvb = result.boolean;
+ break;
+ case Opt_loccookie:
+ args->ar_loccookie = result.boolean;
+ break;
+ default:
+ return invalf(fc, "gfs2: invalid mount option: %s", param->key);
}
+ return 0;
+}
- if (s->s_root) {
- error = -EBUSY;
- if ((flags ^ s->s_flags) & SB_RDONLY)
- goto error_super;
- } else {
- snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
- sb_set_blocksize(s, block_size(bdev));
- error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
- if (error)
- goto error_super;
- s->s_flags |= SB_ACTIVE;
- bdev->bd_super = s;
+static int gfs2_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ struct gfs2_sbd *sdp = sb->s_fs_info;
+ struct gfs2_args *oldargs = &sdp->sd_args;
+ struct gfs2_args *newargs = fc->fs_private;
+ struct gfs2_tune *gt = &sdp->sd_tune;
+ int error = 0;
+
+ sync_filesystem(sb);
+
+ spin_lock(&gt->gt_spin);
+ oldargs->ar_commit = gt->gt_logd_secs;
+ oldargs->ar_quota_quantum = gt->gt_quota_quantum;
+ if (gt->gt_statfs_slow)
+ oldargs->ar_statfs_quantum = 0;
+ else
+ oldargs->ar_statfs_quantum = gt->gt_statfs_quantum;
+ spin_unlock(&gt->gt_spin);
+
+ if (strcmp(newargs->ar_lockproto, oldargs->ar_lockproto)) {
+ errorf(fc, "gfs2: reconfiguration of locking protocol not allowed");
+ return -EINVAL;
+ }
+ if (strcmp(newargs->ar_locktable, oldargs->ar_locktable)) {
+ errorf(fc, "gfs2: reconfiguration of lock table not allowed");
+ return -EINVAL;
+ }
+ if (strcmp(newargs->ar_hostdata, oldargs->ar_hostdata)) {
+ errorf(fc, "gfs2: reconfiguration of host data not allowed");
+ return -EINVAL;
+ }
+ if (newargs->ar_spectator != oldargs->ar_spectator) {
+ errorf(fc, "gfs2: reconfiguration of spectator mode not allowed");
+ return -EINVAL;
+ }
+ if (newargs->ar_localflocks != oldargs->ar_localflocks) {
+ errorf(fc, "gfs2: reconfiguration of localflocks not allowed");
+ return -EINVAL;
+ }
+ if (newargs->ar_meta != oldargs->ar_meta) {
+ errorf(fc, "gfs2: switching between gfs2 and gfs2meta not allowed");
+ return -EINVAL;
+ }
+ if (oldargs->ar_spectator)
+ fc->sb_flags |= SB_RDONLY;
+
+ if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
+ if (fc->sb_flags & SB_RDONLY) {
+ error = gfs2_make_fs_ro(sdp);
+ if (error)
+ errorf(fc, "gfs2: unable to remount read-only");
+ } else {
+ error = gfs2_make_fs_rw(sdp);
+ if (error)
+ errorf(fc, "gfs2: unable to remount read-write");
+ }
}
+ sdp->sd_args = *newargs;
- sdp = s->s_fs_info;
- if (args.ar_meta)
- return dget(sdp->sd_master_dir);
+ if (sdp->sd_args.ar_posix_acl)
+ sb->s_flags |= SB_POSIXACL;
+ else
+ sb->s_flags &= ~SB_POSIXACL;
+ if (sdp->sd_args.ar_nobarrier)
+ set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
else
- return dget(sdp->sd_root_dir);
-
-error_super:
- deactivate_locked_super(s);
- return ERR_PTR(error);
-error_bdev:
- blkdev_put(bdev, mode);
- return ERR_PTR(error);
+ clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
+ spin_lock(&gt->gt_spin);
+ gt->gt_logd_secs = newargs->ar_commit;
+ gt->gt_quota_quantum = newargs->ar_quota_quantum;
+ if (newargs->ar_statfs_quantum) {
+ gt->gt_statfs_slow = 0;
+ gt->gt_statfs_quantum = newargs->ar_statfs_quantum;
+ }
+ else {
+ gt->gt_statfs_slow = 1;
+ gt->gt_statfs_quantum = 30;
+ }
+ spin_unlock(&gt->gt_spin);
+
+ gfs2_online_uevent(sdp);
+ return error;
+}
+
+static const struct fs_context_operations gfs2_context_ops = {
+ .free = gfs2_fc_free,
+ .parse_param = gfs2_parse_param,
+ .get_tree = gfs2_get_tree,
+ .reconfigure = gfs2_reconfigure,
+};
+
+/* Set up the filesystem mount context */
+static int gfs2_init_fs_context(struct fs_context *fc)
+{
+ struct gfs2_args *args;
+
+ args = kzalloc(sizeof(*args), GFP_KERNEL);
+ if (args == NULL)
+ return -ENOMEM;
+
+ args->ar_quota = GFS2_QUOTA_DEFAULT;
+ args->ar_data = GFS2_DATA_DEFAULT;
+ args->ar_commit = 30;
+ args->ar_statfs_quantum = 30;
+ args->ar_quota_quantum = 60;
+ args->ar_errors = GFS2_ERRORS_DEFAULT;
+
+ fc->fs_private = args;
+ fc->ops = &gfs2_context_ops;
+ return 0;
}
-static int set_meta_super(struct super_block *s, void *ptr)
+static int set_meta_super(struct super_block *s, struct fs_context *fc)
{
return -EINVAL;
}
-static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int test_meta_super(struct super_block *s, struct fs_context *fc)
+{
+ return (fc->sget_key == s->s_bdev);
+}
+
+static int gfs2_meta_get_tree(struct fs_context *fc)
{
struct super_block *s;
struct gfs2_sbd *sdp;
struct path path;
int error;
- if (!dev_name || !*dev_name)
- return ERR_PTR(-EINVAL);
+ if (!fc->source || !*fc->source)
+ return -EINVAL;
- error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
+ error = kern_path(fc->source, LOOKUP_FOLLOW, &path);
if (error) {
pr_warn("path_lookup on %s returned error %d\n",
- dev_name, error);
- return ERR_PTR(error);
+ fc->source, error);
+ return error;
}
- s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
- path.dentry->d_sb->s_bdev);
+ fc->fs_type = &gfs2_fs_type;
+ fc->sget_key = path.dentry->d_sb->s_bdev;
+ s = sget_fc(fc, test_meta_super, set_meta_super);
path_put(&path);
if (IS_ERR(s)) {
pr_warn("gfs2 mount does not exist\n");
- return ERR_CAST(s);
+ return PTR_ERR(s);
}
- if ((flags ^ s->s_flags) & SB_RDONLY) {
+ if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
deactivate_locked_super(s);
- return ERR_PTR(-EBUSY);
+ return -EBUSY;
}
sdp = s->s_fs_info;
- return dget(sdp->sd_master_dir);
+ fc->root = dget(sdp->sd_master_dir);
+ return 0;
+}
+
+static const struct fs_context_operations gfs2_meta_context_ops = {
+ .get_tree = gfs2_meta_get_tree,
+};
+
+static int gfs2_meta_init_fs_context(struct fs_context *fc)
+{
+ int ret = gfs2_init_fs_context(fc);
+
+ if (ret)
+ return ret;
+
+ fc->ops = &gfs2_meta_context_ops;
+ return 0;
}
static void gfs2_kill_sb(struct super_block *sb)
@@ -1383,7 +1635,8 @@ static void gfs2_kill_sb(struct super_block *sb)
struct file_system_type gfs2_fs_type = {
.name = "gfs2",
.fs_flags = FS_REQUIRES_DEV,
- .mount = gfs2_mount,
+ .init_fs_context = gfs2_init_fs_context,
+ .parameters = &gfs2_fs_parameters,
.kill_sb = gfs2_kill_sb,
.owner = THIS_MODULE,
};
@@ -1392,7 +1645,7 @@ MODULE_ALIAS_FS("gfs2");
struct file_system_type gfs2meta_fs_type = {
.name = "gfs2meta",
.fs_flags = FS_REQUIRES_DEV,
- .mount = gfs2_mount_meta,
+ .init_fs_context = gfs2_meta_init_fs_context,
.owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2meta");
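To make the table-driven parsing above concrete: fs_parse() matches one parameter against the spec table and reports the typed result, and fsparam_flag_no() registers both "x" and "nox" with the negation delivered in result.boolean. A minimal sketch along the same lines (hypothetical "examplefs" names, mirroring the calls used in this hunk):

enum { Opt_shiny };

static const struct fs_parameter_spec examplefs_param_specs[] = {
	fsparam_flag_no("shiny", Opt_shiny),	/* "shiny" and "noshiny" */
	{}
};

static const struct fs_parameter_description examplefs_parameters = {
	.name	= "examplefs",
	.specs	= examplefs_param_specs,
};

static int examplefs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt = fs_parse(fc, &examplefs_parameters, param, &result);

	if (opt < 0)
		return opt;		/* unknown key or malformed value */

	switch (opt) {
	case Opt_shiny:
		/* result.boolean is false when "noshiny" was given */
		break;
	}
	return 0;
}

The "quota" entry needs __fsparam() directly because it is simultaneously a flag ("quota"/"noquota") and an enum ("quota=on|off|account"), which none of the shorthand macros express.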
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 644c70ae09f7..5fa1eec4fb4f 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -44,258 +44,6 @@
#include "xattr.h"
#include "lops.h"
-#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
-
-enum {
- Opt_lockproto,
- Opt_locktable,
- Opt_hostdata,
- Opt_spectator,
- Opt_ignore_local_fs,
- Opt_localflocks,
- Opt_localcaching,
- Opt_debug,
- Opt_nodebug,
- Opt_upgrade,
- Opt_acl,
- Opt_noacl,
- Opt_quota_off,
- Opt_quota_account,
- Opt_quota_on,
- Opt_quota,
- Opt_noquota,
- Opt_suiddir,
- Opt_nosuiddir,
- Opt_data_writeback,
- Opt_data_ordered,
- Opt_meta,
- Opt_discard,
- Opt_nodiscard,
- Opt_commit,
- Opt_err_withdraw,
- Opt_err_panic,
- Opt_statfs_quantum,
- Opt_statfs_percent,
- Opt_quota_quantum,
- Opt_barrier,
- Opt_nobarrier,
- Opt_rgrplvb,
- Opt_norgrplvb,
- Opt_loccookie,
- Opt_noloccookie,
- Opt_error,
-};
-
-static const match_table_t tokens = {
- {Opt_lockproto, "lockproto=%s"},
- {Opt_locktable, "locktable=%s"},
- {Opt_hostdata, "hostdata=%s"},
- {Opt_spectator, "spectator"},
- {Opt_spectator, "norecovery"},
- {Opt_ignore_local_fs, "ignore_local_fs"},
- {Opt_localflocks, "localflocks"},
- {Opt_localcaching, "localcaching"},
- {Opt_debug, "debug"},
- {Opt_nodebug, "nodebug"},
- {Opt_upgrade, "upgrade"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_quota_off, "quota=off"},
- {Opt_quota_account, "quota=account"},
- {Opt_quota_on, "quota=on"},
- {Opt_quota, "quota"},
- {Opt_noquota, "noquota"},
- {Opt_suiddir, "suiddir"},
- {Opt_nosuiddir, "nosuiddir"},
- {Opt_data_writeback, "data=writeback"},
- {Opt_data_ordered, "data=ordered"},
- {Opt_meta, "meta"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
- {Opt_commit, "commit=%d"},
- {Opt_err_withdraw, "errors=withdraw"},
- {Opt_err_panic, "errors=panic"},
- {Opt_statfs_quantum, "statfs_quantum=%d"},
- {Opt_statfs_percent, "statfs_percent=%d"},
- {Opt_quota_quantum, "quota_quantum=%d"},
- {Opt_barrier, "barrier"},
- {Opt_nobarrier, "nobarrier"},
- {Opt_rgrplvb, "rgrplvb"},
- {Opt_norgrplvb, "norgrplvb"},
- {Opt_loccookie, "loccookie"},
- {Opt_noloccookie, "noloccookie"},
- {Opt_error, NULL}
-};
-
-/**
- * gfs2_mount_args - Parse mount options
- * @args: The structure into which the parsed options will be written
- * @options: The options to parse
- *
- * Return: errno
- */
-
-int gfs2_mount_args(struct gfs2_args *args, char *options)
-{
- char *o;
- int token;
- substring_t tmp[MAX_OPT_ARGS];
- int rv;
-
- /* Split the options into tokens with the "," character and
- process them */
-
- while (1) {
- o = strsep(&options, ",");
- if (o == NULL)
- break;
- if (*o == '\0')
- continue;
-
- token = match_token(o, tokens, tmp);
- switch (token) {
- case Opt_lockproto:
- match_strlcpy(args->ar_lockproto, &tmp[0],
- GFS2_LOCKNAME_LEN);
- break;
- case Opt_locktable:
- match_strlcpy(args->ar_locktable, &tmp[0],
- GFS2_LOCKNAME_LEN);
- break;
- case Opt_hostdata:
- match_strlcpy(args->ar_hostdata, &tmp[0],
- GFS2_LOCKNAME_LEN);
- break;
- case Opt_spectator:
- args->ar_spectator = 1;
- break;
- case Opt_ignore_local_fs:
- /* Retained for backwards compat only */
- break;
- case Opt_localflocks:
- args->ar_localflocks = 1;
- break;
- case Opt_localcaching:
- /* Retained for backwards compat only */
- break;
- case Opt_debug:
- if (args->ar_errors == GFS2_ERRORS_PANIC) {
- pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
- return -EINVAL;
- }
- args->ar_debug = 1;
- break;
- case Opt_nodebug:
- args->ar_debug = 0;
- break;
- case Opt_upgrade:
- /* Retained for backwards compat only */
- break;
- case Opt_acl:
- args->ar_posix_acl = 1;
- break;
- case Opt_noacl:
- args->ar_posix_acl = 0;
- break;
- case Opt_quota_off:
- case Opt_noquota:
- args->ar_quota = GFS2_QUOTA_OFF;
- break;
- case Opt_quota_account:
- args->ar_quota = GFS2_QUOTA_ACCOUNT;
- break;
- case Opt_quota_on:
- case Opt_quota:
- args->ar_quota = GFS2_QUOTA_ON;
- break;
- case Opt_suiddir:
- args->ar_suiddir = 1;
- break;
- case Opt_nosuiddir:
- args->ar_suiddir = 0;
- break;
- case Opt_data_writeback:
- args->ar_data = GFS2_DATA_WRITEBACK;
- break;
- case Opt_data_ordered:
- args->ar_data = GFS2_DATA_ORDERED;
- break;
- case Opt_meta:
- args->ar_meta = 1;
- break;
- case Opt_discard:
- args->ar_discard = 1;
- break;
- case Opt_nodiscard:
- args->ar_discard = 0;
- break;
- case Opt_commit:
- rv = match_int(&tmp[0], &args->ar_commit);
- if (rv || args->ar_commit <= 0) {
- pr_warn("commit mount option requires a positive numeric argument\n");
- return rv ? rv : -EINVAL;
- }
- break;
- case Opt_statfs_quantum:
- rv = match_int(&tmp[0], &args->ar_statfs_quantum);
- if (rv || args->ar_statfs_quantum < 0) {
- pr_warn("statfs_quantum mount option requires a non-negative numeric argument\n");
- return rv ? rv : -EINVAL;
- }
- break;
- case Opt_quota_quantum:
- rv = match_int(&tmp[0], &args->ar_quota_quantum);
- if (rv || args->ar_quota_quantum <= 0) {
- pr_warn("quota_quantum mount option requires a positive numeric argument\n");
- return rv ? rv : -EINVAL;
- }
- break;
- case Opt_statfs_percent:
- rv = match_int(&tmp[0], &args->ar_statfs_percent);
- if (rv || args->ar_statfs_percent < 0 ||
- args->ar_statfs_percent > 100) {
- pr_warn("statfs_percent mount option requires a numeric argument between 0 and 100\n");
- return rv ? rv : -EINVAL;
- }
- break;
- case Opt_err_withdraw:
- args->ar_errors = GFS2_ERRORS_WITHDRAW;
- break;
- case Opt_err_panic:
- if (args->ar_debug) {
- pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
- return -EINVAL;
- }
- args->ar_errors = GFS2_ERRORS_PANIC;
- break;
- case Opt_barrier:
- args->ar_nobarrier = 0;
- break;
- case Opt_nobarrier:
- args->ar_nobarrier = 1;
- break;
- case Opt_rgrplvb:
- args->ar_rgrplvb = 1;
- break;
- case Opt_norgrplvb:
- args->ar_rgrplvb = 0;
- break;
- case Opt_loccookie:
- args->ar_loccookie = 1;
- break;
- case Opt_noloccookie:
- args->ar_loccookie = 0;
- break;
- case Opt_error:
- default:
- pr_warn("invalid mount option: %s\n", o);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
/**
* gfs2_jindex_free - Clear all the journal index information
* @sdp: The GFS2 superblock
@@ -847,7 +595,7 @@ out:
* Returns: errno
*/
-static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
struct gfs2_holder freeze_gh;
int error;
@@ -1227,84 +975,6 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
}
/**
- * gfs2_remount_fs - called when the FS is remounted
- * @sb: the filesystem
- * @flags: the remount flags
- * @data: extra data passed in (not used right now)
- *
- * Returns: errno
- */
-
-static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
-{
- struct gfs2_sbd *sdp = sb->s_fs_info;
- struct gfs2_args args = sdp->sd_args; /* Default to current settings */
- struct gfs2_tune *gt = &sdp->sd_tune;
- int error;
-
- sync_filesystem(sb);
-
- spin_lock(&gt->gt_spin);
- args.ar_commit = gt->gt_logd_secs;
- args.ar_quota_quantum = gt->gt_quota_quantum;
- if (gt->gt_statfs_slow)
- args.ar_statfs_quantum = 0;
- else
- args.ar_statfs_quantum = gt->gt_statfs_quantum;
- spin_unlock(&gt->gt_spin);
- error = gfs2_mount_args(&args, data);
- if (error)
- return error;
-
- /* Not allowed to change locking details */
- if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
- strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
- strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
- return -EINVAL;
-
- /* Some flags must not be changed */
- if (args_neq(&args, &sdp->sd_args, spectator) ||
- args_neq(&args, &sdp->sd_args, localflocks) ||
- args_neq(&args, &sdp->sd_args, meta))
- return -EINVAL;
-
- if (sdp->sd_args.ar_spectator)
- *flags |= SB_RDONLY;
-
- if ((sb->s_flags ^ *flags) & SB_RDONLY) {
- if (*flags & SB_RDONLY)
- error = gfs2_make_fs_ro(sdp);
- else
- error = gfs2_make_fs_rw(sdp);
- }
-
- sdp->sd_args = args;
- if (sdp->sd_args.ar_posix_acl)
- sb->s_flags |= SB_POSIXACL;
- else
- sb->s_flags &= ~SB_POSIXACL;
- if (sdp->sd_args.ar_nobarrier)
- set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
- else
- clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
- spin_lock(&gt->gt_spin);
- gt->gt_logd_secs = args.ar_commit;
- gt->gt_quota_quantum = args.ar_quota_quantum;
- if (args.ar_statfs_quantum) {
- gt->gt_statfs_slow = 0;
- gt->gt_statfs_quantum = args.ar_statfs_quantum;
- }
- else {
- gt->gt_statfs_slow = 1;
- gt->gt_statfs_quantum = 30;
- }
- spin_unlock(&gt->gt_spin);
-
- gfs2_online_uevent(sdp);
- return error;
-}
-
-/**
* gfs2_drop_inode - Drop an inode (test for remote unlink)
* @inode: The inode to drop
*
@@ -1748,7 +1418,6 @@ const struct super_operations gfs2_super_ops = {
.freeze_super = gfs2_freeze,
.thaw_super = gfs2_unfreeze,
.statfs = gfs2_statfs,
- .remount_fs = gfs2_remount_fs,
.drop_inode = gfs2_drop_inode,
.show_options = gfs2_show_options,
};
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 9d49eaadb9d9..b8bf811a1305 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -24,8 +24,6 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
-extern int gfs2_mount_args(struct gfs2_args *args, char *data);
-
extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
@@ -33,6 +31,7 @@ extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
struct gfs2_inode **ipp);
extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
+extern int gfs2_make_fs_ro(struct gfs2_sbd *sdp);
extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
diff --git a/fs/inode.c b/fs/inode.c
index 64bf28cf05cd..fef457a42882 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -181,6 +181,9 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
mapping->flags = 0;
mapping->wb_err = 0;
atomic_set(&mapping->i_mmap_writable, 0);
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ atomic_set(&mapping->nr_thps, 0);
+#endif
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
mapping->private_data = NULL;
mapping->writeback_index = 0;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0dadbdbead0f..aa8ac557493c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -200,6 +200,7 @@ struct io_ring_ctx {
struct io_uring_sqe *sq_sqes;
struct list_head defer_list;
+ struct list_head timeout_list;
} ____cacheline_aligned_in_smp;
/* IO offload */
@@ -216,6 +217,7 @@ struct io_ring_ctx {
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
+ atomic_t cq_timeouts;
} ____cacheline_aligned_in_smp;
struct io_rings *rings;
@@ -283,6 +285,11 @@ struct io_poll_iocb {
struct wait_queue_entry wait;
};
+struct io_timeout {
+ struct file *file;
+ struct hrtimer timer;
+};
+
/*
* NOTE! Each of the iocb union members has the file pointer
* as the first entry in their struct definition. So you can
@@ -294,6 +301,7 @@ struct io_kiocb {
struct file *file;
struct kiocb rw;
struct io_poll_iocb poll;
+ struct io_timeout timeout;
};
struct sqe_submit submit;
@@ -313,6 +321,7 @@ struct io_kiocb {
#define REQ_F_LINK_DONE 128 /* linked sqes done */
#define REQ_F_FAIL_LINK 256 /* fail rest of links */
#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
+#define REQ_F_TIMEOUT 1024 /* timeout request */
u64 user_data;
u32 result;
u32 sequence;
@@ -344,6 +353,8 @@ struct io_submit_state {
};
static void io_sq_wq_submit_work(struct work_struct *work);
+static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
+ long res);
static void __io_free_req(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -400,26 +411,30 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->poll_list);
INIT_LIST_HEAD(&ctx->cancel_list);
INIT_LIST_HEAD(&ctx->defer_list);
+ INIT_LIST_HEAD(&ctx->timeout_list);
return ctx;
}
static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
- if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
+ /* timeout requests always honor sequence */
+ if (!(req->flags & REQ_F_TIMEOUT) &&
+ (req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
return false;
return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
}
-static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
+static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx,
+ struct list_head *list)
{
struct io_kiocb *req;
- if (list_empty(&ctx->defer_list))
+ if (list_empty(list))
return NULL;
- req = list_first_entry(&ctx->defer_list, struct io_kiocb, list);
+ req = list_first_entry(list, struct io_kiocb, list);
if (!io_sequence_defer(ctx, req)) {
list_del_init(&req->list);
return req;
@@ -428,6 +443,16 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
return NULL;
}
+static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
+{
+ return __io_get_deferred_req(ctx, &ctx->defer_list);
+}
+
+static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
+{
+ return __io_get_deferred_req(ctx, &ctx->timeout_list);
+}
+
static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
@@ -446,25 +471,50 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
static inline void io_queue_async_work(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
- int rw;
+ int rw = 0;
- switch (req->submit.sqe->opcode) {
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- rw = !(req->rw.ki_flags & IOCB_DIRECT);
- break;
- default:
- rw = 0;
- break;
+ if (req->submit.sqe) {
+ switch (req->submit.sqe->opcode) {
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ rw = !(req->rw.ki_flags & IOCB_DIRECT);
+ break;
+ }
}
queue_work(ctx->sqo_wq[rw], &req->work);
}
+static void io_kill_timeout(struct io_kiocb *req)
+{
+ int ret;
+
+ ret = hrtimer_try_to_cancel(&req->timeout.timer);
+ if (ret != -1) {
+ atomic_inc(&req->ctx->cq_timeouts);
+ list_del(&req->list);
+ io_cqring_fill_event(req->ctx, req->user_data, 0);
+ __io_free_req(req);
+ }
+}
+
+static void io_kill_timeouts(struct io_ring_ctx *ctx)
+{
+ struct io_kiocb *req, *tmp;
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
+ io_kill_timeout(req);
+ spin_unlock_irq(&ctx->completion_lock);
+}
+
static void io_commit_cqring(struct io_ring_ctx *ctx)
{
struct io_kiocb *req;
+ while ((req = io_get_timeout_req(ctx)) != NULL)
+ io_kill_timeout(req);
+
__io_commit_cqring(ctx);
while ((req = io_get_deferred_req(ctx)) != NULL) {
@@ -1248,6 +1298,51 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
}
}
+/*
+ * For files that don't have ->read_iter() and ->write_iter(), handle them
+ * by looping over ->read() or ->write() manually.
+ */
+static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
+ struct iov_iter *iter)
+{
+ ssize_t ret = 0;
+
+ /*
+ * Don't support polled IO through this interface, and we can't
+ * support non-blocking either. For the latter, this just causes
+ * the kiocb to be handled from an async context.
+ */
+ if (kiocb->ki_flags & IOCB_HIPRI)
+ return -EOPNOTSUPP;
+ if (kiocb->ki_flags & IOCB_NOWAIT)
+ return -EAGAIN;
+
+ while (iov_iter_count(iter)) {
+ struct iovec iovec = iov_iter_iovec(iter);
+ ssize_t nr;
+
+ if (rw == READ) {
+ nr = file->f_op->read(file, iovec.iov_base,
+ iovec.iov_len, &kiocb->ki_pos);
+ } else {
+ nr = file->f_op->write(file, iovec.iov_base,
+ iovec.iov_len, &kiocb->ki_pos);
+ }
+
+ if (nr < 0) {
+ if (!ret)
+ ret = nr;
+ break;
+ }
+ ret += nr;
+ if (nr != iovec.iov_len)
+ break;
+ iov_iter_advance(iter, nr);
+ }
+
+ return ret;
+}
+
static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
bool force_nonblock)
{
@@ -1265,8 +1360,6 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
- if (unlikely(!file->f_op->read_iter))
- return -EINVAL;
ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
if (ret < 0)
@@ -1281,7 +1374,11 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
if (!ret) {
ssize_t ret2;
- ret2 = call_read_iter(file, kiocb, &iter);
+ if (file->f_op->read_iter)
+ ret2 = call_read_iter(file, kiocb, &iter);
+ else
+ ret2 = loop_rw_iter(READ, file, kiocb, &iter);
+
/*
* In case of a short read, punt to async. This can happen
* if we have data partially cached. Alternatively we can
@@ -1326,8 +1423,6 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
file = kiocb->ki_filp;
if (unlikely(!(file->f_mode & FMODE_WRITE)))
return -EBADF;
- if (unlikely(!file->f_op->write_iter))
- return -EINVAL;
ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
if (ret < 0)
@@ -1365,7 +1460,10 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
}
kiocb->ki_flags |= IOCB_WRITE;
- ret2 = call_write_iter(file, kiocb, &iter);
+ if (file->f_op->write_iter)
+ ret2 = call_write_iter(file, kiocb, &iter);
+ else
+ ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
if (!force_nonblock || ret2 != -EAGAIN) {
io_rw_done(kiocb, ret2);
} else {
@@ -1714,6 +1812,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!poll->file)
return -EBADF;
+ req->submit.sqe = NULL;
INIT_WORK(&req->work, io_poll_complete_work);
events = READ_ONCE(sqe->poll_events);
poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
@@ -1765,6 +1864,81 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return ipt.error;
}
+static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
+{
+ struct io_ring_ctx *ctx;
+ struct io_kiocb *req;
+ unsigned long flags;
+
+ req = container_of(timer, struct io_kiocb, timeout.timer);
+ ctx = req->ctx;
+ atomic_inc(&ctx->cq_timeouts);
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ list_del(&req->list);
+
+ io_cqring_fill_event(ctx, req->user_data, -ETIME);
+ io_commit_cqring(ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+ io_cqring_ev_posted(ctx);
+
+ io_put_req(req);
+ return HRTIMER_NORESTART;
+}
+
+static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ unsigned count, req_dist, tail_index;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct list_head *entry;
+ struct timespec ts;
+
+ if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
+ sqe->len != 1)
+ return -EINVAL;
+ if (copy_from_user(&ts, (void __user *) (unsigned long) sqe->addr,
+ sizeof(ts)))
+ return -EFAULT;
+
+ /*
+ * sqe->off holds how many events need to occur for this
+ * timeout event to be satisfied.
+ */
+ count = READ_ONCE(sqe->off);
+ if (!count)
+ count = 1;
+
+ req->sequence = ctx->cached_sq_head + count - 1;
+ req->flags |= REQ_F_TIMEOUT;
+
+ /*
+ * Insertion sort, ensuring the first entry in the list is always
+ * the one we need first.
+ */
+ tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
+ req_dist = req->sequence - tail_index;
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_prev(entry, &ctx->timeout_list) {
+ struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
+ unsigned dist;
+
+ dist = nxt->sequence - tail_index;
+ if (req_dist >= dist)
+ break;
+ }
+ list_add(&req->list, entry);
+ spin_unlock_irq(&ctx->completion_lock);
+
+ hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ req->timeout.timer.function = io_timeout_fn;
+ hrtimer_start(&req->timeout.timer, timespec_to_ktime(ts),
+ HRTIMER_MODE_REL);
+ return 0;
+}
+
static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
@@ -1842,6 +2016,9 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
case IORING_OP_RECVMSG:
ret = io_recvmsg(req, s->sqe, force_nonblock);
break;
+ case IORING_OP_TIMEOUT:
+ ret = io_timeout(req, s->sqe);
+ break;
default:
ret = -EINVAL;
break;
@@ -2098,13 +2275,11 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
struct io_uring_sqe *sqe_copy;
- sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
+ sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
if (sqe_copy) {
struct async_list *list;
- memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
s->sqe = sqe_copy;
-
memcpy(&req->submit, s, sizeof(*s));
list = io_async_list_from_sqe(ctx, s->sqe);
if (!io_add_to_prev_work(list, req)) {
@@ -2359,18 +2534,22 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
io_queue_link_head(ctx, link, &link->submit, shadow_req,
true);
link = NULL;
+ shadow_req = NULL;
}
prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
if (!shadow_req) {
shadow_req = io_get_req(ctx, NULL);
+ if (unlikely(!shadow_req))
+ goto out;
shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
refcount_dec(&shadow_req->refs);
}
shadow_req->sequence = sqes[i].sequence;
}
+out:
if (unlikely(mm_fault)) {
io_cqring_add_event(ctx, sqes[i].sqe->user_data,
-EFAULT);
@@ -2436,7 +2615,7 @@ static int io_sq_thread(void *data)
* to sleep.
*/
if (inflight || !time_after(jiffies, timeout)) {
- cpu_relax();
+ cond_resched();
continue;
}
@@ -2545,18 +2724,22 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
io_queue_link_head(ctx, link, &link->submit, shadow_req,
force_nonblock);
link = NULL;
+ shadow_req = NULL;
}
prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
if (!shadow_req) {
shadow_req = io_get_req(ctx, NULL);
+ if (unlikely(!shadow_req))
+ goto out;
shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
refcount_dec(&shadow_req->refs);
}
shadow_req->sequence = s.sequence;
}
+out:
s.has_user = true;
s.needs_lock = false;
s.needs_fixed_file = false;
@@ -2585,6 +2768,38 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
return submit;
}
+struct io_wait_queue {
+ struct wait_queue_entry wq;
+ struct io_ring_ctx *ctx;
+ unsigned to_wait;
+ unsigned nr_timeouts;
+};
+
+static inline bool io_should_wake(struct io_wait_queue *iowq)
+{
+ struct io_ring_ctx *ctx = iowq->ctx;
+
+ /*
+ * Wake up if we have enough events, or if a timeout occurred since we
+ * started waiting. For timeouts, we always want to return to userspace,
+ * regardless of event count.
+ */
+ return io_cqring_events(ctx->rings) >= iowq->to_wait ||
+ atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+}
+
+static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
+ int wake_flags, void *key)
+{
+ struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
+ wq);
+
+ if (!io_should_wake(iowq))
+ return -1;
+
+ return autoremove_wake_function(curr, mode, wake_flags, key);
+}
+
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
@@ -2592,6 +2807,15 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
const sigset_t __user *sig, size_t sigsz)
{
+ struct io_wait_queue iowq = {
+ .wq = {
+ .private = current,
+ .func = io_wake_function,
+ .entry = LIST_HEAD_INIT(iowq.wq.entry),
+ },
+ .ctx = ctx,
+ .to_wait = min_events,
+ };
struct io_rings *rings = ctx->rings;
int ret;
@@ -2611,7 +2835,21 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
- ret = wait_event_interruptible(ctx->wait, io_cqring_events(rings) >= min_events);
+ ret = 0;
+ iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+ do {
+ prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
+ TASK_INTERRUPTIBLE);
+ if (io_should_wake(&iowq))
+ break;
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ } while (1);
+ finish_wait(&ctx->wait, &iowq.wq);
+
restore_saved_sigmask_unless(ret == -ERESTARTSYS);
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -3263,7 +3501,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
ctx->rings->sq_ring_entries)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (READ_ONCE(ctx->rings->sq.head) != ctx->cached_cq_tail)
+ if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
@@ -3282,6 +3520,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
percpu_ref_kill(&ctx->refs);
mutex_unlock(&ctx->uring_lock);
+ io_kill_timeouts(ctx);
io_poll_remove_all(ctx);
io_iopoll_reap_events(ctx);
wait_for_completion(&ctx->ctx_done);
@@ -3319,7 +3558,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
}
page = virt_to_head_page(ptr);
- if (sz > (PAGE_SIZE << compound_order(page)))
+ if (sz > page_size(page))
return -EINVAL;
pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
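One detail of io_timeout() above deserves a note: the insertion sort compares distances from the current tail (req_dist, dist) rather than raw sequence numbers, which stays correct across u32 wraparound because unsigned subtraction always yields the true forward distance. A tiny standalone C sketch of that property:

#include <stdio.h>

/* forward distance from 'tail' to 'seq'; unsigned wraparound does
 * the work, no special casing needed */
static unsigned int seq_dist(unsigned int seq, unsigned int tail)
{
	return seq - tail;
}

int main(void)
{
	unsigned int tail = 0xfffffff0u;

	/* 0x10 is numerically below tail, yet only 0x20 ahead of it */
	printf("%u\n", seq_dist(0x10u, tail));		/* prints 32 */
	printf("%u\n", seq_dist(0xfffffff8u, tail));	/* prints 8 */
	return 0;
}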
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 10517cea9682..1fc28c2da279 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -24,7 +24,7 @@
struct iomap_dio {
struct kiocb *iocb;
- iomap_dio_end_io_t *end_io;
+ const struct iomap_dio_ops *dops;
loff_t i_size;
loff_t size;
atomic_t ref;
@@ -72,18 +72,14 @@ static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
+ const struct iomap_dio_ops *dops = dio->dops;
struct kiocb *iocb = dio->iocb;
struct inode *inode = file_inode(iocb->ki_filp);
loff_t offset = iocb->ki_pos;
- ssize_t ret;
+ ssize_t ret = dio->error;
- if (dio->end_io) {
- ret = dio->end_io(iocb,
- dio->error ? dio->error : dio->size,
- dio->flags);
- } else {
- ret = dio->error;
- }
+ if (dops && dops->end_io)
+ ret = dops->end_io(iocb, dio->size, ret, dio->flags);
if (likely(!ret)) {
ret = dio->size;
@@ -101,9 +97,9 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
* one is a pretty crazy thing to do, so we don't support it 100%. If
* this invalidation fails, tough, the write still worked...
*
- * And this page cache invalidation has to be after dio->end_io(), as
- * some filesystems convert unwritten extents to real allocations in
- * end_io() when necessary, otherwise a racing buffer read would cache
+ * And this page cache invalidation has to be after ->end_io(), as some
+ * filesystems convert unwritten extents to real allocations in
+ * ->end_io() when necessary, otherwise a racing buffer read would cache
* zeros from unwritten extents.
*/
if (!dio->error &&
@@ -396,7 +392,7 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
*/
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = file_inode(iocb->ki_filp);
@@ -421,7 +417,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
atomic_set(&dio->ref, 1);
dio->size = 0;
dio->i_size = i_size_read(inode);
- dio->end_io = end_io;
+ dio->dops = dops;
dio->error = 0;
dio->flags = 0;
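With this change a filesystem hands its completion hook to iomap_dio_rw() inside a const ops table instead of as a bare function pointer, leaving room for more hooks later. A hedged call-site sketch (hypothetical "examplefs" names; the end_io argument types are inferred from the call in iomap_dio_complete() above):

static int examplefs_dio_end_io(struct kiocb *iocb, ssize_t size,
				int error, unsigned int flags)
{
	/* e.g. convert unwritten extents now that the write finished */
	return error;
}

static const struct iomap_dio_ops examplefs_dio_ops = {
	.end_io	= examplefs_dio_end_io,
};

/* in ->read_iter()/->write_iter(), with examplefs_iomap_ops assumed
 * defined elsewhere: */
ret = iomap_dio_rw(iocb, iter, &examplefs_iomap_ops, &examplefs_dio_ops);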
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 953990eb70a9..1c58859aa592 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -89,8 +89,6 @@ EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
EXPORT_SYMBOL(jbd2_journal_invalidatepage);
EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
EXPORT_SYMBOL(jbd2_journal_force_commit);
-EXPORT_SYMBOL(jbd2_journal_inode_add_write);
-EXPORT_SYMBOL(jbd2_journal_inode_add_wait);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index afc06daee5bb..bee8498d7792 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2622,18 +2622,6 @@ done:
return 0;
}
-int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
-{
- return jbd2_journal_file_inode(handle, jinode,
- JI_WRITE_DATA | JI_WAIT_DATA, 0, LLONG_MAX);
-}
-
-int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
-{
- return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, 0,
- LLONG_MAX);
-}
-
int jbd2_journal_inode_ranged_write(handle_t *handle,
struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
{
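The removed helpers were whole-file wrappers (0..LLONG_MAX) around jbd2_journal_file_inode(); with no users left, callers are expected to use the ranged variants and pass the exact span they dirty. Sketch of the conversion:

/* before (removed): order all of the inode's data */
err = jbd2_journal_inode_add_write(handle, jinode);

/* after: order only the bytes this transaction actually touches */
err = jbd2_journal_inode_ranged_write(handle, jinode, pos, len);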
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index cbe70637c117..0e6406c4f362 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -163,13 +163,11 @@ static const struct export_operations jffs2_export_ops = {
* Opt_rp_size: size of reserved pool in KiB
*/
enum {
- Opt_source,
Opt_override_compr,
Opt_rp_size,
};
static const struct fs_parameter_spec jffs2_param_specs[] = {
- fsparam_string ("source", Opt_source),
fsparam_enum ("compr", Opt_override_compr),
fsparam_u32 ("rp_size", Opt_rp_size),
{}
diff --git a/fs/namespace.c b/fs/namespace.c
index 93c043245c46..fe0e9e1410fe 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2802,8 +2802,6 @@ static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
put_filesystem(type);
return -EINVAL;
}
- } else {
- subtype = "";
}
}
@@ -3028,7 +3026,7 @@ void *copy_mount_options(const void __user * data)
* the remainder of the page.
*/
/* copy_from_user cannot cross TASK_SIZE ! */
- size = TASK_SIZE - (unsigned long)data;
+ size = TASK_SIZE - (unsigned long)untagged_addr(data);
if (size > PAGE_SIZE)
size = PAGE_SIZE;
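Why untagged_addr() matters here: with ARM64 top-byte-ignore, a user pointer may carry a tag in its high bits, so subtracting the raw value from TASK_SIZE can underflow. A runnable userspace model of the hazard, under the assumption that the tag lives in bits 63:56 (the kernel's implementation also sign-extends to preserve kernel addresses; that detail is elided):

#include <assert.h>
#include <stdint.h>

static uint64_t untagged(uint64_t addr)
{
	return addr & ((1ULL << 56) - 1);	/* strip the tag byte */
}

int main(void)
{
	uint64_t task_size = 1ULL << 48;		/* typical arm64 user VA span */
	uint64_t tagged = (0x2fULL << 56) | 0x1000;	/* tagged user pointer */

	assert(task_size - untagged(tagged) <= task_size);	/* sane size */
	assert(task_size - tagged > task_size);	/* underflow without stripping */
	return 0;
}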
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 0adfd8840110..e180033e35cf 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1669,10 +1669,8 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
#endif /* CONFIG_NFSV4 */
-/*
- * Code common to create, mkdir, and mknod.
- */
-int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
+struct dentry *
+nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -1680,13 +1678,10 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
struct inode *dir = d_inode(parent);
struct inode *inode;
struct dentry *d;
- int error = -EACCES;
+ int error;
d_drop(dentry);
- /* We may have been initialized further down */
- if (d_really_is_positive(dentry))
- goto out;
if (fhandle->size == 0) {
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, NULL);
if (error)
@@ -1702,18 +1697,32 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
}
inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
d = d_splice_alias(inode, dentry);
- if (IS_ERR(d)) {
- error = PTR_ERR(d);
- goto out_error;
- }
- dput(d);
out:
dput(parent);
- return 0;
+ return d;
out_error:
nfs_mark_for_revalidate(dir);
- dput(parent);
- return error;
+ d = ERR_PTR(error);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(nfs_add_or_obtain);
+
+/*
+ * Code common to create, mkdir, and mknod.
+ */
+int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr,
+ struct nfs4_label *label)
+{
+ struct dentry *d;
+
+ d = nfs_add_or_obtain(dentry, fhandle, fattr, label);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+
+ /* Callers don't care */
+ dput(d);
+ return 0;
}
EXPORT_SYMBOL_GPL(nfs_instantiate);
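The split lets callers that need the dentry returned by d_splice_alias() get at it, while nfs_instantiate() keeps the old int-returning behaviour for everyone else. The new calling pattern, mirroring the nfs3proc.c hunks that follow:

d = nfs_add_or_obtain(dentry, fhandle, fattr, label);
if (IS_ERR(d))
	return PTR_ERR(d);
if (d)			/* d_splice_alias() found a pre-existing alias */
	dentry = d;	/* operate on that dentry from here on */
/* ... */
dput(d);		/* NULL-safe: drops the returned reference, if any */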
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 3cb073c50fa6..c9b605f6c9cb 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -1164,6 +1164,7 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
.owner = THIS_MODULE,
+ .flags = PNFS_LAYOUTGET_ON_OPEN,
.max_layoutget_response = 4096, /* 1 page or so... */
.alloc_layout_hdr = filelayout_alloc_layout_hdr,
.free_layout_hdr = filelayout_free_layout_hdr,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e64f810223be..447a3c17fa8e 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -16,14 +16,6 @@ extern const struct export_operations nfs_export_ops;
struct nfs_string;
-/* Maximum number of readahead requests
- * FIXME: this should really be a sysctl so that users may tune it to suit
- * their needs. People that do NFS over a slow network, might for
- * instance want to reduce it to something closer to 1 for improved
- * interactive response.
- */
-#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
-
static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
{
if (!nfs_fsid_equal(&NFS_SB(parent)->fsid, &fattr->fsid))
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index a3ad2d46fd42..9eb2f1a503ab 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -279,15 +279,17 @@ static struct nfs3_createdata *nfs3_alloc_createdata(void)
return data;
}
-static int nfs3_do_create(struct inode *dir, struct dentry *dentry, struct nfs3_createdata *data)
+static struct dentry *
+nfs3_do_create(struct inode *dir, struct dentry *dentry, struct nfs3_createdata *data)
{
int status;
status = rpc_call_sync(NFS_CLIENT(dir), &data->msg, 0);
nfs_post_op_update_inode(dir, data->res.dir_attr);
- if (status == 0)
- status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, NULL);
- return status;
+ if (status != 0)
+ return ERR_PTR(status);
+
+ return nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr, NULL);
}
static void nfs3_free_createdata(struct nfs3_createdata *data)
@@ -304,6 +306,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
+ struct dentry *d_alias;
int status = -ENOMEM;
dprintk("NFS call create %pd\n", dentry);
@@ -330,7 +333,8 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
goto out;
for (;;) {
- status = nfs3_do_create(dir, dentry, data);
+ d_alias = nfs3_do_create(dir, dentry, data);
+ status = PTR_ERR_OR_ZERO(d_alias);
if (status != -ENOTSUPP)
break;
@@ -355,6 +359,9 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
if (status != 0)
goto out_release_acls;
+ if (d_alias)
+ dentry = d_alias;
+
/* When we created the file with exclusive semantics, make
* sure we set the attributes afterwards. */
if (data->arg.create.createmode == NFS3_CREATE_EXCLUSIVE) {
@@ -372,11 +379,13 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
nfs_post_op_update_inode(d_inode(dentry), data->res.fattr);
dprintk("NFS reply setattr (post-create): %d\n", status);
if (status != 0)
- goto out_release_acls;
+ goto out_dput;
}
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
+out_dput:
+ dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
@@ -504,6 +513,7 @@ nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
unsigned int len, struct iattr *sattr)
{
struct nfs3_createdata *data;
+ struct dentry *d_alias;
int status = -ENOMEM;
if (len > NFS3_MAXPATHLEN)
@@ -522,7 +532,11 @@ nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
data->arg.symlink.pathlen = len;
data->arg.symlink.sattr = sattr;
- status = nfs3_do_create(dir, dentry, data);
+ d_alias = nfs3_do_create(dir, dentry, data);
+ status = PTR_ERR_OR_ZERO(d_alias);
+
+ if (status == 0)
+ dput(d_alias);
nfs3_free_createdata(data);
out:
@@ -535,6 +549,7 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
+ struct dentry *d_alias;
int status = -ENOMEM;
dprintk("NFS call mkdir %pd\n", dentry);
@@ -553,12 +568,18 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
data->arg.mkdir.len = dentry->d_name.len;
data->arg.mkdir.sattr = sattr;
- status = nfs3_do_create(dir, dentry, data);
+ d_alias = nfs3_do_create(dir, dentry, data);
+ status = PTR_ERR_OR_ZERO(d_alias);
+
if (status != 0)
goto out_release_acls;
+ if (d_alias)
+ dentry = d_alias;
+
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
+ dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
@@ -660,6 +681,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
+ struct dentry *d_alias;
int status = -ENOMEM;
dprintk("NFS call mknod %pd %u:%u\n", dentry,
@@ -698,12 +720,17 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
goto out;
}
- status = nfs3_do_create(dir, dentry, data);
+ d_alias = nfs3_do_create(dir, dentry, data);
+ status = PTR_ERR_OR_ZERO(d_alias);
if (status != 0)
goto out_release_acls;
+ if (d_alias)
+ dentry = d_alias;
+
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
+ dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
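The status plumbing in these hunks leans on PTR_ERR_OR_ZERO(), which collapses the error-pointer check into one call (paraphrased from include/linux/err.h, annotations elided):

static inline int PTR_ERR_OR_ZERO(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}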
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3564da1ba8a1..16b2e5cc3e94 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -491,8 +491,6 @@ extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern int nfs4_select_rw_stateid(struct nfs4_state *, fmode_t,
const struct nfs_lock_context *, nfs4_stateid *,
const struct cred **);
-extern bool nfs4_refresh_open_stateid(nfs4_stateid *dst,
- struct nfs4_state *state);
extern bool nfs4_copy_open_stateid(nfs4_stateid *dst,
struct nfs4_state *state);
@@ -574,6 +572,15 @@ static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stat
return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0;
}
+static inline void nfs4_stateid_seqid_inc(nfs4_stateid *s1)
+{
+ u32 seqid = be32_to_cpu(s1->seqid);
+
+ if (++seqid == 0)
+ ++seqid;
+ s1->seqid = cpu_to_be32(seqid);
+}
+
static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
{
return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
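nfs4_stateid_seqid_inc() skips zero on wraparound because a seqid of 0 carries special meaning in NFSv4, and nfs4_stateid_is_newer() uses serial-number arithmetic that survives the wrap. A runnable demonstration of both properties:

#include <assert.h>
#include <stdint.h>

static int is_newer(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) > 0;	/* valid across u32 wraparound */
}

int main(void)
{
	assert(is_newer(2, 1));
	assert(is_newer(1, 0xffffffff));	/* 1 is "newer" across the wrap */

	uint32_t seqid = 0xffffffff;
	if (++seqid == 0)			/* same dance as the helper */
		++seqid;
	assert(seqid == 1);			/* 0 is skipped */
	return 0;
}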
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1406858bae6c..11eafcfc490b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1073,14 +1073,26 @@ static const struct rpc_call_ops nfs40_call_sync_ops = {
.rpc_call_done = nfs40_call_sync_done,
};
+static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
+{
+ int ret;
+ struct rpc_task *task;
+
+ task = rpc_run_task(task_setup);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ ret = task->tk_status;
+ rpc_put_task(task);
+ return ret;
+}
+
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res)
{
- int ret;
- struct rpc_task *task;
struct nfs_client *clp = server->nfs_client;
struct nfs4_call_sync_data data = {
.seq_server = server,
@@ -1094,14 +1106,7 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
.callback_data = &data
};
- task = rpc_run_task(&task_setup);
- if (IS_ERR(task))
- ret = PTR_ERR(task);
- else {
- ret = task->tk_status;
- rpc_put_task(task);
- }
- return ret;
+ return nfs4_call_sync_custom(&task_setup);
}
int nfs4_call_sync(struct rpc_clnt *clnt,
@@ -3308,6 +3313,75 @@ nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
return pnfs_wait_on_layoutreturn(inode, task);
}
+/*
+ * Update the seqid of an open stateid
+ */
+static void nfs4_sync_open_stateid(nfs4_stateid *dst,
+ struct nfs4_state *state)
+{
+ __be32 seqid_open;
+ u32 dst_seqid;
+ int seq;
+
+ for (;;) {
+ if (!nfs4_valid_open_stateid(state))
+ break;
+ seq = read_seqbegin(&state->seqlock);
+ if (!nfs4_state_match_open_stateid_other(state, dst)) {
+ nfs4_stateid_copy(dst, &state->open_stateid);
+ if (read_seqretry(&state->seqlock, seq))
+ continue;
+ break;
+ }
+ seqid_open = state->open_stateid.seqid;
+ if (read_seqretry(&state->seqlock, seq))
+ continue;
+
+ dst_seqid = be32_to_cpu(dst->seqid);
+ if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
+ dst->seqid = seqid_open;
+ break;
+ }
+}
+
+/*
+ * Update the seqid of an open stateid after receiving
+ * NFS4ERR_OLD_STATEID
+ */
+static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
+ struct nfs4_state *state)
+{
+ __be32 seqid_open;
+ u32 dst_seqid;
+ bool ret;
+ int seq;
+
+ for (;;) {
+ ret = false;
+ if (!nfs4_valid_open_stateid(state))
+ break;
+ seq = read_seqbegin(&state->seqlock);
+ if (!nfs4_state_match_open_stateid_other(state, dst)) {
+ if (read_seqretry(&state->seqlock, seq))
+ continue;
+ break;
+ }
+ seqid_open = state->open_stateid.seqid;
+ if (read_seqretry(&state->seqlock, seq))
+ continue;
+
+ dst_seqid = be32_to_cpu(dst->seqid);
+ if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) >= 0)
+ dst->seqid = cpu_to_be32(dst_seqid + 1);
+ else
+ dst->seqid = seqid_open;
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
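Both helpers use the standard seqlock read-side retry idiom so they can snapshot open_stateid without taking a lock. Reduced to its core, the shape is:

for (;;) {
	seq = read_seqbegin(&state->seqlock);
	/* read the fields of interest into locals */
	if (!read_seqretry(&state->seqlock, seq))
		break;	/* no writer raced us: the snapshot is consistent */
	/* otherwise retry the read */
}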
struct nfs4_closedata {
struct inode *inode;
struct nfs4_state *state;
@@ -3358,32 +3432,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
/* Handle Layoutreturn errors */
- if (calldata->arg.lr_args && task->tk_status != 0) {
- switch (calldata->res.lr_ret) {
- default:
- calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
- break;
- case 0:
- calldata->arg.lr_args = NULL;
- calldata->res.lr_res = NULL;
- break;
- case -NFS4ERR_OLD_STATEID:
- if (nfs4_layoutreturn_refresh_stateid(&calldata->arg.lr_args->stateid,
- &calldata->arg.lr_args->range,
- calldata->inode))
- goto lr_restart;
- /* Fallthrough */
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_DELEG_REVOKED:
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
- case -NFS4ERR_WRONG_CRED:
- calldata->arg.lr_args = NULL;
- calldata->res.lr_res = NULL;
- goto lr_restart;
- }
- }
+ if (pnfs_roc_done(task, calldata->inode,
+ &calldata->arg.lr_args,
+ &calldata->res.lr_res,
+ &calldata->res.lr_ret) == -EAGAIN)
+ goto out_restart;
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
@@ -3403,7 +3456,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
break;
case -NFS4ERR_OLD_STATEID:
/* Did we race with OPEN? */
- if (nfs4_refresh_open_stateid(&calldata->arg.stateid,
+ if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
state))
goto out_restart;
goto out_release;
@@ -3415,7 +3468,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
task->tk_msg.rpc_cred);
/* Fallthrough */
case -NFS4ERR_BAD_STATEID:
- break;
+ if (calldata->arg.fmode == 0)
+ break;
+ /* Fallthrough */
default:
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
@@ -3430,8 +3485,6 @@ out_release:
nfs_refresh_inode(calldata->inode, &calldata->fattr);
dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
return;
-lr_restart:
- calldata->res.lr_ret = 0;
out_restart:
task->tk_status = 0;
rpc_restart_call_prepare(task);
@@ -3472,8 +3525,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
} else if (is_rdwr)
calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
- if (!nfs4_valid_open_stateid(state) ||
- !nfs4_refresh_open_stateid(&calldata->arg.stateid, state))
+ nfs4_sync_open_stateid(&calldata->arg.stateid, state);
+ if (!nfs4_valid_open_stateid(state))
call_close = 0;
spin_unlock(&state->owner->so_lock);
@@ -6018,7 +6071,6 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
.rpc_resp = res,
.rpc_cred = cred,
};
- struct rpc_task *task;
struct rpc_task_setup task_setup_data = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
@@ -6051,17 +6103,12 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
dprintk("NFS call setclientid auth=%s, '%s'\n",
clp->cl_rpcclient->cl_auth->au_ops->au_name,
clp->cl_owner_id);
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task)) {
- status = PTR_ERR(task);
- goto out;
- }
- status = task->tk_status;
+
+ status = nfs4_call_sync_custom(&task_setup_data);
if (setclientid.sc_cred) {
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
put_rpccred(setclientid.sc_cred);
}
- rpc_put_task(task);
out:
trace_nfs4_setclientid(clp, status);
dprintk("NFS reply setclientid: %d\n", status);
@@ -6129,32 +6176,11 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
/* Handle Layoutreturn errors */
- if (data->args.lr_args && task->tk_status != 0) {
- switch(data->res.lr_ret) {
- default:
- data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
- break;
- case 0:
- data->args.lr_args = NULL;
- data->res.lr_res = NULL;
- break;
- case -NFS4ERR_OLD_STATEID:
- if (nfs4_layoutreturn_refresh_stateid(&data->args.lr_args->stateid,
- &data->args.lr_args->range,
- data->inode))
- goto lr_restart;
- /* Fallthrough */
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_DELEG_REVOKED:
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
- case -NFS4ERR_WRONG_CRED:
- data->args.lr_args = NULL;
- data->res.lr_res = NULL;
- goto lr_restart;
- }
- }
+ if (pnfs_roc_done(task, data->inode,
+ &data->args.lr_args,
+ &data->res.lr_res,
+ &data->res.lr_ret) == -EAGAIN)
+ goto out_restart;
switch (task->tk_status) {
case 0:
@@ -6192,8 +6218,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
}
data->rpc_status = task->tk_status;
return;
-lr_restart:
- data->res.lr_ret = 0;
out_restart:
task->tk_status = 0;
rpc_restart_call_prepare(task);
@@ -6386,6 +6410,42 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
return err;
}
+/*
+ * Update the seqid of a lock stateid after receiving
+ * NFS4ERR_OLD_STATEID
+ */
+static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
+ struct nfs4_lock_state *lsp)
+{
+ struct nfs4_state *state = lsp->ls_state;
+ bool ret = false;
+
+ spin_lock(&state->state_lock);
+ if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
+ goto out;
+ if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
+ nfs4_stateid_seqid_inc(dst);
+ else
+ dst->seqid = lsp->ls_stateid.seqid;
+ ret = true;
+out:
+ spin_unlock(&state->state_lock);
+ return ret;
+}
+
+static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
+ struct nfs4_lock_state *lsp)
+{
+ struct nfs4_state *state = lsp->ls_state;
+ bool ret;
+
+ spin_lock(&state->state_lock);
+ ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
+ nfs4_stateid_copy(dst, &lsp->ls_stateid);
+ spin_unlock(&state->state_lock);
+ return ret;
+}
+
struct nfs4_unlockdata {
struct nfs_locku_args arg;
struct nfs_locku_res res;
@@ -6403,7 +6463,8 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs_seqid *seqid)
{
struct nfs4_unlockdata *p;
- struct inode *inode = lsp->ls_state->inode;
+ struct nfs4_state *state = lsp->ls_state;
+ struct inode *inode = state->inode;
p = kzalloc(sizeof(*p), GFP_NOFS);
if (p == NULL)
@@ -6419,6 +6480,9 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
locks_init_lock(&p->fl);
locks_copy_lock(&p->fl, fl);
p->server = NFS_SERVER(inode);
+ spin_lock(&state->state_lock);
+ nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
+ spin_unlock(&state->state_lock);
return p;
}
@@ -6457,10 +6521,14 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
task->tk_msg.rpc_cred);
/* Fall through */
case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
- if (!nfs4_stateid_match(&calldata->arg.stateid,
- &calldata->lsp->ls_stateid))
+ if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
+ calldata->lsp))
+ rpc_restart_call_prepare(task);
+ break;
+ case -NFS4ERR_OLD_STATEID:
+ if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
+ calldata->lsp))
rpc_restart_call_prepare(task);
break;
default:
@@ -6483,7 +6551,6 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
goto out_wait;
- nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
goto out_no_action;
@@ -7645,6 +7712,8 @@ int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
{
int status;
+ struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
+ struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
struct nfs4_secinfo_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
@@ -7657,26 +7726,37 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct
.rpc_argp = &args,
.rpc_resp = &res,
};
- struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
+ struct nfs4_call_sync_data data = {
+ .seq_server = NFS_SERVER(dir),
+ .seq_args = &args.seq_args,
+ .seq_res = &res.seq_res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clnt,
+ .rpc_message = &msg,
+ .callback_ops = clp->cl_mvops->call_sync_ops,
+ .callback_data = &data,
+ .flags = RPC_TASK_NO_ROUND_ROBIN,
+ };
const struct cred *cred = NULL;
if (use_integrity) {
- clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
- cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
+ clnt = clp->cl_rpcclient;
+ task_setup.rpc_client = clnt;
+
+ cred = nfs4_get_clid_cred(clp);
msg.rpc_cred = cred;
}
dprintk("NFS call secinfo %s\n", name->name);
- nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
- NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
+ nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
+ status = nfs4_call_sync_custom(&task_setup);
- status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
- &res.seq_res, RPC_TASK_NO_ROUND_ROBIN);
dprintk("NFS reply secinfo: %d\n", status);
put_cred(cred);
-
return status;
}
@@ -8344,7 +8424,6 @@ static const struct rpc_call_ops nfs4_get_lease_time_ops = {
int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
- struct rpc_task *task;
struct nfs4_get_lease_time_args args;
struct nfs4_get_lease_time_res res = {
.lr_fsinfo = fsinfo,
@@ -8366,17 +8445,9 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
.callback_data = &data,
.flags = RPC_TASK_TIMEOUT,
};
- int status;
nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
- task = rpc_run_task(&task_setup);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- status = task->tk_status;
- rpc_put_task(task);
- return status;
+ return nfs4_call_sync_custom(&task_setup);
}
#ifdef CONFIG_NFS_V4_1
@@ -8845,7 +8916,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
const struct cred *cred)
{
struct nfs4_reclaim_complete_data *calldata;
- struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
.rpc_cred = cred,
@@ -8854,7 +8924,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_reclaim_complete_call_ops,
- .flags = RPC_TASK_ASYNC | RPC_TASK_NO_ROUND_ROBIN,
+ .flags = RPC_TASK_NO_ROUND_ROBIN,
};
int status = -ENOMEM;
@@ -8869,15 +8939,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task)) {
- status = PTR_ERR(task);
- goto out;
- }
- status = rpc_wait_for_completion_task(task);
- if (status == 0)
- status = task->tk_status;
- rpc_put_task(task);
+ status = nfs4_call_sync_custom(&task_setup_data);
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
@@ -9103,10 +9165,19 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
if (!nfs41_sequence_process(task, &lrp->res.seq_res))
return;
+ /*
+ * Was there an RPC level error? Assume the call succeeded,
+ * and that we need to release the layout
+ */
+ if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
+ lrp->res.lrs_present = 0;
+ return;
+ }
+
server = NFS_SERVER(lrp->args.inode);
switch (task->tk_status) {
case -NFS4ERR_OLD_STATEID:
- if (nfs4_layoutreturn_refresh_stateid(&lrp->args.stateid,
+ if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
&lrp->args.range,
lrp->args.inode))
goto out_restart;
@@ -9362,18 +9433,32 @@ _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = &res,
};
struct rpc_clnt *clnt = server->client;
+ struct nfs4_call_sync_data data = {
+ .seq_server = server,
+ .seq_args = &args.seq_args,
+ .seq_res = &res.seq_res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
+ .callback_data = &data,
+ .flags = RPC_TASK_NO_ROUND_ROBIN,
+ };
const struct cred *cred = NULL;
int status;
if (use_integrity) {
clnt = server->nfs_client->cl_rpcclient;
+ task_setup.rpc_client = clnt;
+
cred = nfs4_get_clid_cred(server->nfs_client);
msg.rpc_cred = cred;
}
dprintk("--> %s\n", __func__);
- status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
- &res.seq_res, RPC_TASK_NO_ROUND_ROBIN);
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
+ status = nfs4_call_sync_custom(&task_setup);
dprintk("<-- %s status=%d\n", __func__, status);
put_cred(cred);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index cad4e064b328..0c6d53dc3672 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1015,22 +1015,6 @@ out:
return ret;
}
-bool nfs4_refresh_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
-{
- bool ret;
- int seq;
-
- do {
- ret = false;
- seq = read_seqbegin(&state->seqlock);
- if (nfs4_state_match_open_stateid_other(state, dst)) {
- dst->seqid = state->open_stateid.seqid;
- ret = true;
- }
- } while (read_seqretry(&state->seqlock, seq));
- return ret;
-}
-
bool nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
bool ret;
@@ -2095,8 +2079,10 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
}
status = nfs4_begin_drain_session(clp);
- if (status != 0)
- return status;
+ if (status != 0) {
+ result = status;
+ goto out;
+ }
status = nfs4_replace_transport(server, locations);
if (status != 0) {
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 46a8d636d151..ab07db0f07cd 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1174,7 +1174,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
} else
*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
- if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+ if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
*p++ = cpu_to_be32(label->lfs);
*p++ = cpu_to_be32(label->pi);
*p++ = cpu_to_be32(label->len);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 4525d5acae38..bb80034a7661 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -359,9 +359,10 @@ pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
}
/*
- * Update the seqid of a layout stateid
+ * Update the seqid of a layout stateid after receiving
+ * NFS4ERR_OLD_STATEID
*/
-bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode)
{
@@ -377,7 +378,15 @@ bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
spin_lock(&inode->i_lock);
lo = NFS_I(inode)->layout;
- if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
+ if (lo && pnfs_layout_is_valid(lo) &&
+ nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
+ /* Is our call using the most recent seqid? If so, bump it */
+ if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
+ nfs4_stateid_seqid_inc(dst);
+ ret = true;
+ goto out;
+ }
+ /* Try to update the seqid to the most recent */
err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
if (err != -EBUSY) {
dst->seqid = lo->plh_stateid.seqid;
@@ -385,6 +394,7 @@ bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
ret = true;
}
}
+out:
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
return ret;
@@ -1440,6 +1450,52 @@ out_noroc:
return false;
}
+int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+ struct nfs4_layoutreturn_args **argpp,
+ struct nfs4_layoutreturn_res **respp,
+ int *ret)
+{
+ struct nfs4_layoutreturn_args *arg = *argpp;
+ int retval = -EAGAIN;
+
+ if (!arg)
+ return 0;
+ /* Handle Layoutreturn errors */
+ switch (*ret) {
+ case 0:
+ retval = 0;
+ break;
+ case -NFS4ERR_NOMATCHING_LAYOUT:
+ /* Was there an RPC level error? If not, retry */
+ if (task->tk_rpc_status == 0)
+ break;
+ /* If the call was not sent, let caller handle it */
+ if (!RPC_WAS_SENT(task))
+ return 0;
+ /*
+ * Otherwise, assume the call succeeded and
+ * that we need to release the layout
+ */
+ *ret = 0;
+ (*respp)->lrs_present = 0;
+ retval = 0;
+ break;
+ case -NFS4ERR_DELAY:
+ /* Let the caller handle the retry */
+ *ret = -NFS4ERR_NOMATCHING_LAYOUT;
+ return 0;
+ case -NFS4ERR_OLD_STATEID:
+ if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
+ &arg->range, inode))
+ break;
+ *ret = -NFS4ERR_NOMATCHING_LAYOUT;
+ return -EAGAIN;
+ }
+ *argpp = NULL;
+ *respp = NULL;
+ return retval;
+}
+
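Both the CLOSE and DELEGRETURN completion handlers (see the nfs4proc.c hunks above) now reduce to the same call. A sketch of the shared pattern, with args/res standing in for the per-caller structures:

if (pnfs_roc_done(task, inode, &args->lr_args, &res->lr_res,
		  &res->lr_ret) == -EAGAIN) {
	task->tk_status = 0;
	rpc_restart_call_prepare(task);	/* re-drive the compound */
	return;
}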
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
int ret)
@@ -1449,10 +1505,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
const nfs4_stateid *res_stateid = NULL;
struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
- if (ret == 0) {
- arg_stateid = &args->stateid;
+ switch (ret) {
+ case -NFS4ERR_NOMATCHING_LAYOUT:
+ break;
+ case 0:
if (res->lrs_present)
res_stateid = &res->stateid;
+ /* Fallthrough */
+ default:
+ arg_stateid = &args->stateid;
}
pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
res_stateid);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index f15609c003d8..f8a38065c7e4 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -261,7 +261,7 @@ int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
bool is_recall);
int pnfs_destroy_layouts_byclid(struct nfs_client *clp,
bool is_recall);
-bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode);
void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo);
@@ -282,6 +282,10 @@ bool pnfs_roc(struct inode *ino,
struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
const struct cred *cred);
+int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+ struct nfs4_layoutreturn_args **argpp,
+ struct nfs4_layoutreturn_res **respp,
+ int *ret);
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
int ret);
@@ -701,6 +705,15 @@ pnfs_roc(struct inode *ino,
return false;
}
+static inline int
+pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+ struct nfs4_layoutreturn_args **argpp,
+ struct nfs4_layoutreturn_res **respp,
+ int *ret)
+{
+ return 0;
+}
+
static inline void
pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
@@ -785,7 +798,7 @@ static inline void nfs4_pnfs_v3_ds_connect_unload(void)
{
}
-static inline bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+static inline bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode)
{
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 19a76cfa8b1f..a84df7d63403 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2645,6 +2645,13 @@ int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
}
EXPORT_SYMBOL_GPL(nfs_clone_sb_security);
+static void nfs_set_readahead(struct backing_dev_info *bdi,
+ unsigned long iomax_pages)
+{
+ bdi->ra_pages = VM_READAHEAD_PAGES;
+ bdi->io_pages = iomax_pages;
+}
+
struct dentry *nfs_fs_mount_common(struct nfs_server *server,
int flags, const char *dev_name,
struct nfs_mount_info *mount_info,
@@ -2687,7 +2694,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
mntroot = ERR_PTR(error);
goto error_splat_super;
}
- s->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD;
+ nfs_set_readahead(s->s_bdi, server->rpages);
server->super = s;
}
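This replaces the fixed NFS_MAX_READAHEAD multiplier removed from internal.h above: the read-ahead window now follows the VM-wide default while io_pages caps a single I/O at the server's rsize. In effect (the VM_READAHEAD_PAGES value is 128 KiB worth of pages on common configurations):

bdi->ra_pages = VM_READAHEAD_PAGES;	/* VM default read-ahead window */
bdi->io_pages = iomax_pages;		/* largest single I/O, in pages */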
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 20c841a906f2..3aac5c917afe 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -71,7 +71,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
}
/* Read, map, and pin the page. */
page = ntfs_map_page(mft_vi->i_mapping, index);
- if (likely(!IS_ERR(page))) {
+ if (!IS_ERR(page)) {
/* Catch multi sector transfer fixup errors. */
if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
ofs)))) {
@@ -154,7 +154,7 @@ MFT_RECORD *map_mft_record(ntfs_inode *ni)
mutex_lock(&ni->mrec_lock);
m = map_mft_record_page(ni);
- if (likely(!IS_ERR(m)))
+ if (!IS_ERR(m))
return m;
mutex_unlock(&ni->mrec_lock);
@@ -271,7 +271,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
m = map_mft_record(ni);
/* map_mft_record() has incremented this on success. */
atomic_dec(&ni->count);
- if (likely(!IS_ERR(m))) {
+ if (!IS_ERR(m)) {
/* Verify the sequence number. */
if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
ntfs_debug("Done 1.");
@@ -1303,7 +1303,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mftbmp_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
- if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+ if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft bitmap attribute.");
@@ -1734,7 +1734,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
read_unlock_irqrestore(&mft_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mft_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
- if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+ if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mft_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft data attribute.");
@@ -1776,7 +1776,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
do {
rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE,
true);
- if (likely(!IS_ERR(rl2)))
+ if (!IS_ERR(rl2))
break;
if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
ntfs_error(vol->sb, "Failed to allocate the minimal "
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 2d3cc9e3395d..4e6a44bc654c 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -115,7 +115,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
dent_ino = MREF(mref);
ntfs_debug("Found inode 0x%lx. Calling ntfs_iget.", dent_ino);
dent_inode = ntfs_iget(vol->sb, dent_ino);
- if (likely(!IS_ERR(dent_inode))) {
+ if (!IS_ERR(dent_inode)) {
/* Consistency check. */
if (is_bad_inode(dent_inode) || MSEQNO(mref) ==
NTFS_I(dent_inode)->seq_no ||
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 508744a93180..97932fb5179c 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -951,7 +951,7 @@ mpa_err:
}
/* Now combine the new and old runlists checking for overlaps. */
old_rl = ntfs_runlists_merge(old_rl, rl);
- if (likely(!IS_ERR(old_rl)))
+ if (!IS_ERR(old_rl))
return old_rl;
ntfs_free(rl);
ntfs_error(vol->sb, "Failed to merge runlists.");
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 29621d40f448..7dc3bc604f78 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1475,7 +1475,7 @@ not_enabled:
kfree(name);
/* Get the inode. */
tmp_ino = ntfs_iget(vol->sb, MREF(mref));
- if (unlikely(IS_ERR(tmp_ino) || is_bad_inode(tmp_ino))) {
+ if (IS_ERR(tmp_ino) || unlikely(is_bad_inode(tmp_ino))) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(vol->sb, "Failed to load $UsnJrnl.");
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 0c335b51043d..f9baefc76cf9 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5993,6 +5993,7 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
struct buffer_head *data_alloc_bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
+ struct ocfs2_journal *journal = osb->journal;
BUG_ON(inode_trylock(tl_inode));
@@ -6013,6 +6014,20 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
goto out;
}
+ /* Appending the truncate log (TA) and flushing the truncate log (TF) are
+ * two separate transactions. Both can be committed but not yet
+ * checkpointed. If a crash occurs in between, both transactions will be
+ * replayed with several clusters already released to the global bitmap.
+ * The truncate log replay then results in a cluster double free.
+ */
+ jbd2_journal_lock_updates(journal->j_journal);
+ status = jbd2_journal_flush(journal->j_journal);
+ jbd2_journal_unlock_updates(journal->j_journal);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out;
+ }
+
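The fix forces a full journal flush and checkpoint before the global bitmap is touched, so the truncate-log transaction can never be replayed on its own. This is the stock jbd2 idiom for "make everything durable now":

jbd2_journal_lock_updates(j);	/* quiesce: block new handles */
err = jbd2_journal_flush(j);	/* commit, checkpoint, empty the log */
jbd2_journal_unlock_updates(j);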
data_alloc_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT);
@@ -6792,6 +6807,8 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
struct page *page, int zero, u64 *phys)
{
int ret, partial = 0;
+ loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
+ loff_t length = to - from;
ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
if (ret)
@@ -6811,7 +6828,8 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
if (ret < 0)
mlog_errno(ret);
else if (ocfs2_should_order_data(inode)) {
- ret = ocfs2_jbd2_file_inode(handle, inode);
+ ret = ocfs2_jbd2_inode_add_write(handle, inode,
+ start_byte, length);
if (ret < 0)
mlog_errno(ret);
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index a4c905d6b575..8de1c9d644f6 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -942,7 +942,8 @@ static void ocfs2_write_failure(struct inode *inode,
if (tmppage && page_has_buffers(tmppage)) {
if (ocfs2_should_order_data(inode))
- ocfs2_jbd2_file_inode(wc->w_handle, inode);
+ ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
+ user_pos, user_len);
block_commit_write(tmppage, from, to);
}
@@ -2023,8 +2024,14 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
}
if (page_has_buffers(tmppage)) {
- if (handle && ocfs2_should_order_data(inode))
- ocfs2_jbd2_file_inode(handle, inode);
+ if (handle && ocfs2_should_order_data(inode)) {
+ loff_t start_byte =
+ ((loff_t)tmppage->index << PAGE_SHIFT) +
+ from;
+ loff_t length = to - from;
+ ocfs2_jbd2_inode_add_write(handle, inode,
+ start_byte, length);
+ }
block_commit_write(tmppage, from, to);
}
}
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index 429e6a8359a5..eaf042feaf5e 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -231,14 +231,6 @@ static int blockcheck_u64_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(blockcheck_fops, blockcheck_u64_get, NULL, "%llu\n");
-static struct dentry *blockcheck_debugfs_create(const char *name,
- struct dentry *parent,
- u64 *value)
-{
- return debugfs_create_file(name, S_IFREG | S_IRUSR, parent, value,
- &blockcheck_fops);
-}
-
static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
{
if (stats) {
@@ -250,16 +242,20 @@ static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
static void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
struct dentry *parent)
{
- stats->b_debug_dir = debugfs_create_dir("blockcheck", parent);
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("blockcheck", parent);
+ stats->b_debug_dir = dir;
+
+ debugfs_create_file("blocks_checked", S_IFREG | S_IRUSR, dir,
+ &stats->b_check_count, &blockcheck_fops);
- blockcheck_debugfs_create("blocks_checked", stats->b_debug_dir,
- &stats->b_check_count);
+ debugfs_create_file("checksums_failed", S_IFREG | S_IRUSR, dir,
+ &stats->b_failure_count, &blockcheck_fops);
- blockcheck_debugfs_create("checksums_failed", stats->b_debug_dir,
- &stats->b_failure_count);
+ debugfs_create_file("ecc_recoveries", S_IFREG | S_IRUSR, dir,
+ &stats->b_recover_count, &blockcheck_fops);
- blockcheck_debugfs_create("ecc_recoveries", stats->b_debug_dir,
- &stats->b_recover_count);
}
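This follows the tree-wide debugfs convention: the creation helpers accept (and propagate) error pointers, so callers neither check return values nor keep per-file dentries, and removing the directory tears everything down. The same pattern repeats in the o2hb, dlm and dlmglue hunks below. In miniature:

dir = debugfs_create_dir("blockcheck", parent);	/* ERR_PTR on failure is fine */
debugfs_create_file("blocks_checked", S_IFREG | S_IRUSR, dir,
		    &stats->b_check_count, &blockcheck_fops);
/* ... on teardown: */
debugfs_remove_recursive(dir);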
#else
static inline void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index f1b613327ac8..a368350d4c27 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -225,10 +225,6 @@ struct o2hb_region {
unsigned int hr_region_num;
struct dentry *hr_debug_dir;
- struct dentry *hr_debug_livenodes;
- struct dentry *hr_debug_regnum;
- struct dentry *hr_debug_elapsed_time;
- struct dentry *hr_debug_pinned;
struct o2hb_debug_buf *hr_db_livenodes;
struct o2hb_debug_buf *hr_db_regnum;
struct o2hb_debug_buf *hr_db_elapsed_time;
@@ -1394,21 +1390,20 @@ void o2hb_exit(void)
kfree(o2hb_db_failedregions);
}
-static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir,
- struct o2hb_debug_buf **db, int db_len,
- int type, int size, int len, void *data)
+static void o2hb_debug_create(const char *name, struct dentry *dir,
+ struct o2hb_debug_buf **db, int db_len, int type,
+ int size, int len, void *data)
{
*db = kmalloc(db_len, GFP_KERNEL);
if (!*db)
- return NULL;
+ return;
(*db)->db_type = type;
(*db)->db_size = size;
(*db)->db_len = len;
(*db)->db_data = data;
- return debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db,
- &o2hb_debug_fops);
+ debugfs_create_file(name, S_IFREG|S_IRUSR, dir, *db, &o2hb_debug_fops);
}
static void o2hb_debug_init(void)
@@ -1525,11 +1520,7 @@ static void o2hb_region_release(struct config_item *item)
kfree(reg->hr_slots);
- debugfs_remove(reg->hr_debug_livenodes);
- debugfs_remove(reg->hr_debug_regnum);
- debugfs_remove(reg->hr_debug_elapsed_time);
- debugfs_remove(reg->hr_debug_pinned);
- debugfs_remove(reg->hr_debug_dir);
+ debugfs_remove_recursive(reg->hr_debug_dir);
kfree(reg->hr_db_livenodes);
kfree(reg->hr_db_regnum);
kfree(reg->hr_db_elapsed_time);
@@ -1988,69 +1979,33 @@ static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group
: NULL;
}
-static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
+static void o2hb_debug_region_init(struct o2hb_region *reg,
+ struct dentry *parent)
{
- int ret = -ENOMEM;
+ struct dentry *dir;
- reg->hr_debug_dir =
- debugfs_create_dir(config_item_name(&reg->hr_item), dir);
- if (!reg->hr_debug_dir) {
- mlog_errno(ret);
- goto bail;
- }
+ dir = debugfs_create_dir(config_item_name(&reg->hr_item), parent);
+ reg->hr_debug_dir = dir;
- reg->hr_debug_livenodes =
- o2hb_debug_create(O2HB_DEBUG_LIVENODES,
- reg->hr_debug_dir,
- &(reg->hr_db_livenodes),
- sizeof(*(reg->hr_db_livenodes)),
- O2HB_DB_TYPE_REGION_LIVENODES,
- sizeof(reg->hr_live_node_bitmap),
- O2NM_MAX_NODES, reg);
- if (!reg->hr_debug_livenodes) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_LIVENODES, dir, &(reg->hr_db_livenodes),
+ sizeof(*(reg->hr_db_livenodes)),
+ O2HB_DB_TYPE_REGION_LIVENODES,
+ sizeof(reg->hr_live_node_bitmap), O2NM_MAX_NODES,
+ reg);
- reg->hr_debug_regnum =
- o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER,
- reg->hr_debug_dir,
- &(reg->hr_db_regnum),
- sizeof(*(reg->hr_db_regnum)),
- O2HB_DB_TYPE_REGION_NUMBER,
- 0, O2NM_MAX_NODES, reg);
- if (!reg->hr_debug_regnum) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_REGION_NUMBER, dir, &(reg->hr_db_regnum),
+ sizeof(*(reg->hr_db_regnum)),
+ O2HB_DB_TYPE_REGION_NUMBER, 0, O2NM_MAX_NODES, reg);
- reg->hr_debug_elapsed_time =
- o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME,
- reg->hr_debug_dir,
- &(reg->hr_db_elapsed_time),
- sizeof(*(reg->hr_db_elapsed_time)),
- O2HB_DB_TYPE_REGION_ELAPSED_TIME,
- 0, 0, reg);
- if (!reg->hr_debug_elapsed_time) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_REGION_ELAPSED_TIME, dir,
+ &(reg->hr_db_elapsed_time),
+ sizeof(*(reg->hr_db_elapsed_time)),
+ O2HB_DB_TYPE_REGION_ELAPSED_TIME, 0, 0, reg);
- reg->hr_debug_pinned =
- o2hb_debug_create(O2HB_DEBUG_REGION_PINNED,
- reg->hr_debug_dir,
- &(reg->hr_db_pinned),
- sizeof(*(reg->hr_db_pinned)),
- O2HB_DB_TYPE_REGION_PINNED,
- 0, 0, reg);
- if (!reg->hr_debug_pinned) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, dir, &(reg->hr_db_pinned),
+ sizeof(*(reg->hr_db_pinned)),
+ O2HB_DB_TYPE_REGION_PINNED, 0, 0, reg);
- ret = 0;
-bail:
- return ret;
}
static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
@@ -2106,11 +2061,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
if (ret)
goto unregister_handler;
- ret = o2hb_debug_region_init(reg, o2hb_debug_dir);
- if (ret) {
- config_item_put(&reg->hr_item);
- goto unregister_handler;
- }
+ o2hb_debug_region_init(reg, o2hb_debug_dir);
return &reg->hr_item;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 784426dee56c..bdef72c0f099 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3636,7 +3636,7 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
int i, j, num_used;
u32 major_hash;
struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf;
- struct ocfs2_dx_entry_list *orig_list, *new_list, *tmp_list;
+ struct ocfs2_dx_entry_list *orig_list, *tmp_list;
struct ocfs2_dx_entry *dx_entry;
tmp_list = &tmp_dx_leaf->dl_list;
@@ -3645,7 +3645,6 @@ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash,
orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data;
orig_list = &orig_dx_leaf->dl_list;
new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data;
- new_list = &new_dx_leaf->dl_list;
num_used = le16_to_cpu(orig_list->de_num_used);
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 69a429b625cc..aaf24548b02a 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -142,7 +142,6 @@ struct dlm_ctxt
atomic_t res_tot_count;
atomic_t res_cur_count;
- struct dlm_debug_ctxt *dlm_debug_ctxt;
struct dentry *dlm_debugfs_subroot;
/* NOTE: Next three are protected by dlm_domain_lock */
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index a4b58ba99927..4d0b452012b2 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -853,67 +853,34 @@ static const struct file_operations debug_state_fops = {
/* files in subroot */
void dlm_debug_init(struct dlm_ctxt *dlm)
{
- struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
-
/* for dumping dlm_ctxt */
- dc->debug_state_dentry = debugfs_create_file(DLM_DEBUGFS_DLM_STATE,
- S_IFREG|S_IRUSR,
- dlm->dlm_debugfs_subroot,
- dlm, &debug_state_fops);
+ debugfs_create_file(DLM_DEBUGFS_DLM_STATE, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm, &debug_state_fops);
/* for dumping lockres */
- dc->debug_lockres_dentry =
- debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE,
- S_IFREG|S_IRUSR,
- dlm->dlm_debugfs_subroot,
- dlm, &debug_lockres_fops);
+ debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm, &debug_lockres_fops);
/* for dumping mles */
- dc->debug_mle_dentry = debugfs_create_file(DLM_DEBUGFS_MLE_STATE,
- S_IFREG|S_IRUSR,
- dlm->dlm_debugfs_subroot,
- dlm, &debug_mle_fops);
+ debugfs_create_file(DLM_DEBUGFS_MLE_STATE, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm, &debug_mle_fops);
/* for dumping lockres on the purge list */
- dc->debug_purgelist_dentry =
- debugfs_create_file(DLM_DEBUGFS_PURGE_LIST,
- S_IFREG|S_IRUSR,
- dlm->dlm_debugfs_subroot,
- dlm, &debug_purgelist_fops);
-}
-
-void dlm_debug_shutdown(struct dlm_ctxt *dlm)
-{
- struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
-
- if (dc) {
- debugfs_remove(dc->debug_purgelist_dentry);
- debugfs_remove(dc->debug_mle_dentry);
- debugfs_remove(dc->debug_lockres_dentry);
- debugfs_remove(dc->debug_state_dentry);
- kfree(dc);
- dc = NULL;
- }
+ debugfs_create_file(DLM_DEBUGFS_PURGE_LIST, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm,
+ &debug_purgelist_fops);
}
/* subroot - domain dir */
-int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
{
- dlm->dlm_debug_ctxt = kzalloc(sizeof(struct dlm_debug_ctxt),
- GFP_KERNEL);
- if (!dlm->dlm_debug_ctxt) {
- mlog_errno(-ENOMEM);
- return -ENOMEM;
- }
-
dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name,
dlm_debugfs_root);
- return 0;
}
void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
{
- debugfs_remove(dlm->dlm_debugfs_subroot);
+ debugfs_remove_recursive(dlm->dlm_debugfs_subroot);
}
/* debugfs root */
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index 7d0c7c9013ce..f8fd8680a4b6 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -14,13 +14,6 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle);
#ifdef CONFIG_DEBUG_FS
-struct dlm_debug_ctxt {
- struct dentry *debug_state_dentry;
- struct dentry *debug_lockres_dentry;
- struct dentry *debug_mle_dentry;
- struct dentry *debug_purgelist_dentry;
-};
-
struct debug_lockres {
int dl_len;
char *dl_buf;
@@ -29,9 +22,8 @@ struct debug_lockres {
};
void dlm_debug_init(struct dlm_ctxt *dlm);
-void dlm_debug_shutdown(struct dlm_ctxt *dlm);
-int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm);
+void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm);
void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm);
void dlm_create_debugfs_root(void);
@@ -42,12 +34,8 @@ void dlm_destroy_debugfs_root(void);
static inline void dlm_debug_init(struct dlm_ctxt *dlm)
{
}
-static inline void dlm_debug_shutdown(struct dlm_ctxt *dlm)
-{
-}
-static inline int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+static inline void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
{
- return 0;
}
static inline void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
{
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7338b5d4647c..ee6f459f9770 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -387,7 +387,6 @@ static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
{
dlm_unregister_domain_handlers(dlm);
- dlm_debug_shutdown(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
dlm_destroy_dlm_worker(dlm);
@@ -1938,7 +1937,6 @@ bail:
if (status) {
dlm_unregister_domain_handlers(dlm);
- dlm_debug_shutdown(dlm);
dlm_complete_thread(dlm);
dlm_complete_recovery_thread(dlm);
dlm_destroy_dlm_worker(dlm);
@@ -1992,9 +1990,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
dlm->key = key;
dlm->node_num = o2nm_this_node();
- ret = dlm_create_debugfs_subroot(dlm);
- if (ret < 0)
- goto leave;
+ dlm_create_debugfs_subroot(dlm);
spin_lock_init(&dlm->spinlock);
spin_lock_init(&dlm->master_lock);
@@ -2056,6 +2052,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
mlog(0, "context init: refcount %u\n",
kref_read(&dlm->dlm_refs));
+ ret = 0;
leave:
if (ret < 0 && dlm) {
if (dlm->master_hash)
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index e78657742bd8..3883633e82eb 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -90,7 +90,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
enum dlm_status status;
int actions = 0;
int in_use;
- u8 owner;
+ u8 owner;
+ int recovery_wait = 0;
mlog(0, "master_node = %d, valblk = %d\n", master_node,
flags & LKM_VALBLK);
@@ -193,9 +194,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
}
if (flags & LKM_CANCEL)
lock->cancel_pending = 0;
- else
- lock->unlock_pending = 0;
-
+ else {
+ if (!lock->unlock_pending)
+ recovery_wait = 1;
+ else
+ lock->unlock_pending = 0;
+ }
}
/* get an extra ref on lock. if we are just switching
@@ -229,6 +233,17 @@ leave:
spin_unlock(&res->spinlock);
wake_up(&res->wq);
+ if (recovery_wait) {
+ spin_lock(&res->spinlock);
+ /* An unlock request will succeed immediately after the owner dies,
+ * and the lock is already removed from the grant list. We have to
+ * wait for RECOVERING to finish, or we miss the chance to purge the
+ * lock, since its removal is much faster than the RECOVERING process.
+ */

+ __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
+ spin_unlock(&res->spinlock);
+ }
+
/* let the caller's final dlm_lock_put handle the actual kfree */
if (actions & DLM_UNLOCK_FREE_LOCK) {
/* this should always be coupled with list removal */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 14207234fa3d..6e774c5ea13b 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2508,9 +2508,7 @@ bail:
ocfs2_inode_unlock(inode, ex);
}
- if (local_bh)
- brelse(local_bh);
-
+ brelse(local_bh);
return status;
}
@@ -2593,8 +2591,7 @@ int ocfs2_inode_lock_atime(struct inode *inode,
*level = 1;
if (ocfs2_should_update_atime(inode, vfsmnt))
ocfs2_update_inode_atime(inode, bh);
- if (bh)
- brelse(bh);
+ brelse(bh);
} else
*level = 0;
@@ -3012,8 +3009,6 @@ struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
kref_init(&dlm_debug->d_refcnt);
INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
- dlm_debug->d_locking_state = NULL;
- dlm_debug->d_locking_filter = NULL;
dlm_debug->d_filter_secs = 0;
out:
return dlm_debug;
@@ -3282,27 +3277,19 @@ static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{
struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
- dlm_debug->d_locking_state = debugfs_create_file("locking_state",
- S_IFREG|S_IRUSR,
- osb->osb_debug_root,
- osb,
- &ocfs2_dlm_debug_fops);
+ debugfs_create_file("locking_state", S_IFREG|S_IRUSR,
+ osb->osb_debug_root, osb, &ocfs2_dlm_debug_fops);
- dlm_debug->d_locking_filter = debugfs_create_u32("locking_filter",
- 0600,
- osb->osb_debug_root,
- &dlm_debug->d_filter_secs);
+ debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
+ &dlm_debug->d_filter_secs);
}
static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
{
struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
- if (dlm_debug) {
- debugfs_remove(dlm_debug->d_locking_state);
- debugfs_remove(dlm_debug->d_locking_filter);
+ if (dlm_debug)
ocfs2_put_dlm_debug(dlm_debug);
- }
}
int ocfs2_dlm_init(struct ocfs2_super *osb)
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index e66a249fe07c..e3e2d1b2af51 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -590,8 +590,7 @@ int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
*extent_flags = rec->e_flags;
}
out:
- if (eb_bh)
- brelse(eb_bh);
+ brelse(eb_bh);
return ret;
}
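These dlmglue and extent_map hunks rely on brelse() being a no-op for a NULL buffer head, which makes the if (bh) guards redundant. Paraphrased from include/linux/buffer_head.h:

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}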
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 4435df3e5adb..2e982db3e1ae 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -706,7 +706,9 @@ leave:
* Thus, we need to explicitly order the zeroed pages.
*/
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
- struct buffer_head *di_bh)
+ struct buffer_head *di_bh,
+ loff_t start_byte,
+ loff_t length)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle = NULL;
@@ -722,7 +724,7 @@ static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
goto out;
}
- ret = ocfs2_jbd2_file_inode(handle, inode);
+ ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -761,7 +763,9 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
BUG_ON(abs_from & (inode->i_blkbits - 1));
- handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
+ handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
+ abs_from,
+ abs_to - abs_from);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
@@ -2126,7 +2130,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = d_inode(dentry);
struct buffer_head *di_bh = NULL;
- loff_t end;
/*
* We start with a read level meta lock and only jump to an ex
@@ -2190,8 +2193,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
}
}
- end = pos + count;
-
ret = ocfs2_check_range_for_refcount(inode, pos, count);
if (ret == 1) {
ocfs2_inode_unlock(inode, meta_level);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 7ad9d6590818..7c9dfd50c1c1 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -534,7 +534,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
*/
mlog_bug_on_msg(!!(fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) !=
!!(args->fi_flags & OCFS2_FI_FLAG_SYSFILE),
- "Inode %llu: system file state is ambigous\n",
+ "Inode %llu: system file state is ambiguous\n",
(unsigned long long)args->fi_blkno);
if (S_ISCHR(le16_to_cpu(fe->i_mode)) ||
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index c0fe6ed08ab1..3103ba7f97a2 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -144,7 +144,6 @@ static inline void ocfs2_ci_set_new(struct ocfs2_super *osb,
void ocfs2_orphan_scan_init(struct ocfs2_super *osb);
void ocfs2_orphan_scan_start(struct ocfs2_super *osb);
void ocfs2_orphan_scan_stop(struct ocfs2_super *osb);
-void ocfs2_orphan_scan_exit(struct ocfs2_super *osb);
void ocfs2_complete_recovery(struct work_struct *work);
void ocfs2_wait_for_recovery(struct ocfs2_super *osb);
@@ -232,8 +231,8 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
* ocfs2_journal_access_*() unless you intend to
* manage the checksum by hand.
* ocfs2_journal_dirty - Mark a journalled buffer as having dirty data.
- * ocfs2_jbd2_file_inode - Mark an inode so that its data goes out before
- * the current handle commits.
+ * ocfs2_jbd2_inode_add_write - Mark an inode with range so that its data goes
+ * out before the current handle commits.
*/
/* You must always start_trans with a number of buffs > 0, but it's
@@ -441,7 +440,7 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
* previous dirblock update in the free list */
static inline int ocfs2_link_credits(struct super_block *sb)
{
- return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
+ return 2 * OCFS2_INODE_UPDATE_CREDITS + 4 +
ocfs2_quota_trans_credits(sb);
}
@@ -575,37 +574,12 @@ static inline int ocfs2_calc_bg_discontig_credits(struct super_block *sb)
return ocfs2_extent_recs_per_gd(sb);
}
-static inline int ocfs2_calc_tree_trunc_credits(struct super_block *sb,
- unsigned int clusters_to_del,
- struct ocfs2_dinode *fe,
- struct ocfs2_extent_list *last_el)
+static inline int ocfs2_jbd2_inode_add_write(handle_t *handle, struct inode *inode,
+ loff_t start_byte, loff_t length)
{
- /* for dinode + all headers in this pass + update to next leaf */
- u16 next_free = le16_to_cpu(last_el->l_next_free_rec);
- u16 tree_depth = le16_to_cpu(fe->id2.i_list.l_tree_depth);
- int credits = 1 + tree_depth + 1;
- int i;
-
- i = next_free - 1;
- BUG_ON(i < 0);
-
- /* We may be deleting metadata blocks, so metadata alloc dinode +
- one desc. block for each possible delete. */
- if (tree_depth && next_free == 1 &&
- ocfs2_rec_clusters(last_el, &last_el->l_recs[i]) == clusters_to_del)
- credits += 1 + tree_depth;
-
- /* update to the truncate log. */
- credits += OCFS2_TRUNCATE_LOG_UPDATE;
-
- credits += ocfs2_quota_trans_credits(sb);
-
- return credits;
-}
-
-static inline int ocfs2_jbd2_file_inode(handle_t *handle, struct inode *inode)
-{
- return jbd2_journal_inode_add_write(handle, &OCFS2_I(inode)->ip_jinode);
+ return jbd2_journal_inode_ranged_write(handle,
+ &OCFS2_I(inode)->ip_jinode,
+ start_byte, length);
}
static inline int ocfs2_begin_ordered_truncate(struct inode *inode,
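/*
 * Editorial sketch, not part of the patch: callers of the new ranged
 * helper pass the exact byte span they dirtied, so jbd2 only orders
 * those pages before commit instead of the whole inode. A hypothetical
 * caller, mirroring the zeroing path in fs/ocfs2/file.c above:
 */
static int example_order_zeroed_range(handle_t *handle, struct inode *inode,
				      u64 abs_from, u64 abs_to)
{
	return ocfs2_jbd2_inode_add_write(handle, inode, abs_from,
					  abs_to - abs_from);
}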
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 6f8e1c4fdb9c..8ea51cf27b97 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -2486,7 +2486,6 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
struct inode *inode = NULL;
struct inode *orphan_dir = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
- struct ocfs2_dinode *di = NULL;
handle_t *handle = NULL;
char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
struct buffer_head *parent_di_bh = NULL;
@@ -2552,7 +2551,6 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
goto leave;
}
- di = (struct ocfs2_dinode *)new_di_bh->b_data;
status = ocfs2_orphan_add(osb, handle, inode, new_di_bh, orphan_name,
&orphan_insert, orphan_dir, false);
if (status < 0) {
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index fddbbd60f434..9150cfa4df7d 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -223,8 +223,6 @@ struct ocfs2_orphan_scan {
struct ocfs2_dlm_debug {
struct kref d_refcnt;
- struct dentry *d_locking_state;
- struct dentry *d_locking_filter;
u32 d_filter_secs;
struct list_head d_lockres_tracking;
};
@@ -401,7 +399,6 @@ struct ocfs2_super
struct ocfs2_dlm_debug *osb_dlm_debug;
struct dentry *osb_debug_root;
- struct dentry *osb_ctxt;
wait_queue_head_t recovery_event;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8b2f39506648..c81e86c62380 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1080,10 +1080,8 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
ocfs2_debugfs_root);
- osb->osb_ctxt = debugfs_create_file("fs_state", S_IFREG|S_IRUSR,
- osb->osb_debug_root,
- osb,
- &ocfs2_osb_debug_fops);
+ debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root,
+ osb, &ocfs2_osb_debug_fops);
if (ocfs2_meta_ecc(osb))
ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats,
@@ -1861,8 +1859,6 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
kset_unregister(osb->osb_dev_kset);
- debugfs_remove(osb->osb_ctxt);
-
/* Orphan scan should be stopped as early as possible */
ocfs2_orphan_scan_stop(osb);
@@ -1918,7 +1914,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
ocfs2_dlm_shutdown(osb, hangup_needed);
ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
- debugfs_remove(osb->osb_debug_root);
+ debugfs_remove_recursive(osb->osb_debug_root);
if (hangup_needed)
ocfs2_cluster_hangup(osb->uuid_str, strlen(osb->uuid_str));
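/*
 * Editorial sketch, not part of the patch: debugfs_create_*() return
 * values need no checking, and removing the per-mount directory
 * recursively drops every file created under it, so the individual
 * dentry pointers can go away. Hypothetical teardown:
 */
static void example_debugfs_teardown(struct dentry *debug_root)
{
	/* removes fs_state, locking_state, locking_filter, ... */
	debugfs_remove_recursive(debug_root);
}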
diff --git a/fs/open.c b/fs/open.c
index a59abe3c669a..b62f5c0923a8 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -776,7 +776,7 @@ static int do_dentry_open(struct file *f,
f->f_mode |= FMODE_ATOMIC_POS;
f->f_op = fops_get(inode->i_fop);
- if (unlikely(WARN_ON(!f->f_op))) {
+ if (WARN_ON(!f->f_op)) {
error = -ENODEV;
goto cleanup_all;
}
@@ -818,6 +818,14 @@ static int do_dentry_open(struct file *f,
if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
return -EINVAL;
}
+
+ /*
+ * XXX: Huge page cache doesn't support writing yet. Drop all page
+ * cache for this file before processing writes.
+ */
+ if ((f->f_mode & FMODE_WRITE) && filemap_nr_thps(inode->i_mapping))
+ truncate_pagecache(inode, 0);
+
return 0;
cleanup_all:
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 465ea0153b2a..ac9247371871 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -8,7 +8,6 @@
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/percpu.h>
-#include <linux/quicklist.h>
#include <linux/seq_file.h>
#include <linux/swap.h>
#include <linux/vmstat.h>
@@ -106,9 +105,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
global_zone_page_state(NR_KERNEL_STACK_KB));
show_val_kb(m, "PageTables: ",
global_zone_page_state(NR_PAGETABLE));
-#ifdef CONFIG_QUICKLIST
- show_val_kb(m, "Quicklists: ", quicklist_total_size());
-#endif
show_val_kb(m, "NFS_Unstable: ",
global_node_page_state(NR_UNSTABLE_NFS));
@@ -136,6 +132,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
show_val_kb(m, "ShmemPmdMapped: ",
global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
+ show_val_kb(m, "FileHugePages: ",
+ global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
+ show_val_kb(m, "FilePmdMapped: ",
+ global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
#endif
#ifdef CONFIG_CMA
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index bf43d1d60059..9442631fd4af 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -417,6 +417,7 @@ struct mem_size_stats {
unsigned long lazyfree;
unsigned long anonymous_thp;
unsigned long shmem_thp;
+ unsigned long file_thp;
unsigned long swap;
unsigned long shared_hugetlb;
unsigned long private_hugetlb;
@@ -461,7 +462,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
static void smaps_account(struct mem_size_stats *mss, struct page *page,
bool compound, bool young, bool dirty, bool locked)
{
- int i, nr = compound ? 1 << compound_order(page) : 1;
+ int i, nr = compound ? compound_nr(page) : 1;
unsigned long size = nr * PAGE_SIZE;
/*
@@ -588,7 +589,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
else if (is_zone_device_page(page))
/* pass */;
else
- VM_BUG_ON_PAGE(1, page);
+ mss->file_thp += HPAGE_PMD_SIZE;
smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
}
#else
@@ -809,6 +810,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
+ SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp);
SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
mss->private_hugetlb >> 10, 7);
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index e16fb8f2049e..273ee82d8aa9 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -88,7 +88,7 @@ static inline void mangle(struct seq_file *m, const char *s)
static void show_type(struct seq_file *m, struct super_block *sb)
{
mangle(m, sb->s_type->name);
- if (sb->s_subtype && sb->s_subtype[0]) {
+ if (sb->s_subtype) {
seq_putc(m, '.');
mangle(m, sb->s_subtype);
}
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 9c02d96d3a42..4075e41408b4 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -239,10 +239,8 @@ static int balance_leaf_when_delete_left(struct tree_balance *tb)
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int item_pos = PATH_LAST_POSITION(tb->tb_path);
struct buffer_info bi;
int n;
- struct item_head *ih;
RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
"vs- 12000: level: wrong FR %z", tb->FR[0]);
@@ -251,7 +249,6 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
"PAP-12010: tree can not be empty");
- ih = item_head(tbS0, item_pos);
buffer_info_init_tbS0(tb, &bi);
/* Delete or truncate the item */
@@ -298,7 +295,6 @@ static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
/* part of new item falls into L[0] */
int new_item_len, shift;
- int version;
ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
@@ -317,8 +313,6 @@ static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
min_t(int, tb->zeroes_num, ih_item_len(ih)));
- version = ih_version(ih);
-
/*
* Calculate key component, item length and body to
* insert into S[0]
@@ -632,7 +626,6 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
int n = B_NR_ITEMS(tbS0);
struct buffer_info bi;
- int ret;
/* new item or part of it doesn't fall into R[0] */
if (n - tb->rnum[0] >= tb->item_pos) {
@@ -646,13 +639,11 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {
loff_t old_key_comp, old_len, r_zeroes_number;
const char *r_body;
- int version, shift;
+ int shift;
loff_t offset;
leaf_shift_right(tb, tb->rnum[0] - 1, -1);
- version = ih_version(ih);
-
/* Remember key component and item length */
old_key_comp = le_ih_k_offset(ih);
old_len = ih_item_len(ih);
@@ -698,7 +689,7 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
/* whole new item falls into R[0] */
/* Shift rnum[0]-1 items to R[0] */
- ret = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
+ leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
/* Insert new item into R[0] */
buffer_info_init_right(tb, &bi);
@@ -950,14 +941,12 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
int old_key_comp, old_len, r_zeroes_number;
const char *r_body;
- int version;
/* Move snum[i]-1 items from S[0] to S_new[i] */
leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
tb->S_new[i]);
/* Remember key component and item length */
- version = ih_version(ih);
old_key_comp = le_ih_k_offset(ih);
old_len = ih_item_len(ih);
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 6b0ddb2a9091..117092224111 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -376,7 +376,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
int to, int to_bytes, short *snum012, int flow)
{
int i;
- int cur_free;
int units;
struct virtual_node *vn = tb->tb_vn;
int total_node_size, max_node_size, current_item_size;
@@ -438,7 +437,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
/* leaf level */
needed_nodes = 1;
total_node_size = 0;
- cur_free = max_node_size;
/* start from 'from'-th item */
start_item = from;
@@ -1734,14 +1732,12 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
* and Fh is its father.
*/
struct buffer_head *Sh, *Fh;
- int maxsize, ret;
+ int ret;
int lfree, rfree /* free space in L and R */ ;
Sh = PATH_H_PBUFFER(tb->tb_path, h);
Fh = PATH_H_PPARENT(tb->tb_path, h);
- maxsize = MAX_CHILD_SIZE(Sh);
-
/*
* using tb->insert_size[h], which is negative in this case,
* create_virtual_node calculates:
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 4517a1394c6f..4b3e3e73b512 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -891,7 +891,6 @@ static int flush_older_commits(struct super_block *s,
struct list_head *entry;
unsigned int trans_id = jl->j_trans_id;
unsigned int other_trans_id;
- unsigned int first_trans_id;
find_first:
/*
@@ -914,8 +913,6 @@ find_first:
return 0;
}
- first_trans_id = first_jl->j_trans_id;
-
entry = &first_jl->j_list;
while (1) {
other_jl = JOURNAL_LIST_ENTRY(entry);
@@ -1351,7 +1348,7 @@ static int flush_journal_list(struct super_block *s,
struct reiserfs_journal_list *jl, int flushall)
{
struct reiserfs_journal_list *pjl;
- struct reiserfs_journal_cnode *cn, *last;
+ struct reiserfs_journal_cnode *cn;
int count;
int was_jwait = 0;
int was_dirty = 0;
@@ -1509,7 +1506,6 @@ static int flush_journal_list(struct super_block *s,
b_blocknr, __func__);
}
free_cnode:
- last = cn;
cn = cn->next;
if (saved_bh) {
/*
@@ -1792,7 +1788,6 @@ static int flush_used_journal_lists(struct super_block *s,
{
unsigned long len = 0;
unsigned long cur_len;
- int ret;
int i;
int limit = 256;
struct reiserfs_journal_list *tjl;
@@ -1829,9 +1824,9 @@ static int flush_used_journal_lists(struct super_block *s,
* transactions, but only bother if we've actually spanned
* across multiple lists
*/
- if (flush_jl != jl) {
- ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
- }
+ if (flush_jl != jl)
+ kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
+
flush_journal_list(s, flush_jl, 1);
put_journal_list(s, flush_jl);
put_journal_list(s, jl);
@@ -1911,7 +1906,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
struct super_block *sb, int error)
{
struct reiserfs_transaction_handle myth;
- int flushed = 0;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
/*
@@ -1933,7 +1927,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
1);
journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
do_journal_end(&myth, FLUSH_ALL);
- flushed = 1;
}
}
@@ -3444,9 +3437,8 @@ static int remove_from_transaction(struct super_block *sb,
if (cn == journal->j_last) {
journal->j_last = cn->prev;
}
- if (bh)
- remove_journal_hash(sb, journal->j_hash_table, NULL,
- bh->b_blocknr, 0);
+ remove_journal_hash(sb, journal->j_hash_table, NULL,
+ bh->b_blocknr, 0);
clear_buffer_journaled(bh); /* don't log this one */
if (!already_cleaned) {
@@ -3988,7 +3980,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
struct buffer_head *c_bh; /* commit bh */
struct buffer_head *d_bh; /* desc bh */
int cur_write_start = 0; /* start index of current log write */
- int old_start;
int i;
int flush;
int wait_on_commit;
@@ -4245,7 +4236,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
journal->j_num_work_lists++;
/* reset journal values for the next transaction */
- old_start = journal->j_start;
journal->j_start =
(journal->j_start + journal->j_len +
2) % SB_ONDISK_JOURNAL_SIZE(sb);
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index f5cebd70d903..7f868569d4d0 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -1322,7 +1322,7 @@ void leaf_paste_entries(struct buffer_info *bi,
char *item;
struct reiserfs_de_head *deh;
char *insert_point;
- int i, old_entry_num;
+ int i;
struct buffer_head *bh = bi->bi_bh;
if (new_entry_count == 0)
@@ -1362,7 +1362,6 @@ void leaf_paste_entries(struct buffer_info *bi,
put_deh_location(&deh[i],
deh_location(&deh[i]) + paste_size);
- old_entry_num = ih_entry_count(ih);
put_ih_entry_count(ih, ih_entry_count(ih) + new_entry_count);
/* prepare space for pasted records */
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
index 415d66ca87d1..34baf5c0f265 100644
--- a/fs/reiserfs/objectid.c
+++ b/fs/reiserfs/objectid.c
@@ -183,13 +183,12 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
int old_max = sb_oid_maxsize(disk_sb);
struct reiserfs_super_block_v1 *disk_sb_v1;
- __le32 *objectid_map, *new_objectid_map;
+ __le32 *objectid_map;
int i;
disk_sb_v1 =
(struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
objectid_map = (__le32 *) (disk_sb_v1 + 1);
- new_objectid_map = (__le32 *) (disk_sb + 1);
if (cur_size > new_size) {
/*
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 9fed1c05f1f4..500f2000eb41 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -746,9 +746,6 @@ static void check_leaf_block_head(struct buffer_head *bh)
static void check_internal_block_head(struct buffer_head *bh)
{
- struct block_head *blkh;
-
- blkh = B_BLK_HEAD(bh);
if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT))
reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 0037aea97d39..da9ebe33882b 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -593,7 +593,6 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
struct buffer_head *bh;
struct path_element *last_element;
int node_level, retval;
- int right_neighbor_of_leaf_node;
int fs_gen;
struct buffer_head *reada_bh[SEARCH_BY_KEY_READA];
b_blocknr_t reada_blocks[SEARCH_BY_KEY_READA];
@@ -614,8 +613,6 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
pathrelse(search_path);
- right_neighbor_of_leaf_node = 0;
-
/*
* With each iteration of this loop we search through the items in the
* current node, and calculate the next current node(next path element)
@@ -701,7 +698,6 @@ io_error:
*/
block_number = SB_ROOT_BLOCK(sb);
expected_level = -1;
- right_neighbor_of_leaf_node = 0;
/* repeat search from the root */
continue;
diff --git a/fs/super.c b/fs/super.c
index 8020974b2a68..f627b7c53d2b 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1555,11 +1555,6 @@ int vfs_get_tree(struct fs_context *fc)
sb = fc->root->d_sb;
WARN_ON(!sb->s_bdi);
- if (fc->subtype && !sb->s_subtype) {
- sb->s_subtype = fc->subtype;
- fc->subtype = NULL;
- }
-
/*
* Write barrier is for super_cache_count(). We place it before setting
* SB_BORN as the data dependency between the two functions is the
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index fe6d804a38dc..f9fd18670e22 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1272,21 +1272,23 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
}
static __always_inline int validate_range(struct mm_struct *mm,
- __u64 start, __u64 len)
+ __u64 *start, __u64 len)
{
__u64 task_size = mm->task_size;
- if (start & ~PAGE_MASK)
+ *start = untagged_addr(*start);
+
+ if (*start & ~PAGE_MASK)
return -EINVAL;
if (len & ~PAGE_MASK)
return -EINVAL;
if (!len)
return -EINVAL;
- if (start < mmap_min_addr)
+ if (*start < mmap_min_addr)
return -EINVAL;
- if (start >= task_size)
+ if (*start >= task_size)
return -EINVAL;
- if (len > task_size - start)
+ if (len > task_size - *start)
return -EINVAL;
return 0;
}
@@ -1336,7 +1338,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
goto out;
}
- ret = validate_range(mm, uffdio_register.range.start,
+ ret = validate_range(mm, &uffdio_register.range.start,
uffdio_register.range.len);
if (ret)
goto out;
@@ -1525,7 +1527,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
goto out;
- ret = validate_range(mm, uffdio_unregister.start,
+ ret = validate_range(mm, &uffdio_unregister.start,
uffdio_unregister.len);
if (ret)
goto out;
@@ -1676,7 +1678,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
goto out;
- ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
+ ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
if (ret)
goto out;
@@ -1716,7 +1718,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
sizeof(uffdio_copy)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
+ ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
if (ret)
goto out;
/*
@@ -1772,7 +1774,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
sizeof(uffdio_zeropage)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
+ ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
uffdio_zeropage.range.len);
if (ret)
goto out;
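/*
 * Editorial sketch, not part of the patch: validate_range() now takes
 * a pointer so the untagged address is written back and reused by the
 * rest of the ioctl. A hypothetical caller:
 */
static int example_validate(struct mm_struct *mm, struct uffdio_range *range)
{
	/* range->start may carry a hardware tag (e.g. arm64 TBI/MTE);
	 * it is untagged in place before the alignment/limit checks. */
	return validate_range(mm, &range->start, range->len);
}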
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 58fa85cec325..d6ed5d2c07c2 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -81,9 +81,10 @@ typedef struct xfs_alloc_arg {
/*
* Defines for datatype
*/
-#define XFS_ALLOC_INITIAL_USER_DATA (1 << 0)/* special case start of file */
-#define XFS_ALLOC_USERDATA_ZERO (1 << 1)/* zero extent on allocation */
-#define XFS_ALLOC_NOBUSY (1 << 2)/* Busy extents not allowed */
+#define XFS_ALLOC_USERDATA (1 << 0)/* allocation is for user data*/
+#define XFS_ALLOC_INITIAL_USER_DATA (1 << 1)/* special case start of file */
+#define XFS_ALLOC_USERDATA_ZERO (1 << 2)/* zero extent on allocation */
+#define XFS_ALLOC_NOBUSY (1 << 3)/* Busy extents not allowed */
static inline bool
xfs_alloc_is_userdata(int datatype)
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 054b4ce30033..4edc25a2ba80 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4042,8 +4042,12 @@ xfs_bmapi_allocate(
*/
if (!(bma->flags & XFS_BMAPI_METADATA)) {
bma->datatype = XFS_ALLOC_NOBUSY;
- if (whichfork == XFS_DATA_FORK && bma->offset == 0)
- bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
+ if (whichfork == XFS_DATA_FORK) {
+ if (bma->offset == 0)
+ bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
+ else
+ bma->datatype |= XFS_ALLOC_USERDATA;
+ }
if (bma->flags & XFS_BMAPI_ZERO)
bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
}
@@ -5621,6 +5625,11 @@ xfs_bmse_merge(
if (error)
return error;
+ /* change to extent format if required after extent removal */
+ error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
+ if (error)
+ return error;
+
done:
xfs_iext_remove(ip, icur, 0);
xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index a08dd8f40346..ac6cdca63e15 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -928,7 +928,7 @@ xfs_log_sb(
xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
- xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
+ xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
}
/*
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index a43d1813c4ff..5533e48e605d 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -97,7 +97,6 @@ xchk_allocbt_rec(
xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
xfs_agblock_t bno;
xfs_extlen_t len;
- int error = 0;
bno = be32_to_cpu(rec->alloc.ar_startblock);
len = be32_to_cpu(rec->alloc.ar_blockcount);
@@ -109,7 +108,7 @@ xchk_allocbt_rec(
xchk_allocbt_xref(bs->sc, bno, len);
- return error;
+ return 0;
}
/* Scrub the freespace btrees for some AG. */
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 120ef99d09e8..21c243622a79 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -2097,7 +2097,7 @@ xfs_verify_magic(
int idx;
idx = xfs_sb_version_hascrc(&mp->m_sb);
- if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])))
+ if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
return false;
return dmagic == bp->b_ops->magic[idx];
}
@@ -2115,7 +2115,7 @@ xfs_verify_magic16(
int idx;
idx = xfs_sb_version_hascrc(&mp->m_sb);
- if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])))
+ if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
return false;
return dmagic == bp->b_ops->magic16[idx];
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index d952d5962e93..1ffb179f35d2 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -370,21 +370,23 @@ static int
xfs_dio_write_end_io(
struct kiocb *iocb,
ssize_t size,
+ int error,
unsigned flags)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct xfs_inode *ip = XFS_I(inode);
loff_t offset = iocb->ki_pos;
unsigned int nofs_flag;
- int error = 0;
trace_xfs_end_io_direct_write(ip, offset, size);
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
- if (size <= 0)
- return size;
+ if (error)
+ return error;
+ if (!size)
+ return 0;
/*
* Capture amount written on completion as we can't reliably account
@@ -441,6 +443,10 @@ out:
return error;
}
+static const struct iomap_dio_ops xfs_dio_write_ops = {
+ .end_io = xfs_dio_write_end_io,
+};
+
/*
* xfs_file_dio_aio_write - handle direct IO writes
*
@@ -541,7 +547,7 @@ xfs_file_dio_aio_write(
}
trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
- ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+ ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, &xfs_dio_write_ops);
/*
* If unaligned, this is the only IO in-flight. If it has not yet
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index ddd0bf7a4740..f1bc88f4367c 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -63,19 +63,6 @@ static const struct sysfs_ops xfs_sysfs_ops = {
.store = xfs_sysfs_object_store,
};
-/*
- * xfs_mount kobject. The mp kobject also serves as the per-mount parent object
- * that is identified by the fsname under sysfs.
- */
-
-static inline struct xfs_mount *
-to_mp(struct kobject *kobject)
-{
- struct xfs_kobj *kobj = to_kobj(kobject);
-
- return container_of(kobj, struct xfs_mount, m_kobj);
-}
-
static struct attribute *xfs_mp_attrs[] = {
NULL,
};
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7357a3c942a0..384b5c835ced 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -10,6 +10,7 @@
#define BUGFLAG_WARNING (1 << 0)
#define BUGFLAG_ONCE (1 << 1)
#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */
#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
@@ -61,18 +62,6 @@ struct bug_entry {
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
-#ifdef __WARN_FLAGS
-#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint))
-#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
-
-#define WARN_ON_ONCE(condition) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_ONCE_TAINT(TAINT_WARN); \
- unlikely(__ret_warn_on); \
-})
-#endif
-
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
* significant kernel issues that need prompt attention if they should ever
@@ -89,27 +78,27 @@ struct bug_entry {
*
* Use the versions with printk format strings to provide better diagnostics.
*/
-#ifndef __WARN_TAINT
-extern __printf(3, 4)
-void warn_slowpath_fmt(const char *file, const int line,
- const char *fmt, ...);
+#ifndef __WARN_FLAGS
extern __printf(4, 5)
-void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
- const char *fmt, ...);
-extern void warn_slowpath_null(const char *file, const int line);
-#define WANT_WARN_ON_SLOWPATH
-#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
-#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
-#define __WARN_printf_taint(taint, arg...) \
- warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
+void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
+ const char *fmt, ...);
+#define __WARN() __WARN_printf(TAINT_WARN, NULL)
+#define __WARN_printf(taint, arg...) \
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN() do { \
- printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
-} while (0)
-#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
-#define __WARN_printf_taint(taint, arg...) \
- do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
+#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#define __WARN_printf(taint, arg...) do { \
+ __warn_printk(arg); \
+ __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ } while (0)
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(BUGFLAG_ONCE | \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
#endif
/* used internally by panic.c */
@@ -132,7 +121,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- __WARN_printf(format); \
+ __WARN_printf(TAINT_WARN, format); \
unlikely(__ret_warn_on); \
})
#endif
@@ -140,7 +129,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#define WARN_TAINT(condition, taint, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- __WARN_printf_taint(taint, format); \
+ __WARN_printf(taint, format); \
unlikely(__ret_warn_on); \
})
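/*
 * Editorial sketch, not part of the patch: the consolidation keeps the
 * user-visible API identical. A hypothetical call site:
 */
static int example_check_state(void *cookie)
{
	/* On __WARN_FLAGS architectures this now encodes BUGFLAG_ONCE
	 * in the bug table entry; either way it taints with TAINT_WARN
	 * and warns at most once. */
	if (WARN_ON_ONCE(!cookie))
		return -EINVAL;
	return 0;
}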
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 8476175c07e7..73f7421413cb 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -49,7 +49,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
* @mm: the mm_struct of the current context
* @gfp: GFP flags to use for the allocation
*
- * Allocates a page and runs the pgtable_page_ctor().
+ * Allocates a page and runs the pgtable_pte_page_ctor().
*
* This function is intended for architectures that need
* anything beyond simple page allocation or must have custom GFP flags.
@@ -63,7 +63,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
pte = alloc_page(gfp);
if (!pte)
return NULL;
- if (!pgtable_page_ctor(pte)) {
+ if (!pgtable_pte_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
@@ -76,7 +76,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
* pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_page_ctor().
+ * Allocates a page and runs the pgtable_pte_page_ctor().
*
* Return: `struct page` initialized as page table or %NULL on error
*/
@@ -98,15 +98,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
*/
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
- pgtable_page_dtor(pte_page);
+ pgtable_pte_page_dtor(pte_page);
__free_page(pte_page);
}
-#else /* CONFIG_MMU */
-
-/* This is enough for a nommu architecture */
-#define check_pgt_cache() do { } while (0)
-
#endif /* CONFIG_MMU */
#endif /* __ASM_GENERIC_PGALLOC_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 75d9d68a6de7..818691846c90 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1002,9 +1002,8 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
* need this). If THP is not enabled, the pmd can't go away under the
* code even if MADV_DONTNEED runs, but if THP is enabled we need to
* run a pmd_trans_unstable before walking the ptes after
- * split_huge_page_pmd returns (because it may have run when the pmd
- * become null, but then a page fault can map in a THP and not a
- * regular page).
+ * split_huge_pmd returns (because it may have run when the pmd become
+ * null, but then a page fault can map in a THP and not a regular page).
*/
static inline int pmd_trans_unstable(pmd_t *pmd)
{
@@ -1126,7 +1125,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
static inline void init_espfix_bsp(void) { }
#endif
-extern void __init pgd_cache_init(void);
+extern void __init pgtable_cache_init(void);
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 7d14c11bdc0a..408b6f4e63c0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -285,12 +285,12 @@ struct drm_crtc_state {
u32 target_vblank;
/**
- * @pageflip_flags:
+ * @async_flip:
*
- * DRM_MODE_PAGE_FLIP_* flags, as passed to the page flip ioctl.
- * Zero in any other case.
+ * This is set when DRM_MODE_PAGE_FLIP_ASYNC is set in the legacy
+ * PAGE_FLIP IOCTL. It's not wired up for the atomic IOCTL itself yet.
*/
- u32 pageflip_flags;
+ bool async_flip;
/**
* @vrr_enabled:
@@ -1108,7 +1108,7 @@ struct drm_crtc {
/**
* @self_refresh_data: Holds the state for the self refresh helpers
*
- * Initialized via drm_self_refresh_helper_register().
+ * Initialized via drm_self_refresh_helper_init().
*/
struct drm_self_refresh_data *self_refresh_data;
};
diff --git a/include/drm/drm_self_refresh_helper.h b/include/drm/drm_self_refresh_helper.h
index 397a583ccca7..5b79d253fb46 100644
--- a/include/drm/drm_self_refresh_helper.h
+++ b/include/drm/drm_self_refresh_helper.h
@@ -12,9 +12,9 @@ struct drm_atomic_state;
struct drm_crtc;
void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state);
+void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
+ unsigned int commit_time_ms);
-int drm_self_refresh_helper_init(struct drm_crtc *crtc,
- unsigned int entry_delay_ms);
-
+int drm_self_refresh_helper_init(struct drm_crtc *crtc);
void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc);
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d9db32fb75ee..f3ea78b0c91c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1524,10 +1524,14 @@ struct blk_integrity_iter {
};
typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef void (integrity_prepare_fn) (struct request *);
+typedef void (integrity_complete_fn) (struct request *, unsigned int);
struct blk_integrity_profile {
integrity_processing_fn *generate_fn;
integrity_processing_fn *verify_fn;
+ integrity_prepare_fn *prepare_fn;
+ integrity_complete_fn *complete_fn;
const char *name;
};
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 82156da3c650..b9dbda1c26aa 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -293,6 +293,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private);
struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
+extern void ceph_reset_client_addr(struct ceph_client *client);
extern int __ceph_open_session(struct ceph_client *client,
unsigned long started);
extern int ceph_open_session(struct ceph_client *client);
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 23895d178149..c4458dc6a757 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -337,6 +337,7 @@ extern void ceph_msgr_flush(void);
extern void ceph_messenger_init(struct ceph_messenger *msgr,
struct ceph_entity_addr *myaddr);
extern void ceph_messenger_fini(struct ceph_messenger *msgr);
+extern void ceph_messenger_reset_nonce(struct ceph_messenger *msgr);
extern void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index b4d134d3312a..dbb8a6959a73 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -109,6 +109,7 @@ extern int ceph_monmap_contains(struct ceph_monmap *m,
extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
extern void ceph_monc_stop(struct ceph_mon_client *monc);
+extern void ceph_monc_reopen_session(struct ceph_mon_client *monc);
enum {
CEPH_SUB_MONMAP = 0,
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index ad7fe5d10dcd..eaffbdddf89a 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -381,6 +381,7 @@ extern void ceph_osdc_cleanup(void);
extern int ceph_osdc_init(struct ceph_osd_client *osdc,
struct ceph_client *client);
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
+extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc);
extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
@@ -388,6 +389,7 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err);
+void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc);
#define osd_req_op_data(oreq, whch, typ, fld) \
({ \
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 9569e7c786d3..4b898cdbdf05 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -129,11 +129,8 @@ static inline bool compaction_failed(enum compact_result result)
return false;
}
-/*
- * Compaction has backed off for some reason. It might be throttling or
- * lock contention. Retrying is still worthwhile.
- */
-static inline bool compaction_withdrawn(enum compact_result result)
+/* Compaction needs reclaim to be performed first, so it can continue. */
+static inline bool compaction_needs_reclaim(enum compact_result result)
{
/*
* Compaction backed off due to watermark checks for order-0
@@ -142,6 +139,16 @@ static inline bool compaction_withdrawn(enum compact_result result)
if (result == COMPACT_SKIPPED)
return true;
+ return false;
+}
+
+/*
+ * Compaction has backed off for some reason after doing some work or none
+ * at all. It might be throttling or lock contention. Retrying might be still
+ * worthwhile, but with a higher priority if allowed.
+ */
+static inline bool compaction_withdrawn(enum compact_result result)
+{
/*
* If compaction is deferred for high-order allocations, it is
* because sync compaction recently failed. If this is the case
@@ -207,6 +214,11 @@ static inline bool compaction_failed(enum compact_result result)
return false;
}
+static inline bool compaction_needs_reclaim(enum compact_result result)
+{
+ return false;
+}
+
static inline bool compaction_withdrawn(enum compact_result result)
{
return true;
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b5a5a1ed9efd..78a73eba64dd 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -200,8 +200,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_wrap(cpu, mask, start) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
-#define for_each_cpu_and(cpu, mask, and) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#define for_each_cpu_and(cpu, mask1, mask2) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
#else
/**
* cpumask_first - get the first cpu in a cpumask
@@ -290,20 +290,20 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
/**
* for_each_cpu_and - iterate over every cpu in both masks
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the first cpumask pointer
- * @and: the second cpumask pointer
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
- * cpumask_and(&tmp, &mask, &and);
+ * cpumask_and(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_and(cpu, mask, and) \
+#define for_each_cpu_and(cpu, mask1, mask2) \
for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask), (and)), \
+ (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
(cpu) < nr_cpu_ids;)
#endif /* SMP */
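/*
 * Editorial sketch, not part of the patch: the rename to mask1/mask2
 * is purely cosmetic. A hypothetical user of the iterator:
 */
static int example_count_common_cpus(const struct cpumask *a,
				     const struct cpumask *b)
{
	int cpu, n = 0;

	for_each_cpu_and(cpu, a, b)	/* no temporary cpumask required */
		n++;
	return n;
}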
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 866268c2c6e3..b0c6b0d34d02 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -429,6 +429,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
* @i_pages: Cached pages.
* @gfp_mask: Memory allocation flags to use for allocating pages.
* @i_mmap_writable: Number of VM_SHARED mappings.
+ * @nr_thps: Number of THPs in the pagecache (non-shmem only).
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
* @nrpages: Number of page entries, protected by the i_pages lock.
@@ -446,6 +447,10 @@ struct address_space {
struct xarray i_pages;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ /* number of thp, only for non-shmem files */
+ atomic_t nr_thps;
+#endif
struct rb_root_cached i_mmap;
struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
@@ -2798,6 +2803,33 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
return errseq_sample(&mapping->wb_err);
}
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ return atomic_read(&mapping->nr_thps);
+#else
+ return 0;
+#endif
+}
+
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ atomic_inc(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(1);
+#endif
+}
+
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ atomic_dec(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(1);
+#endif
+}
+
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
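/*
 * Editorial sketch, not part of the patch: fs/open.c above consults the
 * new counter to drop read-only huge pages before the first writable
 * open, roughly:
 */
static void example_drop_thps_for_write(struct file *f, struct inode *inode)
{
	if ((f->f_mode & FMODE_WRITE) && filemap_nr_thps(inode->i_mapping))
		truncate_pagecache(inode, 0);	/* THP write support is absent */
}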
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 0424df7f6e6b..e5c14e2c53d3 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -95,7 +95,6 @@ struct fs_context {
const struct cred *cred; /* The mounter's credentials */
struct fc_log *log; /* Logging buffer */
const char *source; /* The source name (eg. dev path) */
- const char *subtype; /* The subtype to set on the superblock */
void *security; /* Linux S&M options */
void *s_fs_info; /* Proposed s_fs_info */
unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 45ede62aa85b..61c9ffd89b05 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -267,6 +267,15 @@ static inline bool thp_migration_supported(void)
return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
+static inline struct list_head *page_deferred_list(struct page *page)
+{
+ /*
+ * The global or memcg deferred list lives in the second tail page;
+ * the first word there is occupied by compound_head.
+ */
+ return &page[2].deferred_list;
+}
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edfca4278319..53fc34f930d0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
static inline struct hstate *page_hstate(struct page *page)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
- return size_to_hstate(PAGE_SIZE << compound_order(page));
+ return size_to_hstate(page_size(page));
}
static inline unsigned hstate_index_to_shift(unsigned index)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 2afe6fdc1dda..b4a017093b69 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -245,7 +245,10 @@ struct vmbus_channel_offer {
} pipe;
} u;
/*
- * The sub_channel_index is defined in win8.
+ * The sub_channel_index is defined in Win8: a value of zero means a
+ * primary channel and a value of non-zero means a sub-channel.
+ *
+ * Before Win8, the field is reserved, meaning it's always zero.
*/
u16 sub_channel_index;
u16 reserved3;
@@ -423,6 +426,9 @@ enum vmbus_channel_message_type {
CHANNELMSG_COUNT
};
+/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
+#define INVALID_RELID U32_MAX
+
struct vmbus_channel_message_header {
enum vmbus_channel_message_type msgtype;
u32 padding;
@@ -934,6 +940,11 @@ static inline bool is_hvsock_channel(const struct vmbus_channel *c)
VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}
+static inline bool is_sub_channel(const struct vmbus_channel *c)
+{
+ return c->offermsg.offer.sub_channel_index != 0;
+}
+
static inline void set_channel_affinity_state(struct vmbus_channel *c,
enum hv_numa_policy policy)
{
@@ -1149,6 +1160,9 @@ struct hv_driver {
int (*remove)(struct hv_device *);
void (*shutdown)(struct hv_device *);
+ int (*suspend)(struct hv_device *);
+ int (*resume)(struct hv_device *);
+
};
/* Base device object */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index c0a78c069117..1361637c369d 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -473,7 +473,7 @@ extern struct i2c_client *
devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address);
extern struct i2c_client *
-i2c_new_secondary_device(struct i2c_client *client,
+i2c_new_ancillary_device(struct i2c_client *client,
const char *name,
u16 default_addr);
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index 855476145fe1..aaa8a0767aa3 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -30,26 +30,8 @@
\
/* Callbacks for augmented rbtree insert and remove */ \
\
-static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
-{ \
- ITTYPE max = ITLAST(node), subtree_last; \
- if (node->ITRB.rb_left) { \
- subtree_last = rb_entry(node->ITRB.rb_left, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- if (node->ITRB.rb_right) { \
- subtree_last = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- return max; \
-} \
- \
-RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
- ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
+RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \
+ ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \
\
/* Insert / remove interval nodes from the tree */ \
\
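/*
 * Editorial sketch, not part of the patch: RB_DECLARE_CALLBACKS_MAX
 * derives the same propagate/copy/rotate callbacks from a per-node
 * "last" accessor, so the hand-rolled compute_subtree_last above can
 * go. Hypothetical stand-alone use (names are illustrative):
 */
struct ex_range {
	struct rb_node rb;
	unsigned long start, last;	/* interval endpoints */
	unsigned long subtree_last;	/* cached max of ->last in subtree */
};

#define EX_LAST(n) ((n)->last)

RB_DECLARE_CALLBACKS_MAX(static, ex_augment, struct ex_range, rb,
			 unsigned long, subtree_last, EX_LAST)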
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index bc499ceae392..7aa5d6117936 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -188,10 +188,14 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
*/
#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */
#define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */
-typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
- unsigned flags);
+
+struct iomap_dio_ops {
+ int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
+ unsigned flags);
+};
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, iomap_dio_end_io_t end_io);
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops);
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
#ifdef CONFIG_SWAP
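/*
 * Editorial sketch, not part of the patch, mirroring the xfs_file.c
 * hunk above: completion handlers now hang off a const ops table and
 * receive the I/O error separately from the size:
 */
static int example_dio_end_io(struct kiocb *iocb, ssize_t size, int error,
			      unsigned flags)
{
	return error;	/* size and flags remain available for accounting */
}

static const struct iomap_dio_ops example_dio_ops = {
	.end_io = example_dio_end_io,
};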
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index df03825ad1a1..603fbc4e2f70 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1410,8 +1410,6 @@ extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
extern int jbd2_journal_force_commit(journal_t *);
extern int jbd2_journal_force_commit_nested(journal_t *);
-extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
-extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
extern int jbd2_journal_inode_ranged_write(handle_t *handle,
struct jbd2_inode *inode, loff_t start_byte,
loff_t length);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index f0b809258ed3..cc162f3e6461 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -183,6 +183,8 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len);
void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index fbf144aaa749..b072aeb1fd78 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -326,8 +326,10 @@ extern atomic_t kgdb_active;
(raw_smp_processor_id() == atomic_read(&kgdb_active))
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
+extern void kgdb_panic(const char *msg);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
+static inline void kgdb_panic(const char *msg) {}
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 082d1d2a5216..bc45ea1efbf7 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -15,6 +15,14 @@ extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
unsigned long vm_flags);
+#ifdef CONFIG_SHMEM
+extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
+#else
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr)
+{
+}
+#endif
#define khugepaged_enabled() \
(transparent_hugepage_flags & \
@@ -73,6 +81,10 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
{
return 0;
}
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr)
+{
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_KHUGEPAGED_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ad8f1a397ae4..9b60863429cc 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -128,9 +128,8 @@ struct mem_cgroup_per_node {
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
-#ifdef CONFIG_MEMCG_KMEM
struct memcg_shrinker_map __rcu *shrinker_map;
-#endif
+
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
@@ -331,6 +330,10 @@ struct mem_cgroup {
struct list_head event_list;
spinlock_t event_list_lock;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct deferred_split deferred_split_queue;
+#endif
+
struct mem_cgroup_per_node *nodeinfo[0];
/* WARNING: nodeinfo must be the last member here */
};
@@ -1311,6 +1314,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
} while ((memcg = parent_mem_cgroup(memcg)));
return false;
}
+
+extern int memcg_expand_shrinker_maps(int new_id);
+
+extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1319,6 +1327,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
return false;
}
+
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id)
+{
+}
#endif
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
@@ -1390,10 +1403,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-extern int memcg_expand_shrinker_maps(int new_id);
-
-extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id);
#else
static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
@@ -1435,8 +1444,6 @@ static inline void memcg_put_cache_ids(void)
{
}
-static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id) { }
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 02e633f3ede0..0ebb105eb261 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -25,7 +25,6 @@
struct memory_block {
unsigned long start_section_nr;
- unsigned long end_section_nr;
unsigned long state; /* serialized by the dev->lock */
int section_count; /* serialized by mem_sysfs_mutex */
int online_type; /* for passing data to online routine */
@@ -80,9 +79,9 @@ struct mem_section;
#define IPC_CALLBACK_PRI 10
#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
-static inline int memory_dev_init(void)
+static inline void memory_dev_init(void)
{
- return 0;
+ return;
}
static inline int register_memory_notifier(struct notifier_block *nb)
{
@@ -113,7 +112,7 @@ extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size);
void remove_memory_block_devices(unsigned long start, unsigned long size);
-extern int memory_dev_init(void);
+extern void memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
extern int memory_isolate_notify(unsigned long val, void *v);
extern struct memory_block *find_memory_block(struct mem_section *);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7cf955feb823..cc292273e6ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -805,6 +805,24 @@ static inline void set_compound_order(struct page *page, unsigned int order)
page[1].compound_order = order;
}
+/* Returns the number of pages in this potentially compound page. */
+static inline unsigned long compound_nr(struct page *page)
+{
+ return 1UL << compound_order(page);
+}
+
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+ return PAGE_SIZE << compound_order(page);
+}
+
+/* Returns the number of bits needed for the number of bytes in a page */
+static inline unsigned int page_shift(struct page *page)
+{
+ return PAGE_SHIFT + compound_order(page);
+}
+
void free_compound_page(struct page *page);
#ifdef CONFIG_MMU
@@ -1057,8 +1075,9 @@ static inline void put_user_page(struct page *page)
put_page(page);
}
-void put_user_pages_dirty(struct page **pages, unsigned long npages);
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+ bool make_dirty);
+
void put_user_pages(struct page **pages, unsigned long npages);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -1405,7 +1424,11 @@ extern void pagefault_out_of_memory(void);
extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
+#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
+#else
+static inline bool can_do_mlock(void) { return false; }
+#endif
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);
@@ -1926,7 +1949,7 @@ static inline void pgtable_init(void)
pgtable_cache_init();
}
-static inline bool pgtable_page_ctor(struct page *page)
+static inline bool pgtable_pte_page_ctor(struct page *page)
{
if (!ptlock_init(page))
return false;
@@ -1935,7 +1958,7 @@ static inline bool pgtable_page_ctor(struct page *page)
return true;
}
-static inline void pgtable_page_dtor(struct page *page)
+static inline void pgtable_pte_page_dtor(struct page *page)
{
ptlock_free(page);
__ClearPageTable(page);
@@ -2305,6 +2328,8 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
+unsigned long randomize_stack_top(unsigned long stack_top);
+
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2568,6 +2593,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_COW 0x4000 /* internal GUP flag */
#define FOLL_ANON 0x8000 /* don't do file mappings */
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
+#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
/*
* NOTE on FOLL_LONGTERM:
@@ -2845,5 +2871,12 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif
+extern int memcmp_pages(struct page *page1, struct page *page2);
+
+static inline int pages_identical(struct page *page1, struct page *page2)
+{
+ return !memcmp_pages(page1, page2);
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
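/*
 * Usage sketch (illustrative, not part of the patch): the new helpers let
 * callers drop open-coded compound_order() arithmetic such as
 * "1UL << compound_order(page)" or "PAGE_SIZE << compound_order(page)".
 * zero_whole_page() is a hypothetical caller.
 */
static void zero_whole_page(struct page *page)
{
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)  /* covers all subpages */
                clear_highpage(page + i);
}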
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0b739f360cec..5183e0d77dfa 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -138,6 +138,7 @@ struct page {
struct { /* Second tail page of compound page */
unsigned long _compound_pad_1; /* compound_head */
unsigned long _compound_pad_2;
+ /* For both global and memcg */
struct list_head deferred_list;
};
struct { /* Page table pages */
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index d7016dcb245e..c1bc6731125c 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -36,6 +36,10 @@ struct vmacache {
struct vm_area_struct *vmas[VMACACHE_SIZE];
};
+/*
+ * When updating this, please also update struct resident_page_types[] in
+ * kernel/fork.c
+ */
enum {
MM_FILEPAGES, /* Resident file mapping pages */
MM_ANONPAGES, /* Resident anonymous pages */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3f38c30d2f13..bda20282746b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -235,6 +235,8 @@ enum node_stat_item {
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
+ NR_FILE_THPS,
+ NR_FILE_PMDMAPPED,
NR_ANON_THPS,
NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_VMSCAN_WRITE,
@@ -677,6 +679,14 @@ struct zonelist {
extern struct page *mem_map;
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct deferred_split {
+ spinlock_t split_queue_lock;
+ struct list_head split_queue;
+ unsigned long split_queue_len;
+};
+#endif
+
/*
* On NUMA machines, each NUMA node would have a pg_data_t to describe
* it's memory layout. On UMA machines there is a single pglist_data which
@@ -756,9 +766,7 @@ typedef struct pglist_data {
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- spinlock_t split_queue_lock;
- struct list_head split_queue;
- unsigned long split_queue_len;
+ struct deferred_split deferred_split_queue;
#endif
/* Fields commonly accessed by the page reclaim scanner */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 0a11712a80e3..570a60c2f4f4 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -490,6 +490,9 @@ extern const struct file_operations nfs_dir_operations;
extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
+extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
+ struct nfs_fh *fh, struct nfs_fattr *fattr,
+ struct nfs4_label *label);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
struct nfs_fattr *fattr, struct nfs4_label *label);
extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 09592951725c..682fd465df06 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -18,6 +18,7 @@ struct page_ext_operations {
enum page_ext_flags {
PAGE_EXT_OWNER,
+ PAGE_EXT_OWNER_ACTIVE,
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c7552459a15f..37a4d9e32cd3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -333,6 +333,16 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
+static inline struct page *find_subpage(struct page *page, pgoff_t offset)
+{
+ if (PageHuge(page))
+ return page;
+
+ VM_BUG_ON_PAGE(PageTail(page), page);
+
+ return page + (offset & (compound_nr(page) - 1));
+}
+
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
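/*
 * Usage sketch (illustrative): page cache lookups return the head page of
 * a compound (e.g. THP) page; find_subpage() resolves the tail page that
 * backs a particular file offset, building on compound_nr() from above.
 */
struct page *page = find_get_entry(mapping, offset);

if (page && !xa_is_value(page))         /* skip shadow/swap entries */
        page = find_subpage(page, offset);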
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cefd374c47b1..c09d67edda3a 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -488,13 +488,6 @@ extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
- dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
-#else
-extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- const void *buf, size_t len);
-#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
#else
static inline void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
@@ -526,4 +519,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
}
#endif
+/**
+ * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ *
+ * Calls print_hex_dump(), with log level of KERN_DEBUG,
+ * rowsize of 16, groupsize of 1, and ASCII output included.
+ */
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
+ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
+
#endif
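/*
 * Usage sketch: with the dynamic-debug special case gone, this now always
 * expands to print_hex_dump_debug(): KERN_DEBUG level, caller-chosen
 * prefix, 16 bytes per row, 1-byte groups, ASCII column. buf/len are
 * hypothetical.
 */
print_hex_dump_bytes("fw hdr: ", DUMP_PREFIX_OFFSET, buf, len);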
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 24632a7a7d11..b2c9c460947d 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -262,7 +262,7 @@ struct pwm_ops {
int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_capture *result, unsigned long timeout);
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state);
+ const struct pwm_state *state);
void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state);
struct module *owner;
@@ -316,7 +316,7 @@ struct pwm_capture {
/* PWM user APIs */
struct pwm_device *pwm_request(int pwm_id, const char *label);
void pwm_free(struct pwm_device *pwm);
-int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
+int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
int pwm_adjust_config(struct pwm_device *pwm);
/**
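/*
 * Usage sketch: constifying the state pointer documents that ->apply()
 * implementations may inspect but never modify the requested state; the
 * consumer-side pattern is unchanged.
 */
struct pwm_state state;

pwm_get_state(pwm, &state);
state.period = 1000000;                 /* 1 ms, in nanoseconds */
state.duty_cycle = state.period / 2;
state.enabled = true;
pwm_apply_state(pwm, &state);           /* now takes a const pointer */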
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
deleted file mode 100644
index 034982c98c8b..000000000000
--- a/include/linux/quicklist.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_QUICKLIST_H
-#define LINUX_QUICKLIST_H
-/*
- * Fast allocations and disposal of pages. Pages must be in the condition
- * as needed after allocation when they are freed. Per cpu lists of pages
- * are kept that only contain node local pages.
- *
- * (C) 2007, SGI. Christoph Lameter <[email protected]>
- */
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/percpu.h>
-
-#ifdef CONFIG_QUICKLIST
-
-struct quicklist {
- void *page;
- int nr_pages;
-};
-
-DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
-
-/*
- * The two key functions quicklist_alloc and quicklist_free are inline so
- * that they may be custom compiled for the platform.
- * Specifying a NULL ctor can remove constructor support. Specifying
- * a constant quicklist allows the determination of the exact address
- * in the per cpu area.
- *
- * The fast patch in quicklist_alloc touched only a per cpu cacheline and
- * the first cacheline of the page itself. There is minmal overhead involved.
- */
-static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
-{
- struct quicklist *q;
- void **p = NULL;
-
- q =&get_cpu_var(quicklist)[nr];
- p = q->page;
- if (likely(p)) {
- q->page = p[0];
- p[0] = NULL;
- q->nr_pages--;
- }
- put_cpu_var(quicklist);
- if (likely(p))
- return p;
-
- p = (void *)__get_free_page(flags | __GFP_ZERO);
- if (ctor && p)
- ctor(p);
- return p;
-}
-
-static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
- struct page *page)
-{
- struct quicklist *q;
-
- q = &get_cpu_var(quicklist)[nr];
- *(void **)p = q->page;
- q->page = p;
- q->nr_pages++;
- put_cpu_var(quicklist);
-}
-
-static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
-{
- __quicklist_free(nr, dtor, pp, virt_to_page(pp));
-}
-
-static inline void quicklist_free_page(int nr, void (*dtor)(void *),
- struct page *page)
-{
- __quicklist_free(nr, dtor, page_address(page), page);
-}
-
-void quicklist_trim(int nr, void (*dtor)(void *),
- unsigned long min_pages, unsigned long max_free);
-
-unsigned long quicklist_total_size(void);
-
-#else
-
-static inline unsigned long quicklist_total_size(void)
-{
- return 0;
-}
-
-#endif
-
-#endif /* LINUX_QUICKLIST_H */
-
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 179faab29f52..fdd421b8d9ae 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -60,41 +60,87 @@ rb_insert_augmented_cached(struct rb_node *node,
rb_insert_augmented(node, &root->rb_root, augment);
}
-#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
- rbtype, rbaugmented, rbcompute) \
+/*
+ * Template for declaring augmented rbtree callbacks (generic case)
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data
+ */
+
+#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \
static inline void \
-rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
+RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
{ \
while (rb != stop) { \
- rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
- rbtype augmented = rbcompute(node); \
- if (node->rbaugmented == augmented) \
+ RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
+ if (RBCOMPUTE(node, true)) \
break; \
- node->rbaugmented = augmented; \
- rb = rb_parent(&node->rbfield); \
+ rb = rb_parent(&node->RBFIELD); \
} \
} \
static inline void \
-rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
} \
static void \
-rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
- old->rbaugmented = rbcompute(old); \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
+ RBCOMPUTE(old, false); \
} \
-rbstatic const struct rb_augment_callbacks rbname = { \
- .propagate = rbname ## _propagate, \
- .copy = rbname ## _copy, \
- .rotate = rbname ## _rotate \
+RBSTATIC const struct rb_augment_callbacks RBNAME = { \
+ .propagate = RBNAME ## _propagate, \
+ .copy = RBNAME ## _copy, \
+ .rotate = RBNAME ## _rotate \
};
+/*
+ * Template for declaring augmented rbtree callbacks,
+ * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBTYPE: type of the RBAUGMENTED field
+ * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar
+ */
+
+#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \
+ RBTYPE, RBAUGMENTED, RBCOMPUTE) \
+static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
+{ \
+ RBSTRUCT *child; \
+ RBTYPE max = RBCOMPUTE(node); \
+ if (node->RBFIELD.rb_left) { \
+ child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (node->RBFIELD.rb_right) { \
+ child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (exit && node->RBAUGMENTED == max) \
+ return true; \
+ node->RBAUGMENTED = max; \
+ return false; \
+} \
+RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
+
#define RB_RED 0
#define RB_BLACK 1
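/*
 * Usage sketch (hypothetical node type): with the MAX template, callers
 * supply only a per-node scalar getter; the subtree-max propagation
 * callbacks are generated.
 */
struct test_node {
        u32 key;
        u32 val;                        /* per-node scalar */
        struct rb_node rb;
        u32 subtree_max;                /* max of ->val over this subtree */
};

static inline u32 test_node_val(struct test_node *node)
{
        return node->val;
}

RB_DECLARE_CALLBACKS_MAX(static, test_callbacks,
                         struct test_node, rb, u32, subtree_max, test_node_val)

/* Insertions and erasures then pass &test_callbacks to
 * rb_insert_augmented() and rb_erase_augmented() as before. */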
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 9443cafd1969..0f80123650e2 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -69,7 +69,7 @@ struct shrinker {
/* These are for internal use */
struct list_head list;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
/* ID in shrinker_idr */
int id;
#endif
@@ -81,6 +81,11 @@ struct shrinker {
/* Flags */
#define SHRINKER_NUMA_AWARE (1 << 0)
#define SHRINKER_MEMCG_AWARE (1 << 1)
+/*
+ * For now this flag only makes sense together with SHRINKER_MEMCG_AWARE;
+ * a non-MEMCG_AWARE shrinker must not set it.
+ */
+#define SHRINKER_NONSLAB (1 << 2)
extern int prealloc_shrinker(struct shrinker *shrinker);
extern void register_shrinker_prepared(struct shrinker *shrinker);
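/*
 * Usage sketch: a shrinker for non-slab objects charged to a memcg sets
 * SHRINKER_NONSLAB together with SHRINKER_MEMCG_AWARE; the deferred-split
 * THP queue is the in-tree user, and its count/scan callbacks are assumed
 * here.
 */
static struct shrinker deferred_split_shrinker = {
        .count_objects  = deferred_split_count,
        .scan_objects   = deferred_split_scan,
        .seeks          = DEFAULT_SEEKS,
        .flags          = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
                          SHRINKER_NONSLAB,
};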
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 56c9c7eed34e..ab2b98ad76e1 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -595,68 +595,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
return __kmalloc_node(size, flags, node);
}
-struct memcg_cache_array {
- struct rcu_head rcu;
- struct kmem_cache *entries[0];
-};
-
-/*
- * This is the main placeholder for memcg-related information in kmem caches.
- * Both the root cache and the child caches will have it. For the root cache,
- * this will hold a dynamically allocated array large enough to hold
- * information about the currently limited memcgs in the system. To allow the
- * array to be accessed without taking any locks, on relocation we free the old
- * version only after a grace period.
- *
- * Root and child caches hold different metadata.
- *
- * @root_cache: Common to root and child caches. NULL for root, pointer to
- * the root cache for children.
- *
- * The following fields are specific to root caches.
- *
- * @memcg_caches: kmemcg ID indexed table of child caches. This table is
- * used to index child cachces during allocation and cleared
- * early during shutdown.
- *
- * @root_caches_node: List node for slab_root_caches list.
- *
- * @children: List of all child caches. While the child caches are also
- * reachable through @memcg_caches, a child cache remains on
- * this list until it is actually destroyed.
- *
- * The following fields are specific to child caches.
- *
- * @memcg: Pointer to the memcg this cache belongs to.
- *
- * @children_node: List node for @root_cache->children list.
- *
- * @kmem_caches_node: List node for @memcg->kmem_caches list.
- */
-struct memcg_cache_params {
- struct kmem_cache *root_cache;
- union {
- struct {
- struct memcg_cache_array __rcu *memcg_caches;
- struct list_head __root_caches_node;
- struct list_head children;
- bool dying;
- };
- struct {
- struct mem_cgroup *memcg;
- struct list_head children_node;
- struct list_head kmem_caches_node;
- struct percpu_ref refcnt;
-
- void (*work_fn)(struct kmem_cache *);
- union {
- struct rcu_head rcu_head;
- struct work_struct work;
- };
- };
- };
-};
-
int memcg_update_all_caches(int num_memcgs);
/**
diff --git a/include/linux/string.h b/include/linux/string.h
index 4deb11f7976b..b2f9df7f0761 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -474,8 +474,9 @@ static inline void memcpy_and_pad(void *dest, size_t dest_len,
* But this can lead to bugs due to typos, or if prefix is a pointer
* and not a constant. Instead use str_has_prefix().
*
- * Returns: 0 if @str does not start with @prefix
- strlen(@prefix) if @str does start with @prefix
+ * Returns:
+ * * strlen(@prefix) if @str starts with @prefix
+ * * 0 if @str does not start with @prefix
*/
static __always_inline size_t str_has_prefix(const char *str, const char *prefix)
{
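/*
 * Usage sketch: the non-zero return value is strlen(@prefix), so a single
 * call can both test for and skip past the prefix.
 */
size_t len = str_has_prefix(buf, "mode=");

if (len)
        ret = parse_mode(buf + len);    /* parse_mode() is hypothetical */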
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 27536b961552..a6ef35184ef1 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -242,9 +242,6 @@ void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue,
void rpc_sleep_on_priority(struct rpc_wait_queue *,
struct rpc_task *,
int priority);
-void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
- struct rpc_wait_queue *queue,
- struct rpc_task *task);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *,
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 8a87d8bcb197..f33e5013bdfb 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -186,7 +186,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
-extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
+extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 13e108bcc9eb..d783e15ba898 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -352,6 +352,7 @@ bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_request_enqueue_transmit(struct rpc_task *task);
void xprt_request_enqueue_receive(struct rpc_task *task);
void xprt_request_wait_receive(struct rpc_task *task);
+void xprt_request_dequeue_xprt(struct rpc_task *task);
bool xprt_request_need_retransmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
void xprt_end_transmit(struct rpc_task *task);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 86fc38ff0355..16c239e0d6dd 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -49,9 +49,9 @@
* fully-chunked NFS message (read chunks are the largest). Note only
* a single chunk type per message is supported currently.
*/
-#define RPCRDMA_MIN_SLOT_TABLE (2U)
+#define RPCRDMA_MIN_SLOT_TABLE (4U)
#define RPCRDMA_DEF_SLOT_TABLE (128U)
-#define RPCRDMA_MAX_SLOT_TABLE (256U)
+#define RPCRDMA_MAX_SLOT_TABLE (16384U)
#define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */
#define RPCRDMA_DEF_INLINE (4096) /* default inline thresh */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index de2c67a33b7e..063c0c1e112b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -340,6 +340,7 @@ extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
+extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
@@ -364,6 +365,7 @@ extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
+extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 3e2a80cc7b56..96305a64a5a7 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -53,18 +53,4 @@ extern const struct blk_integrity_profile t10_pi_type1_ip;
extern const struct blk_integrity_profile t10_pi_type3_crc;
extern const struct blk_integrity_profile t10_pi_type3_ip;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-extern void t10_pi_prepare(struct request *rq, u8 protection_type);
-extern void t10_pi_complete(struct request *rq, u8 protection_type,
- unsigned int intervals);
-#else
-static inline void t10_pi_complete(struct request *rq, u8 protection_type,
- unsigned int intervals)
-{
-}
-static inline void t10_pi_prepare(struct request *rq, u8 protection_type)
-{
-}
-#endif
-
#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 8d8821b3689a..659a4400517b 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -134,7 +134,7 @@ static inline void copy_overflow(int size, unsigned long count)
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
-static __always_inline bool
+static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
int sz = __compiletime_object_size(addr);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 34a038563d97..70bbdc38dc37 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -55,7 +55,7 @@
* as usual) and both source and destination can trigger faults.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
@@ -63,7 +63,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return raw_copy_from_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
@@ -85,7 +85,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
* The caller should also make sure he pins the user space address
* so that we don't result in page fault and sleep.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
@@ -93,7 +93,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
return raw_copy_to_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -103,7 +103,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
}
#ifdef INLINE_COPY_FROM_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
@@ -117,12 +117,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
return res;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
#ifdef INLINE_COPY_TO_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -133,7 +133,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
return n;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
@@ -222,8 +222,9 @@ static inline bool pagefault_disabled(void)
#ifndef ARCH_HAS_NOCACHE_UACCESS
-static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
- const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
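/*
 * Usage sketch: the annotations make the compiler warn when the "bytes not
 * copied" return value is dropped; the expected caller pattern remains:
 */
if (copy_from_user(kbuf, ubuf, len))
        return -EFAULT;         /* short copy: fail, don't use kbuf */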
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index dfa718ffdd4f..4e7809408073 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -53,15 +53,21 @@ struct vmap_area {
unsigned long va_start;
unsigned long va_end;
- /*
- * Largest available free size in subtree.
- */
- unsigned long subtree_max_size;
- unsigned long flags;
struct rb_node rb_node; /* address sorted rbtree */
struct list_head list; /* address sorted list */
- struct llist_node purge_list; /* "lazy purge" list */
- struct vm_struct *vm;
+
+ /*
+ * The following three variables can be packed, because
+ * a vmap_area object is always in exactly one of three states:
+ * 1) in "free" tree (root is free_vmap_area_root)
+ * 2) in "busy" tree (root is vmap_area_root)
+ * 3) in purge list (head is vmap_purge_list)
+ */
+ union {
+ unsigned long subtree_max_size; /* in "free" tree */
+ struct vm_struct *vm; /* in "busy" tree */
+ struct llist_node purge_list; /* in purge list */
+ };
};
/*
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 7238865e75b0..51bf43076165 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -46,6 +46,8 @@ const char *zpool_get_type(struct zpool *pool);
void zpool_destroy_pool(struct zpool *pool);
+bool zpool_malloc_support_movable(struct zpool *pool);
+
int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
unsigned long *handle);
@@ -90,6 +92,7 @@ struct zpool_driver {
struct zpool *zpool);
void (*destroy)(void *pool);
+ bool malloc_support_movable;
int (*malloc)(void *pool, size_t size, gfp_t gfp,
unsigned long *handle);
void (*free)(void *pool, unsigned long handle);
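/*
 * Usage sketch (mirrors the zswap caller): ask the backend whether its
 * allocations may live in movable memory before choosing gfp flags;
 * zsmalloc supports this, zbud does not.
 */
gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

if (zpool_malloc_support_movable(pool))
        gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
ret = zpool_malloc(pool, len, gfp, &handle);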
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index f6a4eaa85a3e..a13830616107 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -451,20 +451,81 @@ TRACE_EVENT(xprtrdma_createmrs,
TP_STRUCT__entry(
__field(const void *, r_xprt)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
__field(unsigned int, count)
),
TP_fast_assign(
__entry->r_xprt = r_xprt;
__entry->count = count;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("r_xprt=%p: created %u MRs",
- __entry->r_xprt, __entry->count
+ TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __entry->count
)
);
-DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
+TRACE_EVENT(xprtrdma_mr_get,
+ TP_PROTO(
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ const struct rpc_rqst *rqst = &req->rl_slot;
+
+ __entry->req = req;
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x req=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->req
+ )
+);
+
+TRACE_EVENT(xprtrdma_nomrs,
+ TP_PROTO(
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(req),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ const struct rpc_rqst *rqst = &req->rl_slot;
+
+ __entry->req = req;
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x req=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->req
+ )
+);
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
@@ -623,21 +684,21 @@ TRACE_EVENT(xprtrdma_post_send,
TRACE_EVENT(xprtrdma_post_recv,
TP_PROTO(
- const struct ib_cqe *cqe
+ const struct rpcrdma_rep *rep
),
- TP_ARGS(cqe),
+ TP_ARGS(rep),
TP_STRUCT__entry(
- __field(const void *, cqe)
+ __field(const void *, rep)
),
TP_fast_assign(
- __entry->cqe = cqe;
+ __entry->rep = rep;
),
- TP_printk("cqe=%p",
- __entry->cqe
+ TP_printk("rep=%p",
+ __entry->rep
)
);
@@ -715,14 +776,15 @@ TRACE_EVENT(xprtrdma_wc_receive,
TP_ARGS(wc),
TP_STRUCT__entry(
- __field(const void *, cqe)
+ __field(const void *, rep)
__field(u32, byte_len)
__field(unsigned int, status)
__field(u32, vendor_err)
),
TP_fast_assign(
- __entry->cqe = wc->wr_cqe;
+ __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
+ rr_cqe);
__entry->status = wc->status;
if (wc->status) {
__entry->byte_len = 0;
@@ -733,8 +795,8 @@ TRACE_EVENT(xprtrdma_wc_receive,
}
),
- TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
- __entry->cqe, __entry->byte_len,
+ TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
+ __entry->rep, __entry->byte_len,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 3a27335fce2c..c2ce6480b4b1 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -66,8 +66,9 @@ DECLARE_EVENT_CLASS(writeback_page_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
+ 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
@@ -110,8 +111,8 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
struct backing_dev_info *bdi = inode_to_bdi(inode);
/* may be called for files on pseudo FSes w/ unregistered bdi */
- strncpy(__entry->name,
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->flags = flags;
@@ -316,8 +317,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
@@ -360,8 +361,9 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name,
- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ wb->bdi->dev ? dev_name(wb->bdi->dev) :
+ "(unknown)", 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -414,7 +416,7 @@ DECLARE_EVENT_CLASS(writeback_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: cgroup_ino=%u",
@@ -436,7 +438,7 @@ TRACE_EVENT(writeback_bdi_register,
__array(char, name, 32)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
),
TP_printk("bdi %s",
__entry->name
@@ -461,7 +463,7 @@ DECLARE_EVENT_CLASS(wbc_class,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->sync_mode = wbc->sync_mode;
@@ -512,7 +514,7 @@ TRACE_EVENT(writeback_queue_io,
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
(jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -598,7 +600,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
),
TP_fast_assign(
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->write_bw = KBps(wb->write_bandwidth);
__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
@@ -663,7 +665,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_fast_assign(
unsigned long freerun = (thresh + bg_thresh) / 2;
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->limit = global_wb_domain.dirty_limit;
__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -723,8 +725,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
@@ -797,8 +799,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
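/*
 * Usage sketch: unlike strncpy(), strscpy_pad() always NUL-terminates and
 * zero-fills the remainder of the buffer, so the fixed-size trace fields
 * can neither lose their terminator on truncation nor leak stale bytes.
 */
char name[32];

strscpy_pad(name, dev_name(bdi->dev), sizeof(name));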
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 63b1f506ea67..c160a5354eb6 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -67,6 +67,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
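/*
 * Usage sketch (userspace): both hints keep the mapping and its contents
 * valid, unlike MADV_DONTNEED/MADV_FREE; a later access simply pays the
 * refault cost.
 */
madvise(addr, length, MADV_COLD);       /* deactivate: cheap to reclaim later */
madvise(addr, length, MADV_PAGEOUT);    /* reclaim (swap out) right away */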
diff --git a/include/uapi/linux/coff.h b/include/uapi/linux/coff.h
index e4a79f80b9a0..ab5c7e847eed 100644
--- a/include/uapi/linux/coff.h
+++ b/include/uapi/linux/coff.h
@@ -11,6 +11,9 @@
more information about COFF, then O'Reilly has a very excellent book.
*/
+#ifndef _UAPI_LINUX_COFF_H
+#define _UAPI_LINUX_COFF_H
+
#define E_SYMNMLEN 8 /* Number of characters in a symbol name */
#define E_FILNMLEN 14 /* Number of characters in a file name */
#define E_DIMNUM 4 /* Number of array dimensions in auxiliary entry */
@@ -350,3 +353,5 @@ struct COFF_reloc {
/* For new sections we haven't heard of before */
#define COFF_DEF_SECTION_ALIGNMENT 4
+
+#endif /* _UAPI_LINUX_COFF_H */
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 2971d29a42e4..df2e12fb3381 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -425,6 +425,10 @@ enum fuse_opcode {
/* CUSE specific operations */
CUSE_INIT = 4096,
+
+ /* Reserved opcodes: helpful to detect structure endian-ness */
+ CUSE_INIT_BSWAP_RESERVED = 1048576, /* CUSE_INIT << 8 */
+ FUSE_INIT_BSWAP_RESERVED = 436207616, /* FUSE_INIT << 24 */
};
enum fuse_notify_code {
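/*
 * Usage sketch: FUSE_INIT is 26, and 26 read with swapped byte order is
 * 26 << 24 == 436207616, so a server can spot a peer of opposite
 * endianness from the very first request header. The handler name is
 * hypothetical.
 */
switch (in_header->opcode) {
case FUSE_INIT:
        break;                          /* byte orders match */
case FUSE_INIT_BSWAP_RESERVED:
        return reject_byteswapped_peer();
}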
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 96ee9d94b73e..ea57526a5b89 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -28,6 +28,7 @@ struct io_uring_sqe {
__u16 poll_events;
__u32 sync_range_flags;
__u32 msg_flags;
+ __u32 timeout_flags;
};
__u64 user_data; /* data to be passed back at completion time */
union {
@@ -61,6 +62,7 @@ struct io_uring_sqe {
#define IORING_OP_SYNC_FILE_RANGE 8
#define IORING_OP_SENDMSG 9
#define IORING_OP_RECVMSG 10
+#define IORING_OP_TIMEOUT 11
/*
* sqe->fsync_flags
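/*
 * Usage sketch (userspace; a hedged reading of the new opcode's ABI): the
 * timeout SQE carries a struct __kernel_timespec via ->addr with ->len = 1,
 * ->off is a completion count that cancels the timeout early, and
 * ->timeout_flags = 0 selects relative timing.
 */
struct __kernel_timespec ts = { .tv_sec = 1 };

memset(sqe, 0, sizeof(*sqe));
sqe->opcode        = IORING_OP_TIMEOUT;
sqe->fd            = -1;
sqe->addr          = (unsigned long)&ts;
sqe->len           = 1;
sqe->off           = 0;                 /* pure timeout, no CQE target */
sqe->timeout_flags = 0;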
diff --git a/init/main.c b/init/main.c
index 653693da8da6..208b8fa1808e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -507,7 +507,7 @@ void __init __weak mem_encrypt_init(void) { }
void __init __weak poking_init(void) { }
-void __init __weak pgd_cache_init(void) { }
+void __init __weak pgtable_cache_init(void) { }
bool initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
@@ -556,6 +556,7 @@ static void __init mm_init(void)
report_meminit();
mem_init();
kmem_cache_init();
+ kmemleak_init();
pgtable_init();
debug_objects_mem_init();
vmalloc_init();
@@ -564,7 +565,6 @@ static void __init mm_init(void)
init_espfix_bsp();
/* Should be run after espfix64 is set up. */
pti_init();
- pgd_cache_init();
}
void __init __weak arch_call_rest_init(void)
@@ -594,7 +594,6 @@ asmlinkage __visible void __init start_kernel(void)
page_address_init();
pr_notice("%s", linux_banner);
setup_arch(&command_line);
- mm_init_cpumask(&init_mm);
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
@@ -740,7 +739,6 @@ asmlinkage __visible void __init start_kernel(void)
initrd_start = 0;
}
#endif
- kmemleak_init();
setup_per_cpu_pageset();
numa_policy_init();
acpi_early_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 7c15729d9d25..3d920ff15c80 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1240,15 +1240,14 @@ static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
/* create the notify skb */
nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
- if (!nc) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!nc)
+ return -ENOMEM;
+
if (copy_from_user(nc->data,
notification->sigev_value.sival_ptr,
NOTIFY_COOKIE_LEN)) {
ret = -EFAULT;
- goto out;
+ goto free_skb;
}
/* TODO: add a header? */
@@ -1264,8 +1263,7 @@ retry:
fdput(f);
if (IS_ERR(sock)) {
ret = PTR_ERR(sock);
- sock = NULL;
- goto out;
+ goto free_skb;
}
timeo = MAX_SCHEDULE_TIMEOUT;
@@ -1274,11 +1272,8 @@ retry:
sock = NULL;
goto retry;
}
- if (ret) {
- sock = NULL;
- nc = NULL;
- goto out;
- }
+ if (ret)
+ return ret;
}
}
@@ -1333,7 +1328,8 @@ out_fput:
out:
if (sock)
netlink_detachskb(sock, nc);
- else if (nc)
+ else
+free_skb:
dev_kfree_skb(nc);
return ret;
diff --git a/ipc/sem.c b/ipc/sem.c
index 7da4504bcc7c..ec97a7072413 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1852,7 +1852,8 @@ static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
struct sem_undo *un;
- list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+ list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
+ spin_is_locked(&ulp->lock)) {
if (un->semid == semid)
return un;
}
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index cc0d0cf114e3..a70f7209cda3 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -14,8 +14,9 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
-#include <linux/parser.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
@@ -583,58 +584,52 @@ static const struct super_operations bpf_super_ops = {
enum {
OPT_MODE,
- OPT_ERR,
};
-static const match_table_t bpf_mount_tokens = {
- { OPT_MODE, "mode=%o" },
- { OPT_ERR, NULL },
+static const struct fs_parameter_spec bpf_param_specs[] = {
+ fsparam_u32oct ("mode", OPT_MODE),
+ {}
+};
+
+static const struct fs_parameter_description bpf_fs_parameters = {
+ .name = "bpf",
+ .specs = bpf_param_specs,
};
struct bpf_mount_opts {
umode_t mode;
};
-static int bpf_parse_options(char *data, struct bpf_mount_opts *opts)
+static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- substring_t args[MAX_OPT_ARGS];
- int option, token;
- char *ptr;
+ struct bpf_mount_opts *opts = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
- opts->mode = S_IRWXUGO;
-
- while ((ptr = strsep(&data, ",")) != NULL) {
- if (!*ptr)
- continue;
-
- token = match_token(ptr, bpf_mount_tokens, args);
- switch (token) {
- case OPT_MODE:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->mode = option & S_IALLUGO;
- break;
+ opt = fs_parse(fc, &bpf_fs_parameters, param, &result);
+ if (opt < 0)
/* We might like to report bad mount options here, but
* traditionally we've ignored all mount options, so we'd
* better continue to ignore non-existing options for bpf.
*/
- }
+ return opt == -ENOPARAM ? 0 : opt;
+
+ switch (opt) {
+ case OPT_MODE:
+ opts->mode = result.uint_32 & S_IALLUGO;
+ break;
}
return 0;
}
-static int bpf_fill_super(struct super_block *sb, void *data, int silent)
+static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr bpf_rfiles[] = { { "" } };
- struct bpf_mount_opts opts;
+ struct bpf_mount_opts *opts = fc->fs_private;
struct inode *inode;
int ret;
- ret = bpf_parse_options(data, &opts);
- if (ret)
- return ret;
-
ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
if (ret)
return ret;
@@ -644,21 +639,50 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent)
inode = sb->s_root->d_inode;
inode->i_op = &bpf_dir_iops;
inode->i_mode &= ~S_IALLUGO;
- inode->i_mode |= S_ISVTX | opts.mode;
+ inode->i_mode |= S_ISVTX | opts->mode;
return 0;
}
-static struct dentry *bpf_mount(struct file_system_type *type, int flags,
- const char *dev_name, void *data)
+static int bpf_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, bpf_fill_super);
+}
+
+static void bpf_free_fc(struct fs_context *fc)
{
- return mount_nodev(type, flags, data, bpf_fill_super);
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations bpf_context_ops = {
+ .free = bpf_free_fc,
+ .parse_param = bpf_parse_param,
+ .get_tree = bpf_get_tree,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int bpf_init_fs_context(struct fs_context *fc)
+{
+ struct bpf_mount_opts *opts;
+
+ opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ opts->mode = S_IRWXUGO;
+
+ fc->fs_private = opts;
+ fc->ops = &bpf_context_ops;
+ return 0;
}
static struct file_system_type bpf_fs_type = {
.owner = THIS_MODULE,
.name = "bpf",
- .mount = bpf_mount,
+ .init_fs_context = bpf_init_fs_context,
+ .parameters = &bpf_fs_parameters,
.kill_sb = kill_litter_super,
};
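/*
 * Usage sketch (userspace): with ->init_fs_context wired up, bpffs is also
 * reachable through the new mount API; libc wrappers may be missing, hence
 * raw syscall(2). Constants come from <linux/mount.h>, AT_FDCWD from
 * <fcntl.h>.
 */
int fs_fd = syscall(SYS_fsopen, "bpf", 0);

syscall(SYS_fsconfig, fs_fd, FSCONFIG_SET_STRING, "mode", "0700", 0);
syscall(SYS_fsconfig, fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
int mnt_fd = syscall(SYS_fsmount, fs_fd, 0, 0);

syscall(SYS_move_mount, mnt_fd, "", AT_FDCWD, "/sys/fs/bpf",
        MOVE_MOUNT_F_EMPTY_PATH);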
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 10f1187b3907..f76d6f77dd5e 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -893,30 +893,25 @@ static struct sysrq_key_op sysrq_dbg_op = {
};
#endif
-static int kgdb_panic_event(struct notifier_block *self,
- unsigned long val,
- void *data)
+void kgdb_panic(const char *msg)
{
+ if (!kgdb_io_module_registered)
+ return;
+
/*
- * Avoid entering the debugger if we were triggered due to a panic
- * We don't want to get stuck waiting for input from user in such case.
- * panic_timeout indicates the system should automatically
+ * We don't want to get stuck waiting for input from user if
+ * "panic_timeout" indicates the system should automatically
* reboot on panic.
*/
if (panic_timeout)
- return NOTIFY_DONE;
+ return;
if (dbg_kdb_mode)
- kdb_printf("PANIC: %s\n", (char *)data);
+ kdb_printf("PANIC: %s\n", msg);
+
kgdb_breakpoint();
- return NOTIFY_DONE;
}
-static struct notifier_block kgdb_panic_event_nb = {
- .notifier_call = kgdb_panic_event,
- .priority = INT_MAX,
-};
-
void __weak kgdb_arch_late(void)
{
}
@@ -965,8 +960,6 @@ static void kgdb_register_callbacks(void)
kgdb_arch_late();
register_module_notifier(&dbg_module_load_nb);
register_reboot_notifier(&dbg_reboot_notifier);
- atomic_notifier_chain_register(&panic_notifier_list,
- &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
register_sysrq_key('g', &sysrq_dbg_op);
#endif
@@ -980,16 +973,14 @@ static void kgdb_register_callbacks(void)
static void kgdb_unregister_callbacks(void)
{
/*
- * When this routine is called KGDB should unregister from the
- * panic handler and clean up, making sure it is not handling any
+ * When this routine is called KGDB should unregister from
+ * handlers and clean up, making sure it is not handling any
* break exceptions at the time.
*/
if (kgdb_io_module_registered) {
kgdb_io_module_registered = 0;
unregister_reboot_notifier(&dbg_reboot_notifier);
unregister_module_notifier(&dbg_module_load_nb);
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &kgdb_panic_event_nb);
kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
unregister_sysrq_key('g', &sysrq_dbg_op);
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index fc482c8e0bd8..57fb4dcff434 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -3,6 +3,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/binfmts.h>
+#include <linux/elfcore.h>
Elf_Half __weak elf_core_extra_phdrs(void)
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4f08b17d6426..275eae05af20 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2239,7 +2239,7 @@ static void __perf_event_disable(struct perf_event *event,
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
- * remains valid. This condition is satisifed when called through
+ * remains valid. This condition is satisfied when called through
* perf_event_for_each_child or perf_event_for_each because they
* hold the top-level event's child_mutex, so any descendant that
* goes to exit will block in perf_event_exit_event().
@@ -6054,7 +6054,7 @@ static void perf_sample_regs_intr(struct perf_regs *regs_intr,
* Get remaining task size from user stack pointer.
*
* It'd be better to take stack vma map and limit this more
- * precisly, but there's no way to get it safely under interrupt,
+ * precisely, but there's no way to get it safely under interrupt,
* so using TASK_SIZE as limit.
*/
static u64 perf_ustack_task_size(struct pt_regs *regs)
@@ -6616,7 +6616,7 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_STACK_USER) {
/*
- * Either we need PERF_SAMPLE_STACK_USER bit to be allways
+ * Either we need PERF_SAMPLE_STACK_USER bit to be always
* processed as the last one or have additional check added
* in case new sample type is added, because we could eat
* up the rest of the sample size.
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 84fa00497c49..94d38a39d72e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -26,6 +26,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
+#include <linux/khugepaged.h>
#include <linux/uprobes.h>
@@ -143,17 +144,19 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
*
* @vma: vma that holds the pte pointing to page
* @addr: address the old @page is mapped at
- * @page: the cowed page we are replacing by kpage
- * @kpage: the modified page we replace page by
+ * @old_page: the page we are replacing by new_page
+ * @new_page: the modified page we replace page by
*
- * Returns 0 on success, -EFAULT on failure.
+ * If @new_page is NULL, only unmap @old_page.
+ *
+ * Returns 0 on success, negative error code otherwise.
*/
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
struct page *old_page, struct page *new_page)
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = old_page,
+ .page = compound_head(old_page),
.vma = vma,
.address = addr,
};
@@ -164,12 +167,12 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
addr + PAGE_SIZE);
- VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
-
- err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
- false);
- if (err)
- return err;
+ if (new_page) {
+ err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
+ &memcg, false);
+ if (err)
+ return err;
+ }
/* For try_to_free_swap() and munlock_vma_page() below */
lock_page(old_page);
@@ -177,15 +180,20 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mmu_notifier_invalidate_range_start(&range);
err = -EAGAIN;
if (!page_vma_mapped_walk(&pvmw)) {
- mem_cgroup_cancel_charge(new_page, memcg, false);
+ if (new_page)
+ mem_cgroup_cancel_charge(new_page, memcg, false);
goto unlock;
}
VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
- get_page(new_page);
- page_add_new_anon_rmap(new_page, vma, addr, false);
- mem_cgroup_commit_charge(new_page, memcg, false, false);
- lru_cache_add_active_or_unevictable(new_page, vma);
+ if (new_page) {
+ get_page(new_page);
+ page_add_new_anon_rmap(new_page, vma, addr, false);
+ mem_cgroup_commit_charge(new_page, memcg, false, false);
+ lru_cache_add_active_or_unevictable(new_page, vma);
+ } else
+ /* no new page, just dec_mm_counter for old_page */
+ dec_mm_counter(mm, MM_ANONPAGES);
if (!PageAnon(old_page)) {
dec_mm_counter(mm, mm_counter_file(old_page));
@@ -194,8 +202,9 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
ptep_clear_flush_notify(vma, addr, pvmw.pte);
- set_pte_at_notify(mm, addr, pvmw.pte,
- mk_pte(new_page, vma->vm_page_prot));
+ if (new_page)
+ set_pte_at_notify(mm, addr, pvmw.pte,
+ mk_pte(new_page, vma->vm_page_prot));
page_remove_rmap(old_page, false);
if (!page_mapped(old_page))
@@ -464,6 +473,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
struct page *old_page, *new_page;
struct vm_area_struct *vma;
int ret, is_register, ref_ctr_updated = 0;
+ bool orig_page_huge = false;
is_register = is_swbp_insn(&opcode);
uprobe = container_of(auprobe, struct uprobe, arch);
@@ -471,7 +481,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
retry:
/* Read the page with vaddr into memory */
ret = get_user_pages_remote(NULL, mm, vaddr, 1,
- FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
+ FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
if (ret <= 0)
return ret;
@@ -488,6 +498,10 @@ retry:
ref_ctr_updated = 1;
}
+ ret = 0;
+ if (!is_register && !PageAnon(old_page))
+ goto put_old;
+
ret = anon_vma_prepare(vma);
if (ret)
goto put_old;
@@ -501,8 +515,33 @@ retry:
copy_highpage(new_page, old_page);
copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+ if (!is_register) {
+ struct page *orig_page;
+ pgoff_t index;
+
+ VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);
+
+ index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
+ orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
+ index);
+
+ if (orig_page) {
+ if (PageUptodate(orig_page) &&
+ pages_identical(new_page, orig_page)) {
+ /* let go new_page */
+ put_page(new_page);
+ new_page = NULL;
+
+ if (PageCompound(orig_page))
+ orig_page_huge = true;
+ }
+ put_page(orig_page);
+ }
+ }
+
ret = __replace_page(vma, vaddr, old_page, new_page);
- put_page(new_page);
+ if (new_page)
+ put_page(new_page);
put_old:
put_page(old_page);
@@ -513,6 +552,10 @@ put_old:
if (ret && is_register && ref_ctr_updated)
update_ref_ctr(uprobe, mm, -1);
+ /* try collapse pmd for compound page */
+ if (!ret && orig_page_huge)
+ collapse_pte_mapped_thp(mm, vaddr);
+
return ret;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 5a0fd518e04e..60763c043aa3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -125,6 +125,15 @@ int nr_threads; /* The idle threads do not count.. */
static int max_threads; /* tunable limit on nr_threads */
+#define NAMED_ARRAY_INDEX(x) [x] = __stringify(x)
+
+static const char * const resident_page_types[] = {
+ NAMED_ARRAY_INDEX(MM_FILEPAGES),
+ NAMED_ARRAY_INDEX(MM_ANONPAGES),
+ NAMED_ARRAY_INDEX(MM_SWAPENTS),
+ NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
+};
+
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
@@ -645,12 +654,15 @@ static void check_mm(struct mm_struct *mm)
{
int i;
+ BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
+ "Please make sure 'struct resident_page_types[]' is updated as well");
+
for (i = 0; i < NR_MM_COUNTERS; i++) {
long x = atomic_long_read(&mm->rss_stat.count[i]);
if (unlikely(x))
- printk(KERN_ALERT "BUG: Bad rss-counter state "
- "mm:%p idx:%d val:%ld\n", mm, i, x);
+ pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
+ mm, resident_page_types[i], x);
}
if (mm_pgtables_bytes(mm))
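/*
 * The NAMED_ARRAY_INDEX() trick in miniature: designated initializers
 * keyed by the enum keep names and values in lockstep, and the
 * BUILD_BUG_ON_MSG() above catches a forgotten entry at compile time.
 * The enum here is hypothetical.
 */
enum { STATE_A, STATE_B, NR_STATES };

static const char * const state_names[] = {
        NAMED_ARRAY_INDEX(STATE_A),
        NAMED_ARRAY_INDEX(STATE_B),
};
/* state_names[STATE_B] == "STATE_B" */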
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index d5870723b8ad..15d70a90b50d 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -300,6 +300,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
struct page *pages;
+ if (fatal_signal_pending(current))
+ return NULL;
pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
if (pages) {
unsigned int count, i;
diff --git a/kernel/panic.c b/kernel/panic.c
index 057540b6eee9..47e8ebccc22b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -12,6 +12,7 @@
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
+#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
@@ -220,6 +221,13 @@ void panic(const char *fmt, ...)
#endif
/*
+ * If kgdb is enabled, give it a chance to run before we stop all
+ * the other CPUs or else we won't be able to debug processes left
+ * running on them.
+ */
+ kgdb_panic(buf);
+
+ /*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
* If we want to run this after calling panic_notifiers, pass
@@ -551,9 +559,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
{
disable_trace_on_warning();
- if (args)
- pr_warn(CUT_HERE);
-
if (file)
pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
raw_smp_processor_id(), current->pid, file, line,
@@ -591,37 +596,26 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
add_taint(taint, LOCKDEP_STILL_OK);
}
-#ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+#ifndef __WARN_FLAGS
+void warn_slowpath_fmt(const char *file, int line, unsigned taint,
+ const char *fmt, ...)
{
struct warn_args args;
- args.fmt = fmt;
- va_start(args.args, fmt);
- __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
- &args);
- va_end(args.args);
-}
-EXPORT_SYMBOL(warn_slowpath_fmt);
+ pr_warn(CUT_HERE);
-void warn_slowpath_fmt_taint(const char *file, int line,
- unsigned taint, const char *fmt, ...)
-{
- struct warn_args args;
+ if (!fmt) {
+ __warn(file, line, __builtin_return_address(0), taint,
+ NULL, NULL);
+ return;
+ }
args.fmt = fmt;
va_start(args.args, fmt);
__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
va_end(args.args);
}
-EXPORT_SYMBOL(warn_slowpath_fmt_taint);
-
-void warn_slowpath_null(const char *file, int line)
-{
- pr_warn(CUT_HERE);
- __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
-}
-EXPORT_SYMBOL(warn_slowpath_null);
+EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
diff --git a/kernel/resource.c b/kernel/resource.c
index 74877e9d90ca..76036a41143b 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -487,8 +487,8 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
while (start < end &&
!find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
false, &res)) {
- pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
- end_pfn = (res.end + 1) >> PAGE_SHIFT;
+ pfn = PFN_UP(res.start);
+ end_pfn = PFN_DOWN(res.end + 1);
if (end_pfn > pfn)
ret = (*func)(pfn, end_pfn - pfn, arg);
if (ret)
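/*
 * For reference, the <linux/pfn.h> helpers now used instead of open-coded
 * shifts: PFN_UP() rounds the start up and PFN_DOWN() rounds the exclusive
 * end down, so partial pages at either edge of the resource are skipped.
 */
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)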
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index c892c6280c9f..8dad5aa600ea 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -238,7 +238,6 @@ static void do_idle(void)
tick_nohz_idle_enter();
while (!need_resched()) {
- check_pgt_cache();
rmb();
local_irq_disable();
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 078950d9605b..00fcea236eba 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -264,7 +264,8 @@ extern struct ctl_table epoll_table[];
extern struct ctl_table firmware_config_table[];
#endif
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
+ defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
int sysctl_legacy_va_layout;
#endif
@@ -1573,7 +1574,8 @@ static struct ctl_table vm_table[] = {
.proc_handler = proc_dointvec,
.extra1 = SYSCTL_ZERO,
},
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
+ defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
{
.procname = "legacy_va_layout",
.data = &sysctl_legacy_va_layout,
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 0e315a2e77ae..4820823515e9 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1678,24 +1678,26 @@ void timer_clear_idle(void)
static int collect_expired_timers(struct timer_base *base,
struct hlist_head *heads)
{
+ unsigned long now = READ_ONCE(jiffies);
+
/*
* NOHZ optimization. After a long idle sleep we need to forward the
* base to current jiffies. Avoid a loop by searching the bitfield for
* the next expiring timer.
*/
- if ((long)(jiffies - base->clk) > 2) {
+ if ((long)(now - base->clk) > 2) {
unsigned long next = __next_timer_interrupt(base);
/*
* If the next timer is ahead of time forward to current
* jiffies, otherwise forward to the next expiry time:
*/
- if (time_after(next, jiffies)) {
+ if (time_after(next, now)) {
/*
* The call site will increment base->clk and then
* terminate the expiry loop immediately.
*/
- base->clk = jiffies;
+ base->clk = now;
return 0;
}
base->clk = next;
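The point of the snapshot is that jiffies can advance between the two tests; read twice, the base could be forwarded past the expiry just computed against the earlier value. A condensed sketch of the hazard being closed, using the same variables as the hunk above:

    /* Sketch: both comparisons must observe the same instant. */
    unsigned long now = READ_ONCE(jiffies);        /* one snapshot */

    if ((long)(now - base->clk) > 2) {
            unsigned long next = __next_timer_interrupt(base);

            base->clk = time_after(next, now) ? now : next;
    }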
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a6697e28ddda..402dc3ce88d3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -549,10 +549,11 @@ static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
for (i = 0; i < orig->tp.nr_args; i++) {
if (strcmp(orig->tp.args[i].comm,
comp->tp.args[i].comm))
- continue;
+ break;
}
- return true;
+ if (i == orig->tp.nr_args)
+ return true;
}
return false;
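The old loop body continued on a mismatch and the function then returned true unconditionally, so probes with different arguments still compared as equal. The fix breaks out at the first mismatch and reports a match only when the scan ran to completion:

    for (i = 0; i < orig->tp.nr_args; i++) {
            if (strcmp(orig->tp.args[i].comm, comp->tp.args[i].comm))
                    break;                  /* first mismatch ends the scan */
    }
    if (i == orig->tp.nr_args)
            return true;                    /* every argument matched */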
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 34dd6d0016a3..dd884341f5c5 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -431,10 +431,11 @@ static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
for (i = 0; i < orig->tp.nr_args; i++) {
if (strcmp(orig->tp.args[i].comm,
comp->tp.args[i].comm))
- continue;
+ break;
}
- return true;
+ if (i == orig->tp.nr_args)
+ return true;
}
return false;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e0e14780a13d..93d97f9b0157 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -311,7 +311,7 @@ config HEADERS_CHECK
relevant for userspace, say 'Y'.
config OPTIMIZE_INLINING
- bool "Allow compiler to uninline functions marked 'inline'"
+ def_bool y
help
This option determines if the kernel forces gcc to inline the functions
developers have marked 'inline'. Doing so takes away freedom from gcc to
@@ -322,8 +322,6 @@ config OPTIMIZE_INLINING
decision will become the default in the future. Until then this option
is there to test gcc for this.
- If unsure, say N.
-
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
help
@@ -576,17 +574,18 @@ config DEBUG_KMEMLEAK
In order to access the kmemleak file, debugfs needs to be
mounted (usually at /sys/kernel/debug).
-config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
- int "Maximum kmemleak early log entries"
+config DEBUG_KMEMLEAK_MEM_POOL_SIZE
+ int "Kmemleak memory pool size"
depends on DEBUG_KMEMLEAK
- range 200 40000
- default 400
+ range 200 1000000
+ default 16000
help
Kmemleak must track all the memory allocations to avoid
reporting false positives. Since memory may be allocated or
- freed before kmemleak is initialised, an early log buffer is
- used to store these actions. If kmemleak reports "early log
- buffer exceeded", please increase this value.
+ freed before kmemleak is fully initialised, use a static pool
+ of metadata objects to track such callbacks. After kmemleak is
+ fully initialised, this memory pool acts as an emergency one
+ if slab allocations fail.
config DEBUG_KMEMLEAK_TEST
tristate "Simple test for the kernel memory leak detector"
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 7fa97a8b5717..6c9682ce0254 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -134,6 +134,14 @@ config KASAN_S390_4_LEVEL_PAGING
to 3TB of RAM with KASan enabled). This option allows forcing
4-level paging instead.
+config KASAN_SW_TAGS_IDENTIFY
+ bool "Enable memory corruption identification"
+ depends on KASAN_SW_TAGS
+ help
+ This option enables best-effort identification of bug type
+ (use-after-free or out-of-bounds) at the cost of increased
+ memory consumption.
+
config TEST_KASAN
tristate "Module for testing KASAN for bug detection"
depends on m && KASAN
diff --git a/lib/bug.c b/lib/bug.c
index 1077366f496b..8c98af0bf585 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -181,6 +181,15 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
}
}
+ /*
+ * BUG() and WARN_ON() families don't print a custom debug message
+ * before triggering the exception handler, so we must add the
+ * "cut here" line now. WARN() issues its own "cut here" before the
+ * extra debugging message it writes before triggering the handler.
+ */
+ if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
+ printk(KERN_DEFAULT CUT_HERE);
+
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
__warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
@@ -188,8 +197,6 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
return BUG_TRAP_TYPE_WARN;
}
- printk(KERN_DEFAULT CUT_HERE);
-
if (file)
pr_crit("kernel BUG at %s:%u!\n", file, line);
else
diff --git a/lib/extable.c b/lib/extable.c
index 25da4071122a..c3e59caf7ffa 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
+#include <linux/extable.h>
#ifndef ARCH_HAS_RELATIVE_EXTABLE
#define ex_to_insn(x) ((x)->insn)
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index a7bafc413730..ae25e2fa2187 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -36,12 +36,12 @@ static inline size_t genradix_depth_size(unsigned depth)
#define GENRADIX_DEPTH_MASK \
((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
-unsigned genradix_root_to_depth(struct genradix_root *r)
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
{
return (unsigned long) r & GENRADIX_DEPTH_MASK;
}
-struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
{
return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
}
diff --git a/lib/hexdump.c b/lib/hexdump.c
index b1d55b669ae2..147133f8eb2f 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -270,25 +270,4 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
}
EXPORT_SYMBOL(print_hex_dump);
-#if !defined(CONFIG_DYNAMIC_DEBUG)
-/**
- * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
- * @prefix_str: string to prefix each line with;
- * caller supplies trailing spaces for alignment if desired
- * @prefix_type: controls whether prefix of an offset, address, or none
- * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
- * @buf: data blob to dump
- * @len: number of bytes in the @buf
- *
- * Calls print_hex_dump(), with log level of KERN_DEBUG,
- * rowsize of 16, groupsize of 1, and ASCII output included.
- */
-void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- const void *buf, size_t len)
-{
- print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
- buf, len, true);
-}
-EXPORT_SYMBOL(print_hex_dump_bytes);
-#endif /* !defined(CONFIG_DYNAMIC_DEBUG) */
#endif /* defined(CONFIG_PRINTK) */
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f1e0569b4539..639d5e7014c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -878,7 +878,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
head = compound_head(page);
v += (page - head) << PAGE_SHIFT;
- if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+ if (likely(n <= v && v <= (page_size(head))))
return true;
WARN_ON(1);
return false;
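page_size() is introduced elsewhere in this series as the named form of the open-coded shift; its shape, per include/linux/mm.h:

    /* Total bytes covered by a page, compound or not. */
    static inline unsigned long page_size(struct page *page)
    {
            return PAGE_SIZE << compound_order(page);
    }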
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index ba16c08e8cb9..717c940112f9 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -83,17 +83,19 @@ next:
ALIGN((uintptr_t)ir, 4)) &&
(ir < limit) && (*ir == 0))
ir++;
- for (; (ir + 4) <= limit; ir += 4) {
- dv = *((u32 *)ir);
- if (dv) {
+ if (IS_ALIGNED((uintptr_t)ir, 4)) {
+ for (; (ir + 4) <= limit; ir += 4) {
+ dv = *((u32 *)ir);
+ if (dv) {
# if defined(__LITTLE_ENDIAN)
- ir += __builtin_ctz(dv) >> 3;
+ ir += __builtin_ctz(dv) >> 3;
# elif defined(__BIG_ENDIAN)
- ir += __builtin_clz(dv) >> 3;
+ ir += __builtin_clz(dv) >> 3;
# else
# error "missing endian definition"
# endif
- break;
+ break;
+ }
}
}
#endif
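The word-at-a-time scan dereferences *(u32 *)ir, which is only safe once ir is actually 4-byte aligned; the byte loop before it can stop at 'limit' while still misaligned, so the u32 loop is now guarded. IS_ALIGNED() is the stock helper from include/linux/kernel.h:

    #define IS_ALIGNED(x, a)    (((x) & ((typeof(x))(a) - 1)) == 0)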
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 62b8ee92643d..41ae3c7570d3 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -77,26 +77,10 @@ static inline void erase_cached(struct test_node *node, struct rb_root_cached *r
}
-static inline u32 augment_recompute(struct test_node *node)
-{
- u32 max = node->val, child_augmented;
- if (node->rb.rb_left) {
- child_augmented = rb_entry(node->rb.rb_left, struct test_node,
- rb)->augmented;
- if (max < child_augmented)
- max = child_augmented;
- }
- if (node->rb.rb_right) {
- child_augmented = rb_entry(node->rb.rb_right, struct test_node,
- rb)->augmented;
- if (max < child_augmented)
- max = child_augmented;
- }
- return max;
-}
+#define NODE_VAL(node) ((node)->val)
-RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
- u32, augmented, augment_recompute)
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct test_node, rb, u32, augmented, NODE_VAL)
static void insert_augmented(struct test_node *node,
struct rb_root_cached *root)
@@ -238,7 +222,20 @@ static void check_augmented(int nr_nodes)
check(nr_nodes);
for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
struct test_node *node = rb_entry(rb, struct test_node, rb);
- WARN_ON_ONCE(node->augmented != augment_recompute(node));
+ u32 subtree, max = node->val;
+ if (node->rb.rb_left) {
+ subtree = rb_entry(node->rb.rb_left, struct test_node,
+ rb)->augmented;
+ if (max < subtree)
+ max = subtree;
+ }
+ if (node->rb.rb_right) {
+ subtree = rb_entry(node->rb.rb_right, struct test_node,
+ rb)->augmented;
+ if (max < subtree)
+ max = subtree;
+ }
+ WARN_ON_ONCE(node->augmented != max);
}
}
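RB_DECLARE_CALLBACKS_MAX(), added earlier in this series, generates the propagate/copy/rotate callbacks so that the augmented field always holds the maximum of the per-node expression over the node's subtree; the deleted augment_recompute() is exactly what the generated code performs internally. Callers keep the usual augmented-insert pattern, sketched here:

    /* Sketch: the generated callbacks pair with the standard API. */
    rb_link_node(&node->rb, parent, link);
    rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);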
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 5c86ef4c899f..1c26c14ffbb9 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -6,7 +6,6 @@
*/
#include <linux/mm.h>
-#include <linux/quicklist.h>
#include <linux/cma.h>
void show_mem(unsigned int filter, nodemask_t *nodemask)
@@ -39,10 +38,6 @@ void show_mem(unsigned int filter, nodemask_t *nodemask)
#ifdef CONFIG_CMA
printk("%lu pages cma reserved\n", totalcma_pages);
#endif
-#ifdef CONFIG_QUICKLIST
- printk("%lu pages in pagetable cache\n",
- quicklist_total_size());
-#endif
#ifdef CONFIG_MEMORY_FAILURE
printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
diff --git a/lib/string.c b/lib/string.c
index 461fb620f85f..cd7a10c19210 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -173,8 +173,9 @@ EXPORT_SYMBOL(strlcpy);
* doesn't unnecessarily force the tail of the destination buffer to be
* zeroed. If zeroing is desired please use strscpy_pad().
*
- * Return: The number of characters copied (not including the trailing
- * %NUL) or -E2BIG if the destination buffer wasn't big enough.
+ * Returns:
+ * * The number of characters copied (not including the trailing %NUL)
+ * * -E2BIG if count is 0 or @src was truncated.
*/
ssize_t strscpy(char *dest, const char *src, size_t count)
{
@@ -182,7 +183,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
size_t max = count;
long res = 0;
- if (count == 0)
+ if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
return -E2BIG;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -253,8 +254,9 @@ EXPORT_SYMBOL(strscpy);
* For full explanation of why you may want to consider using the
* 'strscpy' functions please see the function docstring for strscpy().
*
- * Return: The number of characters copied (not including the trailing
- * %NUL) or -E2BIG if the destination buffer wasn't big enough.
+ * Returns:
+ * * The number of characters copied (not including the trailing %NUL)
+ * * -E2BIG if count is 0 or @src was truncated.
*/
ssize_t strscpy_pad(char *dest, const char *src, size_t count)
{
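The added WARN_ON_ONCE also rejects counts above INT_MAX, since a huge size almost always means a negative value was converted to size_t in the caller. The documented contract stays simple to consume; a hypothetical caller:

    char buf[16];
    ssize_t n = strscpy(buf, src, sizeof(buf));

    if (n == -E2BIG)        /* truncated: buf still holds 15 chars + NUL */
            pr_debug("source did not fit\n");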
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 023ba9f3b99f..dccb95af6003 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -6,6 +6,7 @@
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/mm.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
@@ -108,7 +109,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
return 0;
max_addr = user_addr_max();
- src_addr = (unsigned long)src;
+ src_addr = (unsigned long)untagged_addr(src);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
long retval;
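untagged_addr() strips an architecture's pointer tag before the range check against user_addr_max(); on arm64 with the tagged-address ABI it sign-extends from bit 55, and elsewhere it is a no-op. An illustrative model (the real definitions live in the arch headers):

    /* Illustrative only: clears a tag in bits 63..56 by sign-extending
     * from bit 55, so tagged user pointers pass the max_addr check. */
    static inline unsigned long untagged_addr_model(unsigned long addr)
    {
            return (unsigned long)((long)(addr << 8) >> 8);
    }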
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 7f2db3fe311f..28ff554a1be8 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -2,6 +2,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/uaccess.h>
+#include <linux/mm.h>
#include <asm/word-at-a-time.h>
@@ -109,7 +110,7 @@ long strnlen_user(const char __user *str, long count)
return 0;
max_addr = user_addr_max();
- src_addr = (unsigned long)str;
+ src_addr = (unsigned long)untagged_addr(str);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
long retval;
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index b63b367a94e8..49cc4d570a40 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -18,6 +18,9 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
/*
* Note: test functions are marked noinline so that their names appear in
@@ -337,6 +340,42 @@ static noinline void __init kmalloc_uaf2(void)
kfree(ptr2);
}
+static noinline void __init kfree_via_page(void)
+{
+ char *ptr;
+ size_t size = 8;
+ struct page *page;
+ unsigned long offset;
+
+ pr_info("invalid-free false positive (via page)\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ page = virt_to_page(ptr);
+ offset = offset_in_page(ptr);
+ kfree(page_address(page) + offset);
+}
+
+static noinline void __init kfree_via_phys(void)
+{
+ char *ptr;
+ size_t size = 8;
+ phys_addr_t phys;
+
+ pr_info("invalid-free false positive (via phys)\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ phys = virt_to_phys(ptr);
+ kfree(phys_to_virt(phys));
+}
+
static noinline void __init kmem_cache_oob(void)
{
char *p;
@@ -737,6 +776,8 @@ static int __init kmalloc_tests_init(void)
kmalloc_uaf();
kmalloc_uaf_memset();
kmalloc_uaf2();
+ kfree_via_page();
+ kfree_via_phys();
kmem_cache_oob();
memcg_accounted_kmem_cache();
kasan_stack_oob();
diff --git a/mm/Kconfig b/mm/Kconfig
index 2fe4902ad755..a5dae9a7eb51 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -273,11 +273,6 @@ config BOUNCE
by default when ZONE_DMA or HIGHMEM is selected, but you
may say n to override this.
-config NR_QUICK
- int
- depends on QUICKLIST
- default "1"
-
config VIRT_TO_BUS
bool
help
@@ -717,6 +712,17 @@ config GUP_BENCHMARK
config GUP_GET_PTE_LOW_HIGH
bool
+config READ_ONLY_THP_FOR_FS
+ bool "Read-only THP for filesystems (EXPERIMENTAL)"
+ depends on TRANSPARENT_HUGE_PAGECACHE && SHMEM
+
+ help
+ Allow khugepaged to put read-only file-backed pages in THP.
+
+ This is marked experimental because it is a new feature. Write
+ support of file THPs will be developed in the next few release
+ cycles.
+
config ARCH_HAS_PTE_SPECIAL
bool
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 82b6a20898bd..327b3ebf23bf 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -21,7 +21,9 @@ config DEBUG_PAGEALLOC
Also, the state of page tracking structures is checked more often as
pages are being allocated and freed, as unexpected state changes
often happen for same reasons as memory corruption (e.g. double free,
- use-after-free).
+ use-after-free). The error reports for these checks can be augmented
+ with stack traces of last allocation and freeing of the page, when
+ PAGE_OWNER is also selected and enabled on boot.
For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
fill the pages with poison patterns after free_pages() and verify
diff --git a/mm/Makefile b/mm/Makefile
index d0b295c3b764..d996846697ef 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
KCOV_INSTRUMENT_mmzone.o := n
KCOV_INSTRUMENT_vmstat.o := n
+CFLAGS_init-mm.o += $(call cc-disable-warning, override-init)
+CFLAGS_init-mm.o += $(call cc-disable-warning, initializer-overrides)
+
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := highmem.o memory.o mincore.o \
mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
@@ -72,7 +75,6 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_MEMTEST) += memtest.o
obj-$(CONFIG_MIGRATION) += migrate.o
-obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
diff --git a/mm/compaction.c b/mm/compaction.c
index 952dc2fb24e5..ce08b39d85d4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -969,7 +969,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* is safe to read and it's 0 for tail pages.
*/
if (unlikely(PageCompound(page))) {
- low_pfn += (1UL << compound_order(page)) - 1;
+ low_pfn += compound_nr(page) - 1;
goto isolate_fail;
}
}
@@ -1737,8 +1737,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
* starting at the block pointed to by the migrate scanner pfn within
* compact_control.
*/
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
- struct compact_control *cc)
+static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
{
unsigned long block_start_pfn;
unsigned long block_end_pfn;
@@ -1756,8 +1755,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
*/
low_pfn = fast_find_migrateblock(cc);
block_start_pfn = pageblock_start_pfn(low_pfn);
- if (block_start_pfn < zone->zone_start_pfn)
- block_start_pfn = zone->zone_start_pfn;
+ if (block_start_pfn < cc->zone->zone_start_pfn)
+ block_start_pfn = cc->zone->zone_start_pfn;
/*
* fast_find_migrateblock marks a pageblock skipped so to avoid
@@ -1787,8 +1786,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
cond_resched();
- page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
- zone);
+ page = pageblock_pfn_to_page(block_start_pfn,
+ block_end_pfn, cc->zone);
if (!page)
continue;
@@ -2078,6 +2077,17 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
const bool sync = cc->mode != MIGRATE_ASYNC;
bool update_cached;
+ /*
+ * These counters track activities during zone compaction. Initialize
+ * them before compacting a new zone.
+ */
+ cc->total_migrate_scanned = 0;
+ cc->total_free_scanned = 0;
+ cc->nr_migratepages = 0;
+ cc->nr_freepages = 0;
+ INIT_LIST_HEAD(&cc->freepages);
+ INIT_LIST_HEAD(&cc->migratepages);
+
cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
cc->classzone_idx);
@@ -2158,7 +2168,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
cc->rescan = true;
}
- switch (isolate_migratepages(cc->zone, cc)) {
+ switch (isolate_migratepages(cc)) {
case ISOLATE_ABORT:
ret = COMPACT_CONTENDED;
putback_movable_pages(&cc->migratepages);
@@ -2281,10 +2291,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
{
enum compact_result ret;
struct compact_control cc = {
- .nr_freepages = 0,
- .nr_migratepages = 0,
- .total_migrate_scanned = 0,
- .total_free_scanned = 0,
.order = order,
.search_order = order,
.gfp_mask = gfp_mask,
@@ -2305,8 +2311,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
if (capture)
current->capture_control = &capc;
- INIT_LIST_HEAD(&cc.freepages);
- INIT_LIST_HEAD(&cc.migratepages);
ret = compact_zone(&cc, &capc);
@@ -2408,8 +2412,6 @@ static void compact_node(int nid)
struct zone *zone;
struct compact_control cc = {
.order = -1,
- .total_migrate_scanned = 0,
- .total_free_scanned = 0,
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
.whole_zone = true,
@@ -2423,11 +2425,7 @@ static void compact_node(int nid)
if (!populated_zone(zone))
continue;
- cc.nr_freepages = 0;
- cc.nr_migratepages = 0;
cc.zone = zone;
- INIT_LIST_HEAD(&cc.freepages);
- INIT_LIST_HEAD(&cc.migratepages);
compact_zone(&cc, NULL);
@@ -2529,8 +2527,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
struct compact_control cc = {
.order = pgdat->kcompactd_max_order,
.search_order = pgdat->kcompactd_max_order,
- .total_migrate_scanned = 0,
- .total_free_scanned = 0,
.classzone_idx = pgdat->kcompactd_classzone_idx,
.mode = MIGRATE_SYNC_LIGHT,
.ignore_skip_hint = false,
@@ -2554,16 +2550,10 @@ static void kcompactd_do_work(pg_data_t *pgdat)
COMPACT_CONTINUE)
continue;
- cc.nr_freepages = 0;
- cc.nr_migratepages = 0;
- cc.total_migrate_scanned = 0;
- cc.total_free_scanned = 0;
- cc.zone = zone;
- INIT_LIST_HEAD(&cc.freepages);
- INIT_LIST_HEAD(&cc.migratepages);
-
if (kthread_should_stop())
return;
+
+ cc.zone = zone;
status = compact_zone(&cc, NULL);
if (status == COMPACT_SUCCESS) {
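With the per-zone reset centralised in compact_zone(), the call sites shrink to policy-only initialisers; a condensed sketch of the resulting pattern:

    struct compact_control cc = {
            .order = order,                 /* policy fields only */
            .mode  = MIGRATE_SYNC_LIGHT,
    };

    cc.zone = zone;
    compact_zone(&cc, NULL);                /* counters and lists reset inside */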
diff --git a/mm/filemap.c b/mm/filemap.c
index 40667c2f3383..1146fcfa3215 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -126,7 +126,7 @@ static void page_cache_delete(struct address_space *mapping,
/* hugetlb pages are represented by a single entry in the xarray */
if (!PageHuge(page)) {
xas_set_order(&xas, page->index, compound_order(page));
- nr = 1U << compound_order(page);
+ nr = compound_nr(page);
}
VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -203,8 +203,9 @@ static void unaccount_page_cache_page(struct address_space *mapping,
__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
if (PageTransHuge(page))
__dec_node_page_state(page, NR_SHMEM_THPS);
- } else {
- VM_BUG_ON_PAGE(PageTransHuge(page), page);
+ } else if (PageTransHuge(page)) {
+ __dec_node_page_state(page, NR_FILE_THPS);
+ filemap_nr_thps_dec(mapping);
}
/*
@@ -281,11 +282,11 @@ EXPORT_SYMBOL(delete_from_page_cache);
* @pvec: pagevec with pages to delete
*
* The function walks over mapping->i_pages and removes pages passed in @pvec
- * from the mapping. The function expects @pvec to be sorted by page index.
+ * from the mapping. The function expects @pvec to be sorted by page index
+ * and is optimised for it to be dense.
* It tolerates holes in @pvec (mapping entries at those indices are not
* modified). The function expects only THP head pages to be present in the
- * @pvec and takes care to delete all corresponding tail pages from the
- * mapping as well.
+ * @pvec.
*
* The function expects the i_pages lock to be held.
*/
@@ -294,40 +295,43 @@ static void page_cache_delete_batch(struct address_space *mapping,
{
XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
int total_pages = 0;
- int i = 0, tail_pages = 0;
+ int i = 0;
struct page *page;
mapping_set_update(&xas, mapping);
xas_for_each(&xas, page, ULONG_MAX) {
- if (i >= pagevec_count(pvec) && !tail_pages)
+ if (i >= pagevec_count(pvec))
break;
+
+ /* A swap/dax/shadow entry got inserted? Skip it. */
if (xa_is_value(page))
continue;
- if (!tail_pages) {
- /*
- * Some page got inserted in our range? Skip it. We
- * have our pages locked so they are protected from
- * being removed.
- */
- if (page != pvec->pages[i]) {
- VM_BUG_ON_PAGE(page->index >
- pvec->pages[i]->index, page);
- continue;
- }
- WARN_ON_ONCE(!PageLocked(page));
- if (PageTransHuge(page) && !PageHuge(page))
- tail_pages = HPAGE_PMD_NR - 1;
+ /*
+ * A page got inserted in our range? Skip it. We have our
+ * pages locked so they are protected from being removed.
+ * If we see a page whose index is higher than ours, it
+ * means our page has been removed, which shouldn't be
+ * possible because we're holding the PageLock.
+ */
+ if (page != pvec->pages[i]) {
+ VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
+ page);
+ continue;
+ }
+
+ WARN_ON_ONCE(!PageLocked(page));
+
+ if (page->index == xas.xa_index)
page->mapping = NULL;
- /*
- * Leave page->index set: truncation lookup relies
- * upon it
- */
+ /* Leave page->index set: truncation lookup relies on it */
+
+ /*
+ * Move to the next page in the vector if this is a regular
+ * page or the index is of the last sub-page of this compound
+ * page.
+ */
+ if (page->index + compound_nr(page) - 1 == xas.xa_index)
i++;
- } else {
- VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
- != pvec->pages[i]->index, page);
- tail_pages--;
- }
xas_store(&xas, NULL);
total_pages++;
}
@@ -408,7 +412,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
.range_end = end,
};
- if (!mapping_cap_writeback_dirty(mapping))
+ if (!mapping_cap_writeback_dirty(mapping) ||
+ !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
return 0;
wbc_attach_fdatawrite_inode(&wbc, mapping->host);
@@ -617,10 +622,13 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping)
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
+/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
- return (!dax_mapping(mapping) && mapping->nrpages) ||
- (dax_mapping(mapping) && mapping->nrexceptional);
+ if (dax_mapping(mapping))
+ return mapping->nrexceptional;
+
+ return mapping->nrpages;
}
int filemap_write_and_wait(struct address_space *mapping)
@@ -1516,7 +1524,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
XA_STATE(xas, &mapping->i_pages, offset);
- struct page *head, *page;
+ struct page *page;
rcu_read_lock();
repeat:
@@ -1531,25 +1539,19 @@ repeat:
if (!page || xa_is_value(page))
goto out;
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ if (!page_cache_get_speculative(page))
goto repeat;
- /* The page was split under us? */
- if (compound_head(page) != head) {
- put_page(head);
- goto repeat;
- }
-
/*
- * Has the page moved?
+ * Has the page moved or been split?
* This is part of the lockless pagecache protocol. See
* include/linux/pagemap.h for details.
*/
if (unlikely(page != xas_reload(&xas))) {
- put_page(head);
+ put_page(page);
goto repeat;
}
+ page = find_subpage(page, offset);
out:
rcu_read_unlock();
@@ -1646,7 +1648,7 @@ repeat:
}
/* Has the page been truncated? */
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(compound_head(page)->mapping != mapping)) {
unlock_page(page);
put_page(page);
goto repeat;
@@ -1731,7 +1733,6 @@ unsigned find_get_entries(struct address_space *mapping,
rcu_read_lock();
xas_for_each(&xas, page, ULONG_MAX) {
- struct page *head;
if (xas_retry(&xas, page))
continue;
/*
@@ -1742,17 +1743,13 @@ unsigned find_get_entries(struct address_space *mapping,
if (xa_is_value(page))
goto export;
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ if (!page_cache_get_speculative(page))
goto retry;
- /* The page was split under us? */
- if (compound_head(page) != head)
- goto put_page;
-
- /* Has the page moved? */
+ /* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas)))
goto put_page;
+ page = find_subpage(page, xas.xa_index);
export:
indices[ret] = xas.xa_index;
@@ -1761,7 +1758,7 @@ export:
break;
continue;
put_page:
- put_page(head);
+ put_page(page);
retry:
xas_reset(&xas);
}
@@ -1803,33 +1800,27 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
rcu_read_lock();
xas_for_each(&xas, page, end) {
- struct page *head;
if (xas_retry(&xas, page))
continue;
/* Skip over shadow, swap and DAX entries */
if (xa_is_value(page))
continue;
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ if (!page_cache_get_speculative(page))
goto retry;
- /* The page was split under us? */
- if (compound_head(page) != head)
- goto put_page;
-
- /* Has the page moved? */
+ /* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas)))
goto put_page;
- pages[ret] = page;
+ pages[ret] = find_subpage(page, xas.xa_index);
if (++ret == nr_pages) {
*start = xas.xa_index + 1;
goto out;
}
continue;
put_page:
- put_page(head);
+ put_page(page);
retry:
xas_reset(&xas);
}
@@ -1874,7 +1865,6 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
rcu_read_lock();
for (page = xas_load(&xas); page; page = xas_next(&xas)) {
- struct page *head;
if (xas_retry(&xas, page))
continue;
/*
@@ -1884,24 +1874,19 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
if (xa_is_value(page))
break;
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ if (!page_cache_get_speculative(page))
goto retry;
- /* The page was split under us? */
- if (compound_head(page) != head)
- goto put_page;
-
- /* Has the page moved? */
+ /* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas)))
goto put_page;
- pages[ret] = page;
+ pages[ret] = find_subpage(page, xas.xa_index);
if (++ret == nr_pages)
break;
continue;
put_page:
- put_page(head);
+ put_page(page);
retry:
xas_reset(&xas);
}
@@ -1937,7 +1922,6 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
rcu_read_lock();
xas_for_each_marked(&xas, page, end, tag) {
- struct page *head;
if (xas_retry(&xas, page))
continue;
/*
@@ -1948,26 +1932,21 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
if (xa_is_value(page))
continue;
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ if (!page_cache_get_speculative(page))
goto retry;
- /* The page was split under us? */
- if (compound_head(page) != head)
- goto put_page;
-
- /* Has the page moved? */
+ /* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas)))
goto put_page;
- pages[ret] = page;
+ pages[ret] = find_subpage(page, xas.xa_index);
if (++ret == nr_pages) {
*index = xas.xa_index + 1;
goto out;
}
continue;
put_page:
- put_page(head);
+ put_page(page);
retry:
xas_reset(&xas);
}
@@ -2562,12 +2541,12 @@ retry_find:
goto out_retry;
/* Did it get truncated? */
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(compound_head(page)->mapping != mapping)) {
unlock_page(page);
put_page(page);
goto retry_find;
}
- VM_BUG_ON_PAGE(page->index != offset, page);
+ VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
/*
* We have a locked page in the page cache, now we need to check
@@ -2648,7 +2627,7 @@ void filemap_map_pages(struct vm_fault *vmf,
pgoff_t last_pgoff = start_pgoff;
unsigned long max_idx;
XA_STATE(xas, &mapping->i_pages, start_pgoff);
- struct page *head, *page;
+ struct page *page;
rcu_read_lock();
xas_for_each(&xas, page, end_pgoff) {
@@ -2657,24 +2636,19 @@ void filemap_map_pages(struct vm_fault *vmf,
if (xa_is_value(page))
goto next;
- head = compound_head(page);
-
/*
* Check for a locked page first, as a speculative
* reference may adversely influence page migration.
*/
- if (PageLocked(head))
+ if (PageLocked(page))
goto next;
- if (!page_cache_get_speculative(head))
+ if (!page_cache_get_speculative(page))
goto next;
- /* The page was split under us? */
- if (compound_head(page) != head)
- goto skip;
-
- /* Has the page moved? */
+ /* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas)))
goto skip;
+ page = find_subpage(page, xas.xa_index);
if (!PageUptodate(page) ||
PageReadahead(page) ||
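All of these lookups lean on find_subpage(): with only head pages stored in the XArray, the helper maps the queried index back onto the right tail page, while hugetlbfs keeps returning the head. Its shape, per include/linux/pagemap.h in this series:

    static inline struct page *find_subpage(struct page *head, pgoff_t index)
    {
            /* hugetlbfs is indexed by huge page size and wants the head */
            if (PageHuge(head))
                    return head;

            return head + (index & (compound_nr(head) - 1));
    }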
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index c64dca6e27c2..c431ca81dad5 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -46,6 +46,8 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
nr_frames = vec->nr_allocated;
+ start = untagged_addr(start);
+
down_read(&mm->mmap_sem);
locked = 1;
vma = find_vma_intersection(mm, start, start + 1);
diff --git a/mm/gup.c b/mm/gup.c
index 98f13ab37bac..23a9f9c9d377 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -29,85 +29,70 @@ struct follow_page_context {
unsigned int page_mask;
};
-typedef int (*set_dirty_func_t)(struct page *page);
-
-static void __put_user_pages_dirty(struct page **pages,
- unsigned long npages,
- set_dirty_func_t sdf)
-{
- unsigned long index;
-
- for (index = 0; index < npages; index++) {
- struct page *page = compound_head(pages[index]);
-
- /*
- * Checking PageDirty at this point may race with
- * clear_page_dirty_for_io(), but that's OK. Two key cases:
- *
- * 1) This code sees the page as already dirty, so it skips
- * the call to sdf(). That could happen because
- * clear_page_dirty_for_io() called page_mkclean(),
- * followed by set_page_dirty(). However, now the page is
- * going to get written back, which meets the original
- * intention of setting it dirty, so all is well:
- * clear_page_dirty_for_io() goes on to call
- * TestClearPageDirty(), and write the page back.
- *
- * 2) This code sees the page as clean, so it calls sdf().
- * The page stays dirty, despite being written back, so it
- * gets written back again in the next writeback cycle.
- * This is harmless.
- */
- if (!PageDirty(page))
- sdf(page);
-
- put_user_page(page);
- }
-}
-
/**
- * put_user_pages_dirty() - release and dirty an array of gup-pinned pages
- * @pages: array of pages to be marked dirty and released.
+ * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
+ * @pages: array of pages to be maybe marked dirty, and definitely released.
* @npages: number of pages in the @pages array.
+ * @make_dirty: whether to mark the pages dirty
*
* "gup-pinned page" refers to a page that has had one of the get_user_pages()
* variants called on that page.
*
* For each page in the @pages array, make that page (or its head page, if a
- * compound page) dirty, if it was previously listed as clean. Then, release
- * the page using put_user_page().
+ * compound page) dirty, if @make_dirty is true, and if the page was previously
+ * listed as clean. In any case, releases all pages using put_user_page(),
+ * possibly via put_user_pages(), for the non-dirty case.
*
* Please see the put_user_page() documentation for details.
*
- * set_page_dirty(), which does not lock the page, is used here.
- * Therefore, it is the caller's responsibility to ensure that this is
- * safe. If not, then put_user_pages_dirty_lock() should be called instead.
+ * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
+ * required, then the caller should a) verify that this is really correct,
+ * because _lock() is usually required, and b) hand code it:
+ * set_page_dirty_lock(), put_user_page().
*
*/
-void put_user_pages_dirty(struct page **pages, unsigned long npages)
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+ bool make_dirty)
{
- __put_user_pages_dirty(pages, npages, set_page_dirty);
-}
-EXPORT_SYMBOL(put_user_pages_dirty);
+ unsigned long index;
-/**
- * put_user_pages_dirty_lock() - release and dirty an array of gup-pinned pages
- * @pages: array of pages to be marked dirty and released.
- * @npages: number of pages in the @pages array.
- *
- * For each page in the @pages array, make that page (or its head page, if a
- * compound page) dirty, if it was previously listed as clean. Then, release
- * the page using put_user_page().
- *
- * Please see the put_user_page() documentation for details.
- *
- * This is just like put_user_pages_dirty(), except that it invokes
- * set_page_dirty_lock(), instead of set_page_dirty().
- *
- */
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages)
-{
- __put_user_pages_dirty(pages, npages, set_page_dirty_lock);
+ /*
+ * TODO: this can be optimized for huge pages: if a series of pages is
+ * physically contiguous and part of the same compound page, then a
+ * single operation to the head page should suffice.
+ */
+
+ if (!make_dirty) {
+ put_user_pages(pages, npages);
+ return;
+ }
+
+ for (index = 0; index < npages; index++) {
+ struct page *page = compound_head(pages[index]);
+ /*
+ * Checking PageDirty at this point may race with
+ * clear_page_dirty_for_io(), but that's OK. Two key
+ * cases:
+ *
+ * 1) This code sees the page as already dirty, so it
+ * skips the call to set_page_dirty(). That could happen
+ * because clear_page_dirty_for_io() called
+ * page_mkclean(), followed by set_page_dirty().
+ * However, now the page is going to get written back,
+ * which meets the original intention of setting it
+ * dirty, so all is well: clear_page_dirty_for_io() goes
+ * on to call TestClearPageDirty(), and write the page
+ * back.
+ *
+ * 2) This code sees the page as clean, so it calls
+ * set_page_dirty(). The page stays dirty, despite being
+ * written back, so it gets written back again in the
+ * next writeback cycle. This is harmless.
+ */
+ if (!PageDirty(page))
+ set_page_dirty_lock(page);
+ put_user_page(page);
+ }
}
EXPORT_SYMBOL(put_user_pages_dirty_lock);
@@ -399,7 +384,7 @@ retry_locked:
spin_unlock(ptl);
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
}
- if (flags & FOLL_SPLIT) {
+ if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
int ret;
page = pmd_page(*pmd);
if (is_huge_zero_page(page)) {
@@ -408,7 +393,7 @@ retry_locked:
split_huge_pmd(vma, pmd, address);
if (pmd_trans_unstable(pmd))
ret = -EBUSY;
- } else {
+ } else if (flags & FOLL_SPLIT) {
if (unlikely(!try_get_page(page))) {
spin_unlock(ptl);
return ERR_PTR(-ENOMEM);
@@ -420,6 +405,10 @@ retry_locked:
put_page(page);
if (pmd_none(*pmd))
return no_page_table(vma, flags);
+ } else { /* flags & FOLL_SPLIT_PMD */
+ spin_unlock(ptl);
+ split_huge_pmd(vma, pmd, address);
+ ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
}
return ret ? ERR_PTR(ret) :
@@ -799,6 +788,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (!nr_pages)
return 0;
+ start = untagged_addr(start);
+
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
/*
@@ -961,6 +952,8 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct *vma;
vm_fault_t ret, major = 0;
+ address = untagged_addr(address);
+
if (unlocked)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
@@ -1460,7 +1453,7 @@ check_again:
* gup may start from a tail page. Advance step by the left
* part.
*/
- step = (1 << compound_order(head)) - (pages[i] - head);
+ step = compound_nr(head) - (pages[i] - head);
/*
* If we get a page from the CMA zone, since we are going to
* be pinning these entries, we might as well move them out
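Callers that previously picked between put_user_pages(), put_user_pages_dirty() and put_user_pages_dirty_lock() now pass the dirty decision as a flag; a typical release path collapses to a sketch like:

    /* Hypothetical caller: one call covers the clean and dirty cases. */
    static void example_release(struct page **pages, unsigned long npages,
                                bool dirtied)
    {
            put_user_pages_dirty_lock(pages, npages, dirtied);
    }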
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index de1f15969e27..73fc517c08d2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -496,11 +496,25 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
return pmd;
}
-static inline struct list_head *page_deferred_list(struct page *page)
+#ifdef CONFIG_MEMCG
+static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
- /* ->lru in the tail pages is occupied by compound_head. */
- return &page[2].deferred_list;
+ struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
+ struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+
+ if (memcg)
+ return &memcg->deferred_split_queue;
+ else
+ return &pgdat->deferred_split_queue;
+}
+#else
+static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+{
+ struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+
+ return &pgdat->deferred_split_queue;
}
+#endif
void prep_transhuge_page(struct page *page)
{
@@ -2497,6 +2511,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
struct page *head = compound_head(page);
pg_data_t *pgdat = page_pgdat(head);
struct lruvec *lruvec;
+ struct address_space *swap_cache = NULL;
+ unsigned long offset = 0;
int i;
lruvec = mem_cgroup_page_lruvec(head, pgdat);
@@ -2504,6 +2520,14 @@ static void __split_huge_page(struct page *page, struct list_head *list,
/* complete memcg works before add pages to LRU */
mem_cgroup_split_huge_fixup(head);
+ if (PageAnon(head) && PageSwapCache(head)) {
+ swp_entry_t entry = { .val = page_private(head) };
+
+ offset = swp_offset(entry);
+ swap_cache = swap_address_space(entry);
+ xa_lock(&swap_cache->i_pages);
+ }
+
for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
__split_huge_page_tail(head, i, lruvec, list);
/* Some pages can be beyond i_size: drop them from page cache */
@@ -2513,6 +2537,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
shmem_uncharge(head->mapping->host, 1);
put_page(head + i);
+ } else if (!PageAnon(page)) {
+ __xa_store(&head->mapping->i_pages, head[i].index,
+ head + i, 0);
+ } else if (swap_cache) {
+ __xa_store(&swap_cache->i_pages, offset + i,
+ head + i, 0);
}
}
@@ -2523,10 +2553,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
/* Additional pin to swap cache */
- if (PageSwapCache(head))
+ if (PageSwapCache(head)) {
page_ref_add(head, 2);
- else
+ xa_unlock(&swap_cache->i_pages);
+ } else {
page_ref_inc(head);
+ }
} else {
/* Additional pin to page cache */
page_ref_add(head, 2);
@@ -2673,6 +2705,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
{
struct page *head = compound_head(page);
struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
+ struct deferred_split *ds_queue = get_deferred_split_queue(page);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
int count, mapcount, extra_pins, ret;
@@ -2759,17 +2792,17 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
}
/* Prevent deferred_split_scan() touching ->_refcount */
- spin_lock(&pgdata->split_queue_lock);
+ spin_lock(&ds_queue->split_queue_lock);
count = page_count(head);
mapcount = total_mapcount(head);
if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(head))) {
- pgdata->split_queue_len--;
+ ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
}
if (mapping)
__dec_node_page_state(page, NR_SHMEM_THPS);
- spin_unlock(&pgdata->split_queue_lock);
+ spin_unlock(&ds_queue->split_queue_lock);
__split_huge_page(page, list, end, flags);
if (PageSwapCache(head)) {
swp_entry_t entry = { .val = page_private(head) };
@@ -2786,7 +2819,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
dump_page(page, "total_mapcount(head) > 0");
BUG();
}
- spin_unlock(&pgdata->split_queue_lock);
+ spin_unlock(&ds_queue->split_queue_lock);
fail: if (mapping)
xa_unlock(&mapping->i_pages);
spin_unlock_irqrestore(&pgdata->lru_lock, flags);
@@ -2808,53 +2841,86 @@ out:
void free_transhuge_page(struct page *page)
{
- struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
+ struct deferred_split *ds_queue = get_deferred_split_queue(page);
unsigned long flags;
- spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+ spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (!list_empty(page_deferred_list(page))) {
- pgdata->split_queue_len--;
+ ds_queue->split_queue_len--;
list_del(page_deferred_list(page));
}
- spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
+ spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
free_compound_page(page);
}
void deferred_split_huge_page(struct page *page)
{
- struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
+ struct deferred_split *ds_queue = get_deferred_split_queue(page);
+#ifdef CONFIG_MEMCG
+ struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
+#endif
unsigned long flags;
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
- spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+ /*
+ * The try_to_unmap() in page reclaim path might reach here too,
+ * this may cause a race condition to corrupt deferred split queue.
+ * And, if page reclaim is already handling the same page, it is
+ * unnecessary to handle it again in shrinker.
+ *
+ * Check PageSwapCache to determine if the page is being
+ * handled by page reclaim since THP swap would add the page into
+ * swap cache before calling try_to_unmap().
+ */
+ if (PageSwapCache(page))
+ return;
+
+ spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (list_empty(page_deferred_list(page))) {
count_vm_event(THP_DEFERRED_SPLIT_PAGE);
- list_add_tail(page_deferred_list(page), &pgdata->split_queue);
- pgdata->split_queue_len++;
+ list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
+ ds_queue->split_queue_len++;
+#ifdef CONFIG_MEMCG
+ if (memcg)
+ memcg_set_shrinker_bit(memcg, page_to_nid(page),
+ deferred_split_shrinker.id);
+#endif
}
- spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
+ spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}
static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc)
{
struct pglist_data *pgdata = NODE_DATA(sc->nid);
- return READ_ONCE(pgdata->split_queue_len);
+ struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
+
+#ifdef CONFIG_MEMCG
+ if (sc->memcg)
+ ds_queue = &sc->memcg->deferred_split_queue;
+#endif
+ return READ_ONCE(ds_queue->split_queue_len);
}
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
struct pglist_data *pgdata = NODE_DATA(sc->nid);
+ struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
unsigned long flags;
LIST_HEAD(list), *pos, *next;
struct page *page;
int split = 0;
- spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+#ifdef CONFIG_MEMCG
+ if (sc->memcg)
+ ds_queue = &sc->memcg->deferred_split_queue;
+#endif
+
+ spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
/* Take pin on all head pages to avoid freeing them under us */
- list_for_each_safe(pos, next, &pgdata->split_queue) {
+ list_for_each_safe(pos, next, &ds_queue->split_queue) {
page = list_entry((void *)pos, struct page, mapping);
page = compound_head(page);
if (get_page_unless_zero(page)) {
@@ -2862,12 +2928,12 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
} else {
/* We lost race with put_compound_page() */
list_del_init(page_deferred_list(page));
- pgdata->split_queue_len--;
+ ds_queue->split_queue_len--;
}
if (!--sc->nr_to_scan)
break;
}
- spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
+ spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
list_for_each_safe(pos, next, &list) {
page = list_entry((void *)pos, struct page, mapping);
@@ -2881,15 +2947,15 @@ next:
put_page(page);
}
- spin_lock_irqsave(&pgdata->split_queue_lock, flags);
- list_splice_tail(&list, &pgdata->split_queue);
- spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
+ spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ list_splice_tail(&list, &ds_queue->split_queue);
+ spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
/*
* Stop shrinker if we didn't split any page, but the queue is empty.
* This can happen if pages were freed under us.
*/
- if (!split && list_empty(&pgdata->split_queue))
+ if (!split && list_empty(&ds_queue->split_queue))
return SHRINK_STOP;
return split;
}
@@ -2898,7 +2964,8 @@ static struct shrinker deferred_split_shrinker = {
.count_objects = deferred_split_count,
.scan_objects = deferred_split_scan,
.seeks = DEFAULT_SEEKS,
- .flags = SHRINKER_NUMA_AWARE,
+ .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
+ SHRINKER_NONSLAB,
};
#ifdef CONFIG_DEBUG_FS
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6d7296dd11b8..ef37c85423a5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1405,12 +1405,25 @@ pgoff_t __basepage_index(struct page *page)
}
static struct page *alloc_buddy_huge_page(struct hstate *h,
- gfp_t gfp_mask, int nid, nodemask_t *nmask)
+ gfp_t gfp_mask, int nid, nodemask_t *nmask,
+ nodemask_t *node_alloc_noretry)
{
int order = huge_page_order(h);
struct page *page;
+ bool alloc_try_hard = true;
- gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+ /*
+ * By default we always try hard to allocate the page with
+ * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
+ * a loop (to adjust global huge page counts) and previous allocation
+ * failed, do not continue to try hard on the same node. Use the
+ * node_alloc_noretry bitmap to manage this state information.
+ */
+ if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
+ alloc_try_hard = false;
+ gfp_mask |= __GFP_COMP|__GFP_NOWARN;
+ if (alloc_try_hard)
+ gfp_mask |= __GFP_RETRY_MAYFAIL;
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
@@ -1419,6 +1432,22 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
else
__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+ /*
+ * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
+ * indicates an overall state change. Clear bit so that we resume
+ * normal 'try hard' allocations.
+ */
+ if (node_alloc_noretry && page && !alloc_try_hard)
+ node_clear(nid, *node_alloc_noretry);
+
+ /*
+ * If we tried hard to get a page but failed, set bit so that
+ * subsequent attempts will not try as hard until there is an
+ * overall state change.
+ */
+ if (node_alloc_noretry && !page && alloc_try_hard)
+ node_set(nid, *node_alloc_noretry);
+
return page;
}
@@ -1427,7 +1456,8 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
* should use this function to get new hugetlb pages
*/
static struct page *alloc_fresh_huge_page(struct hstate *h,
- gfp_t gfp_mask, int nid, nodemask_t *nmask)
+ gfp_t gfp_mask, int nid, nodemask_t *nmask,
+ nodemask_t *node_alloc_noretry)
{
struct page *page;
@@ -1435,7 +1465,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
else
page = alloc_buddy_huge_page(h, gfp_mask,
- nid, nmask);
+ nid, nmask, node_alloc_noretry);
if (!page)
return NULL;
@@ -1450,14 +1480,16 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
* Allocates a fresh page to the hugetlb allocator pool in the node interleaved
* manner.
*/
-static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
+ nodemask_t *node_alloc_noretry)
{
struct page *page;
int nr_nodes, node;
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
- page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
+ page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
+ node_alloc_noretry);
if (page)
break;
}
@@ -1601,7 +1633,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
goto out_unlock;
spin_unlock(&hugetlb_lock);
- page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
+ page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
if (!page)
return NULL;
@@ -1637,7 +1669,7 @@ struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
if (hstate_is_gigantic(h))
return NULL;
- page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
+ page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
if (!page)
return NULL;
@@ -2207,13 +2239,33 @@ static void __init gather_bootmem_prealloc(void)
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
unsigned long i;
+ nodemask_t *node_alloc_noretry;
+
+ if (!hstate_is_gigantic(h)) {
+ /*
+ * Bit mask controlling how hard we retry per-node allocations.
+ * Ignore errors as lower level routines can deal with
+ * node_alloc_noretry == NULL. If this kmalloc fails at boot
+ * time, we are likely in bigger trouble.
+ */
+ node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
+ GFP_KERNEL);
+ } else {
+ /* allocations done at boot time */
+ node_alloc_noretry = NULL;
+ }
+
+ /* bit mask controlling how hard we retry per-node allocations */
+ if (node_alloc_noretry)
+ nodes_clear(*node_alloc_noretry);
for (i = 0; i < h->max_huge_pages; ++i) {
if (hstate_is_gigantic(h)) {
if (!alloc_bootmem_huge_page(h))
break;
} else if (!alloc_pool_huge_page(h,
- &node_states[N_MEMORY]))
+ &node_states[N_MEMORY],
+ node_alloc_noretry))
break;
cond_resched();
}
@@ -2225,6 +2277,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
h->max_huge_pages, buf, i);
h->max_huge_pages = i;
}
+
+ kfree(node_alloc_noretry);
}
static void __init hugetlb_init_hstates(void)
@@ -2323,6 +2377,17 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
+ NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
+
+ /*
+ * Bit mask controlling how hard we retry per-node allocations.
+ * If we can not allocate the bit mask, do not attempt to allocate
+ * the requested huge pages.
+ */
+ if (node_alloc_noretry)
+ nodes_clear(*node_alloc_noretry);
+ else
+ return -ENOMEM;
spin_lock(&hugetlb_lock);
@@ -2356,6 +2421,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
if (count > persistent_huge_pages(h)) {
spin_unlock(&hugetlb_lock);
+ NODEMASK_FREE(node_alloc_noretry);
return -EINVAL;
}
/* Fall through to decrease pool */
@@ -2388,7 +2454,8 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
/* yield cpu to avoid soft lockup */
cond_resched();
- ret = alloc_pool_huge_page(h, nodes_allowed);
+ ret = alloc_pool_huge_page(h, nodes_allowed,
+ node_alloc_noretry);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
@@ -2429,6 +2496,8 @@ out:
h->max_huge_pages = persistent_huge_pages(h);
spin_unlock(&hugetlb_lock);
+ NODEMASK_FREE(node_alloc_noretry);
+
return 0;
}
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 68c2f2f3c05b..f1930fa0b445 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -139,7 +139,7 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
if (!page_hcg || page_hcg != h_cg)
goto out;
- nr_pages = 1 << compound_order(page);
+ nr_pages = compound_nr(page);
if (!parent) {
parent = root_h_cgroup;
/* root has no limit */
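compound_nr(), like page_size(), comes from earlier in this series and names the open-coded '1 << compound_order(page)'. Its shape, per include/linux/mm.h:

    /* Number of base pages in this (possibly compound) page. */
    static inline unsigned long compound_nr(struct page *page)
    {
            return 1UL << compound_order(page);
    }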
diff --git a/mm/init-mm.c b/mm/init-mm.c
index a787a319211e..fb1e15028ef0 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -35,6 +35,6 @@ struct mm_struct init_mm = {
.arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
.user_ns = &init_user_ns,
- .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
+ .cpu_bitmap = CPU_BITS_NONE,
INIT_MM_CONTEXT(init_mm)
};
diff --git a/mm/internal.h b/mm/internal.h
index e32390802fd3..0d5f720c75ab 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -39,7 +39,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf);
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
-static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
+static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 95d16a42db6b..6814d6d6a023 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -304,7 +304,6 @@ size_t kasan_metadata_size(struct kmem_cache *cache)
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
const void *object)
{
- BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
return (void *)object + cache->kasan_info.alloc_meta_offset;
}
@@ -315,14 +314,31 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
return (void *)object + cache->kasan_info.free_meta_offset;
}
+
+static void kasan_set_free_info(struct kmem_cache *cache,
+ void *object, u8 tag)
+{
+ struct kasan_alloc_meta *alloc_meta;
+ u8 idx = 0;
+
+ alloc_meta = get_alloc_info(cache, object);
+
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+ idx = alloc_meta->free_track_idx;
+ alloc_meta->free_pointer_tag[idx] = tag;
+ alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
+#endif
+
+ set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
+}
+
void kasan_poison_slab(struct page *page)
{
unsigned long i;
- for (i = 0; i < (1 << compound_order(page)); i++)
+ for (i = 0; i < compound_nr(page); i++)
page_kasan_tag_reset(page + i);
- kasan_poison_shadow(page_address(page),
- PAGE_SIZE << compound_order(page),
+ kasan_poison_shadow(page_address(page), page_size(page),
KASAN_KMALLOC_REDZONE);
}
@@ -452,7 +468,8 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
unlikely(!(cache->flags & SLAB_KASAN)))
return false;
- set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
+ kasan_set_free_info(cache, object, tag);
+
quarantine_put(get_free_info(cache, object), cache);
return IS_ENABLED(CONFIG_KASAN_GENERIC);
@@ -524,7 +541,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
page = virt_to_page(ptr);
redzone_start = round_up((unsigned long)(ptr + size),
KASAN_SHADOW_SCALE_SIZE);
- redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+ redzone_end = (unsigned long)ptr + page_size(page);
kasan_unpoison_shadow(ptr, size);
kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -560,8 +577,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
kasan_report_invalid_free(ptr, ip);
return;
}
- kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
- KASAN_FREE_PAGE);
+ kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
} else {
__kasan_slab_free(page->slab_cache, ptr, ip, false);
}
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 014f19e76247..35cff6bbb716 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -95,9 +95,19 @@ struct kasan_track {
depot_stack_handle_t stack;
};
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+#define KASAN_NR_FREE_STACKS 5
+#else
+#define KASAN_NR_FREE_STACKS 1
+#endif
+
struct kasan_alloc_meta {
struct kasan_track alloc_track;
- struct kasan_track free_track;
+ struct kasan_track free_track[KASAN_NR_FREE_STACKS];
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+ u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
+ u8 free_track_idx;
+#endif
};
struct qlist_node {
@@ -146,6 +156,8 @@ void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip);
+struct page *kasan_addr_to_page(const void *addr);
+
#if defined(CONFIG_KASAN_GENERIC) && \
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 0e5f965f1882..621782100eaa 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -111,7 +111,7 @@ static void print_track(struct kasan_track *track, const char *prefix)
}
}
-static struct page *addr_to_page(const void *addr)
+struct page *kasan_addr_to_page(const void *addr)
{
if ((addr >= (void *)PAGE_OFFSET) &&
(addr < high_memory))
@@ -151,15 +151,38 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
(void *)(object_addr + cache->object_size));
}
+static struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
+ void *object, u8 tag)
+{
+ struct kasan_alloc_meta *alloc_meta;
+ int i = 0;
+
+ alloc_meta = get_alloc_info(cache, object);
+
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+ for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
+ if (alloc_meta->free_pointer_tag[i] == tag)
+ break;
+ }
+ if (i == KASAN_NR_FREE_STACKS)
+ i = alloc_meta->free_track_idx;
+#endif
+
+ return &alloc_meta->free_track[i];
+}
+
static void describe_object(struct kmem_cache *cache, void *object,
- const void *addr)
+ const void *addr, u8 tag)
{
struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
if (cache->flags & SLAB_KASAN) {
+ struct kasan_track *free_track;
+
print_track(&alloc_info->alloc_track, "Allocated");
pr_err("\n");
- print_track(&alloc_info->free_track, "Freed");
+ free_track = kasan_get_free_track(cache, object, tag);
+ print_track(free_track, "Freed");
pr_err("\n");
}
@@ -344,9 +367,9 @@ static void print_address_stack_frame(const void *addr)
print_decoded_frame_descr(frame_descr);
}
-static void print_address_description(void *addr)
+static void print_address_description(void *addr, u8 tag)
{
- struct page *page = addr_to_page(addr);
+ struct page *page = kasan_addr_to_page(addr);
dump_stack();
pr_err("\n");
@@ -355,7 +378,7 @@ static void print_address_description(void *addr)
struct kmem_cache *cache = page->slab_cache;
void *object = nearest_obj(cache, page, addr);
- describe_object(cache, object, addr);
+ describe_object(cache, object, addr, tag);
}
if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
@@ -435,13 +458,14 @@ static bool report_enabled(void)
void kasan_report_invalid_free(void *object, unsigned long ip)
{
unsigned long flags;
+ u8 tag = get_tag(object);
+ object = reset_tag(object);
start_report(&flags);
pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
- print_tags(get_tag(object), reset_tag(object));
- object = reset_tag(object);
+ print_tags(tag, object);
pr_err("\n");
- print_address_description(object);
+ print_address_description(object, tag);
pr_err("\n");
print_shadow_for_address(object);
end_report(&flags);
@@ -479,7 +503,7 @@ void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned lon
pr_err("\n");
if (addr_has_shadow(untagged_addr)) {
- print_address_description(untagged_addr);
+ print_address_description(untagged_addr, get_tag(tagged_addr));
pr_err("\n");
print_shadow_for_address(info.first_bad_addr);
} else {
diff --git a/mm/kasan/tags_report.c b/mm/kasan/tags_report.c
index 8eaf5f722271..969ae08f59d7 100644
--- a/mm/kasan/tags_report.c
+++ b/mm/kasan/tags_report.c
@@ -36,6 +36,30 @@
const char *get_bug_type(struct kasan_access_info *info)
{
+#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
+ struct kasan_alloc_meta *alloc_meta;
+ struct kmem_cache *cache;
+ struct page *page;
+ const void *addr;
+ void *object;
+ u8 tag;
+ int i;
+
+ tag = get_tag(info->access_addr);
+ addr = reset_tag(info->access_addr);
+ page = kasan_addr_to_page(addr);
+ if (page && PageSlab(page)) {
+ cache = page->slab_cache;
+ object = nearest_obj(cache, page, (void *)addr);
+ alloc_meta = get_alloc_info(cache, object);
+
+ for (i = 0; i < KASAN_NR_FREE_STACKS; i++)
+ if (alloc_meta->free_pointer_tag[i] == tag)
+ return "use-after-free";
+ return "out-of-bounds";
+ }
+
+#endif
return "invalid-access";
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ccede2425c3f..0a1b4b484ac5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -48,6 +48,7 @@ enum scan_result {
SCAN_CGROUP_CHARGE_FAIL,
SCAN_EXCEED_SWAP_PTE,
SCAN_TRUNCATED,
+ SCAN_PAGE_HAS_PRIVATE,
};
#define CREATE_TRACE_POINTS
@@ -76,6 +77,8 @@ static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
static struct kmem_cache *mm_slot_cache __read_mostly;
+#define MAX_PTE_MAPPED_THP 8
+
/**
* struct mm_slot - hash lookup from mm to mm_slot
* @hash: hash collision list
@@ -86,6 +89,10 @@ struct mm_slot {
struct hlist_node hash;
struct list_head mm_node;
struct mm_struct *mm;
+
+ /* pte-mapped THP in this mm */
+ int nr_pte_mapped_thp;
+ unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};
/**
@@ -404,7 +411,11 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
(vm_flags & VM_NOHUGEPAGE) ||
test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
return false;
- if (shmem_file(vma->vm_file)) {
+
+ if (shmem_file(vma->vm_file) ||
+ (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+ vma->vm_file &&
+ (vm_flags & VM_DENYWRITE))) {
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
return false;
return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
@@ -456,8 +467,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
unsigned long hstart, hend;
/*
- * khugepaged does not yet work on non-shmem files or special
- * mappings. And file-private shmem THP is not supported.
+	 * For non-shmem files, khugepaged only supports read-only ones.
+ * khugepaged does not yet work on special mappings. And
+ * file-private shmem THP is not supported.
*/
if (!hugepage_vma_check(vma, vm_flags))
return 0;
@@ -1248,6 +1260,159 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
}
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
+/*
+ * Notify khugepaged that the given addr of the mm contains a pte-mapped
+ * THP. Then khugepaged should try to collapse the page table.
+ */
+static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr)
+{
+ struct mm_slot *mm_slot;
+
+ VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
+
+ spin_lock(&khugepaged_mm_lock);
+ mm_slot = get_mm_slot(mm);
+ if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
+ mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
+ spin_unlock(&khugepaged_mm_lock);
+ return 0;
+}
+
+/**
+ * Try to collapse a pte-mapped THP for mm at address haddr.
+ *
+ * This function checks whether all the PTEs in the PMD are pointing to the
+ * right THP. If so, retract the page table so the THP can refault in
+ * as pmd-mapped.
+ */
+void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long haddr = addr & HPAGE_PMD_MASK;
+ struct vm_area_struct *vma = find_vma(mm, haddr);
+ struct page *hpage = NULL;
+ pte_t *start_pte, *pte;
+ pmd_t *pmd, _pmd;
+ spinlock_t *ptl;
+ int count = 0;
+ int i;
+
+ if (!vma || !vma->vm_file ||
+ vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
+ return;
+
+ /*
+ * This vm_flags may not have VM_HUGEPAGE if the page was not
+ * collapsed by this mm. But we can still collapse if the page is
+	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
+	 * will not fail the vma for missing VM_HUGEPAGE.
+ */
+ if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
+ return;
+
+ pmd = mm_find_pmd(mm, haddr);
+ if (!pmd)
+ return;
+
+ start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
+
+	/* step 1: check that all mapped PTEs point to the right huge page */
+ for (i = 0, addr = haddr, pte = start_pte;
+ i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
+ struct page *page;
+
+ /* empty pte, skip */
+ if (pte_none(*pte))
+ continue;
+
+ /* page swapped out, abort */
+ if (!pte_present(*pte))
+ goto abort;
+
+ page = vm_normal_page(vma, addr, *pte);
+
+ if (!page || !PageCompound(page))
+ goto abort;
+
+ if (!hpage) {
+ hpage = compound_head(page);
+ /*
+ * The mapping of the THP should not change.
+ *
+ * Note that uprobe, debugger, or MAP_PRIVATE may
+ * change the page table, but the new page will
+ * not pass PageCompound() check.
+ */
+ if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
+ goto abort;
+ }
+
+ /*
+ * Confirm the page maps to the correct subpage.
+ *
+ * Note that uprobe, debugger, or MAP_PRIVATE may change
+ * the page table, but the new page will not pass
+ * PageCompound() check.
+ */
+ if (WARN_ON(hpage + i != page))
+ goto abort;
+ count++;
+ }
+
+ /* step 2: adjust rmap */
+ for (i = 0, addr = haddr, pte = start_pte;
+ i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
+ struct page *page;
+
+ if (pte_none(*pte))
+ continue;
+ page = vm_normal_page(vma, addr, *pte);
+ page_remove_rmap(page, false);
+ }
+
+ pte_unmap_unlock(start_pte, ptl);
+
+ /* step 3: set proper refcount and mm_counters. */
+ if (hpage) {
+ page_ref_sub(hpage, count);
+ add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
+ }
+
+ /* step 4: collapse pmd */
+ ptl = pmd_lock(vma->vm_mm, pmd);
+ _pmd = pmdp_collapse_flush(vma, addr, pmd);
+ spin_unlock(ptl);
+ mm_dec_nr_ptes(mm);
+ pte_free(mm, pmd_pgtable(_pmd));
+ return;
+
+abort:
+ pte_unmap_unlock(start_pte, ptl);
+}
+
+static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
+{
+ struct mm_struct *mm = mm_slot->mm;
+ int i;
+
+ if (likely(mm_slot->nr_pte_mapped_thp == 0))
+ return 0;
+
+ if (!down_write_trylock(&mm->mmap_sem))
+ return -EBUSY;
+
+ if (unlikely(khugepaged_test_exit(mm)))
+ goto out;
+
+ for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
+ collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
+
+out:
+ mm_slot->nr_pte_mapped_thp = 0;
+ up_write(&mm->mmap_sem);
+ return 0;
+}
+
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
struct vm_area_struct *vma;
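
retract_page_tables() can only trylock mmap_sem (the fault path takes the page lock and mmap_sem in the opposite order), so addresses whose trylock fails are now stashed in the fixed pte_mapped_thp[] array on the mm_slot and collapsed on a later scan pass. A userspace sketch of this bounded defer-and-drain pattern, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

#define MAX_DEFERRED 8	/* mirrors MAX_PTE_MAPPED_THP */

struct slot {
	pthread_mutex_t lock;
	int nr_deferred;
	unsigned long deferred[MAX_DEFERRED];
};

/* Record work we could not do now; silently drop entries once the
 * array is full, as khugepaged_add_pte_mapped_thp() does. */
static void defer(struct slot *s, unsigned long addr)
{
	pthread_mutex_lock(&s->lock);
	if (s->nr_deferred < MAX_DEFERRED)
		s->deferred[s->nr_deferred++] = addr;
	pthread_mutex_unlock(&s->lock);
}

/* Drain everything recorded so far, then reset the count. */
static void drain(struct slot *s)
{
	int i;

	pthread_mutex_lock(&s->lock);
	for (i = 0; i < s->nr_deferred; i++)
		printf("retrying collapse at %#lx\n", s->deferred[i]);
	s->nr_deferred = 0;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct slot s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	defer(&s, 0x200000);	/* trylock failed: remember for later */
	defer(&s, 0x400000);
	drain(&s);		/* the next scan pass picks them up */
	return 0;
}
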
@@ -1256,7 +1421,22 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
- /* probably overkill */
+		/*
+		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
+		 * got written to. These VMAs are likely not worth the cost
+		 * of down_write(mmap_sem), as the PMD-mapping is likely to
+		 * be split again later.
+		 *
+		 * Note that the vma->anon_vma check is racy: it can be set
+		 * up by the fault path after the check but before we take
+		 * mmap_sem. But the page lock would prevent establishing
+		 * any new ptes of the page, so we are safe.
+		 *
+		 * An alternative would be to drop the check, but instead
+		 * check that the page table is clear before calling
+		 * pmdp_collapse_flush() under ptl. That has a higher chance
+		 * of recovering the THP for the VMA, but also a higher cost.
+		 */
if (vma->anon_vma)
continue;
addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
@@ -1269,9 +1449,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
continue;
/*
* We need exclusive mmap_sem to retract page table.
- * If trylock fails we would end up with pte-mapped THP after
- * re-fault. Not ideal, but it's more important to not disturb
- * the system too much.
+ *
+		 * We use trylock due to lock inversion: we need to acquire
+		 * mmap_sem while holding the page lock. The fault path takes
+		 * them in the reverse order, so trylock avoids the deadlock.
*/
if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
@@ -1281,18 +1462,21 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
up_write(&vma->vm_mm->mmap_sem);
mm_dec_nr_ptes(vma->vm_mm);
pte_free(vma->vm_mm, pmd_pgtable(_pmd));
+ } else {
+ /* Try again later */
+ khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
}
}
i_mmap_unlock_write(mapping);
}
/**
- * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
+ * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
*
* Basic scheme is simple, details are more complex:
* - allocate and lock a new huge page;
* - scan page cache replacing old pages with the new one
- * + swap in pages if necessary;
+ * + swap/gup in pages if necessary;
* + fill in gaps;
* + keep old pages around in case rollback is required;
* - if replacing succeeds:
@@ -1304,10 +1488,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* + restore gaps in the page cache;
* + unlock and free huge page;
*/
-static void collapse_shmem(struct mm_struct *mm,
- struct address_space *mapping, pgoff_t start,
+static void collapse_file(struct mm_struct *mm,
+ struct file *file, pgoff_t start,
struct page **hpage, int node)
{
+ struct address_space *mapping = file->f_mapping;
gfp_t gfp;
struct page *new_page;
struct mem_cgroup *memcg;
@@ -1315,7 +1500,9 @@ static void collapse_shmem(struct mm_struct *mm,
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
int nr_none = 0, result = SCAN_SUCCEED;
+ bool is_shmem = shmem_file(file);
+ VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
/* Only allocate from the target node */
@@ -1347,7 +1534,8 @@ static void collapse_shmem(struct mm_struct *mm,
} while (1);
__SetPageLocked(new_page);
- __SetPageSwapBacked(new_page);
+ if (is_shmem)
+ __SetPageSwapBacked(new_page);
new_page->index = start;
new_page->mapping = mapping;
@@ -1362,41 +1550,75 @@ static void collapse_shmem(struct mm_struct *mm,
struct page *page = xas_next(&xas);
VM_BUG_ON(index != xas.xa_index);
- if (!page) {
- /*
- * Stop if extent has been truncated or hole-punched,
- * and is now completely empty.
- */
- if (index == start) {
- if (!xas_next_entry(&xas, end - 1)) {
- result = SCAN_TRUNCATED;
+ if (is_shmem) {
+ if (!page) {
+ /*
+ * Stop if extent has been truncated or
+ * hole-punched, and is now completely
+ * empty.
+ */
+ if (index == start) {
+ if (!xas_next_entry(&xas, end - 1)) {
+ result = SCAN_TRUNCATED;
+ goto xa_locked;
+ }
+ xas_set(&xas, index);
+ }
+ if (!shmem_charge(mapping->host, 1)) {
+ result = SCAN_FAIL;
goto xa_locked;
}
- xas_set(&xas, index);
+ xas_store(&xas, new_page);
+ nr_none++;
+ continue;
}
- if (!shmem_charge(mapping->host, 1)) {
- result = SCAN_FAIL;
+
+ if (xa_is_value(page) || !PageUptodate(page)) {
+ xas_unlock_irq(&xas);
+ /* swap in or instantiate fallocated page */
+ if (shmem_getpage(mapping->host, index, &page,
+ SGP_NOHUGE)) {
+ result = SCAN_FAIL;
+ goto xa_unlocked;
+ }
+ } else if (trylock_page(page)) {
+ get_page(page);
+ xas_unlock_irq(&xas);
+ } else {
+ result = SCAN_PAGE_LOCK;
goto xa_locked;
}
- xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
- nr_none++;
- continue;
- }
-
- if (xa_is_value(page) || !PageUptodate(page)) {
- xas_unlock_irq(&xas);
- /* swap in or instantiate fallocated page */
- if (shmem_getpage(mapping->host, index, &page,
- SGP_NOHUGE)) {
+ } else { /* !is_shmem */
+ if (!page || xa_is_value(page)) {
+ xas_unlock_irq(&xas);
+ page_cache_sync_readahead(mapping, &file->f_ra,
+ file, index,
+ PAGE_SIZE);
+ /* drain pagevecs to help isolate_lru_page() */
+ lru_add_drain();
+ page = find_lock_page(mapping, index);
+ if (unlikely(page == NULL)) {
+ result = SCAN_FAIL;
+ goto xa_unlocked;
+ }
+ } else if (!PageUptodate(page)) {
+ xas_unlock_irq(&xas);
+ wait_on_page_locked(page);
+ if (!trylock_page(page)) {
+ result = SCAN_PAGE_LOCK;
+ goto xa_unlocked;
+ }
+ get_page(page);
+ } else if (PageDirty(page)) {
result = SCAN_FAIL;
- goto xa_unlocked;
+ goto xa_locked;
+ } else if (trylock_page(page)) {
+ get_page(page);
+ xas_unlock_irq(&xas);
+ } else {
+ result = SCAN_PAGE_LOCK;
+ goto xa_locked;
}
- } else if (trylock_page(page)) {
- get_page(page);
- xas_unlock_irq(&xas);
- } else {
- result = SCAN_PAGE_LOCK;
- goto xa_locked;
}
/*
@@ -1425,6 +1647,12 @@ static void collapse_shmem(struct mm_struct *mm,
goto out_unlock;
}
+ if (page_has_private(page) &&
+ !try_to_release_page(page, GFP_KERNEL)) {
+ result = SCAN_PAGE_HAS_PRIVATE;
+ goto out_unlock;
+ }
+
if (page_mapped(page))
unmap_mapping_pages(mapping, index, 1, false);
@@ -1454,7 +1682,7 @@ static void collapse_shmem(struct mm_struct *mm,
list_add_tail(&page->lru, &pagelist);
/* Finally, replace with the new page. */
- xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
+ xas_store(&xas, new_page);
continue;
out_unlock:
unlock_page(page);
@@ -1462,12 +1690,20 @@ out_unlock:
goto xa_unlocked;
}
- __inc_node_page_state(new_page, NR_SHMEM_THPS);
+ if (is_shmem)
+ __inc_node_page_state(new_page, NR_SHMEM_THPS);
+ else {
+ __inc_node_page_state(new_page, NR_FILE_THPS);
+ filemap_nr_thps_inc(mapping);
+ }
+
if (nr_none) {
struct zone *zone = page_zone(new_page);
__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
- __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+ if (is_shmem)
+ __mod_node_page_state(zone->zone_pgdat,
+ NR_SHMEM, nr_none);
}
xa_locked:
@@ -1505,10 +1741,15 @@ xa_unlocked:
SetPageUptodate(new_page);
page_ref_add(new_page, HPAGE_PMD_NR - 1);
- set_page_dirty(new_page);
mem_cgroup_commit_charge(new_page, memcg, false, true);
+
+ if (is_shmem) {
+ set_page_dirty(new_page);
+ lru_cache_add_anon(new_page);
+ } else {
+ lru_cache_add_file(new_page);
+ }
count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
- lru_cache_add_anon(new_page);
/*
* Remove pte page tables, so we can re-fault the page as huge.
@@ -1523,7 +1764,9 @@ xa_unlocked:
/* Something went wrong: roll back page cache changes */
xas_lock_irq(&xas);
mapping->nrpages -= nr_none;
- shmem_uncharge(mapping->host, nr_none);
+
+ if (is_shmem)
+ shmem_uncharge(mapping->host, nr_none);
xas_set(&xas, start);
xas_for_each(&xas, page, end - 1) {
@@ -1563,11 +1806,11 @@ out:
/* TODO: tracepoints */
}
-static void khugepaged_scan_shmem(struct mm_struct *mm,
- struct address_space *mapping,
- pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+ struct file *file, pgoff_t start, struct page **hpage)
{
struct page *page = NULL;
+ struct address_space *mapping = file->f_mapping;
XA_STATE(xas, &mapping->i_pages, start);
int present, swap;
int node = NUMA_NO_NODE;
@@ -1606,7 +1849,8 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
break;
}
- if (page_count(page) != 1 + page_mapcount(page)) {
+ if (page_count(page) !=
+ 1 + page_mapcount(page) + page_has_private(page)) {
result = SCAN_PAGE_COUNT;
break;
}
@@ -1631,19 +1875,23 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
result = SCAN_EXCEED_NONE_PTE;
} else {
node = khugepaged_find_target_node();
- collapse_shmem(mm, mapping, start, hpage, node);
+ collapse_file(mm, file, start, hpage, node);
}
}
/* TODO: tracepoints */
}
#else
-static void khugepaged_scan_shmem(struct mm_struct *mm,
- struct address_space *mapping,
- pgoff_t start, struct page **hpage)
+static void khugepaged_scan_file(struct mm_struct *mm,
+ struct file *file, pgoff_t start, struct page **hpage)
{
BUILD_BUG();
}
+
+static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
+{
+ return 0;
+}
#endif
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
@@ -1668,6 +1916,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
khugepaged_scan.mm_slot = mm_slot;
}
spin_unlock(&khugepaged_mm_lock);
+ khugepaged_collapse_pte_mapped_thps(mm_slot);
mm = mm_slot->mm;
/*
@@ -1713,17 +1962,18 @@ skip:
VM_BUG_ON(khugepaged_scan.address < hstart ||
khugepaged_scan.address + HPAGE_PMD_SIZE >
hend);
- if (shmem_file(vma->vm_file)) {
+ if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
struct file *file;
pgoff_t pgoff = linear_page_index(vma,
khugepaged_scan.address);
- if (!shmem_huge_enabled(vma))
+
+ if (shmem_file(vma->vm_file)
+ && !shmem_huge_enabled(vma))
goto skip;
file = get_file(vma->vm_file);
up_read(&mm->mmap_sem);
ret = 1;
- khugepaged_scan_shmem(mm, file->f_mapping,
- pgoff, hpage);
+ khugepaged_scan_file(mm, file, pgoff, hpage);
fput(file);
} else {
ret = khugepaged_scan_pmd(mm, vma,
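
Collapsed file-backed huge pages are accounted as NR_FILE_THPS; kernels carrying this series should export the total via /proc/meminfo. A small hedged reader (it assumes a FileHugePages field is present, which will not hold on older kernels):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("meminfo");
		return 1;
	}
	/* print the shmem and (if exported) file THP counters */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "FileHugePages:", 14) ||
		    !strncmp(line, "ShmemHugePages:", 15))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
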
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f6e602918dac..03a8d84badad 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -168,6 +168,8 @@ struct kmemleak_object {
#define OBJECT_REPORTED (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN (1 << 2)
+/* flag set to fully scan the object when scan_area allocation failed */
+#define OBJECT_FULL_SCAN (1 << 3)
#define HEX_PREFIX " "
/* number of bytes to print per line; must be 16 or 32 */
@@ -183,6 +185,10 @@ struct kmemleak_object {
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
+/* memory pool allocation */
+static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
+static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
+static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
@@ -193,13 +199,11 @@ static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;
/* set if tracing memory operations is enabled */
-static int kmemleak_enabled;
+static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
-static int kmemleak_free_enabled;
+static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
-/* enables or disables early logging of the memory operations */
-static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
@@ -227,49 +231,6 @@ static bool kmemleak_found_leaks;
static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);
-/*
- * Early object allocation/freeing logging. Kmemleak is initialized after the
- * kernel allocator. However, both the kernel allocator and kmemleak may
- * allocate memory blocks which need to be tracked. Kmemleak defines an
- * arbitrary buffer to hold the allocation/freeing information before it is
- * fully initialized.
- */
-
-/* kmemleak operation type for early logging */
-enum {
- KMEMLEAK_ALLOC,
- KMEMLEAK_ALLOC_PERCPU,
- KMEMLEAK_FREE,
- KMEMLEAK_FREE_PART,
- KMEMLEAK_FREE_PERCPU,
- KMEMLEAK_NOT_LEAK,
- KMEMLEAK_IGNORE,
- KMEMLEAK_SCAN_AREA,
- KMEMLEAK_NO_SCAN,
- KMEMLEAK_SET_EXCESS_REF
-};
-
-/*
- * Structure holding the information passed to kmemleak callbacks during the
- * early logging.
- */
-struct early_log {
- int op_type; /* kmemleak operation type */
- int min_count; /* minimum reference count */
- const void *ptr; /* allocated/freed memory block */
- union {
- size_t size; /* memory block size */
- unsigned long excess_ref; /* surplus reference passing */
- };
- unsigned long trace[MAX_TRACE]; /* stack trace */
- unsigned int trace_len; /* stack trace length */
-};
-
-/* early logging buffer and current position */
-static struct early_log
- early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
-static int crt_early_log __initdata;
-
static void kmemleak_disable(void);
/*
@@ -450,6 +411,54 @@ static int get_object(struct kmemleak_object *object)
}
/*
+ * Memory pool allocation and freeing. kmemleak_lock must not be held.
+ */
+static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
+{
+ unsigned long flags;
+ struct kmemleak_object *object;
+
+ /* try the slab allocator first */
+ if (object_cache) {
+ object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+ if (object)
+ return object;
+ }
+
+ /* slab allocation failed, try the memory pool */
+ write_lock_irqsave(&kmemleak_lock, flags);
+ object = list_first_entry_or_null(&mem_pool_free_list,
+ typeof(*object), object_list);
+ if (object)
+ list_del(&object->object_list);
+ else if (mem_pool_free_count)
+ object = &mem_pool[--mem_pool_free_count];
+ else
+ pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
+ write_unlock_irqrestore(&kmemleak_lock, flags);
+
+ return object;
+}
+
+/*
+ * Return the object to either the slab allocator or the memory pool.
+ */
+static void mem_pool_free(struct kmemleak_object *object)
+{
+ unsigned long flags;
+
+ if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
+ kmem_cache_free(object_cache, object);
+ return;
+ }
+
+ /* add the object to the memory pool free list */
+ write_lock_irqsave(&kmemleak_lock, flags);
+ list_add(&object->object_list, &mem_pool_free_list);
+ write_unlock_irqrestore(&kmemleak_lock, flags);
+}
+
+/*
* RCU callback to free a kmemleak_object.
*/
static void free_object_rcu(struct rcu_head *rcu)
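
The early-log buffer is gone: kmemleak now starts enabled and, whenever the slab cache is unavailable or fails, falls back to a static object pool that is replenished through a free list as objects die. A userspace model of the allocate/free split, with malloc() standing in for kmem_cache_alloc():

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 16	/* stands in for CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE */

struct object {
	struct object *next;	/* models the list_head free-list linkage */
};

static struct object mem_pool[POOL_SIZE];
static int mem_pool_free_count = POOL_SIZE;
static struct object *mem_pool_free_list;

/* try the normal allocator first; fall back to the static pool, which
 * works even before the allocator is up */
static struct object *pool_alloc(int allocator_up)
{
	struct object *obj = allocator_up ? malloc(sizeof(*obj)) : NULL;

	if (obj)
		return obj;
	if (mem_pool_free_list) {
		obj = mem_pool_free_list;
		mem_pool_free_list = obj->next;
	} else if (mem_pool_free_count) {
		obj = &mem_pool[--mem_pool_free_count];
	}
	return obj;	/* NULL means the pool is exhausted */
}

/* pool objects go back on the pool free list, heap objects to free() */
static void pool_free(struct object *obj)
{
	if (obj < mem_pool || obj >= mem_pool + POOL_SIZE) {
		free(obj);
		return;
	}
	obj->next = mem_pool_free_list;
	mem_pool_free_list = obj;
}

int main(void)
{
	struct object *a = pool_alloc(0);	/* early: served from the pool */
	struct object *b = pool_alloc(1);	/* late: served by malloc */

	pool_free(a);
	pool_free(b);
	printf("untouched pool slots: %d\n", mem_pool_free_count);
	return 0;
}
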
@@ -467,7 +476,7 @@ static void free_object_rcu(struct rcu_head *rcu)
hlist_del(&area->node);
kmem_cache_free(scan_area_cache, area);
}
- kmem_cache_free(object_cache, object);
+ mem_pool_free(object);
}
/*
@@ -485,7 +494,15 @@ static void put_object(struct kmemleak_object *object)
/* should only get here after delete_object was called */
WARN_ON(object->flags & OBJECT_ALLOCATED);
- call_rcu(&object->rcu, free_object_rcu);
+ /*
+	 * It may be too early for the RCU callbacks; however, there is no
+	 * concurrent object_list traversal when !object_cache and all objects
+	 * came from the memory pool, so free the object directly.
+ */
+ if (object_cache)
+ call_rcu(&object->rcu, free_object_rcu);
+ else
+ free_object_rcu(&object->rcu);
}
/*
@@ -550,7 +567,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
struct rb_node **link, *rb_parent;
unsigned long untagged_ptr;
- object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+ object = mem_pool_alloc(gfp);
if (!object) {
pr_warn("Cannot allocate a kmemleak_object structure\n");
kmemleak_disable();
@@ -689,9 +706,7 @@ static void delete_object_part(unsigned long ptr, size_t size)
/*
* Create one or two objects that may result from the memory block
* split. Note that partial freeing is only done by free_bootmem() and
- * this happens before kmemleak_init() is called. The path below is
- * only executed during early log recording in kmemleak_init(), so
- * GFP_KERNEL is enough.
+ * this happens before kmemleak_init() is called.
*/
start = object->pointer;
end = object->pointer + object->size;
@@ -763,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
unsigned long flags;
struct kmemleak_object *object;
- struct kmemleak_scan_area *area;
+ struct kmemleak_scan_area *area = NULL;
object = find_and_get_object(ptr, 1);
if (!object) {
@@ -772,13 +787,16 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
return;
}
- area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
- if (!area) {
- pr_warn("Cannot allocate a scan area\n");
- goto out;
- }
+ if (scan_area_cache)
+ area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
spin_lock_irqsave(&object->lock, flags);
+ if (!area) {
+ pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
+ /* mark the object for full scan to avoid false positives */
+ object->flags |= OBJECT_FULL_SCAN;
+ goto out_unlock;
+ }
if (size == SIZE_MAX) {
size = object->pointer + object->size - ptr;
} else if (ptr + size > object->pointer + object->size) {
@@ -795,7 +813,6 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
hlist_add_head(&area->node, &object->area_list);
out_unlock:
spin_unlock_irqrestore(&object->lock, flags);
-out:
put_object(object);
}
@@ -845,86 +862,6 @@ static void object_no_scan(unsigned long ptr)
put_object(object);
}
-/*
- * Log an early kmemleak_* call to the early_log buffer. These calls will be
- * processed later once kmemleak is fully initialized.
- */
-static void __init log_early(int op_type, const void *ptr, size_t size,
- int min_count)
-{
- unsigned long flags;
- struct early_log *log;
-
- if (kmemleak_error) {
- /* kmemleak stopped recording, just count the requests */
- crt_early_log++;
- return;
- }
-
- if (crt_early_log >= ARRAY_SIZE(early_log)) {
- crt_early_log++;
- kmemleak_disable();
- return;
- }
-
- /*
- * There is no need for locking since the kernel is still in UP mode
- * at this stage. Disabling the IRQs is enough.
- */
- local_irq_save(flags);
- log = &early_log[crt_early_log];
- log->op_type = op_type;
- log->ptr = ptr;
- log->size = size;
- log->min_count = min_count;
- log->trace_len = __save_stack_trace(log->trace);
- crt_early_log++;
- local_irq_restore(flags);
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc(struct early_log *log)
-{
- struct kmemleak_object *object;
- unsigned long flags;
- int i;
-
- if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
- return;
-
- /*
- * RCU locking needed to ensure object is not freed via put_object().
- */
- rcu_read_lock();
- object = create_object((unsigned long)log->ptr, log->size,
- log->min_count, GFP_ATOMIC);
- if (!object)
- goto out;
- spin_lock_irqsave(&object->lock, flags);
- for (i = 0; i < log->trace_len; i++)
- object->trace[i] = log->trace[i];
- object->trace_len = log->trace_len;
- spin_unlock_irqrestore(&object->lock, flags);
-out:
- rcu_read_unlock();
-}
-
-/*
- * Log an early allocated block and populate the stack trace.
- */
-static void early_alloc_percpu(struct early_log *log)
-{
- unsigned int cpu;
- const void __percpu *ptr = log->ptr;
-
- for_each_possible_cpu(cpu) {
- log->ptr = per_cpu_ptr(ptr, cpu);
- early_alloc(log);
- }
-}
-
/**
* kmemleak_alloc - register a newly allocated object
* @ptr: pointer to beginning of the object
@@ -946,8 +883,6 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
create_object((unsigned long)ptr, size, min_count, gfp);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
@@ -975,8 +910,6 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
for_each_possible_cpu(cpu)
create_object((unsigned long)per_cpu_ptr(ptr, cpu),
size, 0, gfp);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -1001,11 +934,6 @@ void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp
create_object((unsigned long)area->addr, size, 2, gfp);
object_set_excess_ref((unsigned long)area,
(unsigned long)area->addr);
- } else if (kmemleak_early_log) {
- log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
- /* reusing early_log.size for storing area->addr */
- log_early(KMEMLEAK_SET_EXCESS_REF,
- area, (unsigned long)area->addr, 0);
}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
@@ -1023,8 +951,6 @@ void __ref kmemleak_free(const void *ptr)
if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
delete_object_full((unsigned long)ptr);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -1043,8 +969,6 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
delete_object_part((unsigned long)ptr, size);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -1065,8 +989,6 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
for_each_possible_cpu(cpu)
delete_object_full((unsigned long)per_cpu_ptr(ptr,
cpu));
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
@@ -1117,8 +1039,6 @@ void __ref kmemleak_not_leak(const void *ptr)
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
make_gray_object((unsigned long)ptr);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
@@ -1137,8 +1057,6 @@ void __ref kmemleak_ignore(const void *ptr)
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
make_black_object((unsigned long)ptr);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
@@ -1159,8 +1077,6 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
add_scan_area((unsigned long)ptr, size, gfp);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
@@ -1179,8 +1095,6 @@ void __ref kmemleak_no_scan(const void *ptr)
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
object_no_scan((unsigned long)ptr);
- else if (kmemleak_early_log)
- log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
@@ -1408,7 +1322,8 @@ static void scan_object(struct kmemleak_object *object)
if (!(object->flags & OBJECT_ALLOCATED))
/* already freed object */
goto out;
- if (hlist_empty(&object->area_list)) {
+ if (hlist_empty(&object->area_list) ||
+ object->flags & OBJECT_FULL_SCAN) {
void *start = (void *)object->pointer;
void *end = (void *)(object->pointer + object->size);
void *next;
@@ -1966,7 +1881,6 @@ static void kmemleak_disable(void)
/* stop any memory operation tracing */
kmemleak_enabled = 0;
- kmemleak_early_log = 0;
/* check whether it is too early for a kernel thread */
if (kmemleak_initialized)
@@ -1994,20 +1908,11 @@ static int __init kmemleak_boot_config(char *str)
}
early_param("kmemleak", kmemleak_boot_config);
-static void __init print_log_trace(struct early_log *log)
-{
- pr_notice("Early log backtrace:\n");
- stack_trace_print(log->trace, log->trace_len, 2);
-}
-
/*
* Kmemleak initialization.
*/
void __init kmemleak_init(void)
{
- int i;
- unsigned long flags;
-
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
if (!kmemleak_skip_disable) {
kmemleak_disable();
@@ -2015,28 +1920,15 @@ void __init kmemleak_init(void)
}
#endif
+ if (kmemleak_error)
+ return;
+
jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
- if (crt_early_log > ARRAY_SIZE(early_log))
- pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
- crt_early_log);
-
- /* the kernel is still in UP mode, so disabling the IRQs is enough */
- local_irq_save(flags);
- kmemleak_early_log = 0;
- if (kmemleak_error) {
- local_irq_restore(flags);
- return;
- } else {
- kmemleak_enabled = 1;
- kmemleak_free_enabled = 1;
- }
- local_irq_restore(flags);
-
/* register the data/bss sections */
create_object((unsigned long)_sdata, _edata - _sdata,
KMEMLEAK_GREY, GFP_ATOMIC);
@@ -2047,57 +1939,6 @@ void __init kmemleak_init(void)
create_object((unsigned long)__start_ro_after_init,
__end_ro_after_init - __start_ro_after_init,
KMEMLEAK_GREY, GFP_ATOMIC);
-
- /*
- * This is the point where tracking allocations is safe. Automatic
- * scanning is started during the late initcall. Add the early logged
- * callbacks to the kmemleak infrastructure.
- */
- for (i = 0; i < crt_early_log; i++) {
- struct early_log *log = &early_log[i];
-
- switch (log->op_type) {
- case KMEMLEAK_ALLOC:
- early_alloc(log);
- break;
- case KMEMLEAK_ALLOC_PERCPU:
- early_alloc_percpu(log);
- break;
- case KMEMLEAK_FREE:
- kmemleak_free(log->ptr);
- break;
- case KMEMLEAK_FREE_PART:
- kmemleak_free_part(log->ptr, log->size);
- break;
- case KMEMLEAK_FREE_PERCPU:
- kmemleak_free_percpu(log->ptr);
- break;
- case KMEMLEAK_NOT_LEAK:
- kmemleak_not_leak(log->ptr);
- break;
- case KMEMLEAK_IGNORE:
- kmemleak_ignore(log->ptr);
- break;
- case KMEMLEAK_SCAN_AREA:
- kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
- break;
- case KMEMLEAK_NO_SCAN:
- kmemleak_no_scan(log->ptr);
- break;
- case KMEMLEAK_SET_EXCESS_REF:
- object_set_excess_ref((unsigned long)log->ptr,
- log->excess_ref);
- break;
- default:
- kmemleak_warn("Unknown early log operation: %d\n",
- log->op_type);
- }
-
- if (kmemleak_warning) {
- print_log_trace(log);
- kmemleak_warning = 0;
- }
- }
}
/*
@@ -2126,7 +1967,8 @@ static int __init kmemleak_late_init(void)
mutex_unlock(&scan_mutex);
}
- pr_info("Kernel memory leak detector initialized\n");
+ pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
+ mem_pool_free_count);
return 0;
}
diff --git a/mm/ksm.c b/mm/ksm.c
index 3dc4346411e4..dbee2eb4dd05 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1029,24 +1029,6 @@ static u32 calc_checksum(struct page *page)
return checksum;
}
-static int memcmp_pages(struct page *page1, struct page *page2)
-{
- char *addr1, *addr2;
- int ret;
-
- addr1 = kmap_atomic(page1);
- addr2 = kmap_atomic(page2);
- ret = memcmp(addr1, addr2, PAGE_SIZE);
- kunmap_atomic(addr2);
- kunmap_atomic(addr1);
- return ret;
-}
-
-static inline int pages_identical(struct page *page1, struct page *page2)
-{
- return !memcmp_pages(page1, page2);
-}
-
static int write_protect_page(struct vm_area_struct *vma, struct page *page,
pte_t *orig_pte)
{
diff --git a/mm/madvise.c b/mm/madvise.c
index 88babcc384b9..2be9f3fdb05e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,6 +11,7 @@
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
+#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
@@ -31,6 +32,11 @@
#include "internal.h"
+struct madvise_walk_private {
+ struct mmu_gather *tlb;
+ bool pageout;
+};
+
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
* take mmap_sem for writing. Others, which simply traverse vmas, need
@@ -42,6 +48,8 @@ static int madvise_need_mmap_write(int behavior)
case MADV_REMOVE:
case MADV_WILLNEED:
case MADV_DONTNEED:
+ case MADV_COLD:
+ case MADV_PAGEOUT:
case MADV_FREE:
return 0;
default:
@@ -107,28 +115,14 @@ static long madvise_behavior(struct vm_area_struct *vma,
case MADV_MERGEABLE:
case MADV_UNMERGEABLE:
error = ksm_madvise(vma, start, end, behavior, &new_flags);
- if (error) {
- /*
- * madvise() returns EAGAIN if kernel resources, such as
- * slab, are temporarily unavailable.
- */
- if (error == -ENOMEM)
- error = -EAGAIN;
- goto out;
- }
+ if (error)
+ goto out_convert_errno;
break;
case MADV_HUGEPAGE:
case MADV_NOHUGEPAGE:
error = hugepage_madvise(vma, &new_flags, behavior);
- if (error) {
- /*
- * madvise() returns EAGAIN if kernel resources, such as
- * slab, are temporarily unavailable.
- */
- if (error == -ENOMEM)
- error = -EAGAIN;
- goto out;
- }
+ if (error)
+ goto out_convert_errno;
break;
}
@@ -154,15 +148,8 @@ static long madvise_behavior(struct vm_area_struct *vma,
goto out;
}
error = __split_vma(mm, vma, start, 1);
- if (error) {
- /*
- * madvise() returns EAGAIN if kernel resources, such as
- * slab, are temporarily unavailable.
- */
- if (error == -ENOMEM)
- error = -EAGAIN;
- goto out;
- }
+ if (error)
+ goto out_convert_errno;
}
if (end != vma->vm_end) {
@@ -171,15 +158,8 @@ static long madvise_behavior(struct vm_area_struct *vma,
goto out;
}
error = __split_vma(mm, vma, end, 0);
- if (error) {
- /*
- * madvise() returns EAGAIN if kernel resources, such as
- * slab, are temporarily unavailable.
- */
- if (error == -ENOMEM)
- error = -EAGAIN;
- goto out;
- }
+ if (error)
+ goto out_convert_errno;
}
success:
@@ -187,6 +167,14 @@ success:
* vm_flags is protected by the mmap_sem held in write mode.
*/
vma->vm_flags = new_flags;
+
+out_convert_errno:
+ /*
+ * madvise() returns EAGAIN if kernel resources, such as
+ * slab, are temporarily unavailable.
+ */
+ if (error == -ENOMEM)
+ error = -EAGAIN;
out:
return error;
}
@@ -309,6 +297,254 @@ static long madvise_willneed(struct vm_area_struct *vma,
return 0;
}
+static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct madvise_walk_private *private = walk->private;
+ struct mmu_gather *tlb = private->tlb;
+ bool pageout = private->pageout;
+ struct mm_struct *mm = tlb->mm;
+ struct vm_area_struct *vma = walk->vma;
+ pte_t *orig_pte, *pte, ptent;
+ spinlock_t *ptl;
+ struct page *page = NULL;
+ LIST_HEAD(page_list);
+
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (pmd_trans_huge(*pmd)) {
+ pmd_t orig_pmd;
+ unsigned long next = pmd_addr_end(addr, end);
+
+ tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (!ptl)
+ return 0;
+
+ orig_pmd = *pmd;
+ if (is_huge_zero_pmd(orig_pmd))
+ goto huge_unlock;
+
+ if (unlikely(!pmd_present(orig_pmd))) {
+ VM_BUG_ON(thp_migration_supported() &&
+ !is_pmd_migration_entry(orig_pmd));
+ goto huge_unlock;
+ }
+
+ page = pmd_page(orig_pmd);
+ if (next - addr != HPAGE_PMD_SIZE) {
+ int err;
+
+ if (page_mapcount(page) != 1)
+ goto huge_unlock;
+
+ get_page(page);
+ spin_unlock(ptl);
+ lock_page(page);
+ err = split_huge_page(page);
+ unlock_page(page);
+ put_page(page);
+ if (!err)
+ goto regular_page;
+ return 0;
+ }
+
+ if (pmd_young(orig_pmd)) {
+ pmdp_invalidate(vma, addr, pmd);
+ orig_pmd = pmd_mkold(orig_pmd);
+
+ set_pmd_at(mm, addr, pmd, orig_pmd);
+ tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+ }
+
+ ClearPageReferenced(page);
+ test_and_clear_page_young(page);
+ if (pageout) {
+ if (!isolate_lru_page(page))
+ list_add(&page->lru, &page_list);
+ } else
+ deactivate_page(page);
+huge_unlock:
+ spin_unlock(ptl);
+ if (pageout)
+ reclaim_pages(&page_list);
+ return 0;
+ }
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
+regular_page:
+#endif
+ tlb_change_page_size(tlb, PAGE_SIZE);
+ orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ flush_tlb_batched_pending(mm);
+ arch_enter_lazy_mmu_mode();
+ for (; addr < end; pte++, addr += PAGE_SIZE) {
+ ptent = *pte;
+
+ if (pte_none(ptent))
+ continue;
+
+ if (!pte_present(ptent))
+ continue;
+
+ page = vm_normal_page(vma, addr, ptent);
+ if (!page)
+ continue;
+
+ /*
+ * Creating a THP page is expensive so split it only if we
+		 * are sure it's worth it. Split it only if we are the sole owner.
+ */
+ if (PageTransCompound(page)) {
+ if (page_mapcount(page) != 1)
+ break;
+ get_page(page);
+ if (!trylock_page(page)) {
+ put_page(page);
+ break;
+ }
+ pte_unmap_unlock(orig_pte, ptl);
+ if (split_huge_page(page)) {
+ unlock_page(page);
+ put_page(page);
+ pte_offset_map_lock(mm, pmd, addr, &ptl);
+ break;
+ }
+ unlock_page(page);
+ put_page(page);
+ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte--;
+ addr -= PAGE_SIZE;
+ continue;
+ }
+
+ VM_BUG_ON_PAGE(PageTransCompound(page), page);
+
+ if (pte_young(ptent)) {
+ ptent = ptep_get_and_clear_full(mm, addr, pte,
+ tlb->fullmm);
+ ptent = pte_mkold(ptent);
+ set_pte_at(mm, addr, pte, ptent);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+ }
+
+ /*
+		 * We are deactivating a page to accelerate its reclaim.
+		 * The VM cannot reclaim the page unless we clear PG_young.
+		 * As a side effect, this confuses idle-page tracking,
+		 * which will miss the recent-reference history.
+ */
+ ClearPageReferenced(page);
+ test_and_clear_page_young(page);
+ if (pageout) {
+ if (!isolate_lru_page(page))
+ list_add(&page->lru, &page_list);
+ } else
+ deactivate_page(page);
+ }
+
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(orig_pte, ptl);
+ if (pageout)
+ reclaim_pages(&page_list);
+ cond_resched();
+
+ return 0;
+}
+
+static const struct mm_walk_ops cold_walk_ops = {
+ .pmd_entry = madvise_cold_or_pageout_pte_range,
+};
+
+static void madvise_cold_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ struct madvise_walk_private walk_private = {
+ .pageout = false,
+ .tlb = tlb,
+ };
+
+ tlb_start_vma(tlb, vma);
+ walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
+ tlb_end_vma(tlb, vma);
+}
+
+static long madvise_cold(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start_addr, unsigned long end_addr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct mmu_gather tlb;
+
+ *prev = vma;
+ if (!can_madv_lru_vma(vma))
+ return -EINVAL;
+
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+ madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
+ tlb_finish_mmu(&tlb, start_addr, end_addr);
+
+ return 0;
+}
+
+static void madvise_pageout_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ struct madvise_walk_private walk_private = {
+ .pageout = true,
+ .tlb = tlb,
+ };
+
+ tlb_start_vma(tlb, vma);
+ walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
+ tlb_end_vma(tlb, vma);
+}
+
+static inline bool can_do_pageout(struct vm_area_struct *vma)
+{
+ if (vma_is_anonymous(vma))
+ return true;
+ if (!vma->vm_file)
+ return false;
+ /*
+ * paging out pagecache only for non-anonymous mappings that correspond
+	 * to files the calling process could (if it tried) open for writing;
+ * otherwise we'd be including shared non-exclusive mappings, which
+ * opens a side channel.
+ */
+ return inode_owner_or_capable(file_inode(vma->vm_file)) ||
+ inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
+}
+
+static long madvise_pageout(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start_addr, unsigned long end_addr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct mmu_gather tlb;
+
+ *prev = vma;
+ if (!can_madv_lru_vma(vma))
+ return -EINVAL;
+
+ if (!can_do_pageout(vma))
+ return 0;
+
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+ madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
+ tlb_finish_mmu(&tlb, start_addr, end_addr);
+
+ return 0;
+}
+
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
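
Both new hints preserve data, unlike MADV_DONTNEED: MADV_COLD only deactivates the pages so they age out sooner, while MADV_PAGEOUT reclaims them immediately. A minimal caller follows; the fallback values 20 and 21 match this series' uapi addition, but treat them as an assumption if your headers define the constants themselves:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLD
#define MADV_COLD 20		/* assumed value from this series' uapi patch */
#endif
#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21		/* likewise */
#endif

int main(void)
{
	size_t len = 64 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0x5a, len);			/* fault the pages in */

	/* hint: deactivate; pages stay mapped but become easy reclaim targets */
	if (madvise(buf, len, MADV_COLD))
		perror("MADV_COLD (EINVAL on pre-5.4 kernels)");

	/* hint: reclaim now; pages may be swapped out, contents preserved */
	if (madvise(buf, len, MADV_PAGEOUT))
		perror("MADV_PAGEOUT (EINVAL on pre-5.4 kernels)");

	printf("first byte still readable: 0x%x\n", buf[0]);
	munmap(buf, len);
	return 0;
}
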
@@ -513,7 +749,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
int behavior)
{
*prev = vma;
- if (!can_madv_dontneed_vma(vma))
+ if (!can_madv_lru_vma(vma))
return -EINVAL;
if (!userfaultfd_remove(vma, start, end)) {
@@ -535,7 +771,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
*/
return -ENOMEM;
}
- if (!can_madv_dontneed_vma(vma))
+ if (!can_madv_lru_vma(vma))
return -EINVAL;
if (end > vma->vm_end) {
/*
@@ -689,6 +925,10 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
return madvise_remove(vma, prev, start, end);
case MADV_WILLNEED:
return madvise_willneed(vma, prev, start, end);
+ case MADV_COLD:
+ return madvise_cold(vma, prev, start, end);
+ case MADV_PAGEOUT:
+ return madvise_pageout(vma, prev, start, end);
case MADV_FREE:
case MADV_DONTNEED:
return madvise_dontneed_free(vma, prev, start, end, behavior);
@@ -710,6 +950,8 @@ madvise_behavior_valid(int behavior)
case MADV_WILLNEED:
case MADV_DONTNEED:
case MADV_FREE:
+ case MADV_COLD:
+ case MADV_PAGEOUT:
#ifdef CONFIG_KSM
case MADV_MERGEABLE:
case MADV_UNMERGEABLE:
@@ -804,6 +1046,8 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
size_t len;
struct blk_plug plug;
+ start = untagged_addr(start);
+
if (!madvise_behavior_valid(behavior))
return error;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f3c15bb07cce..c313c49074ca 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -57,6 +57,7 @@
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
+#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
@@ -317,6 +318,7 @@ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
struct workqueue_struct *memcg_kmem_cache_wq;
+#endif
static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);
@@ -440,14 +442,6 @@ void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
}
}
-#else /* CONFIG_MEMCG_KMEM */
-static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
-{
- return 0;
-}
-static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { }
-#endif /* CONFIG_MEMCG_KMEM */
-
/**
* mem_cgroup_css_from_page - css of the memcg associated with a page
* @page: page of interest
@@ -2270,21 +2264,22 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
+ bool flush = false;
+ rcu_read_lock();
memcg = stock->cached;
- if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
- continue;
- if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
- css_put(&memcg->css);
- continue;
- }
- if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
+ if (memcg && stock->nr_pages &&
+ mem_cgroup_is_descendant(memcg, root_memcg))
+ flush = true;
+ rcu_read_unlock();
+
+ if (flush &&
+ !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
if (cpu == curcpu)
drain_local_stock(&stock->work);
else
schedule_work_on(cpu, &stock->work);
}
- css_put(&memcg->css);
}
put_cpu();
mutex_unlock(&percpu_charge_mutex);
@@ -2359,11 +2354,67 @@ static void high_work_func(struct work_struct *work)
}
/*
+ * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
+ * long enough to cause a significant slowdown in most cases, while still
+ * allowing diagnostics and tracing to proceed without becoming stuck.
+ */
+#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
+
+/*
+ * When calculating the delay, we use these on either side of the
+ * exponentiation to maintain precision and scale to a reasonable number of
+ * jiffies (see the table below).
+ *
+ * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
+ * overage ratio to a delay.
+ * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
+ *   proposed penalty in order to reduce it to a reasonable number of
+ *   jiffies, and to produce a reasonable delay curve.
+ *
+ * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
+ * reasonable delay curve compared to precision-adjusted overage, not
+ * penalising heavily at first, but still making sure that growth beyond the
+ * limit penalises misbehaving cgroups by slowing them down exponentially. For
+ * example, with a high of 100 megabytes:
+ *
+ * +-------+------------------------+
+ * | usage | time to allocate in ms |
+ * +-------+------------------------+
+ * | 100M | 0 |
+ * | 101M | 6 |
+ * | 102M | 25 |
+ * | 103M | 57 |
+ * | 104M | 102 |
+ * | 105M | 159 |
+ * | 106M | 230 |
+ * | 107M | 313 |
+ * | 108M | 409 |
+ * | 109M | 518 |
+ * | 110M | 639 |
+ * | 111M | 774 |
+ * | 112M | 921 |
+ * | 113M | 1081 |
+ * | 114M | 1254 |
+ * | 115M | 1439 |
+ * | 116M | 1638 |
+ * | 117M | 1849 |
+ * | 118M | 2000 |
+ * | 119M | 2000 |
+ * | 120M | 2000 |
+ * +-------+------------------------+
+ */
+#define MEMCG_DELAY_PRECISION_SHIFT 20
+#define MEMCG_DELAY_SCALING_SHIFT 14
+
+/*
* Scheduled by try_charge() to be executed from the userland return path
* and reclaims memory over the high limit.
*/
void mem_cgroup_handle_over_high(void)
{
+ unsigned long usage, high, clamped_high;
+ unsigned long pflags;
+ unsigned long penalty_jiffies, overage;
unsigned int nr_pages = current->memcg_nr_pages_over_high;
struct mem_cgroup *memcg;
@@ -2372,8 +2423,75 @@ void mem_cgroup_handle_over_high(void)
memcg = get_mem_cgroup_from_mm(current->mm);
reclaim_high(memcg, nr_pages, GFP_KERNEL);
- css_put(&memcg->css);
current->memcg_nr_pages_over_high = 0;
+
+ /*
+ * memory.high is breached and reclaim is unable to keep up. Throttle
+ * allocators proactively to slow down excessive growth.
+ *
+ * We use overage compared to memory.high to calculate the number of
+ * jiffies to sleep (penalty_jiffies). Ideally this value should be
+ * fairly lenient on small overages, and increasingly harsh when the
+ * memcg in question makes it clear that it has no intention of stopping
+	 * its crazy behaviour, so we exponentially increase the delay based on
+	 * the overage amount.
+ */
+
+ usage = page_counter_read(&memcg->memory);
+ high = READ_ONCE(memcg->high);
+
+ if (usage <= high)
+ goto out;
+
+ /*
+ * Prevent division by 0 in overage calculation by acting as if it was a
+ * threshold of 1 page
+ */
+ clamped_high = max(high, 1UL);
+
+ overage = div_u64((u64)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT,
+ clamped_high);
+
+ penalty_jiffies = ((u64)overage * overage * HZ)
+ >> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT);
+
+ /*
+ * Factor in the task's own contribution to the overage, such that four
+ * N-sized allocations are throttled approximately the same as one
+ * 4N-sized allocation.
+ *
+ * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
+	 * larger the current charge batch is than that.
+ */
+ penalty_jiffies = penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
+
+ /*
+ * Clamp the max delay per usermode return so as to still keep the
+ * application moving forwards and also permit diagnostics, albeit
+ * extremely slowly.
+ */
+ penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
+
+ /*
+ * Don't sleep if the amount of jiffies this memcg owes us is so low
+ * that it's not even worth doing, in an attempt to be nice to those who
+ * go only a small amount over their memory.high value and maybe haven't
+ * been aggressively reclaimed enough yet.
+ */
+ if (penalty_jiffies <= HZ / 100)
+ goto out;
+
+ /*
+ * If we exit early, we're guaranteed to die (since
+ * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
+ * need to account for any ill-begotten jiffies to pay them off later.
+ */
+ psi_memstall_enter(&pflags);
+ schedule_timeout_killable(penalty_jiffies);
+ psi_memstall_leave(&pflags);
+
+out:
+ css_put(&memcg->css);
}
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
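
The penalty is quadratic in the overage ratio, computed in fixed point: the ratio is scaled up by 2^MEMCG_DELAY_PRECISION_SHIFT before squaring, and the square is scaled back down by 2^(PRECISION+SCALING). The sketch below recomputes rows of the table above and reproduces them exactly; it assumes HZ=1000 (one jiffy per millisecond) and 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PRECISION_SHIFT 20	/* MEMCG_DELAY_PRECISION_SHIFT */
#define SCALING_SHIFT   14	/* MEMCG_DELAY_SCALING_SHIFT */
#define HZ              1000	/* assumption: CONFIG_HZ=1000 */

/* recompute penalty_jiffies for usage/high given in 4 KiB pages */
static uint64_t penalty(uint64_t usage, uint64_t high)
{
	uint64_t overage;

	if (usage <= high)
		return 0;
	/* mirrors clamped_high: avoid dividing by zero */
	overage = ((usage - high) << PRECISION_SHIFT) / (high ? high : 1);
	return overage * overage * HZ >> (PRECISION_SHIFT + SCALING_SHIFT);
}

int main(void)
{
	uint64_t high = (100ULL << 20) >> 12;	/* 100M in pages */
	int mb;

	for (mb = 100; mb <= 112; mb += 4) {
		uint64_t usage = ((uint64_t)mb << 20) >> 12;

		printf("%3dM -> %4llu ms\n", mb,
		       (unsigned long long)penalty(usage, high));
	}
	return 0;
}

With HZ=1000 this prints 0, 102, 409 and 921 ms for 100M, 104M, 108M and 112M, matching the corresponding rows of the table.
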
@@ -2825,6 +2943,16 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
!page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
+
+ /*
+ * Enforce __GFP_NOFAIL allocation because callers are not
+ * prepared to see failures and likely do not have any failure
+ * handling code.
+ */
+ if (gfp & __GFP_NOFAIL) {
+ page_counter_charge(&memcg->kmem, nr_pages);
+ return 0;
+ }
cancel_charge(memcg, nr_pages);
return -ENOMEM;
}
@@ -3512,6 +3640,9 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
ret = mem_cgroup_resize_max(memcg, nr_pages, true);
break;
case _KMEM:
+ pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
+ "Please report your usecase to [email protected] if you "
+ "depend on this functionality.\n");
ret = memcg_update_kmem_max(memcg, nr_pages);
break;
case _TCP:
@@ -4805,11 +4936,6 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
}
}
-static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
-{
- mem_cgroup_id_get_many(memcg, 1);
-}
-
static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
{
mem_cgroup_id_put_many(memcg, 1);
@@ -4955,6 +5081,11 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
memcg->cgwb_frn[i].done =
__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
+ INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
+ memcg->deferred_split_queue.split_queue_len = 0;
+#endif
idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
return memcg;
fail:
@@ -5333,6 +5464,14 @@ static int mem_cgroup_move_account(struct page *page,
__mod_memcg_state(to, NR_WRITEBACK, nr_pages);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (compound && !list_empty(page_deferred_list(page))) {
+ spin_lock(&from->deferred_split_queue.split_queue_lock);
+ list_del_init(page_deferred_list(page));
+ from->deferred_split_queue.split_queue_len--;
+ spin_unlock(&from->deferred_split_queue.split_queue_lock);
+ }
+#endif
/*
* It is safe to change page->mem_cgroup here because the page
* is referenced, charged, and isolated - we can't race with
@@ -5341,6 +5480,17 @@ static int mem_cgroup_move_account(struct page *page,
/* caller should have done css_get */
page->mem_cgroup = to;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (compound && list_empty(page_deferred_list(page))) {
+ spin_lock(&to->deferred_split_queue.split_queue_lock);
+ list_add_tail(page_deferred_list(page),
+ &to->deferred_split_queue.split_queue);
+ to->deferred_split_queue.split_queue_len++;
+ spin_unlock(&to->deferred_split_queue.split_queue_lock);
+ }
+#endif
+
spin_unlock_irqrestore(&from->move_lock, flags);
ret = 0;
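Together, the two THP hunks above hand a deferred-split page from one memcg's queue to the other's. A minimal userspace model of that unhook/retarget/rehook sequence, with the locking elided and a toy list implementation standing in for the kernel's:

	#include <stdio.h>

	struct list { struct list *prev, *next; };

	static void list_init(struct list *l)        { l->prev = l->next = l; }
	static int  list_empty(const struct list *l) { return l->next == l; }

	static void list_del_init(struct list *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
		list_init(e);
	}

	static void list_add_tail(struct list *e, struct list *head)
	{
		e->prev = head->prev;
		e->next = head;
		head->prev->next = e;
		head->prev = e;
	}

	int main(void)
	{
		struct list from, to, page;

		list_init(&from); list_init(&to); list_init(&page);
		list_add_tail(&page, &from);    /* queued in source memcg */

		if (!list_empty(&page))         /* unhook under from's lock */
			list_del_init(&page);
		/* ... page->mem_cgroup is retargeted here ... */
		if (list_empty(&page))          /* rehook under to's lock */
			list_add_tail(&page, &to);

		printf("moved: %d\n", !list_empty(&to));
		return 0;
	}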
@@ -6511,7 +6661,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
unsigned int nr_pages = 1;
if (PageTransHuge(page)) {
- nr_pages <<= compound_order(page);
+ nr_pages = compound_nr(page);
ug->nr_huge += nr_pages;
}
if (PageAnon(page))
@@ -6523,7 +6673,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
}
ug->pgpgout++;
} else {
- ug->nr_kmem += 1 << compound_order(page);
+ ug->nr_kmem += compound_nr(page);
__ClearPageKmemcg(page);
}
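Several hunks in this series replace the open-coded 1 << compound_order(page) with compound_nr(page). The two spellings are equivalent; a tiny model, treating the order as a plain integer rather than reading it from the head page:

	#include <stdio.h>

	/* compound_nr(page) == 1UL << compound_order(page); non-compound
	 * pages have order 0, so the result is then simply 1. */
	static unsigned long compound_nr_model(unsigned int order)
	{
		return 1UL << order;
	}

	int main(void)
	{
		printf("%lu\n", compound_nr_model(0)); /* base page: 1    */
		printf("%lu\n", compound_nr_model(9)); /* x86-64 THP: 512 */
		return 0;
	}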
diff --git a/mm/memfd.c b/mm/memfd.c
index 650e65a46b9c..2647c898990c 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -39,6 +39,7 @@ static void memfd_tag_pins(struct xa_state *xas)
xas_for_each(xas, page, ULONG_MAX) {
if (xa_is_value(page))
continue;
+ page = find_subpage(page, xas->xa_index);
if (page_count(page) - page_mapcount(page) > 1)
xas_set_mark(xas, MEMFD_TAG_PINNED);
@@ -88,6 +89,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
bool clear = true;
if (xa_is_value(page))
continue;
+ page = find_subpage(page, xas.xa_index);
if (page_count(page) - page_mapcount(page) != 1) {
/*
* On the last scan, we clean up all those tags
diff --git a/mm/memory.c b/mm/memory.c
index b1dff75640b7..b1ca51a079f2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -518,7 +518,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
(long long)pte_val(pte), (long long)pmd_val(*pmd));
if (page)
dump_page(page, "bad pte");
- pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
+ pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
vma->vm_file,
@@ -1026,6 +1026,9 @@ again:
if (pte_none(ptent))
continue;
+ if (need_resched())
+ break;
+
if (pte_present(ptent)) {
struct page *page;
@@ -1093,7 +1096,6 @@ again:
if (unlikely(details))
continue;
- entry = pte_to_swp_entry(ptent);
if (!non_swap_entry(entry))
rss[MM_SWAPENTS]--;
else if (is_migration_entry(entry)) {
@@ -1124,8 +1126,11 @@ again:
if (force_flush) {
force_flush = 0;
tlb_flush_mmu(tlb);
- if (addr != end)
- goto again;
+ }
+
+ if (addr != end) {
+ cond_resched();
+ goto again;
}
return addr;
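Stripped of the pte machinery, the restructured loop now returns to the top after either a forced TLB flush or a voluntary reschedule point, instead of only after a flush. A compilable skeleton of that control flow, with a fixed work quantum standing in for need_resched():

	#include <stdio.h>

	static unsigned long zap_range(unsigned long addr, unsigned long end)
	{
		unsigned long budget;
	again:
		budget = 1024;                  /* work quantum per pass */
		while (addr != end) {
			if (budget-- == 0)      /* stands in for need_resched() */
				break;
			addr++;                 /* stands in for zapping one pte */
		}
		/* a forced tlb_flush_mmu() would run here */
		if (addr != end) {
			/* cond_resched() point */
			goto again;
		}
		return addr;
	}

	int main(void)
	{
		printf("%lu\n", zap_range(0, 4096)); /* 4096, in four passes */
		return 0;
	}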
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c73f09913165..b1be791f772d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -632,33 +632,30 @@ static void generic_online_page(struct page *page, unsigned int order)
#endif
}
-static int online_pages_blocks(unsigned long start, unsigned long nr_pages)
-{
- unsigned long end = start + nr_pages;
- int order, onlined_pages = 0;
-
- while (start < end) {
- order = min(MAX_ORDER - 1,
- get_order(PFN_PHYS(end) - PFN_PHYS(start)));
- (*online_page_callback)(pfn_to_page(start), order);
-
- onlined_pages += (1UL << order);
- start += (1UL << order);
- }
- return onlined_pages;
-}
-
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg)
{
- unsigned long onlined_pages = *(unsigned long *)arg;
+ const unsigned long end_pfn = start_pfn + nr_pages;
+ unsigned long pfn;
+ int order;
- if (PageReserved(pfn_to_page(start_pfn)))
- onlined_pages += online_pages_blocks(start_pfn, nr_pages);
+ /*
+ * Online the pages. The callback might decide to keep some pages
+ * PG_reserved (to add them to the buddy later), but we still account
+ * them as being online/belonging to this zone ("present").
+ */
+ for (pfn = start_pfn; pfn < end_pfn; pfn += 1ul << order) {
+ order = min(MAX_ORDER - 1, get_order(PFN_PHYS(end_pfn - pfn)));
+ /* __free_pages_core() wants pfns to be aligned to the order */
+ if (WARN_ON_ONCE(!IS_ALIGNED(pfn, 1ul << order)))
+ order = 0;
+ (*online_page_callback)(pfn_to_page(pfn), order);
+ }
- online_mem_sections(start_pfn, start_pfn + nr_pages);
+ /* mark all involved sections as online */
+ online_mem_sections(start_pfn, end_pfn);
- *(unsigned long *)arg = onlined_pages;
+ *(unsigned long *)arg += nr_pages;
return 0;
}
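The rewritten loop onlines the range in the largest naturally aligned chunks it can, falling back to order 0 when a pfn is misaligned. A compilable model of the same arithmetic, with PAGE_SHIFT and MAX_ORDER as on x86-64 and a range chosen to decompose into two chunks:

	#include <stdio.h>

	#define MAX_ORDER  11
	#define PAGE_SHIFT 12

	/* get_order(): smallest order whose page count covers "size" bytes */
	static int get_order(unsigned long size)
	{
		int order = 0;

		size = (size - 1) >> PAGE_SHIFT;
		while (size) {
			order++;
			size >>= 1;
		}
		return order;
	}

	int main(void)
	{
		unsigned long pfn = 0, end_pfn = 1536;
		int order = 0;

		for (; pfn < end_pfn; pfn += 1UL << order) {
			order = get_order((end_pfn - pfn) << PAGE_SHIFT);
			if (order > MAX_ORDER - 1)
				order = MAX_ORDER - 1;
			if (pfn & ((1UL << order) - 1))  /* alignment fallback */
				order = 0;
			printf("online pfn %lu order %d\n", pfn, order);
		}
		return 0;       /* prints: pfn 0 order 10, pfn 1024 order 9 */
	}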
@@ -714,8 +711,13 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
pgdat->node_start_pfn = start_pfn;
pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
-}
+}
+/*
+ * Associate the pfn range with the given zone, initializing the memmaps
+ * and resizing the pgdat/zone data to span the added pages. After this
+ * call, all affected pages are PG_reserved.
+ */
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap)
{
@@ -804,20 +806,6 @@ struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
return default_zone_for_pfn(nid, start_pfn, nr_pages);
}
-/*
- * Associates the given pfn range with the given node and the zone appropriate
- * for the given online type.
- */
-static struct zone * __meminit move_pfn_range(int online_type, int nid,
- unsigned long start_pfn, unsigned long nr_pages)
-{
- struct zone *zone;
-
- zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
- move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
- return zone;
-}
-
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
unsigned long flags;
@@ -840,7 +828,8 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
put_device(&mem->dev);
/* associate pfn range with the zone */
- zone = move_pfn_range(online_type, nid, pfn, nr_pages);
+ zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
+ move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
arg.start_pfn = pfn;
arg.nr_pages = nr_pages;
@@ -864,6 +853,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
online_pages_range);
if (ret) {
+ /* not a single memory resource was applicable */
if (need_zonelists_rebuild)
zone_pcp_reset(zone);
goto failed_addition;
@@ -877,27 +867,22 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
shuffle_zone(zone);
- if (onlined_pages) {
- node_states_set_node(nid, &arg);
- if (need_zonelists_rebuild)
- build_all_zonelists(NULL);
- else
- zone_pcp_update(zone);
- }
+ node_states_set_node(nid, &arg);
+ if (need_zonelists_rebuild)
+ build_all_zonelists(NULL);
+ else
+ zone_pcp_update(zone);
init_per_zone_wmark_min();
- if (onlined_pages) {
- kswapd_run(nid);
- kcompactd_run(nid);
- }
+ kswapd_run(nid);
+ kcompactd_run(nid);
vm_total_pages = nr_free_pagecache_pages();
writeback_set_ratelimit();
- if (onlined_pages)
- memory_notify(MEM_ONLINE, &arg);
+ memory_notify(MEM_ONLINE, &arg);
mem_hotplug_done();
return 0;
@@ -933,8 +918,11 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
if (!pgdat)
return NULL;
+ pgdat->per_cpu_nodestats =
+ alloc_percpu(struct per_cpu_nodestat);
arch_refresh_nodedata(nid, pgdat);
} else {
+ int cpu;
/*
* Reset the nr_zones, order and classzone_idx before reuse.
* Note that kswapd will init kswapd_classzone_idx properly
@@ -943,6 +931,12 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
pgdat->nr_zones = 0;
pgdat->kswapd_order = 0;
pgdat->kswapd_classzone_idx = 0;
+ for_each_online_cpu(cpu) {
+ struct per_cpu_nodestat *p;
+
+ p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
+ memset(p, 0, sizeof(*p));
+ }
}
/* we can use NODE_DATA(nid) from here */
@@ -952,7 +946,6 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
/* init node's zones as empty zones, we don't have any present pages.*/
free_area_init_core_hotplug(nid);
- pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
/*
* The node we allocated has no zone fallback lists. For avoiding
@@ -1309,7 +1302,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
head = compound_head(page);
if (page_huge_active(head))
return pfn;
- skip = (1 << compound_order(head)) - (page - head);
+ skip = compound_nr(head) - (page - head);
pfn += skip - 1;
}
return 0;
@@ -1347,7 +1340,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (PageHuge(page)) {
struct page *head = compound_head(page);
- pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
+ pfn = page_to_pfn(head) + compound_nr(head) - 1;
isolate_huge_page(head, &source);
continue;
} else if (PageTransHuge(page))
@@ -1662,7 +1655,7 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
phys_addr_t beginpa, endpa;
beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
- endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
+ endpa = beginpa + memory_block_size_bytes() - 1;
pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
&beginpa, &endpa);
@@ -1800,7 +1793,7 @@ void __remove_memory(int nid, u64 start, u64 size)
{
/*
- * trigger BUG() is some memory is not offlined prior to calling this
+ * trigger BUG() if some memory is not offlined prior to calling this
* function
*/
if (try_remove_memory(nid, start, size))
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f000771558d8..de27d08b1ff8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1405,6 +1405,7 @@ static long kernel_mbind(unsigned long start, unsigned long len,
int err;
unsigned short mode_flags;
+ start = untagged_addr(start);
mode_flags = mode & MPOL_MODE_FLAGS;
mode &= ~MPOL_MODE_FLAGS;
if (mode >= MPOL_MAX)
@@ -1512,10 +1513,6 @@ static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
if (nodes_empty(*new))
goto out_put;
- nodes_and(*new, *new, node_states[N_MEMORY]);
- if (nodes_empty(*new))
- goto out_put;
-
err = security_task_movememory(task);
if (err)
goto out_put;
@@ -1562,6 +1559,8 @@ static int kernel_get_mempolicy(int __user *policy,
int uninitialized_var(pval);
nodemask_t nodes;
+ addr = untagged_addr(addr);
+
if (nmask != NULL && maxnode < nr_node_ids)
return -EINVAL;
diff --git a/mm/migrate.c b/mm/migrate.c
index 9f4ed4e985c1..4fe45d1428c8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -460,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
for (i = 1; i < HPAGE_PMD_NR; i++) {
xas_next(&xas);
- xas_store(&xas, newpage + i);
+ xas_store(&xas, newpage);
}
}
@@ -1612,7 +1612,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
goto out_flush;
if (get_user(node, nodes + i))
goto out_flush;
- addr = (unsigned long)p;
+ addr = (unsigned long)untagged_addr(p);
err = -ENODEV;
if (node < 0 || node >= MAX_NUMNODES)
@@ -1892,7 +1892,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
/* Avoid migrating to a node that is nearly full */
- if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
+ if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
return 0;
if (isolate_lru_page(page))
@@ -2218,17 +2218,15 @@ again:
pte_t pte;
pte = *ptep;
- pfn = pte_pfn(pte);
if (pte_none(pte)) {
mpfn = MIGRATE_PFN_MIGRATE;
migrate->cpages++;
- pfn = 0;
goto next;
}
if (!pte_present(pte)) {
- mpfn = pfn = 0;
+ mpfn = 0;
/*
* Only care about unaddressable device page special
@@ -2245,10 +2243,10 @@ again:
if (is_write_device_private_entry(entry))
mpfn |= MIGRATE_PFN_WRITE;
} else {
+ pfn = pte_pfn(pte);
if (is_zero_pfn(pfn)) {
mpfn = MIGRATE_PFN_MIGRATE;
migrate->cpages++;
- pfn = 0;
goto next;
}
page = vm_normal_page(migrate->vma, addr, pte);
@@ -2258,10 +2256,9 @@ again:
/* FIXME support THP */
if (!page || !page->mapping || PageTransCompound(page)) {
- mpfn = pfn = 0;
+ mpfn = 0;
goto next;
}
- pfn = page_to_pfn(page);
/*
* By getting a reference on the page we pin it and that blocks
diff --git a/mm/mincore.c b/mm/mincore.c
index f9a9dbe8cd33..49b6fa2f6aa1 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -256,6 +256,8 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
unsigned long pages;
unsigned char *tmp;
+ start = untagged_addr(start);
+
/* Check the start address: needs to be page-aligned.. */
if (start & ~PAGE_MASK)
return -EINVAL;
diff --git a/mm/mlock.c b/mm/mlock.c
index a90099da4fb4..a72c1eeded77 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -674,6 +674,8 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
unsigned long lock_limit;
int error = -ENOMEM;
+ start = untagged_addr(start);
+
if (!can_do_mlock())
return -EPERM;
@@ -735,6 +737,8 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
int ret;
+ start = untagged_addr(start);
+
len = PAGE_ALIGN(len + (offset_in_page(start)));
start &= PAGE_MASK;
diff --git a/mm/mmap.c b/mm/mmap.c
index 6bc21fca20bc..a7d8c84d19b7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -201,6 +201,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
bool downgraded = false;
LIST_HEAD(uf);
+ brk = untagged_addr(brk);
+
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
@@ -289,9 +291,9 @@ out:
return retval;
}
-static long vma_compute_subtree_gap(struct vm_area_struct *vma)
+static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
{
- unsigned long max, prev_end, subtree_gap;
+ unsigned long gap, prev_end;
/*
* Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
@@ -299,14 +301,21 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
* an unmapped area; whereas when expanding we only require one.
* That's a little inconsistent, but keeps the code here simpler.
*/
- max = vm_start_gap(vma);
+ gap = vm_start_gap(vma);
if (vma->vm_prev) {
prev_end = vm_end_gap(vma->vm_prev);
- if (max > prev_end)
- max -= prev_end;
+ if (gap > prev_end)
+ gap -= prev_end;
else
- max = 0;
+ gap = 0;
}
+ return gap;
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
+static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
+{
+ unsigned long max = vma_compute_gap(vma), subtree_gap;
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -322,7 +331,6 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
return max;
}
-#ifdef CONFIG_DEBUG_VM_RB
static int browse_rb(struct mm_struct *mm)
{
struct rb_root *root = &mm->mm_rb;
@@ -428,8 +436,9 @@ static void validate_mm(struct mm_struct *mm)
#define validate_mm(mm) do { } while (0)
#endif
-RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
- unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
+RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
+ struct vm_area_struct, vm_rb,
+ unsigned long, rb_subtree_gap, vma_compute_gap)
/*
* Update augmented rbtree rb_subtree_gap values after vma->vm_start or
@@ -439,8 +448,8 @@ RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
static void vma_gap_update(struct vm_area_struct *vma)
{
/*
- * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
- * function that does exactly what we want.
+ * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
+ * a callback function that does exactly what we want.
*/
vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
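Factored out, vma_compute_gap() is just "space between the previous mapping's end and this one's start, never negative"; a compilable model, ignoring the stack-guard adjustments that vm_start_gap()/vm_end_gap() apply:

	#include <stdio.h>

	struct vma_model { unsigned long start, end, prev_end; };

	/* free gap ending at this VMA's start, clipped at the previous end */
	static unsigned long gap(const struct vma_model *v)
	{
		return v->start > v->prev_end ? v->start - v->prev_end : 0;
	}

	int main(void)
	{
		struct vma_model v = { 0x5000, 0x8000, 0x2000 };

		printf("gap: 0x%lx\n", gap(&v)); /* 0x3000 */
		return 0;
	}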
@@ -1358,6 +1367,9 @@ static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
if (S_ISBLK(inode->i_mode))
return MAX_LFS_FILESIZE;
+ if (S_ISSOCK(inode->i_mode))
+ return MAX_LFS_FILESIZE;
+
/* Special "we do even unsigned file positions" case */
if (file->f_mode & FMODE_UNSIGNED_OFFSET)
return 0;
@@ -1577,6 +1589,8 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
struct file *file = NULL;
unsigned long retval;
+ addr = untagged_addr(addr);
+
if (!(flags & MAP_ANONYMOUS)) {
audit_mmap_fd(fd, flags);
file = fget(fd);
@@ -2274,12 +2288,9 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
if (vma) {
*pprev = vma->vm_prev;
} else {
- struct rb_node *rb_node = mm->mm_rb.rb_node;
- *pprev = NULL;
- while (rb_node) {
- *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
- rb_node = rb_node->rb_right;
- }
+ struct rb_node *rb_node = rb_last(&mm->mm_rb);
+
+ *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
}
return vma;
}
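The deleted loop was an open-coded rb_last(); for reference, a standalone model of what that helper does (node layout reduced to the two child pointers):

	#include <stdio.h>

	struct rb_node { struct rb_node *rb_left, *rb_right; };
	struct rb_root { struct rb_node *rb_node; };

	/* rb_last(): follow right children to the bottom of the tree */
	static struct rb_node *rb_last_model(const struct rb_root *root)
	{
		struct rb_node *n = root->rb_node;

		if (!n)
			return NULL;
		while (n->rb_right)
			n = n->rb_right;
		return n;
	}

	int main(void)
	{
		struct rb_node a = {0}, b = {0}, c = {0};
		struct rb_root root = { &b };

		b.rb_left = &a;
		b.rb_right = &c;
		printf("%s\n", rb_last_model(&root) == &c ? "ok" : "bug");
		return 0;
	}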
@@ -2878,6 +2889,7 @@ EXPORT_SYMBOL(vm_munmap);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
+ addr = untagged_addr(addr);
profile_munmap(addr);
return __vm_munmap(addr, len, true);
}
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 8c943a6e1696..7d70e5c78f97 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -271,8 +271,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
tlb_flush_mmu(tlb);
- /* keep the page table cache within bounds */
- check_pgt_cache();
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
tlb_batch_list_free(tlb);
#endif
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 675e5d34a507..7967825f6d33 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -459,6 +459,8 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
(prot & PROT_READ);
+ start = untagged_addr(start);
+
prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
return -EINVAL;
diff --git a/mm/mremap.c b/mm/mremap.c
index fc241d23cd97..1fc8a29fbe3f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -606,6 +606,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
LIST_HEAD(uf_unmap_early);
LIST_HEAD(uf_unmap);
+ addr = untagged_addr(addr);
+ new_addr = untagged_addr(new_addr);
+
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
return ret;
diff --git a/mm/msync.c b/mm/msync.c
index ef30a429623a..c3bd3e75f687 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -37,6 +37,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
int unmapped_error = 0;
int error = -EINVAL;
+ start = untagged_addr(start);
+
if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
goto out;
if (offset_in_page(start))
diff --git a/mm/nommu.c b/mm/nommu.c
index fed1b6e9c89b..99b7ec318824 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -108,7 +108,7 @@ unsigned int kobjsize(const void *objp)
* The ksize() function is only guaranteed to work for pointers
* returned by kmalloc(). So handle arbitrary pointers here.
*/
- return PAGE_SIZE << compound_order(page);
+ return page_size(page);
}
/**
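page_size() is the byte-sized counterpart of compound_nr() and replaces the recurring PAGE_SIZE << compound_order(page) pattern throughout this series; the same one-line model:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	/* page_size(page) == PAGE_SIZE << compound_order(page) */
	static unsigned long page_size_model(unsigned int order)
	{
		return PAGE_SIZE << order;
	}

	int main(void)
	{
		printf("%lu\n", page_size_model(0)); /* 4096              */
		printf("%lu\n", page_size_model(9)); /* 2MiB THP: 2097152 */
		return 0;
	}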
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index eda2e2a0bdc6..71e3acea7817 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -73,7 +73,7 @@ static inline bool is_memcg_oom(struct oom_control *oc)
/**
 * oom_cpuset_eligible() - check task eligibility for kill
* @start: task struct of which task to consider
- * @mask: nodemask passed to page allocator for mempolicy ooms
+ * @oc: pointer to struct oom_control
*
* Task eligibility is determined by whether or not a candidate task, @tsk,
* shares the same mempolicy nodes as current if it is bound by such a policy
@@ -287,7 +287,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
!nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
oc->totalpages = total_swap_pages;
for_each_node_mask(nid, *oc->nodemask)
- oc->totalpages += node_spanned_pages(nid);
+ oc->totalpages += node_present_pages(nid);
return CONSTRAINT_MEMORY_POLICY;
}
@@ -300,7 +300,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
if (cpuset_limited) {
oc->totalpages = total_swap_pages;
for_each_node_mask(nid, cpuset_current_mems_allowed)
- oc->totalpages += node_spanned_pages(nid);
+ oc->totalpages += node_present_pages(nid);
return CONSTRAINT_CPUSET;
}
return CONSTRAINT_NONE;
@@ -523,7 +523,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
set_bit(MMF_UNSTABLE, &mm->flags);
for (vma = mm->mmap ; vma; vma = vma->vm_next) {
- if (!can_madv_dontneed_vma(vma))
+ if (!can_madv_lru_vma(vma))
continue;
/*
@@ -884,12 +884,13 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
*/
do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
mark_oom_victim(victim);
- pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
- message, task_pid_nr(victim), victim->comm,
- K(victim->mm->total_vm),
- K(get_mm_counter(victim->mm, MM_ANONPAGES)),
- K(get_mm_counter(victim->mm, MM_FILEPAGES)),
- K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
+ pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
+ message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
+ K(get_mm_counter(mm, MM_ANONPAGES)),
+ K(get_mm_counter(mm, MM_FILEPAGES)),
+ K(get_mm_counter(mm, MM_SHMEMPAGES)),
+ from_kuid(&init_user_ns, task_uid(victim)),
+ mm_pgtables_bytes(mm), victim->signal->oom_score_adj);
task_unlock(victim);
/*
@@ -1068,9 +1069,10 @@ bool out_of_memory(struct oom_control *oc)
* The OOM killer does not compensate for IO-less reclaim.
* pagefault_out_of_memory lost its gfp context so we have to
* make sure exclude 0 mask - all other users should have at least
- * ___GFP_DIRECT_RECLAIM to get here.
+ * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
+ * invoke the OOM killer even if it is a GFP_NOFS allocation.
*/
- if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
+ if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
return true;
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff5484fdbdf9..3334a769eb91 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -670,6 +670,7 @@ out:
void free_compound_page(struct page *page)
{
+ mem_cgroup_uncharge(page);
__free_pages_ok(page, compound_order(page));
}
@@ -3955,14 +3956,22 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
goto check_priority;
/*
+ * compaction was skipped because there are not enough order-0 pages
+ * to work with, so we retry only if it looks like reclaim can help.
+ */
+ if (compaction_needs_reclaim(compact_result)) {
+ ret = compaction_zonelist_suitable(ac, order, alloc_flags);
+ goto out;
+ }
+
+ /*
* make sure the compaction wasn't deferred or didn't bail out early
* due to locks contention before we declare that we should give up.
- * But do not retry if the given zonelist is not suitable for
- * compaction.
+ * But the next retry should use a higher priority if allowed, so
+ * we don't just keep bailing out endlessly.
*/
if (compaction_withdrawn(compact_result)) {
- ret = compaction_zonelist_suitable(ac, order, alloc_flags);
- goto out;
+ goto check_priority;
}
/*
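The retry policy above now distinguishes two outcomes: compaction skipped for lack of order-0 pages (retry only if reclaim can help) versus compaction withdrawn (escalate priority). A sketch of that decision table with illustrative enum values, not the kernel's compact_result codes:

	#include <stdio.h>

	enum result { SUCCESS, NEEDS_RECLAIM, WITHDRAWN };

	static const char *should_retry(enum result r)
	{
		if (r == NEEDS_RECLAIM)
			return "retry only if reclaim can free order-0 pages";
		if (r == WITHDRAWN)
			return "retry at a higher compaction priority";
		return "done";
	}

	int main(void)
	{
		printf("%s\n", should_retry(NEEDS_RECLAIM));
		printf("%s\n", should_retry(WITHDRAWN));
		return 0;
	}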
@@ -6638,9 +6647,11 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
- spin_lock_init(&pgdat->split_queue_lock);
- INIT_LIST_HEAD(&pgdat->split_queue);
- pgdat->split_queue_len = 0;
+ struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
+
+ spin_lock_init(&ds_queue->split_queue_lock);
+ INIT_LIST_HEAD(&ds_queue->split_queue);
+ ds_queue->split_queue_len = 0;
}
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
@@ -8196,7 +8207,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
if (!hugepage_migration_supported(page_hstate(head)))
goto unmovable;
- skip_pages = (1 << compound_order(head)) - (page - head);
+ skip_pages = compound_nr(head) - (page - head);
iter += skip_pages - 1;
continue;
}
diff --git a/mm/page_owner.c b/mm/page_owner.c
index addcbb2ae4e4..dee931184788 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -24,6 +24,9 @@ struct page_owner {
short last_migrate_reason;
gfp_t gfp_mask;
depot_stack_handle_t handle;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ depot_stack_handle_t free_handle;
+#endif
};
static bool page_owner_disabled = true;
@@ -102,19 +105,6 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
return (void *)page_ext + page_owner_ops.offset;
}
-void __reset_page_owner(struct page *page, unsigned int order)
-{
- int i;
- struct page_ext *page_ext;
-
- for (i = 0; i < (1 << order); i++) {
- page_ext = lookup_page_ext(page + i);
- if (unlikely(!page_ext))
- continue;
- __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
- }
-}
-
static inline bool check_recursive_alloc(unsigned long *entries,
unsigned int nr_entries,
unsigned long ip)
@@ -154,18 +144,50 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
return handle;
}
-static inline void __set_page_owner_handle(struct page_ext *page_ext,
- depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
+void __reset_page_owner(struct page *page, unsigned int order)
{
+ int i;
+ struct page_ext *page_ext;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ depot_stack_handle_t handle = 0;
struct page_owner *page_owner;
- page_owner = get_page_owner(page_ext);
- page_owner->handle = handle;
- page_owner->order = order;
- page_owner->gfp_mask = gfp_mask;
- page_owner->last_migrate_reason = -1;
+ if (debug_pagealloc_enabled())
+ handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+#endif
- __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+ for (i = 0; i < (1 << order); i++) {
+ page_ext = lookup_page_ext(page + i);
+ if (unlikely(!page_ext))
+ continue;
+ __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ if (debug_pagealloc_enabled()) {
+ page_owner = get_page_owner(page_ext);
+ page_owner->free_handle = handle;
+ }
+#endif
+ }
+}
+
+static inline void __set_page_owner_handle(struct page *page,
+ struct page_ext *page_ext, depot_stack_handle_t handle,
+ unsigned int order, gfp_t gfp_mask)
+{
+ struct page_owner *page_owner;
+ int i;
+
+	for (i = 0; i < (1 << order); i++) {
+		/* the caller passes the ext for page; look up the rest */
+		if (i)
+			page_ext = lookup_page_ext(page + i);
+		page_owner = get_page_owner(page_ext);
+		page_owner->handle = handle;
+		page_owner->order = order;
+		page_owner->gfp_mask = gfp_mask;
+		page_owner->last_migrate_reason = -1;
+		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+		__set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+	}
}
noinline void __set_page_owner(struct page *page, unsigned int order,
@@ -178,7 +200,7 @@ noinline void __set_page_owner(struct page *page, unsigned int order,
return;
handle = save_stack(gfp_mask);
- __set_page_owner_handle(page_ext, handle, order, gfp_mask);
+ __set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
}
void __set_page_owner_migrate_reason(struct page *page, int reason)
@@ -204,8 +226,11 @@ void __split_page_owner(struct page *page, unsigned int order)
page_owner = get_page_owner(page_ext);
page_owner->order = 0;
- for (i = 1; i < (1 << order); i++)
- __copy_page_owner(page, page + i);
+ for (i = 1; i < (1 << order); i++) {
+ page_ext = lookup_page_ext(page + i);
+ page_owner = get_page_owner(page_ext);
+ page_owner->order = 0;
+ }
}
void __copy_page_owner(struct page *oldpage, struct page *newpage)
@@ -235,6 +260,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
* the new page, which will be freed.
*/
__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
+ __set_bit(PAGE_EXT_OWNER_ACTIVE, &new_ext->flags);
}
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -294,7 +320,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
if (unlikely(!page_ext))
continue;
- if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+ if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
continue;
page_owner = get_page_owner(page_ext);
@@ -405,20 +431,36 @@ void __dump_page_owner(struct page *page)
mt = gfpflags_to_migratetype(gfp_mask);
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
- pr_alert("page_owner info is not active (free page?)\n");
+ pr_alert("page_owner info is not present (never set?)\n");
return;
}
+ if (test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+ pr_alert("page_owner tracks the page as allocated\n");
+ else
+ pr_alert("page_owner tracks the page as freed\n");
+
+ pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
+ page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
+
handle = READ_ONCE(page_owner->handle);
if (!handle) {
- pr_alert("page_owner info is not active (free page?)\n");
- return;
+ pr_alert("page_owner allocation stack trace missing\n");
+ } else {
+ nr_entries = stack_depot_fetch(handle, &entries);
+ stack_trace_print(entries, nr_entries, 0);
}
- nr_entries = stack_depot_fetch(handle, &entries);
- pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
- page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
- stack_trace_print(entries, nr_entries, 0);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ handle = READ_ONCE(page_owner->free_handle);
+ if (!handle) {
+ pr_alert("page_owner free stack trace missing\n");
+ } else {
+ nr_entries = stack_depot_fetch(handle, &entries);
+ pr_alert("page last free stack trace:\n");
+ stack_trace_print(entries, nr_entries, 0);
+ }
+#endif
if (page_owner->last_migrate_reason != -1)
pr_alert("page has been migrated, last migrate reason: %s\n",
@@ -481,9 +523,23 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
+ /*
+ * Although we do have the info about past allocation of free
+ * pages, it's not relevant for current memory usage.
+ */
+ if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+ continue;
+
page_owner = get_page_owner(page_ext);
/*
+ * Don't print "tail" pages of high-order allocations as that
+ * would inflate the stats.
+ */
+ if (!IS_ALIGNED(pfn, 1 << page_owner->order))
+ continue;
+
+ /*
* Access to page_ext->handle isn't synchronous so we should
* be careful to access it.
*/
@@ -562,7 +618,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
continue;
/* Found early allocated page */
- __set_page_owner_handle(page_ext, early_handle, 0, 0);
+ __set_page_owner_handle(page, page_ext, early_handle,
+ 0, 0);
count++;
}
cond_resched();
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 21d4f97cb49b..34b9181ee5d1 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -101,7 +101,7 @@ static void unpoison_page(struct page *page)
/*
* Page poisoning when enabled poisons each and every page
* that is freed to buddy. Thus no extra check is done to
- * see if a page was posioned.
+ * see if a page was poisoned.
*/
check_poison_mem(addr, PAGE_SIZE);
kunmap_atomic(addr);
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 11df03e71288..eff4b4520c8d 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (unlikely(PageHuge(pvmw->page))) {
/* when pud is not present, pte will be NULL */
- pvmw->pte = huge_pte_offset(mm, pvmw->address,
- PAGE_SIZE << compound_order(page));
+ pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
if (!pvmw->pte)
return false;
diff --git a/mm/quicklist.c b/mm/quicklist.c
deleted file mode 100644
index 5e98ac78e410..000000000000
--- a/mm/quicklist.c
+++ /dev/null
@@ -1,103 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Quicklist support.
- *
- * Quicklists are light weight lists of pages that have a defined state
- * on alloc and free. Pages must be in the quicklist specific defined state
- * (zero by default) when the page is freed. It seems that the initial idea
- * for such lists first came from Dave Miller and then various other people
- * improved on it.
- *
- * Copyright (C) 2007 SGI,
- * Christoph Lameter <[email protected]>
- * Generalized, added support for multiple lists and
- * constructors / destructors.
- */
-#include <linux/kernel.h>
-
-#include <linux/gfp.h>
-#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/quicklist.h>
-
-DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);
-
-#define FRACTION_OF_NODE_MEM 16
-
-static unsigned long max_pages(unsigned long min_pages)
-{
- unsigned long node_free_pages, max;
- int node = numa_node_id();
- struct zone *zones = NODE_DATA(node)->node_zones;
- int num_cpus_on_node;
-
- node_free_pages =
-#ifdef CONFIG_ZONE_DMA
- zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
-#endif
-#ifdef CONFIG_ZONE_DMA32
- zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
-#endif
- zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
-
- max = node_free_pages / FRACTION_OF_NODE_MEM;
-
- num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
- max /= num_cpus_on_node;
-
- return max(max, min_pages);
-}
-
-static long min_pages_to_free(struct quicklist *q,
- unsigned long min_pages, long max_free)
-{
- long pages_to_free;
-
- pages_to_free = q->nr_pages - max_pages(min_pages);
-
- return min(pages_to_free, max_free);
-}
-
-/*
- * Trim down the number of pages in the quicklist
- */
-void quicklist_trim(int nr, void (*dtor)(void *),
- unsigned long min_pages, unsigned long max_free)
-{
- long pages_to_free;
- struct quicklist *q;
-
- q = &get_cpu_var(quicklist)[nr];
- if (q->nr_pages > min_pages) {
- pages_to_free = min_pages_to_free(q, min_pages, max_free);
-
- while (pages_to_free > 0) {
- /*
- * We pass a gfp_t of 0 to quicklist_alloc here
- * because we will never call into the page allocator.
- */
- void *p = quicklist_alloc(nr, 0, NULL);
-
- if (dtor)
- dtor(p);
- free_page((unsigned long)p);
- pages_to_free--;
- }
- }
- put_cpu_var(quicklist);
-}
-
-unsigned long quicklist_total_size(void)
-{
- unsigned long count = 0;
- int cpu;
- struct quicklist *ql, *q;
-
- for_each_online_cpu(cpu) {
- ql = per_cpu(quicklist, cpu);
- for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
- count += q->nr_pages;
- }
- return count;
-}
-
diff --git a/mm/rmap.c b/mm/rmap.c
index 003377e24232..d9a23bb773bf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -898,15 +898,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
0, vma, vma->vm_mm, address,
- min(vma->vm_end, address +
- (PAGE_SIZE << compound_order(page))));
+ min(vma->vm_end, address + page_size(page)));
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(&pvmw)) {
- unsigned long cstart;
int ret = 0;
- cstart = address = pvmw.address;
+ address = pvmw.address;
if (pvmw.pte) {
pte_t entry;
pte_t *pte = pvmw.pte;
@@ -933,7 +931,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
entry = pmd_wrprotect(entry);
entry = pmd_mkclean(entry);
set_pmd_at(vma->vm_mm, address, pmd, entry);
- cstart &= PMD_MASK;
ret = 1;
#else
/* unexpected pmd-mapped page? */
@@ -1192,8 +1189,10 @@ void page_add_file_rmap(struct page *page, bool compound)
}
if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
goto out;
- VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
- __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
+ if (PageSwapBacked(page))
+ __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
+ else
+ __inc_node_page_state(page, NR_FILE_PMDMAPPED);
} else {
if (PageTransCompound(page) && page_mapping(page)) {
VM_WARN_ON_ONCE(!PageLocked(page));
@@ -1232,8 +1231,10 @@ static void page_remove_file_rmap(struct page *page, bool compound)
}
if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
goto out;
- VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
- __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
+ if (PageSwapBacked(page))
+ __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
+ else
+ __dec_node_page_state(page, NR_FILE_PMDMAPPED);
} else {
if (!atomic_add_negative(-1, &page->_mapcount))
goto out;
@@ -1374,8 +1375,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
address,
- min(vma->vm_end, address +
- (PAGE_SIZE << compound_order(page))));
+ min(vma->vm_end, address + page_size(page)));
if (PageHuge(page)) {
/*
* If sharing is possible, start and end will be adjusted
@@ -1524,8 +1524,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (PageHuge(page)) {
- int nr = 1 << compound_order(page);
- hugetlb_count_sub(nr, mm);
+ hugetlb_count_sub(compound_nr(page), mm);
set_huge_swap_pte_at(mm, address,
pvmw.pte, pteval,
vma_mmu_pagesize(vma));
diff --git a/mm/shmem.c b/mm/shmem.c
index 0f7fd4a85db6..30ce722c23fa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -609,7 +609,7 @@ static int shmem_add_to_page_cache(struct page *page,
{
XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
unsigned long i = 0;
- unsigned long nr = 1UL << compound_order(page);
+ unsigned long nr = compound_nr(page);
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -631,7 +631,7 @@ static int shmem_add_to_page_cache(struct page *page,
if (xas_error(&xas))
goto unlock;
next:
- xas_store(&xas, page + i);
+ xas_store(&xas, page);
if (++i < nr) {
xas_next(&xas);
goto next;
@@ -1734,7 +1734,7 @@ unlock:
* vm. If we swap it in we mark it dirty since we also free the swap
* entry since a page cannot live in both the swap and page cache.
*
- * fault_mm and fault_type are only supplied by shmem_fault:
+ * vmf and fault_type are only supplied by shmem_fault:
* otherwise they are NULL.
*/
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
@@ -1884,7 +1884,7 @@ alloc_nohuge:
lru_cache_add_anon(page);
spin_lock_irq(&info->lock);
- info->alloced += 1 << compound_order(page);
+ info->alloced += compound_nr(page);
inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
shmem_recalc_inode(inode);
spin_unlock_irq(&info->lock);
@@ -1925,7 +1925,7 @@ clear:
struct page *head = compound_head(page);
int i;
- for (i = 0; i < (1 << compound_order(head)); i++) {
+ for (i = 0; i < compound_nr(head); i++) {
clear_highpage(head + i);
flush_dcache_page(head + i);
}
@@ -1952,7 +1952,7 @@ clear:
* Error recovery.
*/
unacct:
- shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
+ shmem_inode_unacct_blocks(inode, compound_nr(page));
if (PageTransHuge(page)) {
unlock_page(page);
diff --git a/mm/slab.h b/mm/slab.h
index 9057b8056b07..68e455f2b698 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -30,6 +30,69 @@ struct kmem_cache {
struct list_head list; /* List of all slab caches on the system */
};
+#else /* !CONFIG_SLOB */
+
+struct memcg_cache_array {
+ struct rcu_head rcu;
+ struct kmem_cache *entries[0];
+};
+
+/*
+ * This is the main placeholder for memcg-related information in kmem caches.
+ * Both the root cache and the child caches will have it. For the root cache,
+ * this will hold a dynamically allocated array large enough to hold
+ * information about the currently limited memcgs in the system. To allow the
+ * array to be accessed without taking any locks, on relocation we free the old
+ * version only after a grace period.
+ *
+ * Root and child caches hold different metadata.
+ *
+ * @root_cache: Common to root and child caches. NULL for root, pointer to
+ * the root cache for children.
+ *
+ * The following fields are specific to root caches.
+ *
+ * @memcg_caches: kmemcg ID indexed table of child caches. This table is
+ * used to index child caches during allocation and cleared
+ * early during shutdown.
+ *
+ * @root_caches_node: List node for slab_root_caches list.
+ *
+ * @children: List of all child caches. While the child caches are also
+ * reachable through @memcg_caches, a child cache remains on
+ * this list until it is actually destroyed.
+ *
+ * The following fields are specific to child caches.
+ *
+ * @memcg: Pointer to the memcg this cache belongs to.
+ *
+ * @children_node: List node for @root_cache->children list.
+ *
+ * @kmem_caches_node: List node for @memcg->kmem_caches list.
+ */
+struct memcg_cache_params {
+ struct kmem_cache *root_cache;
+ union {
+ struct {
+ struct memcg_cache_array __rcu *memcg_caches;
+ struct list_head __root_caches_node;
+ struct list_head children;
+ bool dying;
+ };
+ struct {
+ struct mem_cgroup *memcg;
+ struct list_head children_node;
+ struct list_head kmem_caches_node;
+ struct percpu_ref refcnt;
+
+ void (*work_fn)(struct kmem_cache *);
+ union {
+ struct rcu_head rcu_head;
+ struct work_struct work;
+ };
+ };
+ };
+};
#endif /* CONFIG_SLOB */
#ifdef CONFIG_SLAB
@@ -174,6 +237,7 @@ int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
+void kmem_cache_shrink_all(struct kmem_cache *s);
struct seq_file;
struct file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 807490fe217a..6491c3a41805 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -981,6 +981,43 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
}
EXPORT_SYMBOL(kmem_cache_shrink);
+/**
+ * kmem_cache_shrink_all - shrink a cache and all memcg caches for root cache
+ * @s: The cache pointer
+ */
+void kmem_cache_shrink_all(struct kmem_cache *s)
+{
+ struct kmem_cache *c;
+
+ if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) {
+ kmem_cache_shrink(s);
+ return;
+ }
+
+ get_online_cpus();
+ get_online_mems();
+ kasan_cache_shrink(s);
+ __kmem_cache_shrink(s);
+
+ /*
+ * We have to take the slab_mutex to protect from the memcg list
+ * modification.
+ */
+ mutex_lock(&slab_mutex);
+ for_each_memcg_cache(c, s) {
+ /*
+ * Don't need to shrink deactivated memcg caches.
+ */
+ if (s->flags & SLAB_DEACTIVATED)
+ continue;
+ kasan_cache_shrink(c);
+ __kmem_cache_shrink(c);
+ }
+ mutex_unlock(&slab_mutex);
+ put_online_mems();
+ put_online_cpus();
+}
+
bool slab_is_available(void)
{
return slab_state >= UP;
diff --git a/mm/slob.c b/mm/slob.c
index 7f421d0ca9ab..cf377beab962 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -539,7 +539,7 @@ size_t __ksize(const void *block)
sp = virt_to_page(block);
if (unlikely(!PageSlab(sp)))
- return PAGE_SIZE << compound_order(sp);
+ return page_size(sp);
align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
m = (unsigned int *)(block - align);
diff --git a/mm/slub.c b/mm/slub.c
index 8834563cdb4b..42c1b3af3c98 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
return 1;
start = page_address(page);
- length = PAGE_SIZE << compound_order(page);
+ length = page_size(page);
end = start + length;
remainder = length % s->size;
if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
init_tracking(s, object);
}
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
{
if (!(s->flags & SLAB_POISON))
return;
metadata_access_enable();
- memset(addr, POISON_INUSE, PAGE_SIZE << order);
+ memset(addr, POISON_INUSE, page_size(page));
metadata_access_disable();
}
@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
- void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
static inline int alloc_debug_processing(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
void *start, *p, *next;
- int idx, order;
+ int idx;
bool shuffle;
flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page->objects = oo_objects(oo);
- order = compound_order(page);
page->slab_cache = s;
__SetPageSlab(page);
if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
start = page_address(page);
- setup_page_debug(s, start, order);
+ setup_page_debug(s, page, start);
shuffle = shuffle_freelist(s, page);
@@ -2004,6 +2004,7 @@ static inline unsigned long next_tid(unsigned long tid)
return tid + TID_STEP;
}
+#ifdef SLUB_DEBUG_CMPXCHG
static inline unsigned int tid_to_cpu(unsigned long tid)
{
return tid % TID_STEP;
@@ -2013,6 +2014,7 @@ static inline unsigned long tid_to_event(unsigned long tid)
{
return tid / TID_STEP;
}
+#endif
static inline unsigned int init_tid(int cpu)
{
@@ -3930,7 +3932,7 @@ size_t __ksize(const void *object)
if (unlikely(!PageSlab(page))) {
WARN_ON(!PageCompound(page));
- return PAGE_SIZE << compound_order(page);
+ return page_size(page);
}
return slab_ksize(page->slab_cache);
@@ -5298,7 +5300,7 @@ static ssize_t shrink_store(struct kmem_cache *s,
const char *buf, size_t length)
{
if (buf[0] == '1')
- kmem_cache_shrink(s);
+ kmem_cache_shrink_all(s);
else
return -EINVAL;
return length;
diff --git a/mm/sparse.c b/mm/sparse.c
index 72f010d9bff5..bf32de9e666b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -11,6 +11,8 @@
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include "internal.h"
#include <asm/dma.h>
@@ -470,6 +472,12 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;
+static inline void __meminit sparse_buffer_free(unsigned long size)
+{
+ WARN_ON(!sparsemap_buf || size == 0);
+ memblock_free_early(__pa(sparsemap_buf), size);
+}
+
static void __init sparse_buffer_init(unsigned long size, int nid)
{
phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
@@ -486,7 +494,7 @@ static void __init sparse_buffer_fini(void)
unsigned long size = sparsemap_buf_end - sparsemap_buf;
if (sparsemap_buf && size > 0)
- memblock_free_early(__pa(sparsemap_buf), size);
+ sparse_buffer_free(size);
sparsemap_buf = NULL;
}
@@ -495,11 +503,15 @@ void * __meminit sparse_buffer_alloc(unsigned long size)
void *ptr = NULL;
if (sparsemap_buf) {
- ptr = PTR_ALIGN(sparsemap_buf, size);
+ ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
if (ptr + size > sparsemap_buf_end)
ptr = NULL;
- else
+ else {
+ /* Free redundant aligned space */
+ if ((unsigned long)(ptr - sparsemap_buf) > 0)
+ sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
sparsemap_buf = ptr + size;
+ }
}
return ptr;
}
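Two things change above: PTR_ALIGN() only handles power-of-two alignment, so it is replaced by roundup(), which works for any multiple (a section's memmap size need not be a power of two); and the bytes skipped by alignment are now returned to memblock instead of leaking. A userspace model of the resulting bump allocator, with made-up buffer bounds:

	#include <stdio.h>

	static unsigned long buf = 0x1000, buf_end = 0x9000;

	static unsigned long sparse_alloc_model(unsigned long size)
	{
		/* roundup(): align up to any multiple, not just powers of 2 */
		unsigned long ptr = (buf + size - 1) / size * size;

		if (ptr + size > buf_end)
			return 0;
		if (ptr != buf)         /* give back the skipped prefix */
			printf("freed %lu wasted bytes\n", ptr - buf);
		buf = ptr + size;
		return ptr;
	}

	int main(void)
	{
		printf("0x%lx\n", sparse_alloc_model(0x3000)); /* 0x3000 */
		return 0;
	}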
@@ -867,7 +879,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
*/
page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
- ms = __pfn_to_section(start_pfn);
+ ms = __nr_to_section(section_nr);
set_section_nid(section_nr, nid);
section_mark_present(ms);
@@ -884,9 +896,6 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
int i;
- if (!memmap)
- return;
-
/*
* A further optimization is to have per section refcounted
* num_poisoned_pages. But that would need more space per memmap, so
@@ -898,7 +907,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
for (i = 0; i < nr_pages; i++) {
if (PageHWPoison(&memmap[i])) {
- atomic_long_sub(1, &num_poisoned_pages);
+ num_poisoned_pages_dec();
ClearPageHWPoison(&memmap[i]);
}
}
diff --git a/mm/swap.c b/mm/swap.c
index ae300397dfda..38c3fa4308e2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,6 +47,7 @@ int page_cluster;
static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
@@ -71,12 +72,12 @@ static void __page_cache_release(struct page *page)
spin_unlock_irqrestore(&pgdat->lru_lock, flags);
}
__ClearPageWaiters(page);
- mem_cgroup_uncharge(page);
}
static void __put_single_page(struct page *page)
{
__page_cache_release(page);
+ mem_cgroup_uncharge(page);
free_unref_page(page);
}
@@ -515,7 +516,6 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
del_page_from_lru_list(page, lruvec, lru + active);
ClearPageActive(page);
ClearPageReferenced(page);
- add_page_to_lru_list(page, lruvec, lru);
if (PageWriteback(page) || PageDirty(page)) {
/*
@@ -523,13 +523,14 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
* It can make readahead confusing. But race window
* is _really_ small and it's non-critical problem.
*/
+ add_page_to_lru_list(page, lruvec, lru);
SetPageReclaim(page);
} else {
/*
	 * The page's writeback ended while it was on the pagevec:
	 * move the page to the tail of the inactive list.
*/
- list_move_tail(&page->lru, &lruvec->lists[lru]);
+ add_page_to_lru_list_tail(page, lruvec, lru);
__count_vm_event(PGROTATED);
}
@@ -538,6 +539,22 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
update_page_reclaim_stat(lruvec, file, 0);
}
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
+{
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+ int file = page_is_file_cache(page);
+ int lru = page_lru_base_type(page);
+
+ del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
+ ClearPageActive(page);
+ ClearPageReferenced(page);
+ add_page_to_lru_list(page, lruvec, lru);
+
+ __count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
+ update_page_reclaim_stat(lruvec, file, 0);
+ }
+}
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
void *arg)
@@ -590,6 +607,10 @@ void lru_add_drain_cpu(int cpu)
if (pagevec_count(pvec))
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+ pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+ if (pagevec_count(pvec))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+
pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
if (pagevec_count(pvec))
pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
@@ -623,6 +644,26 @@ void deactivate_file_page(struct page *page)
}
}
+/*
+ * deactivate_page - deactivate a page
+ * @page: page to deactivate
+ *
+ * deactivate_page() moves @page to the inactive list if @page was on the active
+ * list and was not an unevictable page. This is done to accelerate the reclaim
+ * of @page.
+ */
+void deactivate_page(struct page *page)
+{
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+ struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+
+ get_page(page);
+ if (!pagevec_add(pvec, page) || PageCompound(page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+ put_cpu_var(lru_deactivate_pvecs);
+ }
+}
+
/**
* mark_page_lazyfree - make an anon page lazyfree
 * @page: anon page to make lazyfree
@@ -687,6 +728,7 @@ void lru_add_drain_all(void)
if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
+ pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
need_activate_page_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
@@ -844,17 +886,15 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
get_page(page_tail);
list_add_tail(&page_tail->lru, list);
} else {
- struct list_head *list_head;
/*
* Head page has not yet been counted, as an hpage,
* so we must account for each subpage individually.
*
- * Use the standard add function to put page_tail on the list,
- * but then correct its position so they all end up in order.
+ * Put page_tail on the list at the correct position
+ * so they all end up in order.
*/
- add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
- list_head = page_tail->lru.prev;
- list_move_tail(&page_tail->lru, list_head);
+ add_page_to_lru_list_tail(page_tail, lruvec,
+ page_lru(page_tail));
}
if (!PageUnevictable(page))
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8368621a0fc7..8e7ce9a9bc5e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -116,7 +116,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swp_offset(entry);
XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
- unsigned long i, nr = 1UL << compound_order(page);
+ unsigned long i, nr = compound_nr(page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -133,7 +133,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
for (i = 0; i < nr; i++) {
VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
set_page_private(page + i, entry.val + i);
- xas_store(&xas, page + i);
+ xas_store(&xas, page);
xas_next(&xas);
}
address_space->nrpages += nr;
@@ -168,7 +168,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
for (i = 0; i < nr; i++) {
void *entry = xas_store(&xas, NULL);
- VM_BUG_ON_PAGE(entry != page + i, entry);
+ VM_BUG_ON_PAGE(entry != page, entry);
set_page_private(page + i, 0);
xas_next(&xas);
}
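Here and in the memfd/shmem hunks above, every slot in a compound page's index range now stores the head page rather than the individual tail; lookups then use find_subpage() to recover the tail they want. A simplified model with integers standing in for struct page pointers:

	#include <stdio.h>

	#define NR 4
	static int slots[NR];   /* stands in for the XArray index range */

	/* simplified find_subpage(): head plus offset within the range */
	static int find_subpage(int head, unsigned int idx)
	{
		return head + idx;
	}

	int main(void)
	{
		int head = 100;     /* "address" of the head page */
		unsigned int i;

		for (i = 0; i < NR; i++)
			slots[i] = head;        /* xas_store(&xas, page) */

		printf("%d\n", find_subpage(slots[3], 3)); /* 103 */
		return 0;
	}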
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 98e924864554..660717a1ea5c 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
+#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
@@ -227,7 +228,12 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
if (!virt_addr_valid(ptr))
return;
- page = virt_to_head_page(ptr);
+ /*
+ * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
+ * highmem page or fall back to virt_to_page(). The following
+ * is effectively a highmem-aware virt_to_head_page().
+ */
+ page = compound_head(kmap_to_page((void *)ptr));
if (PageSlab(page)) {
/* Check slab allocator for flags and size. */
diff --git a/mm/util.c b/mm/util.c
index e6351a80f248..3ad6db9a722e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -16,6 +16,13 @@
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
+#include <linux/elf.h>
+#include <linux/elf-randomize.h>
+#include <linux/personality.h>
+#include <linux/random.h>
+#include <linux/processor.h>
+#include <linux/sizes.h>
+#include <linux/compat.h>
#include <linux/uaccess.h>
@@ -293,7 +300,105 @@ int vma_is_stack_for_current(struct vm_area_struct *vma)
return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
-#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
+#ifndef STACK_RND_MASK
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
+#endif
+
+unsigned long randomize_stack_top(unsigned long stack_top)
+{
+ unsigned long random_variable = 0;
+
+ if (current->flags & PF_RANDOMIZE) {
+ random_variable = get_random_long();
+ random_variable &= STACK_RND_MASK;
+ random_variable <<= PAGE_SHIFT;
+ }
+#ifdef CONFIG_STACK_GROWSUP
+ return PAGE_ALIGN(stack_top) + random_variable;
+#else
+ return PAGE_ALIGN(stack_top) - random_variable;
+#endif
+}
+
+#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+ /* Is the current task 32bit ? */
+ if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
+ return randomize_page(mm->brk, SZ_32M);
+
+ return randomize_page(mm->brk, SZ_1G);
+}
+
+unsigned long arch_mmap_rnd(void)
+{
+ unsigned long rnd;
+
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+ if (is_compat_task())
+ rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+ else
+#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
+ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
+
+ return rnd << PAGE_SHIFT;
+}
+
+static int mmap_is_legacy(struct rlimit *rlim_stack)
+{
+ if (current->personality & ADDR_COMPAT_LAYOUT)
+ return 1;
+
+ if (rlim_stack->rlim_cur == RLIM_INFINITY)
+ return 1;
+
+ return sysctl_legacy_va_layout;
+}
+
+/*
+ * Leave enough space between the mmap area and the stack to honour ulimit in
+ * the face of randomisation.
+ */
+#define MIN_GAP (SZ_128M)
+#define MAX_GAP (STACK_TOP / 6 * 5)
+
+static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+{
+ unsigned long gap = rlim_stack->rlim_cur;
+ unsigned long pad = stack_guard_gap;
+
+ /* Account for stack randomization if necessary */
+ if (current->flags & PF_RANDOMIZE)
+ pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+ /* Values close to RLIM_INFINITY can overflow. */
+ if (gap + pad > gap)
+ gap += pad;
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+ return PAGE_ALIGN(STACK_TOP - gap - rnd);
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+ if (mmap_is_legacy(rlim_stack)) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor, rlim_stack);
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+}
+#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
mm->mmap_base = TASK_UNMAPPED_BASE;
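
For the top-down layout, mmap_base() above clamps the effective stack gap to [MIN_GAP, MAX_GAP] before subtracting it and the random offset from STACK_TOP. A standalone model of the clamping; the STACK_TOP and guard-gap values are illustrative assumptions:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STACK_TOP       0x7ffffffff000UL        /* illustrative value */
#define SZ_128M         (128UL << 20)
#define MIN_GAP         SZ_128M
#define MAX_GAP         (STACK_TOP / 6 * 5)
#define STACK_RND_MASK  (0x7ff >> (PAGE_SHIFT - 12))

static unsigned long model_mmap_base(unsigned long rnd, unsigned long rlim_cur,
                                     unsigned long stack_guard_gap)
{
        unsigned long gap = rlim_cur;
        unsigned long pad = stack_guard_gap +
                            ((unsigned long)STACK_RND_MASK << PAGE_SHIFT);

        if (gap + pad > gap)            /* avoid overflow near RLIM_INFINITY */
                gap += pad;
        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;
        return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

int main(void)
{
        /* An 8 MiB RLIMIT_STACK is clamped up to the 128 MiB MIN_GAP. */
        printf("%#lx\n", model_mmap_base(0, 8UL << 20, 256UL << PAGE_SHIFT));
        return 0;
}
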
@@ -521,7 +626,7 @@ bool page_mapped(struct page *page)
return true;
if (PageHuge(page))
return false;
- for (i = 0; i < (1 << compound_order(page)); i++) {
+ for (i = 0; i < compound_nr(page); i++) {
if (atomic_read(&page[i]._mapcount) >= 0)
return true;
}
@@ -783,3 +888,16 @@ out_mm:
out:
return res;
}
+
+int memcmp_pages(struct page *page1, struct page *page2)
+{
+ char *addr1, *addr2;
+ int ret;
+
+ addr1 = kmap_atomic(page1);
+ addr2 = kmap_atomic(page2);
+ ret = memcmp(addr1, addr2, PAGE_SIZE);
+ kunmap_atomic(addr2);
+ kunmap_atomic(addr1);
+ return ret;
+}
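
memcmp_pages() gives callers a highmem-safe whole-page compare, since kmap_atomic() maps each page before the memcmp. A hypothetical wrapper in the style a deduplication caller might use (the name pages_identical is illustrative, not part of this hunk):

/* Hypothetical: true if two pages contain identical bytes. */
static inline bool pages_identical(struct page *page1, struct page *page2)
{
        return !memcmp_pages(page1, page2);
}
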
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c1246d77cf75..a3c70e275f4e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -329,8 +329,6 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
-#define VM_LAZY_FREE 0x02
-#define VM_VM_AREA 0x04
static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
@@ -398,9 +396,8 @@ compute_subtree_max_size(struct vmap_area *va)
get_subtree_max_size(va->rb_node.rb_right));
}
-RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
- struct vmap_area, rb_node, unsigned long, subtree_max_size,
- compute_subtree_max_size)
+RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
+ struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
@@ -1116,7 +1113,7 @@ retry:
va->va_start = addr;
va->va_end = addr + size;
- va->flags = 0;
+ va->vm = NULL;
insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
spin_unlock(&vmap_area_lock);
@@ -1282,7 +1279,14 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
llist_for_each_entry_safe(va, n_va, valist, purge_list) {
unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
- __free_vmap_area(va);
+ /*
+ * Finally, insert or merge the lazily-freed area. It is
+ * already detached, so there is no need to "unlink" it
+ * from anything.
+ */
+ merge_or_add_vmap_area(va,
+ &free_vmap_area_root, &free_vmap_area_list);
+
atomic_long_sub(nr, &vmap_lazy_nr);
if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
@@ -1324,6 +1328,10 @@ static void free_vmap_area_noflush(struct vmap_area *va)
{
unsigned long nr_lazy;
+ spin_lock(&vmap_area_lock);
+ unlink_va(va, &vmap_area_root);
+ spin_unlock(&vmap_area_lock);
+
nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
PAGE_SHIFT, &vmap_lazy_nr);
@@ -1918,7 +1926,6 @@ void __init vmalloc_init(void)
if (WARN_ON_ONCE(!va))
continue;
- va->flags = VM_VM_AREA;
va->va_start = (unsigned long)tmp->addr;
va->va_end = va->va_start + tmp->size;
va->vm = tmp;
@@ -2016,7 +2023,6 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
vm->size = va->va_end - va->va_start;
vm->caller = caller;
va->vm = vm;
- va->flags |= VM_VM_AREA;
spin_unlock(&vmap_area_lock);
}
@@ -2121,10 +2127,10 @@ struct vm_struct *find_vm_area(const void *addr)
struct vmap_area *va;
va = find_vmap_area((unsigned long)addr);
- if (va && va->flags & VM_VM_AREA)
- return va->vm;
+ if (!va)
+ return NULL;
- return NULL;
+ return va->vm;
}
/**
@@ -2143,14 +2149,12 @@ struct vm_struct *remove_vm_area(const void *addr)
might_sleep();
- va = find_vmap_area((unsigned long)addr);
- if (va && va->flags & VM_VM_AREA) {
+ spin_lock(&vmap_area_lock);
+ va = __find_vmap_area((unsigned long)addr);
+ if (va && va->vm) {
struct vm_struct *vm = va->vm;
- spin_lock(&vmap_area_lock);
va->vm = NULL;
- va->flags &= ~VM_VM_AREA;
- va->flags |= VM_LAZY_FREE;
spin_unlock(&vmap_area_lock);
kasan_free_shadow(vm);
@@ -2158,6 +2162,8 @@ struct vm_struct *remove_vm_area(const void *addr)
return vm;
}
+
+ spin_unlock(&vmap_area_lock);
return NULL;
}
@@ -2402,7 +2408,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
- area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
@@ -2410,13 +2415,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
} else {
pages = kmalloc_node(array_size, nested_gfp, node);
}
- area->pages = pages;
- if (!area->pages) {
+
+ if (!pages) {
remove_vm_area(area->addr);
kfree(area);
return NULL;
}
+ area->pages = pages;
+ area->nr_pages = nr_pages;
+
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
@@ -2851,7 +2859,7 @@ long vread(char *buf, char *addr, unsigned long count)
if (!count)
break;
- if (!(va->flags & VM_VM_AREA))
+ if (!va->vm)
continue;
vm = va->vm;
@@ -2931,7 +2939,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
if (!count)
break;
- if (!(va->flags & VM_VM_AREA))
+ if (!va->vm)
continue;
vm = va->vm;
@@ -3450,6 +3458,22 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
}
}
+static void show_purge_info(struct seq_file *m)
+{
+ struct llist_node *head;
+ struct vmap_area *va;
+
+ head = READ_ONCE(vmap_purge_list.first);
+ if (head == NULL)
+ return;
+
+ llist_for_each_entry(va, head, purge_list) {
+ seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
+ (void *)va->va_start, (void *)va->va_end,
+ va->va_end - va->va_start);
+ }
+}
+
static int s_show(struct seq_file *m, void *p)
{
struct vmap_area *va;
@@ -3458,14 +3482,13 @@ static int s_show(struct seq_file *m, void *p)
va = list_entry(p, struct vmap_area, list);
/*
- * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
- * behalf of vmap area is being tear down or vm_map_ram allocation.
+ * s_show can encounter a race with remove_vm_area: a NULL ->vm
+ * means the vmap area is being torn down or is a vm_map_ram allocation.
*/
- if (!(va->flags & VM_VM_AREA)) {
- seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
+ if (!va->vm) {
+ seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
(void *)va->va_start, (void *)va->va_end,
- va->va_end - va->va_start,
- va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
+ va->va_end - va->va_start);
return 0;
}
@@ -3504,6 +3527,16 @@ static int s_show(struct seq_file *m, void *p)
show_numa_info(m, v);
seq_putc(m, '\n');
+
+ /*
+ * As a final step, dump "unpurged" areas. Note that
+ * the entire "/proc/vmallocinfo" output will then not
+ * be address-sorted, because the purge list is not
+ * sorted.
+ */
+ if (list_is_last(&va->list, &vmap_area_list))
+ show_purge_info(m);
+
return 0;
}
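
With this change, the lazily-freed rows are appended after the regular entries, so /proc/vmallocinfo is no longer fully address-sorted. A small userspace reader that merely counts the new "unpurged vm_area" rows (illustrative only):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[512];
        unsigned long unpurged = 0;
        FILE *f = fopen("/proc/vmallocinfo", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "unpurged vm_area"))
                        unpurged++;
        fclose(f);
        printf("%lu unpurged areas\n", unpurged);
        return 0;
}
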
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a6c5d0b28321..e5d52d6a24af 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -171,11 +171,22 @@ int vm_swappiness = 60;
*/
unsigned long vm_total_pages;
+static void set_task_reclaim_state(struct task_struct *task,
+ struct reclaim_state *rs)
+{
+ /* Check for an overwrite */
+ WARN_ON_ONCE(rs && task->reclaim_state);
+
+ /* Check for the nulling of an already-nulled member */
+ WARN_ON_ONCE(!rs && !task->reclaim_state);
+
+ task->reclaim_state = rs;
+}
+
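
The two WARN_ON_ONCE() checks enforce strict set/clear pairing. A sketch of the intended calling pattern around a reclaim pass, using only names from this file:

        struct reclaim_state rs = { .reclaimed_slab = 0 };

        set_task_reclaim_state(current, &rs);
        /* ... reclaim runs with current->reclaim_state pointing at rs ... */
        set_task_reclaim_state(current, NULL);

Setting a new state over a live one, or clearing an already-cleared one, now trips the corresponding warning.
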
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
-#ifdef CONFIG_MEMCG_KMEM
-
+#ifdef CONFIG_MEMCG
/*
* We allow subsystems to populate their shrinker-related
* LRU lists before register_shrinker_prepared() is called
@@ -227,30 +238,7 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
idr_remove(&shrinker_idr, id);
up_write(&shrinker_rwsem);
}
-#else /* CONFIG_MEMCG_KMEM */
-static int prealloc_memcg_shrinker(struct shrinker *shrinker)
-{
- return 0;
-}
-
-static void unregister_memcg_shrinker(struct shrinker *shrinker)
-{
-}
-#endif /* CONFIG_MEMCG_KMEM */
-static void set_task_reclaim_state(struct task_struct *task,
- struct reclaim_state *rs)
-{
- /* Check for an overwrite */
- WARN_ON_ONCE(rs && task->reclaim_state);
-
- /* Check for the nulling of an already-nulled member */
- WARN_ON_ONCE(!rs && !task->reclaim_state);
-
- task->reclaim_state = rs;
-}
-
-#ifdef CONFIG_MEMCG
static bool global_reclaim(struct scan_control *sc)
{
return !sc->target_mem_cgroup;
@@ -305,6 +293,15 @@ static bool memcg_congested(pg_data_t *pgdat,
}
#else
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+ return 0;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+}
+
static bool global_reclaim(struct scan_control *sc)
{
return true;
@@ -591,7 +588,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
return freed;
}
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg, int priority)
{
@@ -599,7 +596,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
unsigned long ret, freed = 0;
int i;
- if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
+ if (!mem_cgroup_online(memcg))
return 0;
if (!down_read_trylock(&shrinker_rwsem))
@@ -625,6 +622,11 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
continue;
}
+ /* Call non-slab shrinkers even though kmem is disabled */
+ if (!memcg_kmem_enabled() &&
+ !(shrinker->flags & SHRINKER_NONSLAB))
+ continue;
+
ret = do_shrink_slab(&sc, shrinker, priority);
if (ret == SHRINK_EMPTY) {
clear_bit(i, map->map);
@@ -661,13 +663,13 @@ unlock:
up_read(&shrinker_rwsem);
return freed;
}
-#else /* CONFIG_MEMCG_KMEM */
+#else /* CONFIG_MEMCG */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg, int priority)
{
return 0;
}
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG */
/**
* shrink_slab - shrink slab caches
@@ -1121,7 +1123,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
struct scan_control *sc,
enum ttu_flags ttu_flags,
struct reclaim_stat *stat,
- bool force_reclaim)
+ bool ignore_references)
{
LIST_HEAD(ret_pages);
LIST_HEAD(free_pages);
@@ -1135,7 +1137,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
struct address_space *mapping;
struct page *page;
int may_enter_fs;
- enum page_references references = PAGEREF_RECLAIM_CLEAN;
+ enum page_references references = PAGEREF_RECLAIM;
bool dirty, writeback;
unsigned int nr_pages;
@@ -1149,7 +1151,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
VM_BUG_ON_PAGE(PageActive(page), page);
- nr_pages = 1 << compound_order(page);
+ nr_pages = compound_nr(page);
/* Account the number of base pages even though THP */
sc->nr_scanned += nr_pages;
@@ -1266,7 +1268,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
}
- if (!force_reclaim)
+ if (!ignore_references)
references = page_check_references(page, sc);
switch (references) {
@@ -1487,10 +1489,9 @@ free_it:
* Is there need to periodically free_page_list? It would
* appear not as the counts should be low
*/
- if (unlikely(PageTransHuge(page))) {
- mem_cgroup_uncharge(page);
+ if (unlikely(PageTransHuge(page)))
(*get_compound_page_dtor(page))(page);
- } else
+ else
list_add(&page->lru, &free_pages);
continue;
@@ -1705,7 +1706,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
VM_BUG_ON_PAGE(!PageLRU(page), page);
- nr_pages = 1 << compound_order(page);
+ nr_pages = compound_nr(page);
total_scan += nr_pages;
if (page_zonenum(page) > sc->reclaim_idx) {
@@ -1911,7 +1912,6 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
if (unlikely(PageCompound(page))) {
spin_unlock_irq(&pgdat->lru_lock);
- mem_cgroup_uncharge(page);
(*get_compound_page_dtor(page))(page);
spin_lock_irq(&pgdat->lru_lock);
} else
@@ -2145,6 +2145,62 @@ static void shrink_active_list(unsigned long nr_to_scan,
nr_deactivate, nr_rotated, sc->priority, file);
}
+unsigned long reclaim_pages(struct list_head *page_list)
+{
+ int nid = -1;
+ unsigned long nr_reclaimed = 0;
+ LIST_HEAD(node_page_list);
+ struct reclaim_stat dummy_stat;
+ struct page *page;
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .priority = DEF_PRIORITY,
+ .may_writepage = 1,
+ .may_unmap = 1,
+ .may_swap = 1,
+ };
+
+ while (!list_empty(page_list)) {
+ page = lru_to_page(page_list);
+ if (nid == -1) {
+ nid = page_to_nid(page);
+ INIT_LIST_HEAD(&node_page_list);
+ }
+
+ if (nid == page_to_nid(page)) {
+ ClearPageActive(page);
+ list_move(&page->lru, &node_page_list);
+ continue;
+ }
+
+ nr_reclaimed += shrink_page_list(&node_page_list,
+ NODE_DATA(nid),
+ &sc, 0,
+ &dummy_stat, false);
+ while (!list_empty(&node_page_list)) {
+ page = lru_to_page(&node_page_list);
+ list_del(&page->lru);
+ putback_lru_page(page);
+ }
+
+ nid = -1;
+ }
+
+ if (!list_empty(&node_page_list)) {
+ nr_reclaimed += shrink_page_list(&node_page_list,
+ NODE_DATA(nid),
+ &sc, 0,
+ &dummy_stat, false);
+ while (!list_empty(&node_page_list)) {
+ page = lru_to_page(&node_page_list);
+ list_del(&page->lru);
+ putback_lru_page(page);
+ }
+ }
+
+ return nr_reclaimed;
+}
+
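
reclaim_pages() expects pages that are already isolated from their LRU lists and puts back whatever it cannot free. A hypothetical caller sketch:

        /* Hypothetical caller: reclaim an ad-hoc batch of isolated pages. */
        LIST_HEAD(batch);
        unsigned long freed;

        /* ... isolate candidates and list_add(&page->lru, &batch) ... */
        freed = reclaim_pages(&batch);
        /* On return the list is empty: pages were freed or put back. */

Note the function batches per NUMA node internally, so callers may mix pages from different nodes in one list.
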
/*
* The inactive anon list should be small enough that the VM never has
* to do too much work.
@@ -2586,7 +2642,6 @@ static bool in_reclaim_compaction(struct scan_control *sc)
*/
static inline bool should_continue_reclaim(struct pglist_data *pgdat,
unsigned long nr_reclaimed,
- unsigned long nr_scanned,
struct scan_control *sc)
{
unsigned long pages_for_compaction;
@@ -2597,40 +2652,18 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
if (!in_reclaim_compaction(sc))
return false;
- /* Consider stopping depending on scan and reclaim activity */
- if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) {
- /*
- * For __GFP_RETRY_MAYFAIL allocations, stop reclaiming if the
- * full LRU list has been scanned and we are still failing
- * to reclaim pages. This full LRU scan is potentially
- * expensive but a __GFP_RETRY_MAYFAIL caller really wants to succeed
- */
- if (!nr_reclaimed && !nr_scanned)
- return false;
- } else {
- /*
- * For non-__GFP_RETRY_MAYFAIL allocations which can presumably
- * fail without consequence, stop if we failed to reclaim
- * any pages from the last SWAP_CLUSTER_MAX number of
- * pages that were scanned. This will return to the
- * caller faster at the risk reclaim/compaction and
- * the resulting allocation attempt fails
- */
- if (!nr_reclaimed)
- return false;
- }
-
/*
- * If we have not reclaimed enough pages for compaction and the
- * inactive lists are large enough, continue reclaiming
+ * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
+ * number of pages that were scanned. This returns to the caller with the
+ * risk that reclaim/compaction and the resulting allocation attempt
+ * fail. In the past we tried harder for __GFP_RETRY_MAYFAIL
+ * allocations by requiring that the full LRU list had been scanned
+ * first, assuming that a zero delta of sc->nr_scanned meant a full LRU
+ * scan, but that approximation was wrong: there were corner cases
+ * where a non-zero number of pages was always scanned.
*/
- pages_for_compaction = compact_gap(sc->order);
- inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
- if (get_nr_swap_pages() > 0)
- inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
- if (sc->nr_reclaimed < pages_for_compaction &&
- inactive_lru_pages > pages_for_compaction)
- return true;
+ if (!nr_reclaimed)
+ return false;
/* If compaction would go ahead or the allocation would succeed, stop */
for (z = 0; z <= sc->reclaim_idx; z++) {
@@ -2647,7 +2680,17 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
;
}
}
- return true;
+
+ /*
+ * If we have not reclaimed enough pages for compaction and the
+ * inactive lists are large enough, continue reclaiming
+ */
+ pages_for_compaction = compact_gap(sc->order);
+ inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
+ if (get_nr_swap_pages() > 0)
+ inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
+
+ return inactive_lru_pages > pages_for_compaction;
}
static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
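
compact_gap(order) appears to be 2UL << order pages (assumed from mm/internal.h of this era), so an order-9 THP allocation needs more than 1024 inactive pages before reclaim continues. A one-line check of the arithmetic:

#include <stdio.h>

/* Mirrors compact_gap() from mm/internal.h (assumed definition). */
static unsigned long compact_gap(unsigned int order)
{
        return 2UL << order;
}

int main(void)
{
        /* order 9 (a 2 MiB THP with 4 KiB pages) -> 1024 pages = 4 MiB. */
        printf("%lu pages\n", compact_gap(9));
        return 0;
}
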
@@ -2664,10 +2707,6 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
do {
struct mem_cgroup *root = sc->target_mem_cgroup;
- struct mem_cgroup_reclaim_cookie reclaim = {
- .pgdat = pgdat,
- .priority = sc->priority,
- };
unsigned long node_lru_pages = 0;
struct mem_cgroup *memcg;
@@ -2676,7 +2715,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
nr_reclaimed = sc->nr_reclaimed;
nr_scanned = sc->nr_scanned;
- memcg = mem_cgroup_iter(root, NULL, &reclaim);
+ memcg = mem_cgroup_iter(root, NULL, NULL);
do {
unsigned long lru_pages;
unsigned long reclaimed;
@@ -2719,21 +2758,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
sc->nr_scanned - scanned,
sc->nr_reclaimed - reclaimed);
- /*
- * Kswapd have to scan all memory cgroups to fulfill
- * the overall scan target for the node.
- *
- * Limit reclaim, on the other hand, only cares about
- * nr_to_reclaim pages to be reclaimed and it will
- * retry with decreasing priority if one round over the
- * whole hierarchy is not sufficient.
- */
- if (!current_is_kswapd() &&
- sc->nr_reclaimed >= sc->nr_to_reclaim) {
- mem_cgroup_iter_break(root, memcg);
- break;
- }
- } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
+ } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
if (reclaim_state) {
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -2810,7 +2835,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
wait_iff_congested(BLK_RW_ASYNC, HZ/10);
} while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
- sc->nr_scanned - nr_scanned, sc));
+ sc));
/*
* Kswapd gives up on balancing particular nodes after too
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fd7e16ca6996..6afc892a148a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1158,6 +1158,8 @@ const char * const vmstat_text[] = {
"nr_shmem",
"nr_shmem_hugepages",
"nr_shmem_pmdmapped",
+ "nr_file_hugepages",
+ "nr_file_pmdmapped",
"nr_anon_transparent_hugepages",
"nr_unstable",
"nr_vmscan_write",
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 75b7962439ff..05bdf90646e7 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,7 +41,6 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/wait.h>
#include <linux/zpool.h>
#include <linux/magic.h>
@@ -146,8 +145,6 @@ struct z3fold_header {
* @release_wq: workqueue for safe page release
* @work: work_struct for safe page release
* @inode: inode for z3fold pseudo filesystem
- * @destroying: bool to stop migration once we start destruction
- * @isolated: int to count the number of pages currently in isolation
*
* This structure is allocated at pool creation time and maintains metadata
* pertaining to a particular z3fold pool.
@@ -166,11 +163,8 @@ struct z3fold_pool {
const struct zpool_ops *zpool_ops;
struct workqueue_struct *compact_wq;
struct workqueue_struct *release_wq;
- struct wait_queue_head isolate_wait;
struct work_struct work;
struct inode *inode;
- bool destroying;
- int isolated;
};
/*
@@ -301,14 +295,11 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
}
/* Initializes the z3fold header of a newly allocated z3fold page */
-static struct z3fold_header *init_z3fold_page(struct page *page,
+static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
struct z3fold_pool *pool, gfp_t gfp)
{
struct z3fold_header *zhdr = page_address(page);
- struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
-
- if (!slots)
- return NULL;
+ struct z3fold_buddy_slots *slots;
INIT_LIST_HEAD(&page->lru);
clear_bit(PAGE_HEADLESS, &page->private);
@@ -316,6 +307,12 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
clear_bit(NEEDS_COMPACTING, &page->private);
clear_bit(PAGE_STALE, &page->private);
clear_bit(PAGE_CLAIMED, &page->private);
+ if (headless)
+ return zhdr;
+
+ slots = alloc_slots(pool, gfp);
+ if (!slots)
+ return NULL;
spin_lock_init(&zhdr->page_lock);
kref_init(&zhdr->refcount);
@@ -372,9 +369,10 @@ static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
* Encodes the handle of a particular buddy within a z3fold page
* Pool lock should be held as this function accesses first_num
*/
-static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
+static unsigned long __encode_handle(struct z3fold_header *zhdr,
+ struct z3fold_buddy_slots *slots,
+ enum buddy bud)
{
- struct z3fold_buddy_slots *slots;
unsigned long h = (unsigned long)zhdr;
int idx = 0;
@@ -391,11 +389,15 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
if (bud == LAST)
h |= (zhdr->last_chunks << BUDDY_SHIFT);
- slots = zhdr->slots;
slots->slot[idx] = h;
return (unsigned long)&slots->slot[idx];
}
+static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
+{
+ return __encode_handle(zhdr, zhdr->slots, bud);
+}
+
/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
@@ -630,6 +632,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
}
if (unlikely(PageIsolated(page) ||
+ test_bit(PAGE_CLAIMED, &page->private) ||
test_bit(PAGE_STALE, &page->private))) {
z3fold_page_unlock(zhdr);
return;
@@ -775,7 +778,6 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
goto out_c;
spin_lock_init(&pool->lock);
spin_lock_init(&pool->stale_lock);
- init_waitqueue_head(&pool->isolate_wait);
pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
if (!pool->unbuddied)
goto out_pool;
@@ -815,15 +817,6 @@ out:
return NULL;
}
-static bool pool_isolated_are_drained(struct z3fold_pool *pool)
-{
- bool ret;
-
- spin_lock(&pool->lock);
- ret = pool->isolated == 0;
- spin_unlock(&pool->lock);
- return ret;
-}
/**
* z3fold_destroy_pool() - destroys an existing z3fold pool
* @pool: the z3fold pool to be destroyed
@@ -833,22 +826,6 @@ static bool pool_isolated_are_drained(struct z3fold_pool *pool)
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
kmem_cache_destroy(pool->c_handle);
- /*
- * We set pool-> destroying under lock to ensure that
- * z3fold_page_isolate() sees any changes to destroying. This way we
- * avoid the need for any memory barriers.
- */
-
- spin_lock(&pool->lock);
- pool->destroying = true;
- spin_unlock(&pool->lock);
-
- /*
- * We need to ensure that no pages are being migrated while we destroy
- * these workqueues, as migration can queue work on either of the
- * workqueues.
- */
- wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));
/*
* We need to destroy pool->compact_wq before pool->release_wq,
@@ -956,7 +933,7 @@ retry:
if (!page)
return -ENOMEM;
- zhdr = init_z3fold_page(page, pool, gfp);
+ zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
if (!zhdr) {
__free_page(page);
return -ENOMEM;
@@ -1132,6 +1109,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
struct z3fold_header *zhdr = NULL;
struct page *page = NULL;
struct list_head *pos;
+ struct z3fold_buddy_slots slots;
unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
spin_lock(&pool->lock);
@@ -1150,16 +1128,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
/* this bit could have been set by free, in which case
* we pass over to the next page in the pool.
*/
- if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+ if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+ page = NULL;
continue;
+ }
- if (unlikely(PageIsolated(page)))
+ if (unlikely(PageIsolated(page))) {
+ clear_bit(PAGE_CLAIMED, &page->private);
+ page = NULL;
continue;
+ }
+ zhdr = page_address(page);
if (test_bit(PAGE_HEADLESS, &page->private))
break;
- zhdr = page_address(page);
if (!z3fold_page_trylock(zhdr)) {
+ clear_bit(PAGE_CLAIMED, &page->private);
zhdr = NULL;
continue; /* can't evict at this point */
}
@@ -1177,26 +1161,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
if (!test_bit(PAGE_HEADLESS, &page->private)) {
/*
- * We need encode the handles before unlocking, since
- * we can race with free that will set
- * (first|last)_chunks to 0
+ * We need to encode the handles before unlocking, and
+ * use our local slots structure because z3fold_free
+ * can zero out zhdr->slots and we can't do much
+ * about that
*/
first_handle = 0;
last_handle = 0;
middle_handle = 0;
if (zhdr->first_chunks)
- first_handle = encode_handle(zhdr, FIRST);
+ first_handle = __encode_handle(zhdr, &slots,
+ FIRST);
if (zhdr->middle_chunks)
- middle_handle = encode_handle(zhdr, MIDDLE);
+ middle_handle = __encode_handle(zhdr, &slots,
+ MIDDLE);
if (zhdr->last_chunks)
- last_handle = encode_handle(zhdr, LAST);
+ last_handle = __encode_handle(zhdr, &slots,
+ LAST);
/*
* it's safe to unlock here because we hold a
* reference to this page
*/
z3fold_page_unlock(zhdr);
} else {
- first_handle = encode_handle(zhdr, HEADLESS);
+ first_handle = __encode_handle(zhdr, &slots, HEADLESS);
last_handle = middle_handle = 0;
}
@@ -1226,9 +1214,9 @@ next:
spin_lock(&pool->lock);
list_add(&page->lru, &pool->lru);
spin_unlock(&pool->lock);
+ clear_bit(PAGE_CLAIMED, &page->private);
} else {
z3fold_page_lock(zhdr);
- clear_bit(PAGE_CLAIMED, &page->private);
if (kref_put(&zhdr->refcount,
release_z3fold_page_locked)) {
atomic64_dec(&pool->pages_nr);
@@ -1243,6 +1231,7 @@ next:
list_add(&page->lru, &pool->lru);
spin_unlock(&pool->lock);
z3fold_page_unlock(zhdr);
+ clear_bit(PAGE_CLAIMED, &page->private);
}
/* We started off locked so we need to lock the pool back */
@@ -1339,28 +1328,6 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
return atomic64_read(&pool->pages_nr);
}
-/*
- * z3fold_dec_isolated() expects to be called while pool->lock is held.
- */
-static void z3fold_dec_isolated(struct z3fold_pool *pool)
-{
- assert_spin_locked(&pool->lock);
- VM_BUG_ON(pool->isolated <= 0);
- pool->isolated--;
-
- /*
- * If we have no more isolated pages, we have to see if
- * z3fold_destroy_pool() is waiting for a signal.
- */
- if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
- wake_up_all(&pool->isolate_wait);
-}
-
-static void z3fold_inc_isolated(struct z3fold_pool *pool)
-{
- pool->isolated++;
-}
-
static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
struct z3fold_header *zhdr;
@@ -1369,7 +1336,8 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
VM_BUG_ON_PAGE(!PageMovable(page), page);
VM_BUG_ON_PAGE(PageIsolated(page), page);
- if (test_bit(PAGE_HEADLESS, &page->private))
+ if (test_bit(PAGE_HEADLESS, &page->private) ||
+ test_bit(PAGE_CLAIMED, &page->private))
return false;
zhdr = page_address(page);
@@ -1387,34 +1355,6 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
spin_lock(&pool->lock);
if (!list_empty(&page->lru))
list_del(&page->lru);
- /*
- * We need to check for destruction while holding pool->lock, as
- * otherwise destruction could see 0 isolated pages, and
- * proceed.
- */
- if (unlikely(pool->destroying)) {
- spin_unlock(&pool->lock);
- /*
- * If this page isn't stale, somebody else holds a
- * reference to it. Let't drop our refcount so that they
- * can call the release logic.
- */
- if (unlikely(kref_put(&zhdr->refcount,
- release_z3fold_page_locked))) {
- /*
- * If we get here we have kref problems, so we
- * should freak out.
- */
- WARN(1, "Z3fold is experiencing kref problems\n");
- z3fold_page_unlock(zhdr);
- return false;
- }
- z3fold_page_unlock(zhdr);
- return false;
- }
-
-
- z3fold_inc_isolated(pool);
spin_unlock(&pool->lock);
z3fold_page_unlock(zhdr);
return true;
@@ -1483,10 +1423,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
- spin_lock(&pool->lock);
- z3fold_dec_isolated(pool);
- spin_unlock(&pool->lock);
-
page_mapcount_reset(page);
put_page(page);
return 0;
@@ -1506,14 +1442,10 @@ static void z3fold_page_putback(struct page *page)
INIT_LIST_HEAD(&page->lru);
if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
atomic64_dec(&pool->pages_nr);
- spin_lock(&pool->lock);
- z3fold_dec_isolated(pool);
- spin_unlock(&pool->lock);
return;
}
spin_lock(&pool->lock);
list_add(&page->lru, &pool->lru);
- z3fold_dec_isolated(pool);
spin_unlock(&pool->lock);
z3fold_page_unlock(zhdr);
}
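
After this series the reclaim/migration exclusion rests on the PAGE_CLAIMED bit alone: whoever wins test_and_set_bit() owns the page until it clears the bit again. A condensed sketch of the protocol used in z3fold_reclaim_page():

        if (test_and_set_bit(PAGE_CLAIMED, &page->private))
                continue;       /* someone else (e.g. free) owns the page */

        /* ... try to evict; z3fold_page_isolate() backs off while
         * PAGE_CLAIMED is set ... */

        clear_bit(PAGE_CLAIMED, &page->private);        /* release */
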
diff --git a/mm/zpool.c b/mm/zpool.c
index a2dd9107857d..863669212070 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -239,6 +239,22 @@ const char *zpool_get_type(struct zpool *zpool)
}
/**
+ * zpool_malloc_support_movable() - Check if the zpool supports
+ * allocating movable memory
+ * @zpool: The zpool to check
+ *
+ * This returns whether the zpool supports allocating movable memory.
+ *
+ * Implementations must guarantee this to be thread-safe.
+ *
+ * Returns: true if the zpool supports allocating movable memory, false if not
+ */
+bool zpool_malloc_support_movable(struct zpool *zpool)
+{
+ return zpool->driver->malloc_support_movable;
+}
+
+/**
* zpool_malloc() - Allocate memory
* @zpool: The zpool to allocate from.
* @size: The amount of memory to allocate.
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e98bb6ab4f7e..2b2b9aae8a3c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -443,15 +443,16 @@ static u64 zs_zpool_total_size(void *pool)
}
static struct zpool_driver zs_zpool_driver = {
- .type = "zsmalloc",
- .owner = THIS_MODULE,
- .create = zs_zpool_create,
- .destroy = zs_zpool_destroy,
- .malloc = zs_zpool_malloc,
- .free = zs_zpool_free,
- .map = zs_zpool_map,
- .unmap = zs_zpool_unmap,
- .total_size = zs_zpool_total_size,
+ .type = "zsmalloc",
+ .owner = THIS_MODULE,
+ .create = zs_zpool_create,
+ .destroy = zs_zpool_destroy,
+ .malloc_support_movable = true,
+ .malloc = zs_zpool_malloc,
+ .free = zs_zpool_free,
+ .map = zs_zpool_map,
+ .unmap = zs_zpool_unmap,
+ .total_size = zs_zpool_total_size,
};
MODULE_ALIAS("zpool-zsmalloc");
@@ -476,10 +477,6 @@ static inline int get_zspage_inuse(struct zspage *zspage)
return zspage->inuse;
}
-static inline void set_zspage_inuse(struct zspage *zspage, int val)
-{
- zspage->inuse = val;
-}
static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
diff --git a/mm/zswap.c b/mm/zswap.c
index 0e22744a76cb..46a322316e52 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -856,7 +856,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
/* extract swpentry from data */
zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
swpentry = zhdr->swpentry; /* here */
- zpool_unmap_handle(pool, handle);
tree = zswap_trees[swp_type(swpentry)];
offset = swp_offset(swpentry);
@@ -866,6 +865,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
if (!entry) {
/* entry was invalidated */
spin_unlock(&tree->lock);
+ zpool_unmap_handle(pool, handle);
return 0;
}
spin_unlock(&tree->lock);
@@ -886,15 +886,13 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
case ZSWAP_SWAPCACHE_NEW: /* page is locked */
/* decompress */
dlen = PAGE_SIZE;
- src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
- ZPOOL_MM_RO) + sizeof(struct zswap_header);
+ src = (u8 *)zhdr + sizeof(struct zswap_header);
dst = kmap_atomic(page);
tfm = *get_cpu_ptr(entry->pool->tfm);
ret = crypto_comp_decompress(tfm, src, entry->length,
dst, &dlen);
put_cpu_ptr(entry->pool->tfm);
kunmap_atomic(dst);
- zpool_unmap_handle(entry->pool->zpool, entry->handle);
BUG_ON(ret);
BUG_ON(dlen != PAGE_SIZE);
@@ -940,6 +938,7 @@ fail:
spin_unlock(&tree->lock);
end:
+ zpool_unmap_handle(pool, handle);
return ret;
}
@@ -997,6 +996,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
char *buf;
u8 *src, *dst;
struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
+ gfp_t gfp;
/* THP isn't supported */
if (PageTransHuge(page)) {
@@ -1070,9 +1070,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
/* store */
hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
- ret = zpool_malloc(entry->pool->zpool, hlen + dlen,
- __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
- &handle);
+ gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+ if (zpool_malloc_support_movable(entry->pool->zpool))
+ gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
+ ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
if (ret == -ENOSPC) {
zswap_reject_compress_poor++;
goto put_dstmem;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 4eeea4d5c3ef..2d568246803f 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -13,6 +13,7 @@
#include <linux/nsproxy.h>
#include <linux/parser.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
@@ -185,18 +186,34 @@ int ceph_compare_options(struct ceph_options *new_opt,
}
EXPORT_SYMBOL(ceph_compare_options);
+/*
+ * kvmalloc() doesn't fall back to the vmalloc allocator unless flags are
+ * compatible with (a superset of) GFP_KERNEL. This is because while the
+ * actual pages are allocated with the specified flags, the page table pages
+ * are always allocated with GFP_KERNEL. map_vm_area() doesn't even take
+ * flags because GFP_KERNEL is hard-coded in {p4d,pud,pmd,pte}_alloc().
+ *
+ * ceph_kvmalloc() may be called with GFP_KERNEL, GFP_NOFS or GFP_NOIO.
+ */
void *ceph_kvmalloc(size_t size, gfp_t flags)
{
- if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
- void *ptr = kmalloc(size, flags | __GFP_NOWARN);
- if (ptr)
- return ptr;
+ void *p;
+
+ if ((flags & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) {
+ p = kvmalloc(size, flags);
+ } else if ((flags & (__GFP_IO | __GFP_FS)) == __GFP_IO) {
+ unsigned int nofs_flag = memalloc_nofs_save();
+ p = kvmalloc(size, GFP_KERNEL);
+ memalloc_nofs_restore(nofs_flag);
+ } else {
+ unsigned int noio_flag = memalloc_noio_save();
+ p = kvmalloc(size, GFP_KERNEL);
+ memalloc_noio_restore(noio_flag);
}
- return __vmalloc(size, flags, PAGE_KERNEL);
+ return p;
}
-
static int parse_fsid(const char *str, struct ceph_fsid *fsid)
{
int i = 0;
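
The memalloc_*_save()/restore() pairs narrow the GFP context for everything called in between, so even allocations that internally use GFP_KERNEL (such as page-table pages) inherit the restriction. The general shape, as used above:

        unsigned int nofs_flag = memalloc_nofs_save();

        p = kvmalloc(size, GFP_KERNEL); /* behaves as GFP_NOFS in this scope */
        memalloc_nofs_restore(nofs_flag);
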
@@ -694,6 +711,14 @@ void ceph_destroy_client(struct ceph_client *client)
}
EXPORT_SYMBOL(ceph_destroy_client);
+void ceph_reset_client_addr(struct ceph_client *client)
+{
+ ceph_messenger_reset_nonce(&client->msgr);
+ ceph_monc_reopen_session(&client->monc);
+ ceph_osdc_reopen_osds(&client->osdc);
+}
+EXPORT_SYMBOL(ceph_reset_client_addr);
+
/*
* true if we have the mon map (and have thus joined the cluster)
*/
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 962f521c863e..e4cb3db2ee77 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -3031,6 +3031,12 @@ static void con_fault(struct ceph_connection *con)
}
+void ceph_messenger_reset_nonce(struct ceph_messenger *msgr)
+{
+ u32 nonce = le32_to_cpu(msgr->inst.addr.nonce) + 1000000;
+ msgr->inst.addr.nonce = cpu_to_le32(nonce);
+ encode_my_addr(msgr);
+}
/*
* initialize a new messenger instance
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 0520bf9825aa..7256c402ebaa 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -213,6 +213,13 @@ static void reopen_session(struct ceph_mon_client *monc)
__open_session(monc);
}
+void ceph_monc_reopen_session(struct ceph_mon_client *monc)
+{
+ mutex_lock(&monc->mutex);
+ reopen_session(monc);
+ mutex_unlock(&monc->mutex);
+}
+
static void un_backoff(struct ceph_mon_client *monc)
{
monc->hunt_mult /= 2; /* reduce by 50% */
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 78ae6e8c953d..ba45b074a362 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -841,6 +841,7 @@ int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
struct ceph_pagelist *pagelist;
size_t payload_len = 0;
size_t size;
+ int ret;
op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
@@ -852,20 +853,27 @@ int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
size = strlen(class);
BUG_ON(size > (size_t) U8_MAX);
op->cls.class_len = size;
- ceph_pagelist_append(pagelist, class, size);
+ ret = ceph_pagelist_append(pagelist, class, size);
+ if (ret)
+ goto err_pagelist_free;
payload_len += size;
op->cls.method_name = method;
size = strlen(method);
BUG_ON(size > (size_t) U8_MAX);
op->cls.method_len = size;
- ceph_pagelist_append(pagelist, method, size);
+ ret = ceph_pagelist_append(pagelist, method, size);
+ if (ret)
+ goto err_pagelist_free;
payload_len += size;
osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
-
op->indata_len = payload_len;
return 0;
+
+err_pagelist_free:
+ ceph_pagelist_release(pagelist);
+ return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
@@ -877,6 +885,7 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
opcode, 0);
struct ceph_pagelist *pagelist;
size_t payload_len;
+ int ret;
BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
@@ -886,10 +895,14 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
payload_len = strlen(name);
op->xattr.name_len = payload_len;
- ceph_pagelist_append(pagelist, name, payload_len);
+ ret = ceph_pagelist_append(pagelist, name, payload_len);
+ if (ret)
+ goto err_pagelist_free;
op->xattr.value_len = size;
- ceph_pagelist_append(pagelist, value, size);
+ ret = ceph_pagelist_append(pagelist, value, size);
+ if (ret)
+ goto err_pagelist_free;
payload_len += size;
op->xattr.cmp_op = cmp_op;
@@ -898,6 +911,10 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
op->indata_len = payload_len;
return 0;
+
+err_pagelist_free:
+ ceph_pagelist_release(pagelist);
+ return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
@@ -1488,7 +1505,6 @@ enum calc_target_result {
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
struct ceph_osd_request_target *t,
- struct ceph_connection *con,
bool any_change)
{
struct ceph_pg_pool_info *pi;
@@ -2272,7 +2288,7 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
again:
- ct_res = calc_target(osdc, &req->r_t, NULL, false);
+ ct_res = calc_target(osdc, &req->r_t, false);
if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
goto promote;
@@ -2476,6 +2492,14 @@ void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
}
EXPORT_SYMBOL(ceph_osdc_abort_requests);
+void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
+{
+ down_write(&osdc->lock);
+ osdc->abort_err = 0;
+ up_write(&osdc->lock);
+}
+EXPORT_SYMBOL(ceph_osdc_clear_abort_err);
+
static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
if (likely(eb > osdc->epoch_barrier)) {
@@ -3087,7 +3111,7 @@ static void linger_submit(struct ceph_osd_linger_request *lreq)
lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
}
- calc_target(osdc, &lreq->t, NULL, false);
+ calc_target(osdc, &lreq->t, false);
osd = lookup_create_osd(osdc, lreq->t.osd, true);
link_linger(osd, lreq);
@@ -3704,7 +3728,7 @@ recalc_linger_target(struct ceph_osd_linger_request *lreq)
struct ceph_osd_client *osdc = lreq->osdc;
enum calc_target_result ct_res;
- ct_res = calc_target(osdc, &lreq->t, NULL, true);
+ ct_res = calc_target(osdc, &lreq->t, true);
if (ct_res == CALC_TARGET_NEED_RESEND) {
struct ceph_osd *osd;
@@ -3776,8 +3800,7 @@ static void scan_requests(struct ceph_osd *osd,
n = rb_next(n); /* unlink_request(), check_pool_dne() */
dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
- ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
- false);
+ ct_res = calc_target(osdc, &req->r_t, false);
switch (ct_res) {
case CALC_TARGET_NO_ACTION:
force_resend_writes = cleared_full ||
@@ -3886,7 +3909,7 @@ static void kick_requests(struct ceph_osd_client *osdc,
n = rb_next(n);
if (req->r_t.epoch < osdc->osdmap->epoch) {
- ct_res = calc_target(osdc, &req->r_t, NULL, false);
+ ct_res = calc_target(osdc, &req->r_t, false);
if (ct_res == CALC_TARGET_POOL_DNE) {
erase_request(need_resend, req);
check_pool_dne(req);
@@ -5087,6 +5110,24 @@ out_put_req:
EXPORT_SYMBOL(ceph_osdc_call);
/*
+ * reset all osd connections
+ */
+void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
+{
+ struct rb_node *n;
+
+ down_write(&osdc->lock);
+ for (n = rb_first(&osdc->osds); n; ) {
+ struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+
+ n = rb_next(n);
+ if (!reopen_osd(osd))
+ kick_osd_requests(osd);
+ }
+ up_write(&osdc->lock);
+}
+
+/*
* init, shutdown
*/
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
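
ceph_osdc_reopen_osds() advances the iterator before acting on the node because reopen_osd() may remove it from the tree. The generic shape of that rbtree idiom, as a sketch:

        struct rb_node *n = rb_first(&root);

        while (n) {
                struct rb_node *cur = n;

                n = rb_next(n);         /* fetch the successor first */
                /* ... cur may now safely be erased from &root ... */
        }
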
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 90437906b7bc..4e0de14f80bb 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -973,11 +973,11 @@ void ceph_osdmap_destroy(struct ceph_osdmap *map)
struct ceph_pg_pool_info, node);
__remove_pg_pool(&map->pg_pools, pi);
}
- kfree(map->osd_state);
- kfree(map->osd_weight);
- kfree(map->osd_addr);
- kfree(map->osd_primary_affinity);
- kfree(map->crush_workspace);
+ kvfree(map->osd_state);
+ kvfree(map->osd_weight);
+ kvfree(map->osd_addr);
+ kvfree(map->osd_primary_affinity);
+ kvfree(map->crush_workspace);
kfree(map);
}
@@ -986,28 +986,41 @@ void ceph_osdmap_destroy(struct ceph_osdmap *map)
*
* The new elements are properly initialized.
*/
-static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
+static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
{
u32 *state;
u32 *weight;
struct ceph_entity_addr *addr;
+ u32 to_copy;
int i;
- state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
- if (!state)
- return -ENOMEM;
- map->osd_state = state;
+ dout("%s old %u new %u\n", __func__, map->max_osd, max);
+ if (max == map->max_osd)
+ return 0;
- weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
- if (!weight)
+ state = ceph_kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);
+ weight = ceph_kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);
+ addr = ceph_kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);
+ if (!state || !weight || !addr) {
+ kvfree(state);
+ kvfree(weight);
+ kvfree(addr);
return -ENOMEM;
- map->osd_weight = weight;
+ }
- addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
- if (!addr)
- return -ENOMEM;
- map->osd_addr = addr;
+ to_copy = min(map->max_osd, max);
+ if (map->osd_state) {
+ memcpy(state, map->osd_state, to_copy * sizeof(*state));
+ memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
+ memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
+ kvfree(map->osd_state);
+ kvfree(map->osd_weight);
+ kvfree(map->osd_addr);
+ }
+ map->osd_state = state;
+ map->osd_weight = weight;
+ map->osd_addr = addr;
for (i = map->max_osd; i < max; i++) {
map->osd_state[i] = 0;
map->osd_weight[i] = CEPH_OSD_OUT;
@@ -1017,12 +1030,16 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
if (map->osd_primary_affinity) {
u32 *affinity;
- affinity = krealloc(map->osd_primary_affinity,
- max*sizeof(*affinity), GFP_NOFS);
+ affinity = ceph_kvmalloc(array_size(max, sizeof(*affinity)),
+ GFP_NOFS);
if (!affinity)
return -ENOMEM;
- map->osd_primary_affinity = affinity;
+ memcpy(affinity, map->osd_primary_affinity,
+ to_copy * sizeof(*affinity));
+ kvfree(map->osd_primary_affinity);
+
+ map->osd_primary_affinity = affinity;
for (i = map->max_osd; i < max; i++)
map->osd_primary_affinity[i] =
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
@@ -1043,7 +1060,7 @@ static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE);
dout("%s work_size %zu bytes\n", __func__, work_size);
- workspace = kmalloc(work_size, GFP_NOIO);
+ workspace = ceph_kvmalloc(work_size, GFP_NOIO);
if (!workspace) {
crush_destroy(crush);
return -ENOMEM;
@@ -1052,7 +1069,7 @@ static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
if (map->crush)
crush_destroy(map->crush);
- kfree(map->crush_workspace);
+ kvfree(map->crush_workspace);
map->crush = crush;
map->crush_workspace = workspace;
return 0;
@@ -1298,9 +1315,9 @@ static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
if (!map->osd_primary_affinity) {
int i;
- map->osd_primary_affinity = kmalloc_array(map->max_osd,
- sizeof(u32),
- GFP_NOFS);
+ map->osd_primary_affinity = ceph_kvmalloc(
+ array_size(map->max_osd, sizeof(*map->osd_primary_affinity)),
+ GFP_NOFS);
if (!map->osd_primary_affinity)
return -ENOMEM;
@@ -1321,7 +1338,7 @@ static int decode_primary_affinity(void **p, void *end,
ceph_decode_32_safe(p, end, len, e_inval);
if (len == 0) {
- kfree(map->osd_primary_affinity);
+ kvfree(map->osd_primary_affinity);
map->osd_primary_affinity = NULL;
return 0;
}
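
There is no krealloc() counterpart for kvmalloc()'d buffers, hence the manual allocate-copy-free sequence above. Reduced to its essentials (error handling trimmed; old, old_n and new_n are illustrative names):

        u32 *new = ceph_kvmalloc(array_size(new_n, sizeof(*new)), GFP_NOFS);

        if (!new)
                return -ENOMEM;
        memcpy(new, old, old_n * sizeof(*new)); /* preserve existing slots */
        kvfree(old);
        old = new;
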
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 4ce42c62458e..d75fddca44c9 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1960,7 +1960,7 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len))
goto unwrap_failed;
- if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
+ if (xdr_buf_read_mic(rcv_buf, &mic, mic_offset))
goto unwrap_failed;
maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index a07b516e503a..f7f78566be46 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1837,7 +1837,7 @@ call_allocate(struct rpc_task *task)
return;
}
- rpc_exit(task, -ERESTARTSYS);
+ rpc_call_rpcerror(task, -ERESTARTSYS);
}
static int
@@ -1862,6 +1862,7 @@ rpc_xdr_encode(struct rpc_task *task)
req->rq_rbuffer,
req->rq_rcvsize);
+ req->rq_reply_bytes_recvd = 0;
req->rq_snd_buf.head[0].iov_len = 0;
xdr_init_encode(&xdr, &req->rq_snd_buf,
req->rq_snd_buf.head[0].iov_base, req);
@@ -1881,6 +1882,8 @@ call_encode(struct rpc_task *task)
if (!rpc_task_need_encode(task))
goto out;
dprint_status(task);
+ /* Dequeue task from the receive queue while we're encoding */
+ xprt_request_dequeue_xprt(task);
/* Encode here so that rpcsec_gss can use correct sequence number. */
rpc_xdr_encode(task);
/* Did the encode result in an error condition? */
@@ -2479,6 +2482,7 @@ call_decode(struct rpc_task *task)
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
struct xdr_stream xdr;
+ int err;
dprint_status(task);
@@ -2501,6 +2505,15 @@ call_decode(struct rpc_task *task)
* before it changed req->rq_reply_bytes_recvd.
*/
smp_rmb();
+
+ /*
+ * Did we ever call xprt_complete_rqst()? If not, we should assume
+ * the message is incomplete.
+ */
+ err = -EAGAIN;
+ if (!req->rq_reply_bytes_recvd)
+ goto out;
+
req->rq_rcv_buf.len = req->rq_private_buf.len;
/* Check that the softirq receive buffer is valid */
@@ -2509,7 +2522,9 @@ call_decode(struct rpc_task *task)
xdr_init_decode(&xdr, &req->rq_rcv_buf,
req->rq_rcv_buf.head[0].iov_base, req);
- switch (rpc_decode_header(task, &xdr)) {
+ err = rpc_decode_header(task, &xdr);
+out:
+ switch (err) {
case 0:
task->tk_action = rpc_exit_task;
task->tk_status = rpcauth_unwrap_resp(task, &xdr);
@@ -2518,9 +2533,6 @@ call_decode(struct rpc_task *task)
return;
case -EAGAIN:
task->tk_status = 0;
- xdr_free_bvec(&req->rq_rcv_buf);
- req->rq_reply_bytes_recvd = 0;
- req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie);
@@ -2561,7 +2573,7 @@ rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
return 0;
out_fail:
trace_rpc_bad_callhdr(task);
- rpc_exit(task, error);
+ rpc_call_rpcerror(task, error);
return error;
}
@@ -2628,7 +2640,7 @@ out_garbage:
return -EAGAIN;
}
out_err:
- rpc_exit(task, error);
+ rpc_call_rpcerror(task, error);
return error;
out_unparsable:
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 1f275aba786f..360afe153193 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -541,33 +541,14 @@ rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
return NULL;
}
-static void
-rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
- struct rpc_wait_queue *queue, struct rpc_task *task)
-{
- rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
-}
-
/*
* Wake up a queued task while the queue lock is being held
*/
-static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
-{
- rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
-}
-
-/*
- * Wake up a task on a specific queue
- */
-void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
- struct rpc_wait_queue *queue,
- struct rpc_task *task)
+static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
+ struct rpc_task *task)
{
- if (!RPC_IS_QUEUED(task))
- return;
- spin_lock(&queue->lock);
- rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
- spin_unlock(&queue->lock);
+ rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
+ task, NULL, NULL);
}
/*
@@ -930,8 +911,10 @@ static void __rpc_execute(struct rpc_task *task)
/*
* Signalled tasks should exit rather than sleep.
*/
- if (RPC_SIGNALLED(task))
+ if (RPC_SIGNALLED(task)) {
+ task->tk_rpc_status = -ERESTARTSYS;
rpc_exit(task, -ERESTARTSYS);
+ }
/*
* The queue->lock protects against races with
@@ -967,6 +950,7 @@ static void __rpc_execute(struct rpc_task *task)
*/
dprintk("RPC: %5u got signal\n", task->tk_pid);
set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
+ task->tk_rpc_status = -ERESTARTSYS;
rpc_exit(task, -ERESTARTSYS);
}
dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 48c93b9e525e..14ba9e72a204 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -560,7 +560,7 @@ EXPORT_SYMBOL_GPL(xdr_init_encode);
* required at the end of encoding, or any other time when the xdr_buf
* data might be read.
*/
-void xdr_commit_encode(struct xdr_stream *xdr)
+inline void xdr_commit_encode(struct xdr_stream *xdr)
{
int shift = xdr->scratch.iov_len;
void *page;
@@ -1236,43 +1236,60 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
-/* If the netobj starting offset bytes from the start of xdr_buf is contained
- * entirely in the head or the tail, set object to point to it; otherwise
- * try to find space for it at the end of the tail, copy it there, and
- * set obj to point to it. */
-int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
+/**
+ * xdr_buf_read_mic() - obtain the address of the GSS mic from xdr buf
+ * @buf: pointer to buffer containing a mic
+ * @mic: on success, returns the address of the mic
+ * @offset: the offset in buf where mic may be found
+ *
+ * This function may modify the xdr buf if the mic is found to be straddling
+ * a boundary between head, pages, and tail. On success the mic can be read
+ * from the address returned. There is no need to free the mic.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int xdr_buf_read_mic(struct xdr_buf *buf, struct xdr_netobj *mic, unsigned int offset)
{
struct xdr_buf subbuf;
+ unsigned int boundary;
- if (xdr_decode_word(buf, offset, &obj->len))
+ if (xdr_decode_word(buf, offset, &mic->len))
return -EFAULT;
- if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
+ offset += 4;
+
+ /* Is the mic partially in the head? */
+ boundary = buf->head[0].iov_len;
+ if (offset < boundary && (offset + mic->len) > boundary)
+ xdr_shift_buf(buf, boundary - offset);
+
+ /* Is the mic partially in the pages? */
+ boundary += buf->page_len;
+ if (offset < boundary && (offset + mic->len) > boundary)
+ xdr_shrink_pagelen(buf, boundary - offset);
+
+ if (xdr_buf_subsegment(buf, &subbuf, offset, mic->len))
return -EFAULT;
- /* Is the obj contained entirely in the head? */
- obj->data = subbuf.head[0].iov_base;
- if (subbuf.head[0].iov_len == obj->len)
+ /* Is the mic contained entirely in the head? */
+ mic->data = subbuf.head[0].iov_base;
+ if (subbuf.head[0].iov_len == mic->len)
return 0;
- /* ..or is the obj contained entirely in the tail? */
- obj->data = subbuf.tail[0].iov_base;
- if (subbuf.tail[0].iov_len == obj->len)
+ /* ..or is the mic contained entirely in the tail? */
+ mic->data = subbuf.tail[0].iov_base;
+ if (subbuf.tail[0].iov_len == mic->len)
return 0;
- /* use end of tail as storage for obj:
- * (We don't copy to the beginning because then we'd have
- * to worry about doing a potentially overlapping copy.
- * This assumes the object is at most half the length of the
- * tail.) */
- if (obj->len > buf->buflen - buf->len)
+ /* Find a contiguous area in @buf to hold all of @mic */
+ if (mic->len > buf->buflen - buf->len)
return -ENOMEM;
if (buf->tail[0].iov_len != 0)
- obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
+ mic->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
else
- obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
- __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
+ mic->data = buf->head[0].iov_base + buf->head[0].iov_len;
+ __read_bytes_from_xdr_buf(&subbuf, mic->data, mic->len);
return 0;
}
-EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
+EXPORT_SYMBOL_GPL(xdr_buf_read_mic);
/* Returns 0 on success, or else a negative error code. */
static int
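
The straddle test used twice in xdr_buf_read_mic() is a plain interval check. A standalone version with a worked example (all values illustrative):

#include <stdbool.h>
#include <stdio.h>

/* True if [offset, offset + len) crosses the given boundary. */
static bool straddles(unsigned int offset, unsigned int len,
                      unsigned int boundary)
{
        return offset < boundary && offset + len > boundary;
}

int main(void)
{
        /* A 20-byte mic at offset 120 crosses a 128-byte head. */
        printf("%d\n", straddles(120, 20, 128));        /* prints 1 */
        return 0;
}
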
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 2e71f5455c6c..8a45b3ccc313 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -456,6 +456,12 @@ void xprt_release_rqst_cong(struct rpc_task *task)
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
+static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
+{
+ if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
+ __xprt_lock_write_next_cong(xprt);
+}
+
/*
* Clear the congestion window wait flag and wake up the next
* entry on xprt->sending
@@ -671,6 +677,7 @@ void xprt_disconnect_done(struct rpc_xprt *xprt)
spin_lock(&xprt->transport_lock);
xprt_clear_connected(xprt);
xprt_clear_write_space_locked(xprt);
+ xprt_clear_congestion_window_wait_locked(xprt);
xprt_wake_pending_tasks(xprt, -ENOTCONN);
spin_unlock(&xprt->transport_lock);
}
@@ -1324,6 +1331,36 @@ xprt_request_dequeue_transmit(struct rpc_task *task)
}
/**
+ * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
+ * @task: pointer to rpc_task
+ *
+ * Remove a task from the transmit and receive queues, and ensure that
+ * it is not pinned by the receive work item.
+ */
+void
+xprt_request_dequeue_xprt(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
+ test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
+ xprt_is_pinned_rqst(req)) {
+ spin_lock(&xprt->queue_lock);
+ xprt_request_dequeue_transmit_locked(task);
+ xprt_request_dequeue_receive_locked(task);
+ while (xprt_is_pinned_rqst(req)) {
+ set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
+ spin_unlock(&xprt->queue_lock);
+ xprt_wait_on_pinned_rqst(req);
+ spin_lock(&xprt->queue_lock);
+ clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
+ }
+ spin_unlock(&xprt->queue_lock);
+ }
+}
+
+/**
* xprt_request_prepare - prepare an encoded request for transport
* @req: pointer to rpc_rqst
*
@@ -1747,28 +1784,6 @@ void xprt_retry_reserve(struct rpc_task *task)
xprt_do_reserve(xprt, task);
}
-static void
-xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
-{
- struct rpc_xprt *xprt = req->rq_xprt;
-
- if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
- test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
- xprt_is_pinned_rqst(req)) {
- spin_lock(&xprt->queue_lock);
- xprt_request_dequeue_transmit_locked(task);
- xprt_request_dequeue_receive_locked(task);
- while (xprt_is_pinned_rqst(req)) {
- set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
- spin_unlock(&xprt->queue_lock);
- xprt_wait_on_pinned_rqst(req);
- spin_lock(&xprt->queue_lock);
- clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
- }
- spin_unlock(&xprt->queue_lock);
- }
-}
-
/**
* xprt_release - release an RPC request slot
* @task: task which is finished with the slot
@@ -1788,7 +1803,7 @@ void xprt_release(struct rpc_task *task)
}
xprt = req->rq_xprt;
- xprt_request_dequeue_all(task, req);
+ xprt_request_dequeue_xprt(task);
spin_lock(&xprt->transport_lock);
xprt->ops->release_xprt(xprt, task);
if (xprt->ops->release_request)
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 59e624b1d7a0..50e075fcdd8f 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -54,9 +54,7 @@ size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
- struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
-
- return r_xprt->rx_buf.rb_bc_srv_max_requests;
+ return RPCRDMA_BACKWARD_WRS >> 1;
}
static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 0b6dad7580a1..30065a28628c 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -7,67 +7,37 @@
/* Lightweight memory registration using Fast Registration Work
* Requests (FRWR).
*
- * FRWR features ordered asynchronous registration and deregistration
- * of arbitrarily sized memory regions. This is the fastest and safest
+ * FRWR features ordered asynchronous registration and invalidation
+ * of arbitrarily sized memory regions. This is the fastest and safest
* but most complex memory registration mode.
*/
/* Normal operation
*
- * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
+ * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
* Work Request (frwr_map). When the RDMA operation is finished, this
* Memory Region is invalidated using a LOCAL_INV Work Request
- * (frwr_unmap_sync).
+ * (frwr_unmap_async and frwr_unmap_sync).
*
- * Typically these Work Requests are not signaled, and neither are RDMA
- * SEND Work Requests (with the exception of signaling occasionally to
- * prevent provider work queue overflows). This greatly reduces HCA
+ * Typically FAST_REG Work Requests are not signaled, and neither are
+ * RDMA Send Work Requests (with the exception of signaling occasionally
+ * to prevent provider work queue overflows). This greatly reduces HCA
* interrupt workload.
- *
- * As an optimization, frwr_unmap marks MRs INVALID before the
- * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
- * rb_mrs immediately so that no work (like managing a linked list
- * under a spinlock) is needed in the completion upcall.
- *
- * But this means that frwr_map() can occasionally encounter an MR
- * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
- * ordering prevents a subsequent FAST_REG WR from executing against
- * that MR while it is still being invalidated.
*/
/* Transport recovery
*
- * ->op_map and the transport connect worker cannot run at the same
- * time, but ->op_unmap can fire while the transport connect worker
- * is running. Thus MR recovery is handled in ->op_map, to guarantee
- * that recovered MRs are owned by a sending RPC, and not one where
- * ->op_unmap could fire at the same time transport reconnect is
- * being done.
- *
- * When the underlying transport disconnects, MRs are left in one of
- * four states:
- *
- * INVALID: The MR was not in use before the QP entered ERROR state.
- *
- * VALID: The MR was registered before the QP entered ERROR state.
- *
- * FLUSHED_FR: The MR was being registered when the QP entered ERROR
- * state, and the pending WR was flushed.
- *
- * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
- * state, and the pending WR was flushed.
- *
- * When frwr_map encounters FLUSHED and VALID MRs, they are recovered
- * with ib_dereg_mr and then are re-initialized. Because MR recovery
- * allocates fresh resources, it is deferred to a workqueue, and the
- * recovered MRs are placed back on the rb_mrs list when recovery is
- * complete. frwr_map allocates another MR for the current RPC while
- * the broken MR is reset.
- *
- * To ensure that frwr_map doesn't encounter an MR that is marked
- * INVALID but that is about to be flushed due to a previous transport
- * disconnect, the transport connect worker attempts to drain all
- * pending send queue WRs before the transport is reconnected.
+ * frwr_map and frwr_unmap_* cannot run at the same time as the
+ * transport connect worker. The connect worker holds the transport
+ * send lock, just as ->send_request does. This prevents frwr_map and
+ * the connect worker from running concurrently. When a connection is
+ * closed, the Receive completion queue is drained before allowing
+ * the connect worker to get control. This prevents frwr_unmap and the
+ * connect worker from running concurrently.
+ *
+ * When the underlying transport disconnects, MRs that are in flight
+ * are flushed and are likely unusable. Thus all flushed MRs are
+ * destroyed. New MRs are created on demand.
*/
#include <linux/sunrpc/rpc_rdma.h>
@@ -118,15 +88,8 @@ void frwr_release_mr(struct rpcrdma_mr *mr)
kfree(mr);
}
-/* MRs are dynamically allocated, so simply clean up and release the MR.
- * A replacement MR will subsequently be allocated on demand.
- */
-static void
-frwr_mr_recycle_worker(struct work_struct *work)
+static void frwr_mr_recycle(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
- struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
- struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
-
trace_xprtrdma_mr_recycle(mr);
if (mr->mr_dir != DMA_NONE) {
@@ -136,14 +99,40 @@ frwr_mr_recycle_worker(struct work_struct *work)
mr->mr_dir = DMA_NONE;
}
- spin_lock(&r_xprt->rx_buf.rb_mrlock);
+ spin_lock(&r_xprt->rx_buf.rb_lock);
list_del(&mr->mr_all);
r_xprt->rx_stats.mrs_recycled++;
- spin_unlock(&r_xprt->rx_buf.rb_mrlock);
+ spin_unlock(&r_xprt->rx_buf.rb_lock);
frwr_release_mr(mr);
}
+/* MRs are dynamically allocated, so simply clean up and release the MR.
+ * A replacement MR will subsequently be allocated on demand.
+ */
+static void
+frwr_mr_recycle_worker(struct work_struct *work)
+{
+ struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr,
+ mr_recycle);
+
+ frwr_mr_recycle(mr->mr_xprt, mr);
+}
+
+/* frwr_recycle - Discard MRs
+ * @req: request to reset
+ *
+ * Used after a reconnect. These MRs could still be in flight; we
+ * can't tell. The safe thing to do is release them.
+ */
+void frwr_recycle(struct rpcrdma_req *req)
+{
+ struct rpcrdma_mr *mr;
+
+ while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
+ frwr_mr_recycle(mr->mr_xprt, mr);
+}
+
/* frwr_reset - Place MRs back on the free list
* @req: request to reset
*
@@ -156,12 +145,10 @@ frwr_mr_recycle_worker(struct work_struct *work)
*/
void frwr_reset(struct rpcrdma_req *req)
{
- while (!list_empty(&req->rl_registered)) {
- struct rpcrdma_mr *mr;
+ struct rpcrdma_mr *mr;
- mr = rpcrdma_mr_pop(&req->rl_registered);
- rpcrdma_mr_unmap_and_put(mr);
- }
+ while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
+ rpcrdma_mr_put(mr);
}
/**
@@ -179,11 +166,14 @@ int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
struct ib_mr *frmr;
int rc;
+ /* NB: ib_alloc_mr and device drivers typically allocate
+ * memory with GFP_KERNEL.
+ */
frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
if (IS_ERR(frmr))
goto out_mr_err;
- sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
+ sg = kcalloc(depth, sizeof(*sg), GFP_NOFS);
if (!sg)
goto out_list_err;
@@ -203,8 +193,6 @@ out_mr_err:
return rc;
out_list_err:
- dprintk("RPC: %s: sg allocation failure\n",
- __func__);
ib_dereg_mr(frmr);
return -ENOMEM;
}
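
The switch from GFP_KERNEL to GFP_NOFS for the scatterlist matters because MRs can be replenished while NFS writeback is in progress; an allocation that re-entered filesystem reclaim could deadlock. ib_alloc_mr() itself is left alone since, as the new comment notes, it allocates with GFP_KERNEL internally. A hedged sketch of the allocation rule:

    #include <linux/slab.h>
    #include <linux/scatterlist.h>

    /* Illustrative only: on paths that may run during filesystem
     * writeback, forbid reclaim from re-entering the filesystem.
     */
    static struct scatterlist *demo_alloc_sg(int depth)
    {
            return kcalloc(depth, sizeof(struct scatterlist), GFP_NOFS);
    }
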
@@ -290,8 +278,8 @@ int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
- ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
- ia->ri_max_frwr_depth);
+ ia->ri_max_segs =
+ DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth);
/* Reply chunks require segments for head and tail buffers */
ia->ri_max_segs += 2;
if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
@@ -323,31 +311,25 @@ size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
* @nsegs: number of segments remaining
* @writing: true when RDMA Write will be used
* @xid: XID of RPC using the registered memory
- * @out: initialized MR
+ * @mr: MR to fill in
*
* Prepare a REG_MR Work Request to register a memory region
* for remote access via RDMA READ or RDMA WRITE.
*
* Returns the next segment or a negative errno pointer.
- * On success, the prepared MR is planted in @out.
+ * On success, @mr is filled in.
*/
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr_seg *seg,
int nsegs, bool writing, __be32 xid,
- struct rpcrdma_mr **out)
+ struct rpcrdma_mr *mr)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
- struct rpcrdma_mr *mr;
- struct ib_mr *ibmr;
struct ib_reg_wr *reg_wr;
+ struct ib_mr *ibmr;
int i, n;
u8 key;
- mr = rpcrdma_mr_get(r_xprt);
- if (!mr)
- goto out_getmr_err;
-
if (nsegs > ia->ri_max_frwr_depth)
nsegs = ia->ri_max_frwr_depth;
for (i = 0; i < nsegs;) {
@@ -362,7 +344,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
++seg;
++i;
- if (holes_ok)
+ if (ia->ri_mrtype == IB_MR_TYPE_SG_GAPS)
continue;
if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
@@ -397,22 +379,15 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
mr->mr_offset = ibmr->iova;
trace_xprtrdma_mr_map(mr);
- *out = mr;
return seg;
-out_getmr_err:
- xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
- return ERR_PTR(-EAGAIN);
-
out_dmamap_err:
mr->mr_dir = DMA_NONE;
trace_xprtrdma_frwr_sgerr(mr, i);
- rpcrdma_mr_put(mr);
return ERR_PTR(-EIO);
out_mapmr_err:
trace_xprtrdma_frwr_maperr(mr, n);
- rpcrdma_mr_recycle(mr);
return ERR_PTR(-EIO);
}
@@ -485,7 +460,7 @@ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
if (mr->mr_handle == rep->rr_inv_rkey) {
list_del_init(&mr->mr_list);
trace_xprtrdma_mr_remoteinv(mr);
- rpcrdma_mr_unmap_and_put(mr);
+ rpcrdma_mr_put(mr);
break; /* only one invalidated MR per RPC */
}
}
@@ -495,7 +470,7 @@ static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
if (wc->status != IB_WC_SUCCESS)
rpcrdma_mr_recycle(mr);
else
- rpcrdma_mr_unmap_and_put(mr);
+ rpcrdma_mr_put(mr);
}
/**
@@ -532,8 +507,8 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_li_wake(wc, frwr);
- complete(&frwr->fr_linv_done);
__frwr_release_mr(wc, mr);
+ complete(&frwr->fr_linv_done);
}
/**
@@ -562,8 +537,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*/
frwr = NULL;
prev = &first;
- while (!list_empty(&req->rl_registered)) {
- mr = rpcrdma_mr_pop(&req->rl_registered);
+ while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
trace_xprtrdma_mr_localinv(mr);
r_xprt->rx_stats.local_inv_needed++;
@@ -632,11 +606,15 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
struct rpcrdma_frwr *frwr =
container_of(cqe, struct rpcrdma_frwr, fr_cqe);
struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
+ struct rpcrdma_rep *rep = mr->mr_req->rl_reply;
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_li_done(wc, frwr);
- rpcrdma_complete_rqst(frwr->fr_req->rl_reply);
__frwr_release_mr(wc, mr);
+
+ /* Ensure @rep is generated before __frwr_release_mr */
+ smp_rmb();
+ rpcrdma_complete_rqst(rep);
}
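
Both completion handlers above were reordered for the same reason: complete() and rpcrdma_complete_rqst() hand control back to a context that may immediately retire the rpcrdma_req, so every access to the MR (and the snapshot of rl_reply) has to happen first. A minimal sketch of the release-before-notify rule, with hypothetical demo_* names:

    #include <linux/completion.h>
    #include <linux/slab.h>

    struct demo_ctx {
            struct completion done;
            void *resource;
    };

    /* The waiter blocked on ->done may free the whole context as soon
     * as complete() returns, so releasing ->resource must come first.
     */
    static void demo_done_handler(struct demo_ctx *ctx)
    {
            kfree(ctx->resource);   /* last touch of ctx members */
            ctx->resource = NULL;
            complete(&ctx->done);   /* ctx may be freed from here on */
    }
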
/**
@@ -662,15 +640,13 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*/
frwr = NULL;
prev = &first;
- while (!list_empty(&req->rl_registered)) {
- mr = rpcrdma_mr_pop(&req->rl_registered);
+ while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
trace_xprtrdma_mr_localinv(mr);
r_xprt->rx_stats.local_inv_needed++;
frwr = &mr->frwr;
frwr->fr_cqe.done = frwr_wc_localinv;
- frwr->fr_req = req;
last = &frwr->fr_invwr;
last->next = NULL;
last->wr_cqe = &frwr->fr_cqe;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 4345e6912392..b86b5fd62d9f 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -342,6 +342,32 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
return 0;
}
+static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_req *req,
+ struct rpcrdma_mr_seg *seg,
+ int nsegs, bool writing,
+ struct rpcrdma_mr **mr)
+{
+ *mr = rpcrdma_mr_pop(&req->rl_free_mrs);
+ if (!*mr) {
+ *mr = rpcrdma_mr_get(r_xprt);
+ if (!*mr)
+ goto out_getmr_err;
+ trace_xprtrdma_mr_get(req);
+ (*mr)->mr_req = req;
+ }
+
+ rpcrdma_mr_push(*mr, &req->rl_registered);
+ return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
+
+out_getmr_err:
+ trace_xprtrdma_nomrs(req);
+ xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
+ if (r_xprt->rx_ep.rep_connected != -ENODEV)
+ schedule_work(&r_xprt->rx_buf.rb_refresh_worker);
+ return ERR_PTR(-EAGAIN);
+}
+
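
rpcrdma_mr_prepare() layers a per-request MR cache over the transport-wide pool: rl_free_mrs is touched only by the task that owns the request, so the fast path takes no lock at all; only a miss falls back to the shared pool and, on total exhaustion, to -EAGAIN plus an async refill. A sketch of that shape under hypothetical names:

    #include <linux/err.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct demo_obj { struct list_head node; };

    /* Assumed helper with rpcrdma_mr_pop() semantics: returns NULL
     * when the list is empty (see the xprt_rdma.h hunk further down).
     */
    struct demo_obj *demo_pop(struct list_head *list);

    struct demo_pool {
            spinlock_t lock;
            struct list_head free;
            struct work_struct refill;
    };

    static struct demo_obj *demo_get(struct list_head *cache,
                                     struct demo_pool *pool)
    {
            struct demo_obj *obj = demo_pop(cache); /* owner-only: no lock */

            if (!obj) {
                    spin_lock(&pool->lock);
                    obj = demo_pop(&pool->free);    /* shared slow path */
                    spin_unlock(&pool->lock);
                    if (!obj) {
                            schedule_work(&pool->refill);
                            return ERR_PTR(-EAGAIN); /* caller retries */
                    }
            }
            return obj;
    }
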
/* Register and XDR encode the Read list. Supports encoding a list of read
* segments that belong to a single read chunk.
*
@@ -356,9 +382,10 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
*
* Only a single @pos value is currently supported.
*/
-static noinline int
-rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
- struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
+static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_req *req,
+ struct rpc_rqst *rqst,
+ enum rpcrdma_chunktype rtype)
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
@@ -379,10 +406,9 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
return nsegs;
do {
- seg = frwr_map(r_xprt, seg, nsegs, false, rqst->rq_xid, &mr);
+ seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
- rpcrdma_mr_push(mr, &req->rl_registered);
if (encode_read_segment(xdr, mr, pos) < 0)
return -EMSGSIZE;
@@ -411,9 +437,10 @@ done:
*
* Only a single Write chunk is currently supported.
*/
-static noinline int
-rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
- struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
+static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_req *req,
+ struct rpc_rqst *rqst,
+ enum rpcrdma_chunktype wtype)
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
@@ -440,10 +467,9 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
- seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
+ seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
- rpcrdma_mr_push(mr, &req->rl_registered);
if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
@@ -474,9 +500,10 @@ done:
* Returns zero on success, or a negative errno if a failure occurred.
* @xdr is advanced to the next position in the stream.
*/
-static noinline int
-rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
- struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
+static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_req *req,
+ struct rpc_rqst *rqst,
+ enum rpcrdma_chunktype wtype)
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
@@ -501,10 +528,9 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
- seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
+ seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
- rpcrdma_mr_push(mr, &req->rl_registered);
if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
@@ -841,12 +867,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
* chunks. Very likely the connection has been replaced,
* so these registrations are invalid and unusable.
*/
- while (unlikely(!list_empty(&req->rl_registered))) {
- struct rpcrdma_mr *mr;
-
- mr = rpcrdma_mr_pop(&req->rl_registered);
- rpcrdma_mr_recycle(mr);
- }
+ frwr_recycle(req);
/* This implementation supports the following combinations
* of chunk lists in one RPC-over-RDMA Call message:
@@ -1240,8 +1261,6 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
struct rpc_rqst *rqst = rep->rr_rqst;
int status;
- xprt->reestablish_timeout = 0;
-
switch (rep->rr_proc) {
case rdma_msg:
status = rpcrdma_decode_msg(r_xprt, rep, rqst);
@@ -1300,6 +1319,12 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
u32 credits;
__be32 *p;
+ /* Any data means we had a useful conversation, so
+ * we don't need to delay the next reconnect.
+ */
+ if (xprt->reestablish_timeout)
+ xprt->reestablish_timeout = 0;
+
/* Fixed transport header fields */
xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
rep->rr_hdrbuf.head[0].iov_base, NULL);
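
Resetting reestablish_timeout now happens once per received header rather than per completed RPC, and only when the field is nonzero; the test before the store plausibly keeps the hot reply path from repeatedly dirtying a cache line that is already zero. The idiom in isolation:

    /* Store only on transition: reading an already-zero value is
     * cheap, while an unconditional store would keep invalidating
     * the cache line for other CPUs on every reply.
     */
    static inline void demo_reset_timeout(unsigned long *timeout)
    {
            if (*timeout)
                    *timeout = 0;
    }
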
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 2ec349ed4770..160558b4135e 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -423,8 +423,6 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
if (ep->rep_connected == -ENODEV)
return;
- if (ep->rep_connected > 0)
- xprt->reestablish_timeout = 0;
rpcrdma_ep_disconnect(ep, ia);
/* Prepare @xprt for the next connection by reinitializing
@@ -434,6 +432,7 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
xprt->cwnd = RPC_CWNDSHIFT;
out:
+ xprt->reestablish_timeout = 0;
++xprt->connect_cookie;
xprt_disconnect_done(xprt);
}
@@ -494,9 +493,9 @@ xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
* @reconnect_timeout: reconnect timeout after server disconnects
*
*/
-static void xprt_rdma_tcp_set_connect_timeout(struct rpc_xprt *xprt,
- unsigned long connect_timeout,
- unsigned long reconnect_timeout)
+static void xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt,
+ unsigned long connect_timeout,
+ unsigned long reconnect_timeout)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
@@ -571,6 +570,7 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
return;
out_sleep:
+ set_bit(XPRT_CONGESTED, &xprt->state);
rpc_sleep_on(&xprt->backlog, task, NULL);
task->tk_status = -EAGAIN;
}
@@ -589,7 +589,8 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
memset(rqst, 0, sizeof(*rqst));
rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
- rpc_wake_up_next(&xprt->backlog);
+ if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
+ clear_bit(XPRT_CONGESTED, &xprt->state);
}
static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
@@ -803,7 +804,7 @@ static const struct rpc_xprt_ops xprt_rdma_procs = {
.send_request = xprt_rdma_send_request,
.close = xprt_rdma_close,
.destroy = xprt_rdma_destroy,
- .set_connect_timeout = xprt_rdma_tcp_set_connect_timeout,
+ .set_connect_timeout = xprt_rdma_set_connect_timeout,
.print_stats = xprt_rdma_print_stats,
.enable_swap = xprt_rdma_enable_swap,
.disable_swap = xprt_rdma_disable_swap,
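
The alloc_slot/free_slot pair maintains XPRT_CONGESTED as a cheap backlog indicator: the bit is raised when a task must sleep on the backlog, and lowered only when rpc_wake_up_next() reports that no waiter remained to wake. A hedged sketch of the pairing, where demo_wake_up_next() and demo_sleep_on() are assumed helpers:

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define DEMO_CONGESTED 0

    struct demo_queue;
    bool demo_wake_up_next(struct demo_queue *q);   /* true if woken */
    void demo_sleep_on(struct demo_queue *q);

    static void demo_alloc_backlogged(unsigned long *state,
                                      struct demo_queue *backlog)
    {
            set_bit(DEMO_CONGESTED, state); /* raise before sleeping */
            demo_sleep_on(backlog);
    }

    static void demo_free_slot(unsigned long *state,
                               struct demo_queue *backlog)
    {
            if (!demo_wake_up_next(backlog))        /* nobody was waiting */
                    clear_bit(DEMO_CONGESTED, state);
    }
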
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index b10aa16557f0..3a907537e2cf 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -53,6 +53,7 @@
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
+#include <linux/log2.h>
#include <asm-generic/barrier.h>
#include <asm/bitops.h>
@@ -74,8 +75,10 @@
* internal functions
*/
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
+static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
+static void rpcrdma_mr_free(struct rpcrdma_mr *mr);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
gfp_t flags);
@@ -405,9 +408,8 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
struct rpcrdma_ep *ep = &r_xprt->rx_ep;
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_req *req;
- struct rpcrdma_rep *rep;
- cancel_delayed_work_sync(&buf->rb_refresh_worker);
+ cancel_work_sync(&buf->rb_refresh_worker);
/* This is similar to rpcrdma_ep_destroy, but:
* - Don't cancel the connect worker.
@@ -429,8 +431,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
/* The ULP is responsible for ensuring all DMA
* mappings and MRs are gone.
*/
- list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
- rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
+ rpcrdma_reps_destroy(buf);
list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
@@ -604,10 +605,10 @@ void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
* Unlike a normal reconnection, a fresh PD and a new set
* of MRs and buffers is needed.
*/
-static int
-rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
- struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
+static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
+ struct ib_qp_init_attr *qp_init_attr)
{
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
int rc, err;
trace_xprtrdma_reinsert(r_xprt);
@@ -624,7 +625,7 @@ rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
}
rc = -ENETUNREACH;
- err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
+ err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
if (err) {
pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
goto out3;
@@ -641,16 +642,16 @@ out1:
return rc;
}
-static int
-rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
- struct rpcrdma_ia *ia)
+static int rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt,
+ struct ib_qp_init_attr *qp_init_attr)
{
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
struct rdma_cm_id *id, *old;
int err, rc;
trace_xprtrdma_reconnect(r_xprt);
- rpcrdma_ep_disconnect(ep, ia);
+ rpcrdma_ep_disconnect(&r_xprt->rx_ep, ia);
rc = -EHOSTUNREACH;
id = rpcrdma_create_id(r_xprt, ia);
@@ -672,7 +673,7 @@ rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
goto out_destroy;
}
- err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
+ err = rdma_create_qp(id, ia->ri_pd, qp_init_attr);
if (err)
goto out_destroy;
@@ -697,25 +698,27 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
rx_ia);
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+ struct ib_qp_init_attr qp_init_attr;
int rc;
retry:
+ memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr));
switch (ep->rep_connected) {
case 0:
dprintk("RPC: %s: connecting...\n", __func__);
- rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
+ rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr);
if (rc) {
rc = -ENETUNREACH;
goto out_noupdate;
}
break;
case -ENODEV:
- rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
+ rc = rpcrdma_ep_recreate_xprt(r_xprt, &qp_init_attr);
if (rc)
goto out_noupdate;
break;
default:
- rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
+ rc = rpcrdma_ep_reconnect(r_xprt, &qp_init_attr);
if (rc)
goto out;
}
@@ -729,6 +732,8 @@ retry:
if (rc)
goto out;
+ if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
+ xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
if (ep->rep_connected <= 0) {
if (ep->rep_connected == -EAGAIN)
@@ -942,14 +947,12 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
unsigned int count;
- LIST_HEAD(free);
- LIST_HEAD(all);
for (count = 0; count < ia->ri_max_segs; count++) {
struct rpcrdma_mr *mr;
int rc;
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ mr = kzalloc(sizeof(*mr), GFP_NOFS);
if (!mr)
break;
@@ -961,15 +964,13 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
mr->mr_xprt = r_xprt;
- list_add(&mr->mr_list, &free);
- list_add(&mr->mr_all, &all);
+ spin_lock(&buf->rb_lock);
+ list_add(&mr->mr_list, &buf->rb_mrs);
+ list_add(&mr->mr_all, &buf->rb_all_mrs);
+ spin_unlock(&buf->rb_lock);
}
- spin_lock(&buf->rb_mrlock);
- list_splice(&free, &buf->rb_mrs);
- list_splice(&all, &buf->rb_all);
r_xprt->rx_stats.mrs_allocated += count;
- spin_unlock(&buf->rb_mrlock);
trace_xprtrdma_createmrs(r_xprt, count);
}
@@ -977,7 +978,7 @@ static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
- rb_refresh_worker.work);
+ rb_refresh_worker);
struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
rx_buf);
@@ -999,12 +1000,18 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
struct rpcrdma_regbuf *rb;
struct rpcrdma_req *req;
+ size_t maxhdrsize;
req = kzalloc(sizeof(*req), flags);
if (req == NULL)
goto out1;
- rb = rpcrdma_regbuf_alloc(RPCRDMA_HDRBUF_SIZE, DMA_TO_DEVICE, flags);
+ /* Compute maximum header buffer size in bytes */
+ maxhdrsize = rpcrdma_fixed_maxsz + 3 +
+ r_xprt->rx_ia.ri_max_segs * rpcrdma_readchunk_maxsz;
+ maxhdrsize *= sizeof(__be32);
+ rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
+ DMA_TO_DEVICE, flags);
if (!rb)
goto out2;
req->rl_rdmabuf = rb;
@@ -1018,6 +1025,7 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
if (!req->rl_recvbuf)
goto out4;
+ INIT_LIST_HEAD(&req->rl_free_mrs);
INIT_LIST_HEAD(&req->rl_registered);
spin_lock(&buffer->rb_lock);
list_add(&req->rl_all, &buffer->rb_allreqs);
@@ -1065,6 +1073,40 @@ out:
return NULL;
}
+static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
+{
+ rpcrdma_regbuf_free(rep->rr_rdmabuf);
+ kfree(rep);
+}
+
+static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
+{
+ struct llist_node *node;
+
+ /* Calls to llist_del_first are required to be serialized */
+ node = llist_del_first(&buf->rb_free_reps);
+ if (!node)
+ return NULL;
+ return llist_entry(node, struct rpcrdma_rep, rr_node);
+}
+
+static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
+ struct rpcrdma_rep *rep)
+{
+ if (!rep->rr_temp)
+ llist_add(&rep->rr_node, &buf->rb_free_reps);
+ else
+ rpcrdma_rep_destroy(rep);
+}
+
+static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
+{
+ struct rpcrdma_rep *rep;
+
+ while ((rep = rpcrdma_rep_get_locked(buf)) != NULL)
+ rpcrdma_rep_destroy(rep);
+}
+
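
Moving the free rep list from a spinlocked list_head to an llist removes the lock from both put and get: llist_add() is safe from any number of concurrent producers, while llist_del_first() tolerates concurrent adders but requires that deleters be serialized among themselves, which the comment above documents and the single recv-posting path guarantees. The pattern in miniature:

    #include <linux/llist.h>

    struct demo_item {
            struct llist_node node;
    };

    static LLIST_HEAD(demo_free_list);

    /* Producers: any context, any concurrency, no lock. */
    static void demo_put(struct demo_item *item)
    {
            llist_add(&item->node, &demo_free_list);
    }

    /* Consumer: calls to llist_del_first() on the same list must not
     * run concurrently with each other (concurrent adders are fine).
     */
    static struct demo_item *demo_get(void)
    {
            struct llist_node *node = llist_del_first(&demo_free_list);

            return node ? llist_entry(node, struct demo_item, node) : NULL;
    }
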
/**
* rpcrdma_buffer_create - Create initial set of req/rep objects
* @r_xprt: transport instance to (re)initialize
@@ -1078,12 +1120,10 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
buf->rb_bc_srv_max_requests = 0;
- spin_lock_init(&buf->rb_mrlock);
spin_lock_init(&buf->rb_lock);
INIT_LIST_HEAD(&buf->rb_mrs);
- INIT_LIST_HEAD(&buf->rb_all);
- INIT_DELAYED_WORK(&buf->rb_refresh_worker,
- rpcrdma_mr_refresh_worker);
+ INIT_LIST_HEAD(&buf->rb_all_mrs);
+ INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);
rpcrdma_mrs_create(r_xprt);
@@ -1102,7 +1142,7 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
}
buf->rb_credits = 1;
- INIT_LIST_HEAD(&buf->rb_recv_bufs);
+ init_llist_head(&buf->rb_free_reps);
rc = rpcrdma_sendctxs_create(r_xprt);
if (rc)
@@ -1114,12 +1154,6 @@ out:
return rc;
}
-static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
-{
- rpcrdma_regbuf_free(rep->rr_rdmabuf);
- kfree(rep);
-}
-
/**
* rpcrdma_req_destroy - Destroy an rpcrdma_req object
* @req: unused object to be destroyed
@@ -1127,11 +1161,13 @@ static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
* This function assumes that the caller prevents concurrent device
* unload and transport tear-down.
*/
-void
-rpcrdma_req_destroy(struct rpcrdma_req *req)
+void rpcrdma_req_destroy(struct rpcrdma_req *req)
{
list_del(&req->rl_all);
+ while (!list_empty(&req->rl_free_mrs))
+ rpcrdma_mr_free(rpcrdma_mr_pop(&req->rl_free_mrs));
+
rpcrdma_regbuf_free(req->rl_recvbuf);
rpcrdma_regbuf_free(req->rl_sendbuf);
rpcrdma_regbuf_free(req->rl_rdmabuf);
@@ -1147,25 +1183,19 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
unsigned int count;
count = 0;
- spin_lock(&buf->rb_mrlock);
- while (!list_empty(&buf->rb_all)) {
- mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
+ spin_lock(&buf->rb_lock);
+ while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
+ struct rpcrdma_mr,
+ mr_all)) != NULL) {
list_del(&mr->mr_all);
-
- spin_unlock(&buf->rb_mrlock);
-
- /* Ensure MW is not on any rl_registered list */
- if (!list_empty(&mr->mr_list))
- list_del(&mr->mr_list);
+ spin_unlock(&buf->rb_lock);
frwr_release_mr(mr);
count++;
- spin_lock(&buf->rb_mrlock);
+ spin_lock(&buf->rb_lock);
}
- spin_unlock(&buf->rb_mrlock);
+ spin_unlock(&buf->rb_lock);
r_xprt->rx_stats.mrs_allocated = 0;
-
- dprintk("RPC: %s: released %u MRs\n", __func__, count);
}
/**
@@ -1179,18 +1209,10 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
- cancel_delayed_work_sync(&buf->rb_refresh_worker);
+ cancel_work_sync(&buf->rb_refresh_worker);
rpcrdma_sendctxs_destroy(buf);
-
- while (!list_empty(&buf->rb_recv_bufs)) {
- struct rpcrdma_rep *rep;
-
- rep = list_first_entry(&buf->rb_recv_bufs,
- struct rpcrdma_rep, rr_list);
- list_del(&rep->rr_list);
- rpcrdma_rep_destroy(rep);
- }
+ rpcrdma_reps_destroy(buf);
while (!list_empty(&buf->rb_send_bufs)) {
struct rpcrdma_req *req;
@@ -1215,54 +1237,20 @@ struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct rpcrdma_mr *mr = NULL;
-
- spin_lock(&buf->rb_mrlock);
- if (!list_empty(&buf->rb_mrs))
- mr = rpcrdma_mr_pop(&buf->rb_mrs);
- spin_unlock(&buf->rb_mrlock);
+ struct rpcrdma_mr *mr;
- if (!mr)
- goto out_nomrs;
+ spin_lock(&buf->rb_lock);
+ mr = rpcrdma_mr_pop(&buf->rb_mrs);
+ spin_unlock(&buf->rb_lock);
return mr;
-
-out_nomrs:
- trace_xprtrdma_nomrs(r_xprt);
- if (r_xprt->rx_ep.rep_connected != -ENODEV)
- schedule_delayed_work(&buf->rb_refresh_worker, 0);
-
- /* Allow the reply handler and refresh worker to run */
- cond_resched();
-
- return NULL;
-}
-
-static void
-__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
-{
- spin_lock(&buf->rb_mrlock);
- rpcrdma_mr_push(mr, &buf->rb_mrs);
- spin_unlock(&buf->rb_mrlock);
-}
-
-/**
- * rpcrdma_mr_put - Release an rpcrdma_mr object
- * @mr: object to release
- *
- */
-void
-rpcrdma_mr_put(struct rpcrdma_mr *mr)
-{
- __rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}
/**
- * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
- * @mr: object to release
+ * rpcrdma_mr_put - DMA unmap an MR and release it
+ * @mr: MR to release
*
*/
-void
-rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
+void rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
@@ -1272,7 +1260,19 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
mr->mr_sg, mr->mr_nents, mr->mr_dir);
mr->mr_dir = DMA_NONE;
}
- __rpcrdma_mr_put(&r_xprt->rx_buf, mr);
+
+ rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
+}
+
+static void rpcrdma_mr_free(struct rpcrdma_mr *mr)
+{
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+
+ mr->mr_req = NULL;
+ spin_lock(&buf->rb_lock);
+ rpcrdma_mr_push(mr, &buf->rb_mrs);
+ spin_unlock(&buf->rb_lock);
}
/**
@@ -1303,39 +1303,24 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
*/
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
- struct rpcrdma_rep *rep = req->rl_reply;
-
+ if (req->rl_reply)
+ rpcrdma_rep_put(buffers, req->rl_reply);
req->rl_reply = NULL;
spin_lock(&buffers->rb_lock);
list_add(&req->rl_list, &buffers->rb_send_bufs);
- if (rep) {
- if (!rep->rr_temp) {
- list_add(&rep->rr_list, &buffers->rb_recv_bufs);
- rep = NULL;
- }
- }
spin_unlock(&buffers->rb_lock);
- if (rep)
- rpcrdma_rep_destroy(rep);
}
-/*
- * Put reply buffers back into pool when not attached to
- * request. This happens in error conditions.
+/**
+ * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
+ * @rep: rep to release
+ *
+ * Used after error conditions.
*/
-void
-rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
+void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
- struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
-
- if (!rep->rr_temp) {
- spin_lock(&buffers->rb_lock);
- list_add(&rep->rr_list, &buffers->rb_recv_bufs);
- spin_unlock(&buffers->rb_lock);
- } else {
- rpcrdma_rep_destroy(rep);
- }
+ rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
}
/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
@@ -1483,7 +1468,7 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
count = 0;
needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
- if (ep->rep_receive_count > needed)
+ if (likely(ep->rep_receive_count > needed))
goto out;
needed -= ep->rep_receive_count;
if (!temp)
@@ -1491,22 +1476,10 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
/* fast path: all needed reps can be found on the free list */
wr = NULL;
- spin_lock(&buf->rb_lock);
while (needed) {
- rep = list_first_entry_or_null(&buf->rb_recv_bufs,
- struct rpcrdma_rep, rr_list);
+ rep = rpcrdma_rep_get_locked(buf);
if (!rep)
- break;
-
- list_del(&rep->rr_list);
- rep->rr_recv_wr.next = wr;
- wr = &rep->rr_recv_wr;
- --needed;
- }
- spin_unlock(&buf->rb_lock);
-
- while (needed) {
- rep = rpcrdma_rep_create(r_xprt, temp);
+ rep = rpcrdma_rep_create(r_xprt, temp);
if (!rep)
break;
@@ -1523,7 +1496,7 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
goto release_wrs;
- trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
+ trace_xprtrdma_post_recv(rep);
++count;
}
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 92ce09fcea74..65e6b0eb862e 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -47,6 +47,7 @@
#include <linux/atomic.h> /* atomic_t, etc */
#include <linux/kref.h> /* struct kref */
#include <linux/workqueue.h> /* struct work_struct */
+#include <linux/llist.h>
#include <rdma/rdma_cm.h> /* RDMA connection api */
#include <rdma/ib_verbs.h> /* RDMA verbs api */
@@ -117,9 +118,6 @@ struct rpcrdma_ep {
#endif
/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
- *
- * The below structure appears at the front of a large region of kmalloc'd
- * memory, which always starts on a good alignment boundary.
*/
struct rpcrdma_regbuf {
@@ -158,25 +156,22 @@ static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
/* To ensure a transport can always make forward progress,
* the number of RDMA segments allowed in header chunk lists
- * is capped at 8. This prevents less-capable devices and
- * memory registrations from overrunning the Send buffer
- * while building chunk lists.
+ * is capped at 16. This prevents less-capable devices from
+ * overrunning the Send buffer while building chunk lists.
*
* Elements of the Read list take up more room than the
- * Write list or Reply chunk. 8 read segments means the Read
- * list (or Write list or Reply chunk) cannot consume more
- * than
- *
- * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
+ * Write list or Reply chunk. 16 read segments means the
+ * chunk lists cannot consume more than
*
- * And the fixed part of the header is another 24 bytes.
+ * ((16 + 2) * read segment size) + 1 XDR words,
*
- * The smallest inline threshold is 1024 bytes, ensuring that
- * at least 750 bytes are available for RPC messages.
+ * or about 400 bytes. The fixed part of the header is
+ * another 24 bytes. Thus when the inline threshold is
+ * 1024 bytes, at least 600 bytes are available for RPC
+ * message bodies.
*/
enum {
- RPCRDMA_MAX_HDR_SEGS = 8,
- RPCRDMA_HDRBUF_SIZE = 256,
+ RPCRDMA_MAX_HDR_SEGS = 16,
};
/*
@@ -206,7 +201,7 @@ struct rpcrdma_rep {
struct rpc_rqst *rr_rqst;
struct xdr_buf rr_hdrbuf;
struct xdr_stream rr_stream;
- struct list_head rr_list;
+ struct llist_node rr_node;
struct ib_recv_wr rr_recv_wr;
};
@@ -240,20 +235,20 @@ struct rpcrdma_sendctx {
* An external memory region is any buffer or page that is registered
* on the fly (ie, not pre-registered).
*/
-struct rpcrdma_req;
struct rpcrdma_frwr {
struct ib_mr *fr_mr;
struct ib_cqe fr_cqe;
struct completion fr_linv_done;
- struct rpcrdma_req *fr_req;
union {
struct ib_reg_wr fr_regwr;
struct ib_send_wr fr_invwr;
};
};
+struct rpcrdma_req;
struct rpcrdma_mr {
struct list_head mr_list;
+ struct rpcrdma_req *mr_req;
struct scatterlist *mr_sg;
int mr_nents;
enum dma_data_direction mr_dir;
@@ -331,7 +326,8 @@ struct rpcrdma_req {
struct list_head rl_all;
struct kref rl_kref;
- struct list_head rl_registered; /* registered segments */
+ struct list_head rl_free_mrs;
+ struct list_head rl_registered;
struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
};
@@ -344,7 +340,7 @@ rpcr_to_rdmar(const struct rpc_rqst *rqst)
static inline void
rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
- list_add_tail(&mr->mr_list, list);
+ list_add(&mr->mr_list, list);
}
static inline struct rpcrdma_mr *
@@ -352,8 +348,9 @@ rpcrdma_mr_pop(struct list_head *list)
{
struct rpcrdma_mr *mr;
- mr = list_first_entry(list, struct rpcrdma_mr, mr_list);
- list_del_init(&mr->mr_list);
+ mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list);
+ if (mr)
+ list_del_init(&mr->mr_list);
return mr;
}
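
Teaching rpcrdma_mr_pop() to return NULL on an empty list is what enables the while ((mr = rpcrdma_mr_pop(...))) drain loops adopted throughout this series: callers no longer need a separate list_empty() test and cannot pop from an empty list by accident. A typical drain written against the same contract:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_mr { struct list_head node; };

    struct demo_mr *demo_pop(struct list_head *list);  /* NULL if empty */

    static void demo_drain(struct list_head *list)
    {
            struct demo_mr *mr;

            while ((mr = demo_pop(list)) != NULL)
                    kfree(mr);      /* pop is the loop condition */
    }
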
@@ -364,19 +361,19 @@ rpcrdma_mr_pop(struct list_head *list)
* One of these is associated with a transport instance
*/
struct rpcrdma_buffer {
- spinlock_t rb_mrlock; /* protect rb_mrs list */
+ spinlock_t rb_lock;
+ struct list_head rb_send_bufs;
struct list_head rb_mrs;
- struct list_head rb_all;
unsigned long rb_sc_head;
unsigned long rb_sc_tail;
unsigned long rb_sc_last;
struct rpcrdma_sendctx **rb_sc_ctxs;
- spinlock_t rb_lock; /* protect buf lists */
- struct list_head rb_send_bufs;
- struct list_head rb_recv_bufs;
struct list_head rb_allreqs;
+ struct list_head rb_all_mrs;
+
+ struct llist_head rb_free_reps;
u32 rb_max_requests;
u32 rb_credits; /* most recent credit grant */
@@ -384,7 +381,7 @@ struct rpcrdma_buffer {
u32 rb_bc_srv_max_requests;
u32 rb_bc_max_requests;
- struct delayed_work rb_refresh_worker;
+ struct work_struct rb_refresh_worker;
};
/*
@@ -490,7 +487,6 @@ struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);
struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
void rpcrdma_mr_put(struct rpcrdma_mr *mr);
-void rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr);
static inline void
rpcrdma_mr_recycle(struct rpcrdma_mr *mr)
@@ -546,6 +542,7 @@ rpcrdma_data_dir(bool writing)
/* Memory registration calls xprtrdma/frwr_ops.c
*/
bool frwr_is_supported(struct ib_device *device);
+void frwr_recycle(struct rpcrdma_req *req);
void frwr_reset(struct rpcrdma_req *req);
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep);
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
@@ -554,7 +551,7 @@ size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt);
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr_seg *seg,
int nsegs, bool writing, __be32 xid,
- struct rpcrdma_mr **mr);
+ struct rpcrdma_mr *mr);
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e2176c167a57..9ac88722fa83 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -562,10 +562,14 @@ xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
printk(KERN_WARNING "Callback slot table overflowed\n");
return -ESHUTDOWN;
}
+ if (transport->recv.copied && !req->rq_private_buf.len)
+ return -ESHUTDOWN;
ret = xs_read_stream_request(transport, msg, flags, req);
if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
xprt_complete_bc_request(req, transport->recv.copied);
+ else
+ req->rq_private_buf.len = transport->recv.copied;
return ret;
}
@@ -587,7 +591,7 @@ xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
/* Look up and lock the request corresponding to the given XID */
spin_lock(&xprt->queue_lock);
req = xprt_lookup_rqst(xprt, transport->recv.xid);
- if (!req) {
+ if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
msg->msg_flags |= MSG_TRUNC;
goto out;
}
@@ -599,6 +603,8 @@ xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
spin_lock(&xprt->queue_lock);
if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
xprt_complete_rqst(req->rq_task, transport->recv.copied);
+ else
+ req->rq_private_buf.len = transport->recv.copied;
xprt_unpin_rqst(req);
out:
spin_unlock(&xprt->queue_lock);
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 947b8ff0227e..bba3104f128f 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -206,14 +206,7 @@ static int xdp_umem_map_pages(struct xdp_umem *umem)
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
- unsigned int i;
-
- for (i = 0; i < umem->npgs; i++) {
- struct page *page = umem->pgs[i];
-
- set_page_dirty_lock(page);
- put_page(page);
- }
+ put_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
kfree(umem->pgs);
umem->pgs = NULL;
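
put_user_pages_dirty_lock() centralizes what this driver (and many like it) open-coded: mark each pinned user page dirty under the page lock, then drop the pin. Roughly, the call above replaces a loop of this shape:

    #include <linux/mm.h>

    /* Approximate open-coded equivalent of
     * put_user_pages_dirty_lock(pages, npages, true); centralizing it
     * lets the mm layer change pinned-page accounting in one place.
     */
    static void demo_unpin_dirty(struct page **pages, unsigned long npages)
    {
            unsigned long i;

            for (i = 0; i < npages; i++) {
                    set_page_dirty_lock(pages[i]);
                    put_page(pages[i]);
            }
    }
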
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index c2f1af3b6a7c..fa8fbb8fa3c8 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -977,7 +977,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
/* Matches the smp_wmb() in xsk_init_queue */
smp_rmb();
qpg = virt_to_head_page(q->ring);
- if (size > (PAGE_SIZE << compound_order(qpg)))
+ if (size > page_size(qpg))
return -EINVAL;
pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
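
page_size() reads better than the open-coded shift and, per the helper introduced alongside these conversions, amounts to:

    #include <linux/mm.h>

    /* Byte size of a page, honouring compound (huge) pages; this is
     * what the open-coded PAGE_SIZE << compound_order(qpg) computed.
     */
    static inline unsigned long demo_page_size(struct page *page)
    {
            return PAGE_SIZE << compound_order(page);
    }
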
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 93a7edfe0f05..6fcc66afb088 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -62,6 +62,8 @@ my $conststructsfile = "$D/const_structs.checkpatch";
my $typedefsfile = "";
my $color = "auto";
my $allow_c99_comments = 1; # Can be overridden by --ignore C99_COMMENT_TOLERANCE
+# git output parsing needs US English output, so set LANGUAGE for the backtick child processes first
+my $git_command = 'export LANGUAGE=en_US.UTF-8; git';
sub help {
my ($exitcode) = @_;
@@ -904,7 +906,7 @@ sub seed_camelcase_includes {
$camelcase_seeded = 1;
if (-e ".git") {
- my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`;
+ my $git_last_include_commit = `${git_command} log --no-merges --pretty=format:"%h%n" -1 -- include`;
chomp $git_last_include_commit;
$camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
} else {
@@ -932,7 +934,7 @@ sub seed_camelcase_includes {
}
if (-e ".git") {
- $files = `git ls-files "include/*.h"`;
+ $files = `${git_command} ls-files "include/*.h"`;
@include_files = split('\n', $files);
}
@@ -956,13 +958,13 @@ sub git_commit_info {
return ($id, $desc) if ((which("git") eq "") || !(-e ".git"));
- my $output = `git log --no-color --format='%H %s' -1 $commit 2>&1`;
+ my $output = `${git_command} log --no-color --format='%H %s' -1 $commit 2>&1`;
$output =~ s/^\s*//gm;
my @lines = split("\n", $output);
return ($id, $desc) if ($#lines < 0);
- if ($lines[0] =~ /^error: short SHA1 $commit is ambiguous\./) {
+ if ($lines[0] =~ /^error: short SHA1 $commit is ambiguous/) {
# Maybe one day convert this block of bash into something that returns
# all matching commit ids, but it's very slow...
#
@@ -1006,7 +1008,7 @@ if ($git) {
} else {
$git_range = "-1 $commit_expr";
}
- my $lines = `git log --no-color --no-merges --pretty=format:'%H %s' $git_range`;
+ my $lines = `${git_command} log --no-color --no-merges --pretty=format:'%H %s' $git_range`;
foreach my $line (split(/\n/, $lines)) {
$line =~ /^([0-9a-fA-F]{40,40}) (.*)$/;
next if (!defined($1) || !defined($2));
@@ -2725,8 +2727,10 @@ sub process {
($line =~ /^\s*(?:WARNING:|BUG:)/ ||
$line =~ /^\s*\[\s*\d+\.\d{6,6}\s*\]/ ||
# timestamp
- $line =~ /^\s*\[\<[0-9a-fA-F]{8,}\>\]/)) {
- # stack dump address
+ $line =~ /^\s*\[\<[0-9a-fA-F]{8,}\>\]/) ||
+ $line =~ /^(?:\s+\w+:\s+[0-9a-fA-F]+){3,3}/ ||
+ $line =~ /^\s*\#\d+\s*\[[0-9a-fA-F]+\]\s*\w+ at [0-9a-fA-F]+/) {
+ # stack dump address styles
$commit_log_possible_stack_dump = 1;
}
@@ -2898,6 +2902,17 @@ sub process {
}
}
+# check for invalid commit id
+ if ($in_commit_log && $line =~ /(^fixes:|\bcommit)\s+([0-9a-f]{6,40})\b/i) {
+ my $id;
+ my $description;
+ ($id, $description) = git_commit_info($2, undef, undef);
+ if (!defined($id)) {
+ WARN("UNKNOWN_COMMIT_ID",
+ "Unknown commit id '$2', maybe rebased or not pulled?\n" . $herecurr);
+ }
+ }
+
# ignore non-hunk lines and lines being removed
next if (!$hunk_line || $line =~ /^-/);
@@ -3069,21 +3084,21 @@ sub process {
# check SPDX comment style for .[chsS] files
if ($realfile =~ /\.[chsS]$/ &&
$rawline =~ /SPDX-License-Identifier:/ &&
- $rawline !~ /^\+\s*\Q$comment\E\s*/) {
+ $rawline !~ m@^\+\s*\Q$comment\E\s*@) {
WARN("SPDX_LICENSE_TAG",
"Improper SPDX comment style for '$realfile', please use '$comment' instead\n" . $herecurr);
}
if ($comment !~ /^$/ &&
- $rawline !~ /^\+\Q$comment\E SPDX-License-Identifier: /) {
- WARN("SPDX_LICENSE_TAG",
- "Missing or malformed SPDX-License-Identifier tag in line $checklicenseline\n" . $herecurr);
+ $rawline !~ m@^\+\Q$comment\E SPDX-License-Identifier: @) {
+ WARN("SPDX_LICENSE_TAG",
+ "Missing or malformed SPDX-License-Identifier tag in line $checklicenseline\n" . $herecurr);
} elsif ($rawline =~ /(SPDX-License-Identifier: .*)/) {
- my $spdx_license = $1;
- if (!is_SPDX_License_valid($spdx_license)) {
- WARN("SPDX_LICENSE_TAG",
- "'$spdx_license' is not supported in LICENSES/...\n" . $herecurr);
- }
+ my $spdx_license = $1;
+ if (!is_SPDX_License_valid($spdx_license)) {
+ WARN("SPDX_LICENSE_TAG",
+ "'$spdx_license' is not supported in LICENSES/...\n" . $herecurr);
+ }
}
}
}
@@ -4660,7 +4675,7 @@ sub process {
# closing brace should have a space following it when it has anything
# on the line
- if ($line =~ /}(?!(?:,|;|\)))\S/) {
+ if ($line =~ /}(?!(?:,|;|\)|\}))\S/) {
if (ERROR("SPACING",
"space required after that close brace '}'\n" . $herecurr) &&
$fix) {
@@ -5191,7 +5206,7 @@ sub process {
next if ($arg =~ /\.\.\./);
next if ($arg =~ /^type$/i);
my $tmp_stmt = $define_stmt;
- $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
+ $tmp_stmt =~ s/\b(sizeof|typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
$tmp_stmt =~ s/\#+\s*$arg\b//g;
$tmp_stmt =~ s/\b$arg\s*\#\#//g;
my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g;
@@ -5873,6 +5888,18 @@ sub process {
"__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr);
}
+# Check for __attribute__ section, prefer __section
+ if ($realfile !~ m@\binclude/uapi/@ &&
+ $line =~ /\b__attribute__\s*\(\s*\(.*_*section_*\s*\(\s*("[^"]*")/) {
+ my $old = substr($rawline, $-[1], $+[1] - $-[1]);
+ my $new = substr($old, 1, -1);
+ if (WARN("PREFER_SECTION",
+ "__section($new) is preferred over __attribute__((section($old)))\n" . $herecurr) &&
+ $fix) {
+ $fixed[$fixlinenr] =~ s/\b__attribute__\s*\(\s*\(\s*_*section_*\s*\(\s*\Q$old\E\s*\)\s*\)\s*\)/__section($new)/;
+ }
+ }
+
# Check for __attribute__ format(printf, prefer __printf
if ($realfile !~ m@\binclude/uapi/@ &&
$line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
@@ -6480,6 +6507,12 @@ sub process {
"Using $1 should generally have parentheses around the comparison\n" . $herecurr);
}
+# nested likely/unlikely calls
+ if ($line =~ /\b(?:(?:un)?likely)\s*\(\s*!?\s*(IS_ERR(?:_OR_NULL|_VALUE)?|WARN)/) {
+ WARN("LIKELY_MISUSE",
+ "nested (un)?likely() calls, $1 already uses unlikely() internally\n" . $herecurr);
+ }
+
# whine mightly about in_atomic
if ($line =~ /\bin_atomic\s*\(/) {
if ($realfile =~ m@^drivers/@) {
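
The $git_command change works because LANGUAGE set in the backtick child's environment overrides whatever locale the user has, so git's diagnostics come out in the US English that the later regexes (such as the "short SHA1 ... is ambiguous" match) key on. The same idea expressed in C rather than Perl, as a hedged sketch:

    #include <stdio.h>
    #include <stdlib.h>

    /* Force untranslated git output before parsing it; the parser
     * matches English message text, so translations would break it.
     */
    static FILE *demo_git_log(const char *range)
    {
            char cmd[256];

            setenv("LANGUAGE", "en_US.UTF-8", 1);   /* inherited by child */
            snprintf(cmd, sizeof(cmd),
                     "git log --no-color --pretty=format:'%%H %%s' %s",
                     range);
            return popen(cmd, "r"); /* caller reads, then pclose()s */
    }
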
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 2f5b95f09fa0..34e40e96dee2 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -77,12 +77,12 @@ lx-symbols command."""
gdb.write("scanning for modules in {0}\n".format(path))
for root, dirs, files in os.walk(path):
for name in files:
- if name.endswith(".ko"):
+ if name.endswith(".ko") or name.endswith(".ko.debug"):
self.module_files.append(root + "/" + name)
self.module_files_updated = True
def _get_module_file(self, module_name):
- module_pattern = ".*/{0}\.ko$".format(
+ module_pattern = ".*/{0}\.ko(?:.debug)?$".format(
module_name.replace("_", r"[_\-]"))
for name in self.module_files:
if re.match(module_pattern, name) and os.path.exists(name):
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index ade699131065..1fbd77816610 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1228,11 +1228,16 @@ hashalg_fail:
static int __init init_digests(void)
{
+ int i;
+
digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
GFP_KERNEL);
if (!digests)
return -ENOMEM;
+ for (i = 0; i < chip->nr_allocated_banks; i++)
+ digests[i].alg_id = chip->allocated_banks[i].alg_id;
+
return 0;
}
diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
index 218292bdace6..f5b325263b67 100644
--- a/sound/firewire/dice/dice-alesis.c
+++ b/sound/firewire/dice/dice-alesis.c
@@ -15,7 +15,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
static const unsigned int
alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
- {10, 10, 8}, /* Tx0 = Analog + S/PDIF. */
+ {10, 10, 4}, /* Tx0 = Analog + S/PDIF. */
{16, 8, 0}, /* Tx1 = ADAT1 + ADAT2. */
};
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 91e71be42fa4..240f4ca76391 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2485,8 +2485,7 @@ static const struct pci_device_id azx_ids[] = {
AZX_DCAPS_PM_RUNTIME },
/* AMD Raven */
{ PCI_DEVICE(0x1022, 0x15e3),
- .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
- AZX_DCAPS_PM_RUNTIME },
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x0002),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index e283966bdbb1..bc9dd8e6fd86 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -357,6 +357,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
+ SND_PCI_QUIRK(0x1043, 0x1153, "ASUS M9V", AD1986A_FIXUP_LAPTOP_IMIC),
SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index da1695418731..b000b36ac3c6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5817,6 +5817,7 @@ enum {
ALC292_FIXUP_DELL_E7X,
ALC292_FIXUP_DISABLE_AAMIX,
ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
+ ALC298_FIXUP_ALIENWARE_MIC_NO_PRESENCE,
ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
ALC275_FIXUP_DELL_XPS,
@@ -5871,6 +5872,7 @@ enum {
ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC299_FIXUP_PREDATOR_SPK,
ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
+ ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6506,6 +6508,15 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC292_FIXUP_DISABLE_AAMIX
},
+ [ALC298_FIXUP_ALIENWARE_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x01a1913c }, /* headset mic w/o jack detect */
+ { }
+ },
+ .chained_before = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
+ },
[ALC298_FIXUP_DELL1_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -6927,6 +6938,16 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
},
+ [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x04a11040 },
+ { 0x21, 0x04211020 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7190,6 +7211,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
#if 0
/* Below is a quirk table taken from the old code.
@@ -7358,6 +7380,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
{.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
{.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
+ {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
{}
};
#define ALC225_STANDARD_PINS \
@@ -7770,6 +7793,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x17, 0x90170110},
{0x1a, 0x03011020},
{0x21, 0x03211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_ALIENWARE_MIC_NO_PRESENCE,
+ {0x12, 0xb7a60140},
+ {0x17, 0x90170110},
+ {0x1a, 0x03a11030},
+ {0x21, 0x03211020}),
SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 48e9eef34c0f..ca603397651c 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -116,19 +116,16 @@ static struct atmel_pcm_dma_params ssc_dma_params[NUM_SSC_DEVICES][2] = {
static struct atmel_ssc_info ssc_info[NUM_SSC_DEVICES] = {
{
.name = "ssc0",
- .lock = __SPIN_LOCK_UNLOCKED(ssc_info[0].lock),
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
{
.name = "ssc1",
- .lock = __SPIN_LOCK_UNLOCKED(ssc_info[1].lock),
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
{
.name = "ssc2",
- .lock = __SPIN_LOCK_UNLOCKED(ssc_info[2].lock),
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
@@ -317,13 +314,10 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
snd_soc_dai_set_dma_data(dai, substream, dma_params);
- spin_lock_irq(&ssc_p->lock);
- if (ssc_p->dir_mask & dir_mask) {
- spin_unlock_irq(&ssc_p->lock);
+ if (ssc_p->dir_mask & dir_mask)
return -EBUSY;
- }
+
ssc_p->dir_mask |= dir_mask;
- spin_unlock_irq(&ssc_p->lock);
return 0;
}
@@ -355,7 +349,6 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
dir_mask = 1 << dir;
- spin_lock_irq(&ssc_p->lock);
ssc_p->dir_mask &= ~dir_mask;
if (!ssc_p->dir_mask) {
if (ssc_p->initialized) {
@@ -369,7 +362,6 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
ssc_p->cmr_div = ssc_p->tcmr_period = ssc_p->rcmr_period = 0;
ssc_p->forced_divider = 0;
}
- spin_unlock_irq(&ssc_p->lock);
/* Shutdown the SSC clock. */
pr_debug("atmel_ssc_dai: Stopping clock\n");
diff --git a/sound/soc/atmel/atmel_ssc_dai.h b/sound/soc/atmel/atmel_ssc_dai.h
index ae764cb541c7..3470b966e449 100644
--- a/sound/soc/atmel/atmel_ssc_dai.h
+++ b/sound/soc/atmel/atmel_ssc_dai.h
@@ -93,7 +93,6 @@ struct atmel_ssc_state {
struct atmel_ssc_info {
char *name;
struct ssc_device *ssc;
- spinlock_t lock; /* lock for dir_mask */
unsigned short dir_mask; /* 0=unused, 1=playback, 2=capture */
unsigned short initialized; /* true if SSC has been initialized */
unsigned short daifmt;
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 50ed86d45c26..88b75695fbf7 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -21,8 +21,7 @@
#define PCM3168A_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_3LE | \
- SNDRV_PCM_FMTBIT_S24_LE | \
- SNDRV_PCM_FMTBIT_S32_LE)
+ SNDRV_PCM_FMTBIT_S24_LE)
#define PCM3168A_FMT_I2S 0x0
#define PCM3168A_FMT_LEFT_J 0x1
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index ef0b74693093..b517e4bc1b87 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -628,6 +628,16 @@ static int fsl_sai_startup(struct snd_pcm_substream *substream,
FSL_SAI_CR3_TRCE_MASK,
FSL_SAI_CR3_TRCE);
+ /*
+ * EDMA controller needs period size to be a multiple of
+ * tx/rx maxburst
+ */
+ if (sai->soc_data->use_edma)
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ tx ? sai->dma_params_tx.maxburst :
+ sai->dma_params_rx.maxburst);
+
ret = snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &fsl_sai_rate_constraints);
@@ -1026,30 +1036,35 @@ static int fsl_sai_remove(struct platform_device *pdev)
static const struct fsl_sai_soc_data fsl_sai_vf610_data = {
.use_imx_pcm = false,
+ .use_edma = false,
.fifo_depth = 32,
.reg_offset = 0,
};
static const struct fsl_sai_soc_data fsl_sai_imx6sx_data = {
.use_imx_pcm = true,
+ .use_edma = false,
.fifo_depth = 32,
.reg_offset = 0,
};
static const struct fsl_sai_soc_data fsl_sai_imx7ulp_data = {
.use_imx_pcm = true,
+ .use_edma = false,
.fifo_depth = 16,
.reg_offset = 8,
};
static const struct fsl_sai_soc_data fsl_sai_imx8mq_data = {
.use_imx_pcm = true,
+ .use_edma = false,
.fifo_depth = 128,
.reg_offset = 8,
};
static const struct fsl_sai_soc_data fsl_sai_imx8qm_data = {
.use_imx_pcm = true,
+ .use_edma = true,
.fifo_depth = 64,
.reg_offset = 0,
};
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index b12cb578f6d0..76b15deea80c 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -157,6 +157,7 @@
struct fsl_sai_soc_data {
bool use_imx_pcm;
+ bool use_edma;
unsigned int fifo_depth;
unsigned int reg_offset;
};
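
For illustration, a minimal sketch of what the new use_edma flag enforces. The helper below is illustrative only (not from the patch) and assumes exactly the EDMA rule stated in the comment in fsl_sai_startup(): the period size must be a whole multiple of the DMA maxburst.
[source,c]
--
#include <stdbool.h>

/*
 * Illustrative only: with use_edma set, fsl_sai_startup() installs a
 * PERIOD_SIZE step constraint, so the only period sizes that survive
 * hw_params negotiation are whole multiples of the DMA maxburst.
 * This predicate mirrors that rule.
 */
static bool period_ok_for_edma(unsigned int period_frames,
                               unsigned int maxburst)
{
        return maxburst && (period_frames % maxburst) == 0;
}
--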
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index f6a7466622ea..fc5d089868df 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -286,6 +286,11 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
if (rsnd_ssi_is_multi_slave(mod, io))
return 0;
+ if (rsnd_runtime_is_tdm_split(io))
+ chan = rsnd_io_converted_chan(io);
+
+ chan = rsnd_channel_normalization(chan);
+
if (ssi->usrcnt > 0) {
if (ssi->rate != rate) {
dev_err(dev, "SSI parent/child should use same rate\n");
@@ -300,11 +305,6 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
return 0;
}
- if (rsnd_runtime_is_tdm_split(io))
- chan = rsnd_io_converted_chan(io);
-
- chan = rsnd_channel_normalization(chan);
-
main_rate = rsnd_ssi_clk_query(rdai, rate, chan, &idx);
if (!main_rate) {
dev_err(dev, "unsupported clock rate\n");
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 35f48e9c5ead..88978a3036c4 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -978,7 +978,7 @@ static void soc_cleanup_component(struct snd_soc_component *component)
/* For framework level robustness */
snd_soc_component_set_jack(component, NULL, NULL);
- list_del(&component->card_list);
+ list_del_init(&component->card_list);
snd_soc_dapm_free(snd_soc_component_get_dapm(component));
soc_cleanup_component_debugfs(component);
component->card = NULL;
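
A hedged sketch of why list_del_init() is the safer choice here: plain list_del() poisons the entry, so any later traversal or repeated removal would dereference poison, while list_del_init() leaves the entry as a valid empty list. The function name below is illustrative, not from the patch.
[source,c]
--
#include <linux/list.h>

/* After list_del_init() the entry is a valid empty list, so running
 * the cleanup again is harmless; after plain list_del() the second
 * call would dereference LIST_POISON. */
static void cleanup_component_entry(struct list_head *entry)
{
        list_del_init(entry);   /* unlink and reinitialize */
        list_del_init(entry);   /* safe no-op on an empty entry */
}
--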
diff --git a/sound/soc/ti/Kconfig b/sound/soc/ti/Kconfig
index 87a9b9dd4e98..29f61053ab62 100644
--- a/sound/soc/ti/Kconfig
+++ b/sound/soc/ti/Kconfig
@@ -200,11 +200,18 @@ config SND_SOC_DM365_AIC3X_CODEC
config SND_SOC_DM365_VOICE_CODEC
bool "Voice Codec - CQ93VC"
- select MFD_DAVINCI_VOICECODEC
- select SND_SOC_CQ0093VC
help
Say Y if you want to add support for SoC On-chip voice codec
endchoice
+config SND_SOC_DM365_VOICE_CODEC_MODULE
+ def_tristate y
+ depends on SND_SOC_DM365_VOICE_CODEC && SND_SOC
+ select MFD_DAVINCI_VOICECODEC
+ select SND_SOC_CQ0093VC
+ help
+ This is an internal symbol needed to ensure that the codec
+ and MFD drivers can be built as loadable modules if necessary.
+
endmenu
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 25faf2d3c639..fbfde996fee7 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1658,6 +1658,8 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case 0x25ce: /* Mytek devices */
case 0x278b: /* Rotel? */
case 0x2ab6: /* T+A devices */
+ case 0x3842: /* EVGA */
+ case 0xc502: /* HiBy devices */
if (fp->dsd_raw)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 5171b9c7ca3e..0652d3eed9bd 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -231,6 +231,8 @@
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
+#define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
+#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
@@ -354,6 +356,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
diff --git a/tools/arch/x86/include/uapi/asm/unistd.h b/tools/arch/x86/include/uapi/asm/unistd.h
index 30d7d04d72d6..196fdd02b8b1 100644
--- a/tools/arch/x86/include/uapi/asm/unistd.h
+++ b/tools/arch/x86/include/uapi/asm/unistd.h
@@ -3,7 +3,7 @@
#define _UAPI_ASM_X86_UNISTD_H
/* x32 syscall flag bit */
-#define __X32_SYSCALL_BIT 0x40000000
+#define __X32_SYSCALL_BIT 0x40000000UL
#ifndef __KERNEL__
# ifdef __i386__
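
A small standalone illustration (not from the patch) of why the UL suffix matters: syscall numbers are held in unsigned long, and with a plain int constant the ~ in the mask below would be computed in 32 bits and then sign-extended. With the suffix, both expressions stay in 64-bit unsigned arithmetic.
[source,c]
--
#include <stdio.h>

#define __X32_SYSCALL_BIT 0x40000000UL  /* as in the patched header */

int main(void)
{
        /* hypothetical x32 syscall number for illustration */
        unsigned long nr = __X32_SYSCALL_BIT + 39;

        printf("is x32:  %d\n", (nr & __X32_SYSCALL_BIT) != 0);
        printf("base nr: %lu\n", nr & ~__X32_SYSCALL_BIT);
        return 0;
}
--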
diff --git a/tools/hv/Build b/tools/hv/Build
new file mode 100644
index 000000000000..6cf51fa4b306
--- /dev/null
+++ b/tools/hv/Build
@@ -0,0 +1,3 @@
+hv_kvp_daemon-y += hv_kvp_daemon.o
+hv_vss_daemon-y += hv_vss_daemon.o
+hv_fcopy_daemon-y += hv_fcopy_daemon.o
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 5db5e62cebda..b57143d9459c 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -1,28 +1,55 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Hyper-V tools
-
-WARNINGS = -Wall -Wextra
-CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
-
-CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+include ../scripts/Makefile.include
sbindir ?= /usr/sbin
libexecdir ?= /usr/libexec
sharedstatedir ?= /var/lib
-ALL_PROGRAMS := hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour).
+MAKEFLAGS += -r
+
+override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ALL_TARGETS := hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
ALL_SCRIPTS := hv_get_dhcp_info.sh hv_get_dns_info.sh hv_set_ifconfig.sh
all: $(ALL_PROGRAMS)
-%: %.c
- $(CC) $(CFLAGS) -o $@ $^
+export srctree OUTPUT CC LD CFLAGS
+include $(srctree)/tools/build/Makefile.include
+
+HV_KVP_DAEMON_IN := $(OUTPUT)hv_kvp_daemon-in.o
+$(HV_KVP_DAEMON_IN): FORCE
+ $(Q)$(MAKE) $(build)=hv_kvp_daemon
+$(OUTPUT)hv_kvp_daemon: $(HV_KVP_DAEMON_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
+HV_VSS_DAEMON_IN := $(OUTPUT)hv_vss_daemon-in.o
+$(HV_VSS_DAEMON_IN): FORCE
+ $(Q)$(MAKE) $(build)=hv_vss_daemon
+$(OUTPUT)hv_vss_daemon: $(HV_VSS_DAEMON_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
+HV_FCOPY_DAEMON_IN := $(OUTPUT)hv_fcopy_daemon-in.o
+$(HV_FCOPY_DAEMON_IN): FORCE
+ $(Q)$(MAKE) $(build)=hv_fcopy_daemon
+$(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
clean:
- $(RM) hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+ rm -f $(ALL_PROGRAMS)
+ find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
-install: all
+install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(sbindir); \
install -d -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd; \
install -d -m 755 $(DESTDIR)$(sharedstatedir); \
@@ -33,3 +60,7 @@ install: all
for script in $(ALL_SCRIPTS); do \
install $$script -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd/$${script%.sh}; \
done
+
+FORCE:
+
+.PHONY: all install clean FORCE prepare
diff --git a/tools/include/asm/bug.h b/tools/include/asm/bug.h
index bbd75ac8b202..550223f0a6e6 100644
--- a/tools/include/asm/bug.h
+++ b/tools/include/asm/bug.h
@@ -3,6 +3,7 @@
#define _TOOLS_ASM_BUG_H
#include <linux/compiler.h>
+#include <stdio.h>
#define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0)
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
index d83763a5327c..e03b1ea23e0e 100644
--- a/tools/include/linux/rbtree.h
+++ b/tools/include/linux/rbtree.h
@@ -31,25 +31,9 @@ struct rb_root {
struct rb_node *rb_node;
};
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
- struct rb_root rb_root;
- struct rb_node *rb_leftmost;
-};
-
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@@ -71,12 +55,6 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
-extern void rb_insert_color_cached(struct rb_node *,
- struct rb_root_cached *, bool);
-extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
-/* Same as rb_first(), but O(1) */
-#define rb_first_cached(root) (root)->rb_leftmost
-
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -84,8 +62,6 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
-extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
- struct rb_root_cached *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
@@ -129,4 +105,51 @@ static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
rb_erase(n, root);
RB_CLEAR_NODE(n);
}
+
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
+static inline void rb_insert_color_cached(struct rb_node *node,
+ struct rb_root_cached *root,
+ bool leftmost)
+{
+ if (leftmost)
+ root->rb_leftmost = node;
+ rb_insert_color(node, &root->rb_root);
+}
+
+static inline void rb_erase_cached(struct rb_node *node,
+ struct rb_root_cached *root)
+{
+ if (root->rb_leftmost == node)
+ root->rb_leftmost = rb_next(node);
+ rb_erase(node, &root->rb_root);
+}
+
+static inline void rb_replace_node_cached(struct rb_node *victim,
+ struct rb_node *new,
+ struct rb_root_cached *root)
+{
+ if (root->rb_leftmost == victim)
+ root->rb_leftmost = new;
+ rb_replace_node(victim, new, &root->rb_root);
+}
+
#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
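
A hedged usage sketch for the cached-rbtree helpers made inline above. The struct and function names are illustrative, not from the patch; only rb_entry(), rb_link_node() and rb_insert_color_cached() are the library's API. The key point is that the caller tracks whether the descent ever turned right, so the leftmost cache stays correct.
[source,c]
--
#include <linux/rbtree.h>

struct item {
        unsigned long key;
        struct rb_node node;
};

/* Ordered insert that maintains the cached-leftmost pointer: pass
 * leftmost == true only if we never took a right turn. */
static void item_insert(struct rb_root_cached *root, struct item *it)
{
        struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        while (*link) {
                struct item *cur = rb_entry(*link, struct item, node);

                parent = *link;
                if (it->key < cur->key) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }
        rb_link_node(&it->node, parent, link);
        rb_insert_color_cached(&it->node, root, leftmost);
}
--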
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
index ddd01006ece5..381aa948610d 100644
--- a/tools/include/linux/rbtree_augmented.h
+++ b/tools/include/linux/rbtree_augmented.h
@@ -32,17 +32,16 @@ struct rb_augment_callbacks {
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
-extern void __rb_insert_augmented(struct rb_node *node,
- struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
/*
* Fixup the rbtree and update the augmented information when rebalancing.
*
* On insertion, the user must update the augmented information on the path
* leading to the inserted node, then call rb_link_node() as usual and
- * rb_augment_inserted() instead of the usual rb_insert_color() call.
- * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * rb_insert_augmented() instead of the usual rb_insert_color() call.
+ * If rb_insert_augmented() rebalances the rbtree, it will callback into
* a user provided function to update the augmented information on the
* affected subtrees.
*/
@@ -50,7 +49,7 @@ static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, root, false, NULL, augment->rotate);
+ __rb_insert_augmented(node, root, augment->rotate);
}
static inline void
@@ -58,45 +57,92 @@ rb_insert_augmented_cached(struct rb_node *node,
struct rb_root_cached *root, bool newleft,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, &root->rb_root,
- newleft, &root->rb_leftmost, augment->rotate);
+ if (newleft)
+ root->rb_leftmost = node;
+ rb_insert_augmented(node, &root->rb_root, augment);
}
-#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
- rbtype, rbaugmented, rbcompute) \
+/*
+ * Template for declaring augmented rbtree callbacks (generic case)
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data
+ */
+
+#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \
static inline void \
-rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
+RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
{ \
while (rb != stop) { \
- rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
- rbtype augmented = rbcompute(node); \
- if (node->rbaugmented == augmented) \
+ RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
+ if (RBCOMPUTE(node, true)) \
break; \
- node->rbaugmented = augmented; \
- rb = rb_parent(&node->rbfield); \
+ rb = rb_parent(&node->RBFIELD); \
} \
} \
static inline void \
-rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
} \
static void \
-rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
- old->rbaugmented = rbcompute(old); \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
+ RBCOMPUTE(old, false); \
} \
-rbstatic const struct rb_augment_callbacks rbname = { \
- .propagate = rbname ## _propagate, \
- .copy = rbname ## _copy, \
- .rotate = rbname ## _rotate \
+RBSTATIC const struct rb_augment_callbacks RBNAME = { \
+ .propagate = RBNAME ## _propagate, \
+ .copy = RBNAME ## _copy, \
+ .rotate = RBNAME ## _rotate \
};
+/*
+ * Template for declaring augmented rbtree callbacks,
+ * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBTYPE: type of the RBAUGMENTED field
+ * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar
+ */
+
+#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \
+ RBTYPE, RBAUGMENTED, RBCOMPUTE) \
+static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
+{ \
+ RBSTRUCT *child; \
+ RBTYPE max = RBCOMPUTE(node); \
+ if (node->RBFIELD.rb_left) { \
+ child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (node->RBFIELD.rb_right) { \
+ child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (exit && node->RBAUGMENTED == max) \
+ return true; \
+ node->RBAUGMENTED = max; \
+ return false; \
+} \
+RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
+
#define RB_RED 0
#define RB_BLACK 1
@@ -139,7 +185,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
- struct rb_node **leftmost,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right;
@@ -147,9 +192,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
struct rb_node *parent, *rebalance;
unsigned long pc;
- if (leftmost && node == *leftmost)
- *leftmost = rb_next(node);
-
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
@@ -249,8 +291,7 @@ static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, root,
- NULL, augment);
+ struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
@@ -259,11 +300,9 @@ static __always_inline void
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
- &root->rb_leftmost,
- augment);
- if (rebalance)
- __rb_erase_color(rebalance, &root->rb_root, augment->rotate);
+ if (root->rb_leftmost == node)
+ root->rb_leftmost = rb_next(node);
+ rb_erase_augmented(node, &root->rb_root, augment);
}
#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
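
A hedged sketch of how the new RB_DECLARE_CALLBACKS_MAX template is meant to be instantiated (interval-tree style; every name below is illustrative). It expands into the propagate/copy/rotate callbacks plus an interval_augment_compute_max() helper that keeps subtree_last equal to the maximum last over each node's subtree.
[source,c]
--
#include <linux/rbtree_augmented.h>

struct interval {
        unsigned long start, last;
        unsigned long subtree_last;     /* RBAUGMENTED */
        struct rb_node rb;              /* RBFIELD */
};

/* RBCOMPUTE: the per-node scalar being maximized over the subtree */
#define interval_last(n) ((n)->last)

RB_DECLARE_CALLBACKS_MAX(static, interval_augment,
                         struct interval, rb,
                         unsigned long, subtree_last, interval_last)

/* Insert/erase then go through the generated callbacks, e.g.:
 *   rb_insert_augmented(&node->rb, root, &interval_augment);
 *   rb_erase_augmented(&node->rb, root, &interval_augment);
 */
--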
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 1be0e798e362..1fc8faa6e973 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -569,7 +569,7 @@ __SYSCALL(__NR_semget, sys_semget)
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
#endif
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index 094bb03b9cc2..7da1b37b27aa 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -181,7 +181,7 @@ struct prctl_mm_map {
#define PR_GET_THP_DISABLE 42
/*
- * Tell the kernel to start/stop helping userspace manage bounds tables.
+ * No longer implemented, but left here to ensure the numbers stay reserved:
*/
#define PR_MPX_ENABLE_MANAGEMENT 43
#define PR_MPX_DISABLE_MANAGEMENT 44
@@ -229,4 +229,9 @@ struct prctl_mm_map {
# define PR_PAC_APDBKEY (1UL << 3)
# define PR_PAC_APGAKEY (1UL << 4)
+/* Tagged user address controls for arm64 */
+#define PR_SET_TAGGED_ADDR_CTRL 55
+#define PR_GET_TAGGED_ADDR_CTRL 56
+# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+
#endif /* _LINUX_PRCTL_H */
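
A hedged standalone example (not from the patch) of the new arm64 tagged-address controls: a process opts in with PR_SET_TAGGED_ADDR_CTRL and can read the setting back. On non-arm64 machines or kernels without the feature, the prctl() simply fails.
[source,c]
--
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL         /* values from the hunk above */
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif

int main(void)
{
        /* Opt in to the relaxed tagged-address ABI; on unsupported
         * kernels this fails and we just report it. */
        if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
                perror("PR_SET_TAGGED_ADDR_CTRL");

        printf("tagged addr ctrl: %d\n",
               prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
        return 0;
}
--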
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
index 804f145e3113..2548ff8c4d9c 100644
--- a/tools/lib/rbtree.c
+++ b/tools/lib/rbtree.c
@@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
- if (newleft)
- *leftmost = node;
-
while (true) {
/*
* Loop invariant: node is red.
@@ -436,34 +432,17 @@ static const struct rb_augment_callbacks dummy_callbacks = {
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- __rb_insert(node, root, false, NULL, dummy_rotate);
+ __rb_insert(node, root, dummy_rotate);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root,
- NULL, &dummy_callbacks);
+ rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
-void rb_insert_color_cached(struct rb_node *node,
- struct rb_root_cached *root, bool leftmost)
-{
- __rb_insert(node, &root->rb_root, leftmost,
- &root->rb_leftmost, dummy_rotate);
-}
-
-void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
-{
- struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, &root->rb_root,
- &root->rb_leftmost, &dummy_callbacks);
- if (rebalance)
- ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
-}
-
/*
* Augmented rbtree manipulation functions.
*
@@ -472,10 +451,9 @@ void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- __rb_insert(node, root, newleft, leftmost, augment_rotate);
+ __rb_insert(node, root, augment_rotate);
}
/*
@@ -580,15 +558,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
__rb_change_child(victim, new, parent, root);
}
-void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
- struct rb_root_cached *root)
-{
- rb_replace_node(victim, new, &root->rb_root);
-
- if (root->rb_leftmost == victim)
- root->rb_leftmost = new;
-}
-
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
{
for (;;) {
diff --git a/tools/lib/traceevent/Build b/tools/lib/traceevent/Build
index ba54bfce0b0b..f9a5d79578f5 100644
--- a/tools/lib/traceevent/Build
+++ b/tools/lib/traceevent/Build
@@ -6,14 +6,3 @@ libtraceevent-y += parse-utils.o
libtraceevent-y += kbuffer-parse.o
libtraceevent-y += tep_strerror.o
libtraceevent-y += event-parse-api.o
-
-plugin_jbd2-y += plugin_jbd2.o
-plugin_hrtimer-y += plugin_hrtimer.o
-plugin_kmem-y += plugin_kmem.o
-plugin_kvm-y += plugin_kvm.o
-plugin_mac80211-y += plugin_mac80211.o
-plugin_sched_switch-y += plugin_sched_switch.o
-plugin_function-y += plugin_function.o
-plugin_xen-y += plugin_xen.o
-plugin_scsi-y += plugin_scsi.o
-plugin_cfg80211-y += plugin_cfg80211.o
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_print.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_print.txt
new file mode 100644
index 000000000000..2c6a61811118
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-event_print.txt
@@ -0,0 +1,130 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_print_event - Writes event information into a trace sequence.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+*#include <trace-seq.h>*
+
+void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, const char pass:[*]_fmt_, _..._)
+--
+
+DESCRIPTION
+-----------
+
+The _tep_print_event()_ function parses the event information of the given
+_record_ and writes it into the trace sequence _s_, according to the format
+string _fmt_. The desired information is specified after the format string.
+The _fmt_ is a printf-like format string; the following arguments are supported:
+[verse]
+--
+ TEP_PRINT_PID, "%d" - PID of the event.
+ TEP_PRINT_CPU, "%d" - Event CPU.
+ TEP_PRINT_COMM, "%s" - Event command string.
+ TEP_PRINT_NAME, "%s" - Event name.
+ TEP_PRINT_LATENCY, "%s" - Latency of the event. It prints 4 or more
+ fields - interrupt state, scheduling state,
+ current context, and preemption count.
+ Field 1 is the interrupt enabled state:
+ d : Interrupts are disabled
+ . : Interrupts are enabled
+ X : The architecture does not support this
+ information
+ Field 2 is the "need resched" state.
+ N : The task is set to call the scheduler when
+ possible, as another higher priority task
+ may need to be scheduled in.
+ . : The task is not set to call the scheduler.
+ Field 3 is the context state.
+ . : Normal context
+ s : Soft interrupt context
+ h : Hard interrupt context
+ H : Hard interrupt context which triggered
+ during soft interrupt context.
+ z : NMI context
+ Z : NMI context which triggered during hard
+ interrupt context
+ Field 4 is the preemption count.
+ . : The preempt count is zero.
+ On preemptible kernels (where the task can be scheduled
+ out in arbitrary locations while in kernel context), the
+ preempt count, when non-zero, will prevent the kernel
+ from scheduling out the current task. The preempt count
+ number is displayed when it is not zero.
+ Depending on the kernel, it may show other fields
+ (lock depth, or migration disabled, which are unique to
+ specialized kernels).
+ TEP_PRINT_TIME, "%d" - event time stamp. A divisor and precision can be
+ specified as part of this format string:
+ "%precision.divisord". Example:
+ "%3.1000d" - divide the time by 1000 and print the first
+ 3 digits before the dot. Thus, the time stamp
+ "123456000" will be printed as "123.456"
+ TEP_PRINT_INFO, "%s" - event information.
+ TEP_PRINT_INFO_RAW, "%s" - event information, in raw format.
+
+--
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct trace_seq seq;
+trace_seq_init(&seq);
+struct tep_handle *tep = tep_alloc();
+...
+void print_my_event(struct tep_record *record)
+{
+ trace_seq_reset(&seq);
+ tep_print_event(tep, &seq, record, "%16s-%-5d [%03d] %s %6.1000d %s %s",
+ TEP_PRINT_COMM, TEP_PRINT_PID, TEP_PRINT_CPU,
+ TEP_PRINT_LATENCY, TEP_PRINT_TIME, TEP_PRINT_NAME,
+ TEP_PRINT_INFO);
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+ Header file to include in order to have access to trace sequences related APIs.
+ Trace sequences are used to allow a function to call several other functions
+ to create a string of data to use.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <[email protected]>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <[email protected]>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <[email protected]>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt b/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt
index 38bfea30a5f6..f6aca0df2151 100644
--- a/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt
+++ b/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt
@@ -59,12 +59,12 @@ parser context.
The _tep_register_function()_ function registers a function name mapped to an
address and (optional) module. This mapping is used in case the function tracer
-or events have "%pF" or "%pS" parameter in its format string. It is common to
-pass in the kallsyms function names with their corresponding addresses with this
+or events have "%pS" parameter in its format string. It is common to pass in
+the kallsyms function names with their corresponding addresses with this
function. The _tep_ argument is the trace event parser context. The _name_ is
-the name of the function, the string is copied internally. The _addr_ is
-the start address of the function. The _mod_ is the kernel module
-the function may be in (NULL for none).
+the name of the function, the string is copied internally. The _addr_ is the
+start address of the function. The _mod_ is the kernel module the function may
+be in (NULL for none).
The _tep_register_print_string()_ function registers a string by the address
it was stored in the kernel. Some strings internal to the kernel with static
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-handle.txt b/tools/lib/traceevent/Documentation/libtraceevent-handle.txt
index 8d568316847d..45b20172e262 100644
--- a/tools/lib/traceevent/Documentation/libtraceevent-handle.txt
+++ b/tools/lib/traceevent/Documentation/libtraceevent-handle.txt
@@ -3,7 +3,7 @@ libtraceevent(3)
NAME
----
-tep_alloc, tep_free,tep_ref, tep_unref,tep_ref_get - Create, destroy, manage
+tep_alloc, tep_free, tep_ref, tep_unref, tep_get_ref - Create, destroy, manage
references of trace event parser context.
SYNOPSIS
@@ -16,7 +16,7 @@ struct tep_handle pass:[*]*tep_alloc*(void);
void *tep_free*(struct tep_handle pass:[*]_tep_);
void *tep_ref*(struct tep_handle pass:[*]_tep_);
void *tep_unref*(struct tep_handle pass:[*]_tep_);
-int *tep_ref_get*(struct tep_handle pass:[*]_tep_);
+int *tep_get_ref*(struct tep_handle pass:[*]_tep_);
--
DESCRIPTION
@@ -57,9 +57,9 @@ EXAMPLE
...
struct tep_handle *tep = tep_alloc();
...
-int ref = tep_ref_get(tep);
+int ref = tep_get_ref(tep);
tep_ref(tep);
-if ( (ref+1) != tep_ref_get(tep)) {
+if ( (ref+1) != tep_get_ref(tep)) {
/* Something wrong happened, the counter is not incremented by 1 */
}
tep_unref(tep);
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-plugins.txt b/tools/lib/traceevent/Documentation/libtraceevent-plugins.txt
new file mode 100644
index 000000000000..596032ade31f
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-plugins.txt
@@ -0,0 +1,99 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_load_plugins, tep_unload_plugins - Load / unload traceevent plugins.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_plugin_list pass:[*]*tep_load_plugins*(struct tep_handle pass:[*]_tep_);
+void *tep_unload_plugins*(struct tep_plugin_list pass:[*]_plugin_list_, struct tep_handle pass:[*]_tep_);
+--
+
+DESCRIPTION
+-----------
+The _tep_load_plugins()_ function loads all plugins located in the plugin
+directories. The _tep_ argument is the trace event parser context.
+The plugin directories are :
+[verse]
+--
+ - System's plugin directory, defined at library compile time. It
+ depends on the library installation prefix and usually is
+ _(install_prefix)/lib/traceevent/plugins_
+ - Directory, defined by the environment variable _TRACEEVENT_PLUGIN_DIR_
+ - User's plugin directory, located at _~/.local/lib/traceevent/plugins_
+--
+Loading of plugins can be controlled by the _tep_flags_, using the
+_tep_set_flag()_ API:
+[verse]
+--
+ _TEP_DISABLE_SYS_PLUGINS_ - do not load plugins, located in
+ the system's plugin directory.
+ _TEP_DISABLE_PLUGINS_ - do not load any plugins.
+--
+The _tep_set_flag()_ API must be called before _tep_load_plugins()_ if
+loading all plugins is not desired.
+
+The _tep_unload_plugins()_ function unloads the plugins previously loaded by
+_tep_load_plugins()_. The _tep_ argument is the trace event parser context. The
+_plugin_list_ is the list of loaded plugins, returned by
+the _tep_load_plugins()_ function.
+
+RETURN VALUE
+------------
+The _tep_load_plugins()_ function returns a list of successfully loaded plugins,
+or NULL in case no plugins are loaded.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+struct tep_plugin_list *plugins = tep_load_plugins(tep);
+if (plugins == NULL) {
+ /* no plugins are loaded */
+}
+...
+tep_unload_plugins(plugins, tep);
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_, _tep_set_flag(3)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <[email protected]>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <[email protected]>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <[email protected]>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent.txt b/tools/lib/traceevent/Documentation/libtraceevent.txt
index fbd977b47de1..d530a7ce8fb2 100644
--- a/tools/lib/traceevent/Documentation/libtraceevent.txt
+++ b/tools/lib/traceevent/Documentation/libtraceevent.txt
@@ -16,7 +16,7 @@ Management of tep handler data structure and access of its members:
void *tep_free*(struct tep_handle pass:[*]_tep_);
void *tep_ref*(struct tep_handle pass:[*]_tep_);
void *tep_unref*(struct tep_handle pass:[*]_tep_);
- int *tep_ref_get*(struct tep_handle pass:[*]_tep_);
+ int *tep_get_ref*(struct tep_handle pass:[*]_tep_);
void *tep_set_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
void *tep_clear_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
bool *tep_test_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flags_);
@@ -26,15 +26,12 @@ Management of tep handler data structure and access of its members:
void *tep_set_long_size*(struct tep_handle pass:[*]_tep_, int _long_size_);
int *tep_get_page_size*(struct tep_handle pass:[*]_tep_);
void *tep_set_page_size*(struct tep_handle pass:[*]_tep_, int _page_size_);
- bool *tep_is_latency_format*(struct tep_handle pass:[*]_tep_);
- void *tep_set_latency_format*(struct tep_handle pass:[*]_tep_, int _lat_);
int *tep_get_header_page_size*(struct tep_handle pass:[*]_tep_);
int *tep_get_header_timestamp_size*(struct tep_handle pass:[*]_tep_);
bool *tep_is_old_format*(struct tep_handle pass:[*]_tep_);
int *tep_strerror*(struct tep_handle pass:[*]_tep_, enum tep_errno _errnum_, char pass:[*]_buf_, size_t _buflen_);
Register / unregister APIs:
- int *tep_register_trace_clock*(struct tep_handle pass:[*]_tep_, const char pass:[*]_trace_clock_);
int *tep_register_function*(struct tep_handle pass:[*]_tep_, char pass:[*]_name_, unsigned long long _addr_, char pass:[*]_mod_);
int *tep_register_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
int *tep_unregister_event_handler*(struct tep_handle pass:[*]tep, int id, const char pass:[*]sys_name, const char pass:[*]event_name, tep_event_handler_func func, void pass:[*]_context_);
@@ -57,14 +54,7 @@ Event related APIs:
int *tep_get_events_count*(struct tep_handle pass:[*]_tep_);
struct tep_event pass:[*]pass:[*]*tep_list_events*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
struct tep_event pass:[*]pass:[*]*tep_list_events_copy*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
-
-Event printing:
- void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, bool _use_trace_clock_);
- void *tep_print_event_data*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
- void *tep_event_info*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
- void *tep_print_event_task*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
- void *tep_print_event_time*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]record, bool _use_trace_clock_);
- void *tep_set_print_raw*(struct tep_handle pass:[*]_tep_, int _print_raw_);
+ void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, const char pass:[*]_fmt_, _..._);
Event finding:
struct tep_event pass:[*]*tep_find_event*(struct tep_handle pass:[*]_tep_, int _id_);
@@ -116,7 +106,6 @@ Filter management:
int *tep_filter_compare*(struct tep_event_filter pass:[*]_filter1_, struct tep_event_filter pass:[*]_filter2_);
Parsing various data from the records:
- void *tep_data_latency_format*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_);
int *tep_data_type*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
int *tep_data_pid*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
int *tep_data_preempt_count*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index a39cdd0d890d..5315f3787f8d 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -58,30 +58,6 @@ export man_dir man_dir_SQ INSTALL
export DESTDIR DESTDIR_SQ
export EVENT_PARSE_VERSION
-set_plugin_dir := 1
-
-# Set plugin_dir to preffered global plugin location
-# If we install under $HOME directory we go under
-# $(HOME)/.local/lib/traceevent/plugins
-#
-# We dont set PLUGIN_DIR in case we install under $HOME
-# directory, because by default the code looks under:
-# $(HOME)/.local/lib/traceevent/plugins by default.
-#
-ifeq ($(plugin_dir),)
-ifeq ($(prefix),$(HOME))
-override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
-set_plugin_dir := 0
-else
-override plugin_dir = $(libdir)/traceevent/plugins
-endif
-endif
-
-ifeq ($(set_plugin_dir),1)
-PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)"
-PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))'
-endif
-
include ../../scripts/Makefile.include
# copy a bit from Linux kbuild
@@ -105,7 +81,6 @@ export prefix libdir src obj
# Shell quotes
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
CONFIG_INCLUDES =
CONFIG_LIBS =
@@ -151,29 +126,14 @@ MAKEOVERRIDES=
export srctree OUTPUT CC LD CFLAGS V
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
-PLUGINS = plugin_jbd2.so
-PLUGINS += plugin_hrtimer.so
-PLUGINS += plugin_kmem.so
-PLUGINS += plugin_kvm.so
-PLUGINS += plugin_mac80211.so
-PLUGINS += plugin_sched_switch.so
-PLUGINS += plugin_function.so
-PLUGINS += plugin_xen.so
-PLUGINS += plugin_scsi.so
-PLUGINS += plugin_cfg80211.so
-
-PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS))
-PLUGINS_IN := $(PLUGINS:.so=-in.o)
-
TE_IN := $(OUTPUT)libtraceevent-in.o
LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
-DYNAMIC_LIST_FILE := $(OUTPUT)libtraceevent-dynamic-list
-CMD_TARGETS = $(LIB_TARGET) $(PLUGINS) $(DYNAMIC_LIST_FILE)
+CMD_TARGETS = $(LIB_TARGET)
TARGETS = $(CMD_TARGETS)
-all: all_cmd
+all: all_cmd plugins
all_cmd: $(CMD_TARGETS)
@@ -188,17 +148,6 @@ $(OUTPUT)libtraceevent.so.$(EVENT_PARSE_VERSION): $(TE_IN)
$(OUTPUT)libtraceevent.a: $(TE_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
-$(OUTPUT)libtraceevent-dynamic-list: $(PLUGINS)
- $(QUIET_GEN)$(call do_generate_dynamic_list_file, $(PLUGINS), $@)
-
-plugins: $(PLUGINS)
-
-__plugin_obj = $(notdir $@)
- plugin_obj = $(__plugin_obj:-in.o=)
-
-$(PLUGINS_IN): force
- $(Q)$(MAKE) $(build)=$(plugin_obj)
-
$(OUTPUT)%.so: $(OUTPUT)%-in.o
$(QUIET_LINK)$(CC) $(CFLAGS) -shared $(LDFLAGS) -nostartfiles -o $@ $^
@@ -258,25 +207,6 @@ define do_install
$(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
endef
-define do_install_plugins
- for plugin in $1; do \
- $(call do_install,$$plugin,$(plugin_dir_SQ)); \
- done
-endef
-
-define do_generate_dynamic_list_file
- symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
- xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
- if [ "$$symbol_type" = "U W" ];then \
- (echo '{'; \
- $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
- echo '};'; \
- ) > $2; \
- else \
- (echo Either missing one of [$1] or bad version of $(NM)) 1>&2;\
- fi
-endef
-
PKG_CONFIG_FILE = libtraceevent.pc
define do_install_pkgconfig_file
if [ -n "${pkgconfig_dir}" ]; then \
@@ -296,10 +226,6 @@ install_lib: all_cmd install_plugins install_headers install_pkgconfig
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_INSTALL) $(DESTDIR)$(libdir_SQ)
-install_plugins: $(PLUGINS)
- $(call QUIET_INSTALL, trace_plugins) \
- $(call do_install_plugins, $(PLUGINS))
-
install_pkgconfig:
$(call QUIET_INSTALL, $(PKG_CONFIG_FILE)) \
$(call do_install_pkgconfig_file,$(prefix))
@@ -313,7 +239,7 @@ install_headers:
install: install_lib
-clean:
+clean: clean_plugins
$(call QUIET_CLEAN, libtraceevent) \
$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \
$(RM) TRACEEVENT-CFLAGS tags TAGS; \
@@ -351,7 +277,19 @@ help:
@echo ' doc-install - install the man pages'
@echo ' doc-uninstall - uninstall the man pages'
@echo''
-PHONY += force plugins
+
+PHONY += plugins
+plugins:
+ $(call descend,plugins)
+
+PHONY += install_plugins
+install_plugins:
+ $(call descend,plugins,install)
+
+PHONY += clean_plugins
+clean_plugins:
+ $(call descend,plugins,clean)
+
force:
# Declare the contents of the .PHONY variable as phony. We keep that
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index bb22238debfe..d948475585ce 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4367,10 +4367,20 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
switch (*ptr) {
case 's':
case 'S':
- case 'f':
- case 'F':
case 'x':
break;
+ case 'f':
+ case 'F':
+ /*
+ * Pre-5.5 kernels use %pf and
+ * %pF for printing symbols
+ * while kernels since 5.5 use
+ * %pfw for fwnodes. So check
+ * %p[fF] isn't followed by 'w'.
+ */
+ if (ptr[1] != 'w')
+ break;
+ /* fall through */
default:
/*
* Older kernels do not process
@@ -4487,12 +4497,12 @@ get_bprint_format(void *data, int size __maybe_unused,
printk = find_printk(tep, addr);
if (!printk) {
- if (asprintf(&format, "%%pf: (NO FORMAT FOUND at %llx)\n", addr) < 0)
+ if (asprintf(&format, "%%ps: (NO FORMAT FOUND at %llx)\n", addr) < 0)
return NULL;
return format;
}
- if (asprintf(&format, "%s: %s", "%pf", printk->printk) < 0)
+ if (asprintf(&format, "%s: %s", "%ps", printk->printk) < 0)
return NULL;
return format;
@@ -5517,8 +5527,10 @@ static void print_event_time(struct tep_handle *tep, struct trace_seq *s,
if (divstr && isdigit(*(divstr + 1)))
div = atoi(divstr + 1);
time = record->ts;
- if (div)
+ if (div) {
+ time += div / 2;
time /= div;
+ }
pr = prec;
while (pr--)
p10 *= 10;
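
A tiny standalone check (not from the patch) of the rounding fix above: adding div/2 before the integer division rounds the timestamp to the nearest unit instead of truncating it.
[source,c]
--
#include <stdio.h>

int main(void)
{
        unsigned long long ts = 123456789ULL, div = 1000;

        printf("truncated: %llu\n", ts / div);             /* 123456 */
        printf("rounded:   %llu\n", (ts + div / 2) / div); /* 123457 */
        return 0;
}
--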
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index d438ee44289f..b77837f75a0d 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -441,6 +441,8 @@ int tep_register_print_string(struct tep_handle *tep, const char *fmt,
unsigned long long addr);
bool tep_is_pid_registered(struct tep_handle *tep, int pid);
+struct tep_event *tep_get_event(struct tep_handle *tep, int index);
+
#define TEP_PRINT_INFO "INFO"
#define TEP_PRINT_INFO_RAW "INFO_RAW"
#define TEP_PRINT_COMM "COMM"
diff --git a/tools/lib/traceevent/plugins/Build b/tools/lib/traceevent/plugins/Build
new file mode 100644
index 000000000000..210d26910613
--- /dev/null
+++ b/tools/lib/traceevent/plugins/Build
@@ -0,0 +1,10 @@
+plugin_jbd2-y += plugin_jbd2.o
+plugin_hrtimer-y += plugin_hrtimer.o
+plugin_kmem-y += plugin_kmem.o
+plugin_kvm-y += plugin_kvm.o
+plugin_mac80211-y += plugin_mac80211.o
+plugin_sched_switch-y += plugin_sched_switch.o
+plugin_function-y += plugin_function.o
+plugin_xen-y += plugin_xen.o
+plugin_scsi-y += plugin_scsi.o
+plugin_cfg80211-y += plugin_cfg80211.o
diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile
new file mode 100644
index 000000000000..f440989fa55e
--- /dev/null
+++ b/tools/lib/traceevent/plugins/Makefile
@@ -0,0 +1,222 @@
+# SPDX-License-Identifier: GPL-2.0
+
+#MAKEFLAGS += --no-print-directory
+
+
+# Makefiles suck: This macro sets a default value of $(2) for the
+# variable named by $(1), unless the variable has been set by
+# environment or command line. This is necessary for CC and AR
+# because make sets default values, so the simpler ?= approach
+# won't work as expected.
+define allow-override
+ $(if $(or $(findstring environment,$(origin $(1))),\
+ $(findstring command line,$(origin $(1)))),,\
+ $(eval $(1) = $(2)))
+endef
+
+# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
+$(call allow-override,CC,$(CROSS_COMPILE)gcc)
+$(call allow-override,AR,$(CROSS_COMPILE)ar)
+$(call allow-override,NM,$(CROSS_COMPILE)nm)
+$(call allow-override,PKG_CONFIG,pkg-config)
+
+EXT = -std=gnu99
+INSTALL = install
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+ifeq ($(LP64), 1)
+ libdir_relative = lib64
+else
+ libdir_relative = lib
+endif
+
+prefix ?= /usr/local
+libdir = $(prefix)/$(libdir_relative)
+
+set_plugin_dir := 1
+
+# Set plugin_dir to the preferred global plugin location.
+# If we install under the $HOME directory, we go under
+# $(HOME)/.local/lib/traceevent/plugins
+#
+# We don't set PLUGIN_DIR in case we install under the $HOME
+# directory, because by default the code already looks under
+# $(HOME)/.local/lib/traceevent/plugins.
+#
+ifeq ($(plugin_dir),)
+ifeq ($(prefix),$(HOME))
+override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
+set_plugin_dir := 0
+else
+override plugin_dir = $(libdir)/traceevent/plugins
+endif
+endif
+
+ifeq ($(set_plugin_dir),1)
+PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)"
+PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))'
+endif
+
+include ../../../scripts/Makefile.include
+
+# copy a bit from Linux kbuild
+
+ifeq ("$(origin V)", "command line")
+ VERBOSE = $(V)
+endif
+ifndef VERBOSE
+ VERBOSE = 0
+endif
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+export prefix libdir src obj
+
+# Shell quotes
+plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
+
+CONFIG_INCLUDES =
+CONFIG_LIBS =
+CONFIG_FLAGS =
+
+OBJ = $@
+N =
+
+INCLUDES = -I. -I.. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
+
+# Set compile option CFLAGS
+ifdef EXTRA_CFLAGS
+ CFLAGS := $(EXTRA_CFLAGS)
+else
+ CFLAGS := -g -Wall
+endif
+
+# Append required CFLAGS
+override CFLAGS += -fPIC
+override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
+override CFLAGS += $(udis86-flags) -D_GNU_SOURCE
+
+ifeq ($(VERBOSE),1)
+ Q =
+else
+ Q = @
+endif
+
+# Disable command line variables (CFLAGS) override from top
+# level Makefile (perf), otherwise build Makefile will get
+# the same command line setup.
+MAKEOVERRIDES=
+
+export srctree OUTPUT CC LD CFLAGS V
+
+build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+
+DYNAMIC_LIST_FILE := $(OUTPUT)libtraceevent-dynamic-list
+
+PLUGINS = plugin_jbd2.so
+PLUGINS += plugin_hrtimer.so
+PLUGINS += plugin_kmem.so
+PLUGINS += plugin_kvm.so
+PLUGINS += plugin_mac80211.so
+PLUGINS += plugin_sched_switch.so
+PLUGINS += plugin_function.so
+PLUGINS += plugin_xen.so
+PLUGINS += plugin_scsi.so
+PLUGINS += plugin_cfg80211.so
+
+PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS))
+PLUGINS_IN := $(PLUGINS:.so=-in.o)
+
+plugins: $(PLUGINS) $(DYNAMIC_LIST_FILE)
+
+__plugin_obj = $(notdir $@)
+ plugin_obj = $(__plugin_obj:-in.o=)
+
+$(PLUGINS_IN): force
+ $(Q)$(MAKE) $(build)=$(plugin_obj)
+
+$(OUTPUT)libtraceevent-dynamic-list: $(PLUGINS)
+ $(QUIET_GEN)$(call do_generate_dynamic_list_file, $(PLUGINS), $@)
+
+$(OUTPUT)%.so: $(OUTPUT)%-in.o
+ $(QUIET_LINK)$(CC) $(CFLAGS) -shared $(LDFLAGS) -nostartfiles -o $@ $^
+
+define update_dir
+ (echo $1 > [email protected]; \
+ if [ -r $@ ] && cmp -s $@ [email protected]; then \
+ rm -f [email protected]; \
+ else \
+ echo ' UPDATE $@'; \
+ mv -f [email protected] $@; \
+ fi);
+endef
+
+tags: force
+ $(RM) tags
+ find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
+ --regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/'
+
+TAGS: force
+ $(RM) TAGS
+ find . -name '*.[ch]' | xargs etags \
+ --regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/'
+
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
+define do_install
+ $(call do_install_mkdir,$2); \
+ $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
+endef
+
+define do_install_plugins
+ for plugin in $1; do \
+ $(call do_install,$$plugin,$(plugin_dir_SQ)); \
+ done
+endef
+
+define do_generate_dynamic_list_file
+ symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
+ xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
+ if [ "$$symbol_type" = "U W" ];then \
+ (echo '{'; \
+ $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
+ echo '};'; \
+ ) > $2; \
+ else \
+ (echo Either missing one of [$1] or bad version of $(NM)) 1>&2;\
+ fi
+endef
+
+install: $(PLUGINS)
+ $(call QUIET_INSTALL, trace_plugins) \
+ $(call do_install_plugins, $(PLUGINS))
+
+clean:
+ $(call QUIET_CLEAN, trace_plugins) \
+ $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \
+ $(RM) $(OUTPUT)libtraceevent-dynamic-list; \
+ $(RM) TRACEEVENT-CFLAGS tags TAGS;
+
+PHONY += force plugins
+force:
+
+# Declare the contents of the .PHONY variable as phony. We keep that
+# information in a variable so we can use it in if_changed and friends.
+.PHONY: $(PHONY)
diff --git a/tools/lib/traceevent/plugin_cfg80211.c b/tools/lib/traceevent/plugins/plugin_cfg80211.c
index 3d43b56a6c98..3d43b56a6c98 100644
--- a/tools/lib/traceevent/plugin_cfg80211.c
+++ b/tools/lib/traceevent/plugins/plugin_cfg80211.c
diff --git a/tools/lib/traceevent/plugin_function.c b/tools/lib/traceevent/plugins/plugin_function.c
index 7770fcb78e0f..7770fcb78e0f 100644
--- a/tools/lib/traceevent/plugin_function.c
+++ b/tools/lib/traceevent/plugins/plugin_function.c
diff --git a/tools/lib/traceevent/plugin_hrtimer.c b/tools/lib/traceevent/plugins/plugin_hrtimer.c
index bb434e0ed03a..bb434e0ed03a 100644
--- a/tools/lib/traceevent/plugin_hrtimer.c
+++ b/tools/lib/traceevent/plugins/plugin_hrtimer.c
diff --git a/tools/lib/traceevent/plugin_jbd2.c b/tools/lib/traceevent/plugins/plugin_jbd2.c
index 04fc125f38cb..04fc125f38cb 100644
--- a/tools/lib/traceevent/plugin_jbd2.c
+++ b/tools/lib/traceevent/plugins/plugin_jbd2.c
diff --git a/tools/lib/traceevent/plugin_kmem.c b/tools/lib/traceevent/plugins/plugin_kmem.c
index edaec5d962c3..edaec5d962c3 100644
--- a/tools/lib/traceevent/plugin_kmem.c
+++ b/tools/lib/traceevent/plugins/plugin_kmem.c
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugins/plugin_kvm.c
index c8e623065a7e..c8e623065a7e 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugins/plugin_kvm.c
diff --git a/tools/lib/traceevent/plugin_mac80211.c b/tools/lib/traceevent/plugins/plugin_mac80211.c
index 884303c26b5c..884303c26b5c 100644
--- a/tools/lib/traceevent/plugin_mac80211.c
+++ b/tools/lib/traceevent/plugins/plugin_mac80211.c
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugins/plugin_sched_switch.c
index 957389a0ff7a..957389a0ff7a 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugins/plugin_sched_switch.c
diff --git a/tools/lib/traceevent/plugin_scsi.c b/tools/lib/traceevent/plugins/plugin_scsi.c
index 5d0387a4b65a..5d0387a4b65a 100644
--- a/tools/lib/traceevent/plugin_scsi.c
+++ b/tools/lib/traceevent/plugins/plugin_scsi.c
diff --git a/tools/lib/traceevent/plugin_xen.c b/tools/lib/traceevent/plugins/plugin_xen.c
index 993b208d0323..993b208d0323 100644
--- a/tools/lib/traceevent/plugin_xen.c
+++ b/tools/lib/traceevent/plugins/plugin_xen.c
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index a269d78456b6..46f7fba2306c 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -924,7 +924,7 @@ ifndef NO_JVMTI
JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
else
ifneq (,$(wildcard /usr/sbin/alternatives))
- JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+ JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g')
endif
endif
ifndef JDIR
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index f9807d8c005b..902c792f326a 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -292,7 +292,7 @@ endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
export LIBTRACEEVENT
-LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)libtraceevent-dynamic-list
+LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)plugins/libtraceevent-dynamic-list
#
# The static build has no dynsym table, so this does not work for
@@ -567,7 +567,7 @@ all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
# Create python binding output directory if not already present
_dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python')
-$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
+$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST) $(LIBPERF)
$(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
$(PYTHON_WORD) util/setup.py \
@@ -737,7 +737,7 @@ libtraceevent_plugins: FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
$(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
- $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent-dynamic-list
+ $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)plugins/libtraceevent-dynamic-list
$(LIBTRACEEVENT)-clean:
$(call QUIET_CLEAN, libtraceevent)
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index c32db09baf0d..ede040cf82ad 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -23,9 +23,10 @@
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
+#include "../../util/evsel_config.h"
#include "../../util/pmu.h"
#include "../../util/cs-etm.h"
-#include "../../util/util.h"
+#include <internal/lib.h> // page_size
#include "../../util/session.h"
#include <errno.h>
@@ -416,7 +417,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
if (err)
goto out;
- tracking_evsel = perf_evlist__last(evlist);
+ tracking_evsel = evlist__last(evlist);
perf_evlist__set_tracking_event(evlist, tracking_evsel);
tracking_evsel->core.attr.freq = 0;
@@ -648,7 +649,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
return -EINVAL;
- if (!session->evlist->nr_mmaps)
+ if (!session->evlist->core.nr_mmaps)
return -EINVAL;
/* If the cpu_map is empty all online CPUs are involved */
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 4b364692da67..eba6541ec0f1 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -16,7 +16,7 @@
#include "../../util/evsel.h"
#include "../../util/evlist.h"
#include "../../util/session.h"
-#include "../../util/util.h"
+#include <internal/lib.h> // page_size
#include "../../util/pmu.h"
#include "../../util/debug.h"
#include "../../util/auxtrace.h"
@@ -51,7 +51,7 @@ static int arm_spe_info_fill(struct auxtrace_record *itr,
if (priv_size != ARM_SPE_AUXTRACE_PRIV_SIZE)
return -EINVAL;
- if (!session->evlist->nr_mmaps)
+ if (!session->evlist->core.nr_mmaps)
return -EINVAL;
auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
@@ -129,7 +129,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
if (err)
return err;
- tracking_evsel = perf_evlist__last(evlist);
+ tracking_evsel = evlist__last(evlist);
perf_evlist__set_tracking_event(evlist, tracking_evsel);
tracking_evsel->core.attr.freq = 0;
diff --git a/tools/perf/arch/arm64/util/dwarf-regs.c b/tools/perf/arch/arm64/util/dwarf-regs.c
index b047b882c5b1..917b97d7c5d3 100644
--- a/tools/perf/arch/arm64/util/dwarf-regs.c
+++ b/tools/perf/arch/arm64/util/dwarf-regs.c
@@ -11,7 +11,6 @@
#include <dwarf-regs.h>
#include <linux/ptrace.h> /* for struct user_pt_regs */
#include <linux/stringify.h>
-#include "util.h"
struct pt_regs_dwarfnum {
const char *name;
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
index e41defaaa2e6..a32e4b72a98f 100644
--- a/tools/perf/arch/arm64/util/header.c
+++ b/tools/perf/arch/arm64/util/header.c
@@ -1,5 +1,7 @@
#include <stdio.h>
#include <stdlib.h>
+#include <perf/cpumap.h>
+#include <internal/cpumap.h>
#include <api/fs/fs.h>
#include "debug.h"
#include "header.h"
@@ -29,7 +31,7 @@ char *get_cpuid_str(struct perf_pmu *pmu)
/* read midr from list of cpus mapped to this pmu */
cpus = perf_cpu_map__get(pmu->cpus);
- for (cpu = 0; cpu < cpus->nr; cpu++) {
+ for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
sysfs, cpus->map[cpu]);
diff --git a/tools/perf/arch/arm64/util/unwind-libunwind.c b/tools/perf/arch/arm64/util/unwind-libunwind.c
index 002520d4036b..1495a9523a23 100644
--- a/tools/perf/arch/arm64/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm64/util/unwind-libunwind.c
@@ -5,8 +5,8 @@
#include <libunwind.h>
#include "perf_regs.h"
#include "../../util/unwind.h"
-#include "../../util/debug.h"
#endif
+#include "../../util/debug.h"
int LIBUNWIND__ARCH_REG_ID(int regnum)
{
diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c
index 4952890b9428..0c4f4caf53ac 100644
--- a/tools/perf/arch/powerpc/util/dwarf-regs.c
+++ b/tools/perf/arch/powerpc/util/dwarf-regs.c
@@ -12,7 +12,6 @@
#include <linux/ptrace.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
-#include "util.h"
struct pt_regs_dwarfnum {
const char *name;
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 0b242664f5ea..b6b7bc7e31a1 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -6,7 +6,6 @@
#include <string.h>
#include <linux/stringify.h>
#include "header.h"
-#include "util.h"
#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
index f0dbf7b075c8..9cc1c4a9dec4 100644
--- a/tools/perf/arch/powerpc/util/kvm-stat.c
+++ b/tools/perf/arch/powerpc/util/kvm-stat.c
@@ -5,9 +5,11 @@
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
+#include "util/pmu.h"
#include "book3s_hv_exits.h"
#include "book3s_hcalls.h"
+#include <subcmd/parse-options.h>
#define NR_TPS 4
@@ -172,3 +174,46 @@ int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
return ret;
}
+
+/*
+ * On the powerpc architecture, PMU registers are programmable by the
+ * guest kernel, so monitoring a guest via the host may not provide
+ * valid samples with the default 'cycles' event. It is better to use
+ * the 'trace_imc/trace_cycles' event for guest profiling, since it
+ * can track the guest instruction pointer in the trace record.
+ *
+ * Parse the arguments and add that event if the user did not specify one.
+ */
+int kvm_add_default_arch_event(int *argc, const char **argv)
+{
+ const char **tmp;
+ bool event = false;
+ int i, j = *argc;
+
+ const struct option event_options[] = {
+ OPT_BOOLEAN('e', "event", &event, NULL),
+ OPT_END()
+ };
+
+ tmp = calloc(j + 1, sizeof(char *));
+ if (!tmp)
+ return -EINVAL;
+
+ for (i = 0; i < j; i++)
+ tmp[i] = argv[i];
+
+ parse_options(j, tmp, event_options, NULL, PARSE_OPT_KEEP_UNKNOWN);
+ if (!event) {
+ if (pmu_have_event("trace_imc", "trace_cycles")) {
+ argv[j++] = strdup("-e");
+ argv[j++] = strdup("trace_imc/trace_cycles/");
+ *argc += 2;
+ } else {
+ free(tmp);
+ return -EINVAL;
+ }
+ }
+
+ free(tmp);
+ return 0;
+}
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index fc9c2f5fcd52..3018a054526a 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -13,6 +13,7 @@
#include "util/callchain.h"
#include "util/debug.h"
#include "util/dso.h"
+#include "util/event.h" // struct ip_callchain
#include "util/map.h"
#include "util/symbol.h"
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index 8a4b717e0a53..abb7a12d8f93 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -4,7 +4,6 @@
* Copyright (C) 2015 Naveen N. Rao, IBM Corporation
*/
-#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "map.h"
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index cb198787570a..6ac8887be7c9 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -4,6 +4,7 @@ PERF_HAVE_DWARF_REGS := 1
endif
HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+PERF_HAVE_JITDUMP := 1
#
# Syscall table generation for perf
diff --git a/tools/perf/arch/s390/util/auxtrace.c b/tools/perf/arch/s390/util/auxtrace.c
index b0fb70e38960..0db5c58c98e8 100644
--- a/tools/perf/arch/s390/util/auxtrace.c
+++ b/tools/perf/arch/s390/util/auxtrace.c
@@ -1,4 +1,5 @@
#include <stdbool.h>
+#include <stdlib.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
index c8c86a0c9b79..724efb2d842d 100644
--- a/tools/perf/arch/s390/util/machine.c
+++ b/tools/perf/arch/s390/util/machine.c
@@ -2,7 +2,7 @@
#include <unistd.h>
#include <stdio.h>
#include <string.h>
-#include "util.h"
+#include <internal/lib.h> // page_size
#include "machine.h"
#include "api/fs/fs.h"
#include "debug.h"
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
index 3b5cc3373821..3ec562a2aaba 100644
--- a/tools/perf/arch/x86/tests/intel-cqm.c
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -5,7 +5,7 @@
#include "evlist.h"
#include "evsel.h"
#include "arch-tests.h"
-#include "util.h"
+#include <internal/lib.h> // page_size
#include <signal.h>
#include <sys/mman.h>
@@ -63,9 +63,9 @@ int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subt
goto out;
}
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
if (!evsel) {
- pr_debug("perf_evlist__first failed\n");
+ pr_debug("evlist__first failed\n");
goto out;
}
diff --git a/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index eb3635941c2b..fa947952c16a 100644
--- a/tools/perf/arch/x86/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -15,9 +15,9 @@
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "record.h"
#include "tsc.h"
+#include "util/mmap.h"
#include "tests/tests.h"
#include "arch-tests.h"
@@ -66,7 +66,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
union perf_event *event;
u64 test_tsc, comm1_tsc, comm2_tsc;
u64 test_time, comm1_time = 0, comm2_time = 0;
- struct perf_mmap *md;
+ struct mmap *md;
threads = thread_map__new(-1, getpid(), UINT_MAX);
CHECK_NOT_NULL__(threads);
@@ -83,7 +83,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
perf_evlist__config(evlist, &opts, NULL);
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;
@@ -91,9 +91,9 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
CHECK__(evlist__open(evlist));
- CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
+ CHECK__(evlist__mmap(evlist, UINT_MAX));
- pc = evlist->mmap[0].base;
+ pc = evlist->mmap[0].core.base;
ret = perf_read_tsc_conversion(pc, &tc);
if (ret) {
if (ret == -EOPNOTSUPP) {
@@ -115,7 +115,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
evlist__disable(evlist);
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;
diff --git a/tools/perf/arch/x86/tests/rdpmc.c b/tools/perf/arch/x86/tests/rdpmc.c
index 6e67cee792b1..1ea916656a2d 100644
--- a/tools/perf/arch/x86/tests/rdpmc.c
+++ b/tools/perf/arch/x86/tests/rdpmc.c
@@ -13,7 +13,7 @@
#include "tests/tests.h"
#include "cloexec.h"
#include "event.h"
-#include "util.h"
+#include <internal/lib.h> // page_size
#include "arch-tests.h"
static u64 rdpmc(unsigned int counter)
diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c
index 9876c7a7ed7c..3e6791531ca5 100644
--- a/tools/perf/arch/x86/util/archinsn.c
+++ b/tools/perf/arch/x86/util/archinsn.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../../../arch/x86/include/asm/insn.h"
#include "archinsn.h"
+#include "event.h"
#include "machine.h"
#include "thread.h"
#include "symbol.h"
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
index a3a0b6884779..d357c625c09f 100644
--- a/tools/perf/arch/x86/util/event.c
+++ b/tools/perf/arch/x86/util/event.c
@@ -3,6 +3,8 @@
#include <linux/string.h>
#include <linux/zalloc.h>
+#include "../../util/event.h"
+#include "../../util/synthetic-events.h"
#include "../../util/machine.h"
#include "../../util/tool.h"
#include "../../util/map.h"
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index d263430c045f..f7f68a50a5cd 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -15,6 +15,7 @@
#include "../../util/event.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
+#include "../../util/mmap.h"
#include "../../util/session.h"
#include "../../util/pmu.h"
#include "../../util/debug.h"
@@ -22,7 +23,7 @@
#include "../../util/tsc.h"
#include "../../util/auxtrace.h"
#include "../../util/intel-bts.h"
-#include "../../util/util.h"
+#include <internal/lib.h> // page_size
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
@@ -74,10 +75,10 @@ static int intel_bts_info_fill(struct auxtrace_record *itr,
if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE)
return -EINVAL;
- if (!session->evlist->nr_mmaps)
+ if (!session->evlist->core.nr_mmaps)
return -EINVAL;
- pc = session->evlist->mmap[0].base;
+ pc = session->evlist->mmap[0].core.base;
if (pc) {
err = perf_read_tsc_conversion(pc, &tc);
if (err) {
@@ -230,7 +231,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
if (err)
return err;
- tracking_evsel = perf_evlist__last(evlist);
+ tracking_evsel = evlist__last(evlist);
perf_evlist__set_tracking_event(evlist, tracking_evsel);
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index cb7cf16af79c..d6d26256915f 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -18,6 +18,7 @@
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/cpumap.h"
+#include "../../util/mmap.h"
#include <subcmd/parse-options.h>
#include "../../util/parse-events.h"
#include "../../util/pmu.h"
@@ -26,7 +27,7 @@
#include "../../util/record.h"
#include "../../util/target.h"
#include "../../util/tsc.h"
-#include "../../util/util.h"
+#include <internal/lib.h> // page_size
#include "../../util/intel-pt.h"
#define KiB(x) ((x) * 1024)
@@ -351,10 +352,10 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
filter_str_len = filter ? strlen(filter) : 0;
- if (!session->evlist->nr_mmaps)
+ if (!session->evlist->core.nr_mmaps)
return -EINVAL;
- pc = session->evlist->mmap[0].base;
+ pc = session->evlist->mmap[0].core.base;
if (pc) {
err = perf_read_tsc_conversion(pc, &tc);
if (err) {
@@ -416,12 +417,12 @@ static int intel_pt_track_switches(struct evlist *evlist)
return err;
}
- evsel = perf_evlist__last(evlist);
+ evsel = evlist__last(evlist);
perf_evsel__set_sample_bit(evsel, CPU);
perf_evsel__set_sample_bit(evsel, TIME);
- evsel->system_wide = true;
+ evsel->core.system_wide = true;
evsel->no_aux_samples = true;
evsel->immediate = true;
@@ -716,13 +717,13 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
if (err)
return err;
- switch_evsel = perf_evlist__last(evlist);
+ switch_evsel = evlist__last(evlist);
switch_evsel->core.attr.freq = 0;
switch_evsel->core.attr.sample_period = 1;
switch_evsel->core.attr.context_switch = 1;
- switch_evsel->system_wide = true;
+ switch_evsel->core.system_wide = true;
switch_evsel->no_aux_samples = true;
switch_evsel->immediate = true;
@@ -774,7 +775,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
if (err)
return err;
- tracking_evsel = perf_evlist__last(evlist);
+ tracking_evsel = evlist__last(evlist);
perf_evlist__set_tracking_event(evlist, tracking_evsel);
diff --git a/tools/perf/arch/x86/util/machine.c b/tools/perf/arch/x86/util/machine.c
index 1e9ec783b9a1..e17e080e76f4 100644
--- a/tools/perf/arch/x86/util/machine.c
+++ b/tools/perf/arch/x86/util/machine.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/string.h>
+#include <limits.h>
#include <stdlib.h>
-#include "../../util/util.h"
+#include <internal/lib.h> // page_size
#include "../../util/machine.h"
#include "../../util/map.h"
#include "../../util/symbol.h"
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index c5197a15119b..2f55afb14e1f 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -8,6 +8,8 @@
#include <linux/types.h>
#include <asm/barrier.h>
#include "../../../util/debug.h"
+#include "../../../util/event.h"
+#include "../../../util/synthetic-events.h"
#include "../../../util/tsc.h"
int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
index 05920e3edf7a..47357973b55b 100644
--- a/tools/perf/arch/x86/util/unwind-libunwind.c
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
+#include "../../util/debug.h"
#ifndef REMOTE_UNWIND_LIBUNWIND
#include <libunwind.h>
#include "perf_regs.h"
#include "../../util/unwind.h"
-#include "../../util/debug.h"
#endif
#ifdef HAVE_ARCH_X86_64_SUPPORT
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index d1caa4a0a12a..bb617e568841 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -21,12 +21,12 @@
#include <sys/resource.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
+#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
-#include "cpumap.h"
#include <err.h>
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index f6b4472847d2..7af694437f4e 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -76,12 +76,12 @@
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/types.h>
+#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
-#include "cpumap.h"
#include <err.h>
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index 80e138904c66..8ba0c3330a9a 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -20,13 +20,13 @@
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <sys/time.h>
+#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
#include "futex.h"
-#include "cpumap.h"
#include <err.h>
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index c5d6d0abbaa9..d0cae8125423 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -14,10 +14,10 @@
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <errno.h>
+#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"
-#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index 75d3418c1a88..a00a6891447a 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -20,10 +20,10 @@
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
+#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"
-#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index 163fe16c275a..a053cf2b7039 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -29,7 +29,8 @@ int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe
#include <linux/time64.h>
#include <errno.h>
#include "futex.h"
-#include "cpumap.h"
+#include <internal/cpumap.h>
+#include <perf/cpumap.h>
#include <err.h>
#include <stdlib.h>
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 77dcdc13618a..df810096abfe 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -20,10 +20,10 @@
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
+#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"
-#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 62b8ef4bcb1f..5797253b9700 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -9,7 +9,6 @@
/* For the CLR_() macros */
#include <pthread.h>
-#include "../builtin.h"
#include <subcmd/parse-options.h>
#include "../util/cloexec.h"
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index c63eb9a46346..97e4a4fb3362 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -10,9 +10,7 @@
*
*/
-#include "../util/util.h"
#include <subcmd/parse-options.h>
-#include "../builtin.h"
#include "bench.h"
/* Test groups of 20 processes spraying to 20 receivers */
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 35b07f197d48..3c88d1f201f1 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -9,9 +9,7 @@
* http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c
* Ported to perf by Hitoshi Mitake <[email protected]>
*/
-#include "../util/util.h"
#include <subcmd/parse-options.h>
-#include "../builtin.h"
#include "bench.h"
#include <unistd.h>
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 4e4d2e76232e..8db8fc9bddef 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -27,6 +27,7 @@
#include "util/sort.h"
#include "util/hist.h"
#include "util/dso.h"
+#include "util/machine.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
@@ -39,6 +40,7 @@
#include <dlfcn.h>
#include <errno.h>
#include <linux/bitmap.h>
+#include <linux/err.h>
struct perf_annotate {
struct perf_tool tool;
@@ -583,8 +585,8 @@ int cmd_annotate(int argc, const char **argv)
data.path = input_name;
annotate.session = perf_session__new(&data, false, &annotate.tool);
- if (annotate.session == NULL)
- return -1;
+ if (IS_ERR(annotate.session))
+ return PTR_ERR(annotate.session);
annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
HEADER_BRANCH_STACK);
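perf_session__new() now returns an errno-encoded pointer instead of NULL on failure, which is why each caller in this series switches from a NULL test to the IS_ERR()/PTR_ERR() pair. A minimal sketch of the new calling convention (the -ENOMEM value is only an example):

    #include <linux/err.h>

    session = perf_session__new(&data, false, &tool);
    if (IS_ERR(session))		/* constructor returns e.g. ERR_PTR(-ENOMEM) */
    	return PTR_ERR(session);	/* propagate the real errno instead of -1 */
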
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 1a69eb565dc0..39efa51d7fb3 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -28,6 +28,7 @@
#include "util/util.h"
#include "util/probe-file.h"
#include <linux/string.h>
+#include <linux/err.h>
static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
{
@@ -422,8 +423,8 @@ int cmd_buildid_cache(int argc, const char **argv)
data.force = force;
session = perf_session__new(&data, false, NULL);
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
}
if (symbol__init(session ? &session->header.env : NULL) < 0)
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 5a0d8b378cb5..e3ef75583514 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -18,6 +18,7 @@
#include "util/symbol.h"
#include "util/data.h"
#include <errno.h>
+#include <linux/err.h>
static int sysfs__fprintf_build_id(FILE *fp)
{
@@ -65,8 +66,8 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
goto out;
session = perf_session__new(&data, false, &build_id__mark_dso_hit_ops);
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
/*
* We take all buildids when the file contains AUX area tracing data
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index b09b12e0976b..3542b6ab9813 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -13,6 +13,7 @@
#include <errno.h>
#include <inttypes.h>
#include <linux/compiler.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
@@ -20,6 +21,7 @@
#include <sys/param.h>
#include "debug.h"
#include "builtin.h"
+#include <perf/cpumap.h>
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "map_symbol.h"
@@ -2780,8 +2782,9 @@ static int perf_c2c__report(int argc, const char **argv)
}
session = perf_session__new(&data, 0, &c2c.tool);
- if (session == NULL) {
- pr_debug("No memory for session\n");
+ if (IS_ERR(session)) {
+ err = PTR_ERR(session);
+ pr_debug("Error creating perf session\n");
goto out;
}
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index 42d8157e047a..2603015f98be 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -9,7 +9,6 @@
#include "util/cache.h"
#include <subcmd/parse-options.h>
-#include "util/util.h"
#include "util/debug.h"
#include "util/config.h"
#include <linux/string.h>
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 827e4800d862..c37a78677955 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -23,6 +23,7 @@
#include "util/time-utils.h"
#include "util/annotate.h"
#include "util/map.h"
+#include <linux/err.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
@@ -1153,9 +1154,9 @@ static int check_file_brstack(void)
data__for_each_file(i, d) {
d->session = perf_session__new(&d->data, false, &pdiff.tool);
- if (!d->session) {
+ if (IS_ERR(d->session)) {
pr_err("Failed to open %s\n", d->data.path);
- return -1;
+ return PTR_ERR(d->session);
}
has_br_stack = perf_header__has_feat(&d->session->header,
@@ -1185,9 +1186,9 @@ static int __cmd_diff(void)
data__for_each_file(i, d) {
d->session = perf_session__new(&d->data, false, &pdiff.tool);
- if (!d->session) {
+ if (IS_ERR(d->session)) {
+ ret = PTR_ERR(d->session);
pr_err("Failed to open %s\n", d->data.path);
- ret = -1;
goto out_delete;
}
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 238fa3876805..440501994931 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -5,18 +5,18 @@
*/
#include "builtin.h"
-#include "util/util.h"
-
#include <linux/list.h>
#include "perf.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/evsel_fprintf.h"
#include "util/parse-events.h"
#include <subcmd/parse-options.h>
#include "util/session.h"
#include "util/data.h"
#include "util/debug.h"
+#include <linux/err.h>
static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
{
@@ -30,8 +30,8 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
bool has_tracepoint = false;
session = perf_session__new(&data, 0, NULL);
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
evlist__for_each_entry(session->evlist, pos) {
perf_evsel__fprintf(pos, details, stdout);
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index c14f40b858bc..372ecb3e2c06 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -21,7 +21,9 @@
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/symbol.h"
+#include "util/synthetic-events.h"
#include "util/thread.h"
+#include <linux/err.h>
#include <subcmd/parse-options.h>
@@ -834,8 +836,8 @@ int cmd_inject(int argc, const char **argv)
data.path = inject.input_name;
inject.session = perf_session__new(&data, true, &inject.tool);
- if (inject.session == NULL)
- return -1;
+ if (IS_ERR(inject.session))
+ return PTR_ERR(inject.session);
if (zstd_init(&(inject.session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed.\n");
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index b5682beaad72..1e61e353f579 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -14,6 +14,7 @@
#include "util/tool.h"
#include "util/callchain.h"
#include "util/time-utils.h"
+#include <linux/err.h>
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
@@ -1956,8 +1957,8 @@ int cmd_kmem(int argc, const char **argv)
data.path = input_name;
kmem_session = session = perf_session__new(&data, false, &perf_kmem);
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
ret = -1;
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 0a4fcbe32bf6..2227e2f42c09 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -5,6 +5,7 @@
#include "util/build-id.h"
#include "util/evsel.h"
#include "util/evlist.h"
+#include "util/mmap.h"
#include "util/term.h"
#include "util/symbol.h"
#include "util/thread.h"
@@ -17,9 +18,11 @@
#include "util/debug.h"
#include "util/tool.h"
#include "util/stat.h"
+#include "util/synthetic-events.h"
#include "util/top.h"
#include "util/data.h"
#include "util/ordered-events.h"
+#include "util/kvm-stat.h"
#include "ui/ui.h"
#include <sys/prctl.h>
@@ -31,6 +34,7 @@
#include <sys/stat.h>
#include <fcntl.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/time64.h>
@@ -58,7 +62,6 @@ static const char *get_filename_for_perf_kvm(void)
}
#ifdef HAVE_KVM_STAT_SUPPORT
-#include "util/kvm-stat.h"
void exit_event_get_key(struct evsel *evsel,
struct perf_sample *sample,
@@ -748,7 +751,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
{
struct evlist *evlist = kvm->evlist;
union perf_event *event;
- struct perf_mmap *md;
+ struct mmap *md;
u64 timestamp;
s64 n = 0;
int err;
@@ -799,7 +802,7 @@ static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
s64 n, ntotal = 0;
u64 flush_time = ULLONG_MAX, mmap_time;
- for (i = 0; i < kvm->evlist->nr_mmaps; i++) {
+ for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) {
n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
if (n < 0)
return -1;
@@ -964,10 +967,10 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
goto out;
}
- if (perf_evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
+ if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
goto out;
- nr_stdin = perf_evlist__add_pollfd(kvm->evlist, fileno(stdin));
+ nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin));
if (nr_stdin < 0)
goto out;
@@ -978,7 +981,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
evlist__enable(kvm->evlist);
while (!done) {
- struct fdarray *fda = &kvm->evlist->pollfd;
+ struct fdarray *fda = &kvm->evlist->core.pollfd;
int rc;
rc = perf_kvm__mmap_read(kvm);
@@ -1058,7 +1061,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
goto out;
}
- if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
+ if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
ui__error("Failed to mmap the events: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
evlist__close(evlist);
@@ -1090,9 +1093,9 @@ static int read_events(struct perf_kvm_stat *kvm)
kvm->tool = eops;
kvm->session = perf_session__new(&file, false, &kvm->tool);
- if (!kvm->session) {
+ if (IS_ERR(kvm->session)) {
pr_err("Initializing perf session failed\n");
- return -1;
+ return PTR_ERR(kvm->session);
}
symbol__init(&kvm->session->header.env);
@@ -1445,8 +1448,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
* perf session
*/
kvm->session = perf_session__new(&data, false, &kvm->tool);
- if (kvm->session == NULL) {
- err = -1;
+ if (IS_ERR(kvm->session)) {
+ err = PTR_ERR(kvm->session);
goto out;
}
kvm->session->evlist = kvm->evlist;
@@ -1513,11 +1516,21 @@ perf_stat:
}
#endif /* HAVE_KVM_STAT_SUPPORT */
+int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
+ const char **argv __maybe_unused)
+{
+ return 0;
+}
+
static int __cmd_record(const char *file_name, int argc, const char **argv)
{
- int rec_argc, i = 0, j;
+ int rec_argc, i = 0, j, ret;
const char **rec_argv;
+ ret = kvm_add_default_arch_event(&argc, argv);
+ if (ret)
+ return -EINVAL;
+
rec_argc = argc + 2;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
rec_argv[i++] = strdup("record");
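kvm_add_default_arch_event() uses the usual weak-symbol arrangement: builtin-kvm.c supplies a no-op default and an architecture directory may provide a strong override, as powerpc does earlier in this series. Sketch of the linkage:

    /* builtin-kvm.c: weak default, used when no arch override is linked in */
    int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
    				      const char **argv __maybe_unused)
    {
    	return 0;	/* keep the user's command line unchanged */
    }

    /* arch/powerpc/util/kvm-stat.c: the strong definition wins at link
     * time and appends "-e trace_imc/trace_cycles/" when no -e was given. */
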
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index e290f6b348d8..08e62ae9d37e 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -81,9 +81,9 @@ int cmd_list(int argc, const char **argv)
long_desc_flag, details_flag);
else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(NULL, NULL, raw_dump);
- else if (strcmp(argv[i], "metric") == 0)
+ else if (strcmp(argv[i], "metric") == 0 || strcmp(argv[i], "metrics") == 0)
metricgroup__print(true, false, NULL, raw_dump, details_flag);
- else if (strcmp(argv[i], "metricgroup") == 0)
+ else if (strcmp(argv[i], "metricgroup") == 0 || strcmp(argv[i], "metricgroups") == 0)
metricgroup__print(false, true, NULL, raw_dump, details_flag);
else if ((sep = strchr(argv[i], ':')) != NULL) {
int sep_idx;
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 4c2b7f437cdf..474dfd59d7eb 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -30,6 +30,7 @@
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
+#include <linux/err.h>
static struct perf_session *session;
@@ -872,9 +873,9 @@ static int __cmd_report(bool display_info)
};
session = perf_session__new(&data, false, &eops);
- if (!session) {
+ if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
- return -1;
+ return PTR_ERR(session);
}
symbol__init(&session->header.env);
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 27d2bde943a8..a13f5817d6fc 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -17,6 +17,7 @@
#include "util/dso.h"
#include "util/map.h"
#include "util/symbol.h"
+#include <linux/err.h>
#define MEM_OPERATION_LOAD 0x1
#define MEM_OPERATION_STORE 0x2
@@ -249,8 +250,8 @@ static int report_raw_events(struct perf_mem *mem)
struct perf_session *session = perf_session__new(&data, false,
&mem->tool);
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
if (mem->cpu_list) {
ret = perf_session__cpu_bitmap(session, mem->cpu_list,
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 1447004eee8a..23332861de6e 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -20,6 +20,7 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
+#include "util/mmap.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
@@ -38,6 +39,7 @@
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
+#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
@@ -53,6 +55,7 @@
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
+#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
@@ -117,7 +120,7 @@ static bool switch_output_time(struct record *rec)
trigger_is_ready(&switch_output_trigger);
}
-static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
+static int record__write(struct record *rec, struct mmap *map __maybe_unused,
void *bf, size_t size)
{
struct perf_data_file *file = &rec->session->data->file;
@@ -166,7 +169,7 @@ static int record__aio_write(struct aiocb *cblock, int trace_fd,
return rc;
}
-static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
+static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
void *rem_buf;
off_t rem_off;
@@ -212,7 +215,7 @@ static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
return rc;
}
-static int record__aio_sync(struct perf_mmap *md, bool sync_all)
+static int record__aio_sync(struct mmap *md, bool sync_all)
{
struct aiocb **aiocb = md->aio.aiocb;
struct aiocb *cblocks = md->aio.cblocks;
@@ -253,12 +256,12 @@ struct record_aio {
size_t size;
};
-static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
+static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
struct record_aio *aio = to;
/*
- * map->base data pointed by buf is copied into free map->aio.data[] buffer
+ * map->core.base data pointed to by buf is copied into a free map->aio.data[] buffer
* to release space in the kernel buffer as fast as possible, calling
* perf_mmap__consume() from perf_mmap__push() function.
*
@@ -298,7 +301,7 @@ static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t
return size;
}
-static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
+static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
int ret, idx;
int trace_fd = rec->session->data->file.fd;
@@ -349,15 +352,15 @@ static void record__aio_mmap_read_sync(struct record *rec)
{
int i;
struct evlist *evlist = rec->evlist;
- struct perf_mmap *maps = evlist->mmap;
+ struct mmap *maps = evlist->mmap;
if (!record__aio_enabled(rec))
return;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- struct perf_mmap *map = &maps[i];
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &maps[i];
- if (map->base)
+ if (map->core.base)
record__aio_sync(map, true);
}
}
@@ -385,7 +388,7 @@ static int record__aio_parse(const struct option *opt,
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;
-static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
+static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
off_t *off __maybe_unused)
{
return -1;
@@ -437,7 +440,7 @@ static int record__mmap_flush_parse(const struct option *opt,
if (!opts->mmap_flush)
opts->mmap_flush = MMAP_FLUSH_DEFAULT;
- flush_max = perf_evlist__mmap_size(opts->mmap_pages);
+ flush_max = evlist__mmap_size(opts->mmap_pages);
flush_max /= 4;
if (opts->mmap_flush > flush_max)
opts->mmap_flush = flush_max;
@@ -480,7 +483,7 @@ static int process_synthesized_event(struct perf_tool *tool,
return record__write(rec, NULL, event, event->header.size);
}
-static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
+static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
struct record *rec = to;
@@ -525,7 +528,7 @@ static void record__sig_exit(void)
#ifdef HAVE_AUXTRACE_SUPPORT
static int record__process_auxtrace(struct perf_tool *tool,
- struct perf_mmap *map,
+ struct mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2)
{
@@ -563,7 +566,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
}
static int record__auxtrace_mmap_read(struct record *rec,
- struct perf_mmap *map)
+ struct mmap *map)
{
int ret;
@@ -579,7 +582,7 @@ static int record__auxtrace_mmap_read(struct record *rec,
}
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
- struct perf_mmap *map)
+ struct mmap *map)
{
int ret;
@@ -600,8 +603,8 @@ static int record__auxtrace_read_snapshot_all(struct record *rec)
int i;
int rc = 0;
- for (i = 0; i < rec->evlist->nr_mmaps; i++) {
- struct perf_mmap *map = &rec->evlist->mmap[i];
+ for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &rec->evlist->mmap[i];
if (!map->auxtrace_mmap.base)
continue;
@@ -666,7 +669,7 @@ static int record__auxtrace_init(struct record *rec)
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
- struct perf_mmap *map __maybe_unused)
+ struct mmap *map __maybe_unused)
{
return 0;
}
@@ -705,7 +708,7 @@ static int record__mmap_evlist(struct record *rec,
if (opts->affinity != PERF_AFFINITY_SYS)
cpu__setup_cpunode_map();
- if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
+ if (evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode,
opts->nr_cblocks, opts->affinity,
@@ -753,9 +756,9 @@ static int record__open(struct record *rec)
if (perf_evlist__add_dummy(evlist))
return -ENOMEM;
- pos = perf_evlist__first(evlist);
+ pos = evlist__first(evlist);
pos->tracking = 0;
- pos = perf_evlist__last(evlist);
+ pos = evlist__last(evlist);
pos->tracking = 1;
pos->core.attr.enable_on_exec = 1;
}
@@ -786,6 +789,17 @@ try_again:
pos->supported = true;
}
+ if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
+ pr_warning(
+"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
+"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
+"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
+"file is not found in the buildid cache or in the vmlinux path.\n\n"
+"Samples in kernel modules won't be resolved at all.\n\n"
+"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
+"even with a suitable vmlinux or kallsyms file.\n\n");
+ }
+
if (perf_evlist__apply_filters(evlist, &pos)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
pos->filter, perf_evsel__name(pos), errno,
@@ -888,7 +902,7 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
-static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
+static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
if (rec->opts.affinity != PERF_AFFINITY_SYS &&
!CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
@@ -935,7 +949,7 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
u64 bytes_written = rec->bytes_written;
int i;
int rc = 0;
- struct perf_mmap *maps;
+ struct mmap *maps;
int trace_fd = rec->data.file.fd;
off_t off = 0;
@@ -952,20 +966,20 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
if (record__aio_enabled(rec))
off = record__aio_get_pos(trace_fd);
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
u64 flush = 0;
- struct perf_mmap *map = &maps[i];
+ struct mmap *map = &maps[i];
- if (map->base) {
+ if (map->core.base) {
record__adjust_affinity(rec, map);
if (synch) {
- flush = map->flush;
- map->flush = 1;
+ flush = map->core.flush;
+ map->core.flush = 1;
}
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) < 0) {
if (synch)
- map->flush = flush;
+ map->core.flush = flush;
rc = -1;
goto out;
}
@@ -973,13 +987,13 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
if (record__aio_push(rec, map, &off) < 0) {
record__aio_set_pos(trace_fd, off);
if (synch)
- map->flush = flush;
+ map->core.flush = flush;
rc = -1;
goto out;
}
}
if (synch)
- map->flush = flush;
+ map->core.flush = flush;
}
if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
@@ -1180,23 +1194,14 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);
-int __weak
-perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
- struct perf_tool *tool __maybe_unused,
- perf_event__handler_t process __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- return 0;
-}
-
static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct evlist *evlist)
{
if (evlist) {
- if (evlist->mmap && evlist->mmap[0].base)
- return evlist->mmap[0].base;
- if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
- return evlist->overwrite_mmap[0].base;
+ if (evlist->mmap && evlist->mmap[0].core.base)
+ return evlist->mmap[0].core.base;
+ if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
+ return evlist->overwrite_mmap[0].core.base;
}
return NULL;
}
@@ -1362,9 +1367,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
session = perf_session__new(data, false, tool);
- if (session == NULL) {
+ if (IS_ERR(session)) {
pr_err("Perf session creation failed.\n");
- return -1;
+ return PTR_ERR(session);
}
fd = perf_data__fd(data);
@@ -1407,7 +1412,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
err = -1;
goto out_child;
}
- session->header.env.comp_mmap_len = session->evlist->mmap_len;
+ session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
err = bpf__apply_obj_config();
if (err) {
@@ -1610,7 +1615,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (hits == rec->samples) {
if (done || draining)
break;
- err = perf_evlist__poll(rec->evlist, -1);
+ err = evlist__poll(rec->evlist, -1);
/*
* Propagate error, only if there's any. Ignore positive
* number of returned events and interrupt error.
@@ -1619,7 +1624,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
err = 0;
waking++;
- if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
+ if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
draining = true;
}
@@ -1976,7 +1981,7 @@ out_free:
static void switch_output_size_warn(struct record *rec)
{
- u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
+ u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
struct switch_output *s = &rec->switch_output;
wakeup_size /= 2;
@@ -2371,16 +2376,6 @@ int cmd_record(int argc, const char **argv)
err = -ENOMEM;
- if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
- pr_warning(
-"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
-"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
-"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
-"file is not found in the buildid cache or in the vmlinux path.\n\n"
-"Samples in kernel modules won't be resolved at all.\n\n"
-"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
-"even with a suitable vmlinux or kallsyms file.\n\n");
-
if (rec->no_buildid_cache || rec->no_buildid) {
disable_buildid_cache();
} else if (rec->switch_output.enabled) {
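The record__*pushfn() hunks above only retype the map argument from struct perf_mmap to struct mmap; the callback contract with perf_mmap__push() is unchanged. A simplified sketch of that contract, based on the code in this file:

    /* perf_mmap__push() drains the ring buffer and hands each chunk to the
     * callback; a negative return aborts the push, otherwise the chunk is
     * consumed so the kernel may reuse the space. */
    static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
    {
    	struct record *rec = to;	/* opaque cookie passed through 'to' */

    	rec->samples++;
    	return record__write(rec, map, bf, size);
    }
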
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index b18fab94d38d..aae0e57c60fb 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -48,7 +48,7 @@
#include "util/auxtrace.h"
#include "util/units.h"
#include "util/branch.h"
-#include "util/util.h"
+#include "util/util.h" // perf_tip()
#include "ui/ui.h"
#include "ui/progress.h"
@@ -1269,8 +1269,8 @@ int cmd_report(int argc, const char **argv)
repeat:
session = perf_session__new(&data, false, &report.tool);
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
ret = evswitch__init(&report.evswitch, session->evlist, stderr);
if (ret)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index ec96d64aec69..5cacc4f84c8d 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -3,8 +3,10 @@
#include "perf.h"
#include "perf-sys.h"
+#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/evsel_fprintf.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
@@ -23,6 +25,7 @@
#include "util/trace-event.h"
#include "util/debug.h"
+#include "util/event.h"
#include <linux/kernel.h>
#include <linux/log2.h>
@@ -36,7 +39,9 @@
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
+#include <perf/cpumap.h>
#include <linux/time64.h>
+#include <linux/err.h>
#include <linux/ctype.h>
@@ -1794,9 +1799,9 @@ static int perf_sched__read_events(struct perf_sched *sched)
int rc = -1;
session = perf_session__new(&data, false, &sched->tool);
- if (session == NULL) {
- pr_debug("No Memory for session\n");
- return -1;
+ if (IS_ERR(session)) {
+ pr_debug("Error creating perf session");
+ return PTR_ERR(session);
}
symbol__init(&session->header.env);
@@ -2051,7 +2056,7 @@ static void timehist_print_sample(struct perf_sched *sched,
EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
EVSEL__PRINT_CALLCHAIN_ARROW |
EVSEL__PRINT_SKIP_IGNORED,
- &callchain_cursor, stdout);
+ &callchain_cursor, symbol_conf.bt_stop_list, stdout);
out:
printf("\n");
@@ -2986,8 +2991,8 @@ static int perf_sched__timehist(struct perf_sched *sched)
symbol_conf.use_callchain = sched->show_callchain;
session = perf_session__new(&data, false, &sched->tool);
- if (session == NULL)
- return -ENOMEM;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
evlist = session->evlist;
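sample__fprintf_sym() and sample__fprintf_callchain() now take the backtrace stop list as an explicit parameter instead of reading symbol_conf behind the caller's back; every call site in this series threads symbol_conf.bt_stop_list through. Assuming bt_stop_list is the usual struct strlist pointer (an assumption; only the call sites appear in this diff), usage looks like:

    struct strlist *stop = symbol_conf.bt_stop_list;	/* may be NULL: no cut-off */

    /* callchain printing stops once a listed symbol (e.g. "main") is reached */
    sample__fprintf_sym(sample, al, 0, print_opts, &callchain_cursor, stop, stdout);
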
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index e079b34201f2..286fc70d7402 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -17,6 +17,7 @@
#include "util/trace-event.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/evsel_fprintf.h"
#include "util/evswitch.h"
#include "util/sort.h"
#include "util/data.h"
@@ -52,6 +53,7 @@
#include <unistd.h>
#include <subcmd/pager.h>
#include <perf/evlist.h>
+#include <linux/err.h>
#include "util/record.h"
#include "util/util.h"
#include "perf.h"
@@ -1324,7 +1326,8 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
} else
printed += fprintf(fp, "\n");
- printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor, fp);
+ printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor,
+ symbol_conf.bt_stop_list, fp);
}
/* print branch_to information */
@@ -1866,7 +1869,8 @@ static void process_event(struct perf_script *script,
cursor = &callchain_cursor;
fputc(cursor ? '\n' : ' ', fp);
- sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, fp);
+ sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor,
+ symbol_conf.bt_stop_list, fp);
}
if (PRINT_FIELD(IREGS))
@@ -1915,7 +1919,7 @@ static void __process_stat(struct evsel *counter, u64 tstamp)
int cpu, thread;
static int header_printed;
- if (counter->system_wide)
+ if (counter->core.system_wide)
nthreads = 1;
if (!header_printed) {
@@ -2042,7 +2046,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
return err;
evlist = *pevlist;
- evsel = perf_evlist__last(*pevlist);
+ evsel = evlist__last(*pevlist);
if (!evsel->priv) {
if (scr->per_event_dump) {
@@ -3083,8 +3087,8 @@ int find_scripts(char **scripts_array, char **scripts_path_array, int num,
int i = 0;
session = perf_session__new(&data, false, NULL);
- if (!session)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
@@ -3754,8 +3758,8 @@ int cmd_script(int argc, const char **argv)
}
session = perf_session__new(&data, false, &script.tool);
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
if (header || header_only) {
script.tool.show_feat_hdr = SHOW_FEAT_HEADER;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7e17bf9f700a..468fc49420ce 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -61,6 +61,7 @@
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
+#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/top.h"
@@ -82,6 +83,7 @@
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
+#include <linux/err.h>
#include <linux/ctype.h>
#include <perf/evlist.h>
@@ -233,7 +235,7 @@ static int write_stat_round_event(u64 tm, u64 type)
#define WRITE_STAT_ROUND_EVENT(time, interval) \
write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
-#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
static int
perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
@@ -276,7 +278,7 @@ static int read_counter(struct evsel *counter, struct timespec *rs)
if (!counter->supported)
return -ENOENT;
- if (counter->system_wide)
+ if (counter->core.system_wide)
nthreads = 1;
for (thread = 0; thread < nthreads; thread++) {
@@ -540,8 +542,8 @@ try_again:
if (err < 0)
return err;
- err = perf_stat_synthesize_config(&stat_config, NULL, evsel_list,
- process_synthesized_event, is_pipe);
+ err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
+ process_synthesized_event, is_pipe);
if (err < 0)
return err;
}
@@ -822,18 +824,6 @@ static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
return cpu_map__get_core(map, cpu, NULL);
}
-static int cpu_map__get_max(struct perf_cpu_map *map)
-{
- int i, max = -1;
-
- for (i = 0; i < map->nr; i++) {
- if (map->map[i] > max)
- max = map->map[i];
- }
-
- return max;
-}
-
static int perf_stat__get_aggr(struct perf_stat_config *config,
aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
{
@@ -928,7 +918,7 @@ static int perf_stat_init_aggr_mode(void)
* taking the highest cpu number to be the size of
* the aggregation translate cpumap.
*/
- nr = cpu_map__get_max(evsel_list->core.cpus);
+ nr = perf_cpu_map__max(evsel_list->core.cpus);
stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1);
return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
@@ -1447,9 +1437,9 @@ static int __cmd_record(int argc, const char **argv)
}
session = perf_session__new(data, false, NULL);
- if (session == NULL) {
- pr_err("Perf session creation failed.\n");
- return -1;
+ if (IS_ERR(session)) {
+ pr_err("Perf session creation failed\n");
+ return PTR_ERR(session);
}
init_features(session);
@@ -1646,8 +1636,8 @@ static int __cmd_report(int argc, const char **argv)
perf_stat.data.mode = PERF_DATA_MODE_READ;
session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
perf_stat.session = session;
stat_config.output = stderr;
@@ -1681,7 +1671,7 @@ static void setup_system_wide(int forks)
struct evsel *counter;
evlist__for_each_entry(evsel_list, counter) {
- if (!counter->system_wide)
+ if (!counter->core.system_wide)
return;
}
@@ -1963,8 +1953,11 @@ int cmd_stat(int argc, const char **argv)
fprintf(output, "[ perf stat: executing run #%d ... ]\n",
run_idx + 1);
+ if (run_idx != 0)
+ perf_evlist__reset_prev_raw_counts(evsel_list);
+
status = run_perf_stat(argc, argv, run_idx);
- if (forever && status != -1) {
+ if (forever && status != -1 && !interval) {
print_counters(NULL, argc, argv);
perf_stat__reset_stats();
}
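The open-coded cpu_map__get_max() deleted above moves behind libperf's perf_cpu_map__max(). A sketch of the equivalent semantics, mirroring the removed helper (the libperf implementation is assumed to match):

    /* highest CPU number contained in the map, or -1 for an empty map */
    int perf_cpu_map__max(struct perf_cpu_map *map)
    {
    	int i, max = -1;

    	for (i = 0; i < map->nr; i++)
    		max = map->map[i] > max ? map->map[i] : max;

    	return max;
    }
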
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index e0e822695a29..9e84fae9b096 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -35,6 +35,7 @@
#include "util/tool.h"
#include "util/data.h"
#include "util/debug.h"
+#include <linux/err.h>
#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
FILE *open_memstream(char **ptr, size_t *sizeloc);
@@ -1601,8 +1602,8 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
&tchart->tool);
int ret = -EINVAL;
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
symbol__init(&session->header.env);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 726e3f2dd8c7..1f60124eb19b 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -27,11 +27,14 @@
#include "util/dso.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/evsel_config.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/map.h"
+#include "util/mmap.h"
#include "util/session.h"
#include "util/symbol.h"
+#include "util/synthetic-events.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
@@ -76,6 +79,7 @@
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>
+#include <linux/err.h>
#include <linux/ctype.h>
@@ -528,7 +532,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
prompt_integer(&counter, "Enter details event counter");
if (counter >= top->evlist->core.nr_entries) {
- top->sym_evsel = perf_evlist__first(top->evlist);
+ top->sym_evsel = evlist__first(top->evlist);
fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
sleep(1);
break;
@@ -537,7 +541,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
if (top->sym_evsel->idx == counter)
break;
} else
- top->sym_evsel = perf_evlist__first(top->evlist);
+ top->sym_evsel = evlist__first(top->evlist);
break;
case 'f':
prompt_integer(&top->count_filter, "Enter display event count filter");
@@ -861,7 +865,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
struct record_opts *opts = &top->record_opts;
struct evlist *evlist = top->evlist;
- struct perf_mmap *md;
+ struct mmap *md;
union perf_event *event;
md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
@@ -901,7 +905,7 @@ static void perf_top__mmap_read(struct perf_top *top)
if (overwrite)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
- for (i = 0; i < top->evlist->nr_mmaps; i++)
+ for (i = 0; i < top->evlist->core.nr_mmaps; i++)
perf_top__mmap_read_idx(top, i);
if (overwrite) {
@@ -959,7 +963,7 @@ static int perf_top__overwrite_check(struct perf_top *top)
/* has term for current event */
if ((overwrite < 0) && (set >= 0)) {
/* if it's first event, set overwrite */
- if (evsel == perf_evlist__first(evlist))
+ if (evsel == evlist__first(evlist))
overwrite = set;
else
return -1;
@@ -983,7 +987,7 @@ static int perf_top_overwrite_fallback(struct perf_top *top,
return 0;
/* only fall back when first event fails */
- if (evsel != perf_evlist__first(evlist))
+ if (evsel != evlist__first(evlist))
return 0;
evlist__for_each_entry(evlist, counter)
@@ -1040,7 +1044,7 @@ try_again:
}
}
- if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
+ if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
ui__error("Failed to mmap with %d (%s)\n",
errno, str_error_r(errno, msg, sizeof(msg)));
goto out_err;
@@ -1304,7 +1308,7 @@ static int __cmd_top(struct perf_top *top)
}
/* Wait for a minimal set of events before starting the snapshot */
- perf_evlist__poll(top->evlist, 100);
+ evlist__poll(top->evlist, 100);
perf_top__mmap_read(top);
@@ -1314,7 +1318,7 @@ static int __cmd_top(struct perf_top *top)
perf_top__mmap_read(top);
if (opts->overwrite || (hits == top->samples))
- ret = perf_evlist__poll(top->evlist, 100);
+ ret = evlist__poll(top->evlist, 100);
if (resize) {
perf_top__resize(top);
@@ -1641,7 +1645,7 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
- top.sym_evsel = perf_evlist__first(top.evlist);
+ top.sym_evsel = evlist__first(top.evlist);
if (!callchain_param.enabled) {
symbol_conf.cumulate_callchain = false;
@@ -1671,8 +1675,8 @@ int cmd_top(int argc, const char **argv)
}
top.session = perf_session__new(NULL, false, NULL);
- if (top.session == NULL) {
- status = -1;
+ if (IS_ERR(top.session)) {
+ status = PTR_ERR(top.session);
goto out_delete_evlist;
}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 0f633f0d6be8..bb5130d02155 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -28,8 +28,12 @@
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
+#include "util/evsel.h"
+#include "util/evsel_fprintf.h"
+#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
+#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
@@ -2074,7 +2078,7 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
EVSEL__PRINT_DSO |
EVSEL__PRINT_UNKNOWN_AS_ADDR;
- return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
+ return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
}
static const char *errno_to_name(struct evsel *evsel, int err)
@@ -3408,7 +3412,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (trace->dump.map)
bpf_map__fprintf(trace->dump.map, trace->output);
- err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
+ err = evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
@@ -3425,7 +3429,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
evlist->core.threads->nr > 1 ||
- perf_evlist__first(evlist)->core.attr.inherit;
+ evlist__first(evlist)->core.attr.inherit;
/*
* Now that we already used evsel->core.attr to ask the kernel to setup the
@@ -3441,9 +3445,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
again:
before = trace->nr_events;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
- struct perf_mmap *md;
+ struct mmap *md;
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
@@ -3472,8 +3476,8 @@ again:
if (trace->nr_events == before) {
int timeout = done ? 100 : -1;
- if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
- if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
+ if (!draining && evlist__poll(evlist, timeout) > 0) {
+ if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
draining = true;
goto again;
@@ -3584,8 +3588,8 @@ static int trace__replay(struct trace *trace)
trace->multiple_threads = true;
session = perf_session__new(&data, false, &trace->tool);
- if (session == NULL)
- return -1;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
if (trace->opts.target.pid)
symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
diff --git a/tools/perf/jvmti/Build b/tools/perf/jvmti/Build
index eaeb8cb5379b..1e148bbdf820 100644
--- a/tools/perf/jvmti/Build
+++ b/tools/perf/jvmti/Build
@@ -1,8 +1,17 @@
jvmti-y += libjvmti.o
jvmti-y += jvmti_agent.o
+# For strlcpy
+jvmti-y += libstring.o
+
CFLAGS_jvmti = -fPIC -DPIC -I$(JDIR)/include -I$(JDIR)/include/linux
CFLAGS_REMOVE_jvmti = -Wmissing-declarations
CFLAGS_REMOVE_jvmti += -Wstrict-prototypes
CFLAGS_REMOVE_jvmti += -Wextra
CFLAGS_REMOVE_jvmti += -Wwrite-strings
+
+CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+
+$(OUTPUT)jvmti/libstring.o: ../lib/string.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/lib/Makefile b/tools/perf/lib/Makefile
index a67efb8d9d39..85ccb8c439a4 100644
--- a/tools/perf/lib/Makefile
+++ b/tools/perf/lib/Makefile
@@ -59,7 +59,13 @@ else
CFLAGS := -g -Wall
endif
-INCLUDES = -I$(srctree)/tools/perf/lib/include -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/ -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
+INCLUDES = \
+-I$(srctree)/tools/perf/lib/include \
+-I$(srctree)/tools/lib/ \
+-I$(srctree)/tools/include \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/ \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/uapi \
+-I$(srctree)/tools/include/uapi
# Append required CFLAGS
override CFLAGS += $(EXTRA_WARNINGS)
@@ -88,13 +94,34 @@ LIBPERF_PC := $(OUTPUT)libperf.pc
LIBPERF_ALL := $(LIBPERF_A) $(OUTPUT)libperf.so*
+LIB_DIR := $(srctree)/tools/lib/api/
+
+ifneq ($(OUTPUT),)
+ifneq ($(subdir),)
+ API_PATH=$(OUTPUT)/../lib/api/
+else
+ API_PATH=$(OUTPUT)
+endif
+else
+ API_PATH=$(LIB_DIR)
+endif
+
+LIBAPI = $(API_PATH)libapi.a
+
+$(LIBAPI): FORCE
+ $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
+
+$(LIBAPI)-clean:
+ $(call QUIET_CLEAN, libapi)
+ $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
+
$(LIBPERF_IN): FORCE
$(Q)$(MAKE) $(build)=libperf
$(LIBPERF_A): $(LIBPERF_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN)
-$(LIBPERF_SO): $(LIBPERF_IN)
+$(LIBPERF_SO): $(LIBPERF_IN) $(LIBAPI)
$(QUIET_LINK)$(CC) --shared -Wl,-soname,libperf.so \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
@ln -sf $(@F) $(OUTPUT)libperf.so
@@ -106,12 +133,12 @@ libs: $(LIBPERF_A) $(LIBPERF_SO) $(LIBPERF_PC)
all: fixdep
$(Q)$(MAKE) libs
-clean:
+clean: $(LIBAPI)-clean
$(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \
*.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd LIBPERF-CFLAGS $(LIBPERF_PC)
$(Q)$(MAKE) -C tests clean
-tests:
+tests: libs
$(Q)$(MAKE) -C tests
$(Q)$(MAKE) -C tests run
@@ -146,6 +173,7 @@ install_headers:
$(call do_install,include/perf/threadmap.h,$(prefix)/include/perf,644); \
$(call do_install,include/perf/evlist.h,$(prefix)/include/perf,644); \
$(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644);
+ $(call do_install,include/perf/event.h,$(prefix)/include/perf,644);
install_pkgconfig: $(LIBPERF_PC)
$(call QUIET_INSTALL, $(LIBPERF_PC)) \
diff --git a/tools/perf/lib/core.c b/tools/perf/lib/core.c
index 29d5e3348718..d0b9ae422b9f 100644
--- a/tools/perf/lib/core.c
+++ b/tools/perf/lib/core.c
@@ -4,7 +4,9 @@
#include <stdio.h>
#include <stdarg.h>
+#include <unistd.h>
#include <perf/core.h>
+#include <internal/lib.h>
#include "internal.h"
static int __base_pr(enum libperf_print_level level, const char *format,
@@ -15,11 +17,6 @@ static int __base_pr(enum libperf_print_level level, const char *format,
static libperf_print_fn_t __libperf_pr = __base_pr;
-void libperf_set_print(libperf_print_fn_t fn)
-{
- __libperf_pr = fn;
-}
-
__printf(2, 3)
void libperf_print(enum libperf_print_level level, const char *format, ...)
{
@@ -32,3 +29,9 @@ void libperf_print(enum libperf_print_level level, const char *format, ...)
__libperf_pr(level, format, args);
va_end(args);
}
+
+void libperf_init(libperf_print_fn_t fn)
+{
+ page_size = sysconf(_SC_PAGE_SIZE);
+ __libperf_pr = fn;
+}
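
With libperf_set_print() replaced by libperf_init(), consumers must call the new entry point before any other libperf function, since it now also sets page_size. A minimal consumer sketch (illustrative only, not part of the patch):

    #include <stdarg.h>
    #include <stdio.h>
    #include <perf/core.h>

    static int print_cb(enum libperf_print_level level,
                        const char *fmt, va_list ap)
    {
            return vfprintf(stderr, fmt, ap);
    }

    int main(void)
    {
            libperf_init(print_cb);  /* sets page_size, installs printer */
            return 0;
    }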
diff --git a/tools/perf/lib/cpumap.c b/tools/perf/lib/cpumap.c
index 1f0e6f334237..2ca1fafa620d 100644
--- a/tools/perf/lib/cpumap.c
+++ b/tools/perf/lib/cpumap.c
@@ -260,3 +260,15 @@ int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
return -1;
}
+
+int perf_cpu_map__max(struct perf_cpu_map *map)
+{
+ int i, max = -1;
+
+ for (i = 0; i < map->nr; i++) {
+ if (map->map[i] > max)
+ max = map->map[i];
+ }
+
+ return max;
+}
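
perf_cpu_map__max() returns the highest CPU number in the map, or -1 for an empty map, which makes it convenient for sizing per-CPU arrays. A short usage sketch (assumed consumer code):

    #include <perf/cpumap.h>

    static int ncpu_slots(struct perf_cpu_map *cpus)
    {
            /* highest CPU id + 1; perf_cpu_map__max() is -1 when empty */
            return perf_cpu_map__max(cpus) + 1;
    }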
diff --git a/tools/perf/lib/evlist.c b/tools/perf/lib/evlist.c
index f4dc9a208332..d1496fee810c 100644
--- a/tools/perf/lib/evlist.c
+++ b/tools/perf/lib/evlist.c
@@ -1,16 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
+#include <linux/bitops.h>
#include <linux/list.h>
+#include <linux/hash.h>
+#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
+#include <internal/xyarray.h>
#include <linux/zalloc.h>
#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <poll.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
+#include <api/fd/array.h>
void perf_evlist__init(struct perf_evlist *evlist)
{
+ int i;
+
+ for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+ INIT_HLIST_HEAD(&evlist->heads[i]);
INIT_LIST_HEAD(&evlist->entries);
evlist->nr_entries = 0;
}
@@ -157,3 +171,113 @@ void perf_evlist__disable(struct perf_evlist *evlist)
perf_evlist__for_each_entry(evlist, evsel)
perf_evsel__disable(evsel);
}
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+
+ return first->attr.read_format;
+}
+
+#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+
+static void perf_evlist__id_hash(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ int hash;
+ struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+ sid->id = id;
+ sid->evsel = evsel;
+ hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&sid->node, &evlist->heads[hash]);
+}
+
+void perf_evlist__id_add(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
+ evsel->id[evsel->ids++] = id;
+}
+
+int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, int fd)
+{
+ u64 read_data[4] = { 0, };
+ int id_idx = 1; /* The first entry is the counter value */
+ u64 id;
+ int ret;
+
+ ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+ if (!ret)
+ goto add;
+
+ if (errno != ENOTTY)
+ return -1;
+
+ /* Legacy way to get event id. All hail to old kernels! */
+
+ /*
+ * This way does not work with group format read, so bail
+ * out in that case.
+ */
+ if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
+ return -1;
+
+ if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
+ read(fd, &read_data, sizeof(read_data)) == -1)
+ return -1;
+
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ ++id_idx;
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ ++id_idx;
+
+ id = read_data[id_idx];
+
+add:
+ perf_evlist__id_add(evlist, evsel, cpu, thread, id);
+ return 0;
+}
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+{
+ int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+ int nr_threads = perf_thread_map__nr(evlist->threads);
+ int nfds = 0;
+ struct perf_evsel *evsel;
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ if (evsel->system_wide)
+ nfds += nr_cpus;
+ else
+ nfds += nr_cpus * nr_threads;
+ }
+
+ if (fdarray__available_entries(&evlist->pollfd) < nfds &&
+ fdarray__grow(&evlist->pollfd, nfds) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
+ void *ptr, short revent)
+{
+ int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
+
+ if (pos >= 0) {
+ evlist->pollfd.priv[pos].ptr = ptr;
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+ }
+
+ return pos;
+}
+
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
+{
+ return fdarray__poll(&evlist->pollfd, timeout);
+}
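
The pollfd handling moved into libperf mirrors what tools/perf did internally: size the fdarray for every fd the evlist can produce, register each fd non-blocking, then block in fdarray__poll(). Only perf_evlist__poll() is exported; a hedged usage sketch:

    #include <perf/evlist.h>

    /* Wait up to 100ms for data on any of the evlist's fds; assumes the
     * evlist was opened and its pollfd array populated beforehand. */
    static int wait_for_data(struct perf_evlist *evlist)
    {
            return perf_evlist__poll(evlist, 100); /* >0 means fds ready */
    }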
diff --git a/tools/perf/lib/evsel.c b/tools/perf/lib/evsel.c
index 24abc80dd767..a8cb582e2721 100644
--- a/tools/perf/lib/evsel.c
+++ b/tools/perf/lib/evsel.c
@@ -230,3 +230,33 @@ struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
return &evsel->attr;
}
+
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ if (ncpus == 0 || nthreads == 0)
+ return 0;
+
+ if (evsel->system_wide)
+ nthreads = 1;
+
+ evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
+ if (evsel->sample_id == NULL)
+ return -ENOMEM;
+
+ evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
+ if (evsel->id == NULL) {
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void perf_evsel__free_id(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ zfree(&evsel->id);
+ evsel->ids = 0;
+}
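
perf_evsel__alloc_id() pairs a cpu-by-thread xyarray of struct perf_sample_id with a flat array of ncpus * nthreads u64 ids; perf_evsel__free_id() releases both and resets the count. A lifecycle sketch under those assumptions:

    #include <errno.h>
    #include <internal/evsel.h>

    static int with_ids(struct perf_evsel *evsel, int ncpus, int nthreads)
    {
            if (perf_evsel__alloc_id(evsel, ncpus, nthreads))
                    return -ENOMEM;
            /* ... open fds, perf_evlist__id_add_fd() per cpu/thread ... */
            perf_evsel__free_id(evsel);
            return 0;
    }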
diff --git a/tools/perf/lib/include/internal/evlist.h b/tools/perf/lib/include/internal/evlist.h
index 448891f06e3e..9f440ab12b76 100644
--- a/tools/perf/lib/include/internal/evlist.h
+++ b/tools/perf/lib/include/internal/evlist.h
@@ -3,6 +3,11 @@
#define __LIBPERF_INTERNAL_EVLIST_H
#include <linux/list.h>
+#include <api/fd/array.h>
+#include <internal/evsel.h>
+
+#define PERF_EVLIST__HLIST_BITS 8
+#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
struct perf_cpu_map;
struct perf_thread_map;
@@ -13,8 +18,16 @@ struct perf_evlist {
bool has_user_cpus;
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
+ int nr_mmaps;
+ size_t mmap_len;
+ struct fdarray pollfd;
+ struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
};
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
+int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
+ void *ptr, short revent);
+
/**
* __perf_evlist__for_each_entry - iterate thru all the evsels
* @list: list_head instance to iterate
@@ -47,4 +60,24 @@ struct perf_evlist {
#define perf_evlist__for_each_entry_reverse(evlist, evsel) \
__perf_evlist__for_each_entry_reverse(&(evlist)->entries, evsel)
+static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.next, struct perf_evsel, node);
+}
+
+static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.prev, struct perf_evsel, node);
+}
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist);
+
+void perf_evlist__id_add(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, u64 id);
+
+int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, int fd);
+
#endif /* __LIBPERF_INTERNAL_EVLIST_H */
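
perf_evlist__read_format() simply reports the first evsel's read_format, on the assumption that all members share it; perf_evlist__id_add_fd() relies on this to bail out of the legacy read() path. An illustrative check (sketch):

    #include <linux/perf_event.h>
    #include <internal/evlist.h>

    static int uses_group_read(struct perf_evlist *evlist)
    {
            /* assumes every evsel shares the leader's read_format */
            return perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP;
    }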
diff --git a/tools/perf/lib/include/internal/evsel.h b/tools/perf/lib/include/internal/evsel.h
index 8b854d1c9b45..a69b8299c36f 100644
--- a/tools/perf/lib/include/internal/evsel.h
+++ b/tools/perf/lib/include/internal/evsel.h
@@ -4,9 +4,35 @@
#include <linux/types.h>
#include <linux/perf_event.h>
+#include <stdbool.h>
+#include <sys/types.h>
struct perf_cpu_map;
struct perf_thread_map;
+struct xyarray;
+
+/*
+ * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
+ * more than one entry in the evlist.
+ */
+struct perf_sample_id {
+ struct hlist_node node;
+ u64 id;
+ struct perf_evsel *evsel;
+ /*
+ * 'idx' will be used for AUX area sampling. A sample will have AUX area
+ * data that will be queued for decoding, where there are separate
+ * queues for each CPU (per-cpu tracing) or task (per-thread tracing).
+ * The sample ID can be used to look up 'idx' which is effectively the
+ * queue number.
+ */
+ int idx;
+ int cpu;
+ pid_t tid;
+
+ /* Holds total ID period value for PERF_SAMPLE_READ processing. */
+ u64 period;
+};
struct perf_evsel {
struct list_head node;
@@ -15,9 +41,13 @@ struct perf_evsel {
struct perf_cpu_map *own_cpus;
struct perf_thread_map *threads;
struct xyarray *fd;
+ struct xyarray *sample_id;
+ u64 *id;
+ u32 ids;
/* parse modifier helper */
int nr_members;
+ bool system_wide;
};
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
@@ -26,4 +56,7 @@ void perf_evsel__free_fd(struct perf_evsel *evsel);
int perf_evsel__read_size(struct perf_evsel *evsel);
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
+void perf_evsel__free_id(struct perf_evsel *evsel);
+
#endif /* __LIBPERF_INTERNAL_EVSEL_H */
diff --git a/tools/perf/lib/include/internal/lib.h b/tools/perf/lib/include/internal/lib.h
index 0b56f1201dc9..5175d491b2d4 100644
--- a/tools/perf/lib/include/internal/lib.h
+++ b/tools/perf/lib/include/internal/lib.h
@@ -2,7 +2,9 @@
#ifndef __LIBPERF_INTERNAL_LIB_H
#define __LIBPERF_INTERNAL_LIB_H
-#include <unistd.h>
+#include <sys/types.h>
+
+extern unsigned int page_size;
ssize_t readn(int fd, void *buf, size_t n);
ssize_t writen(int fd, const void *buf, size_t n);
diff --git a/tools/perf/lib/include/internal/mmap.h b/tools/perf/lib/include/internal/mmap.h
new file mode 100644
index 000000000000..ba1e519c15b9
--- /dev/null
+++ b/tools/perf/lib/include/internal/mmap.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_INTERNAL_MMAP_H
+#define __LIBPERF_INTERNAL_MMAP_H
+
+#include <linux/compiler.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <stdbool.h>
+
+/* perf sample has a 16-bit size limit */
+#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+
+/**
+ * struct perf_mmap - perf's ring buffer mmap details
+ *
+ * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
+ */
+struct perf_mmap {
+ void *base;
+ int mask;
+ int fd;
+ int cpu;
+ refcount_t refcnt;
+ u64 prev;
+ u64 start;
+ u64 end;
+ bool overwrite;
+ u64 flush;
+ char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+};
+
+#endif /* __LIBPERF_INTERNAL_MMAP_H */
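
struct perf_mmap keeps free-running u64 positions (prev/start/end) into a power-of-two ring, so a position maps to an address via the mask; records that wrap past the ring's end are stitched together in event_copy. A sketch of the index math, assuming the usual ring layout:

    #include <internal/mmap.h>

    /* map a free-running position onto the ring; mask == ring_size - 1 */
    static void *ring_ptr(struct perf_mmap *map, u64 pos)
    {
            return (unsigned char *)map->base + (pos & map->mask);
    }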
diff --git a/tools/perf/lib/include/perf/core.h b/tools/perf/lib/include/perf/core.h
index c341a7b2c874..cfd70e720c1c 100644
--- a/tools/perf/lib/include/perf/core.h
+++ b/tools/perf/lib/include/perf/core.h
@@ -17,6 +17,6 @@ enum libperf_print_level {
typedef int (*libperf_print_fn_t)(enum libperf_print_level level,
const char *, va_list ap);
-LIBPERF_API void libperf_set_print(libperf_print_fn_t fn);
+LIBPERF_API void libperf_init(libperf_print_fn_t fn);
#endif /* __LIBPERF_CORE_H */
diff --git a/tools/perf/lib/include/perf/cpumap.h b/tools/perf/lib/include/perf/cpumap.h
index 8aa995c59498..ac9aa497f84a 100644
--- a/tools/perf/lib/include/perf/cpumap.h
+++ b/tools/perf/lib/include/perf/cpumap.h
@@ -16,6 +16,7 @@ LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map);
#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \
for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
diff --git a/tools/perf/lib/include/perf/evlist.h b/tools/perf/lib/include/perf/evlist.h
index 38365f8f3fba..8a2ce0757ab2 100644
--- a/tools/perf/lib/include/perf/evlist.h
+++ b/tools/perf/lib/include/perf/evlist.h
@@ -31,5 +31,6 @@ LIBPERF_API void perf_evlist__disable(struct perf_evlist *evlist);
LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist,
struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
+LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
#endif /* __LIBPERF_EVLIST_H */
diff --git a/tools/perf/lib/lib.c b/tools/perf/lib/lib.c
index 2a81819c3b8c..18658931fc71 100644
--- a/tools/perf/lib/lib.c
+++ b/tools/perf/lib/lib.c
@@ -5,6 +5,8 @@
#include <linux/kernel.h>
#include <internal/lib.h>
+unsigned int page_size;
+
static ssize_t ion(bool is_read, int fd, void *buf, size_t n)
{
void *buf_start = buf;
diff --git a/tools/perf/lib/libperf.map b/tools/perf/lib/libperf.map
index dc4d66363bc4..ab8dbde1136c 100644
--- a/tools/perf/lib/libperf.map
+++ b/tools/perf/lib/libperf.map
@@ -1,6 +1,6 @@
LIBPERF_0.0.1 {
global:
- libperf_set_print;
+ libperf_init;
perf_cpu_map__dummy_new;
perf_cpu_map__get;
perf_cpu_map__put;
@@ -9,6 +9,7 @@ LIBPERF_0.0.1 {
perf_cpu_map__nr;
perf_cpu_map__cpu;
perf_cpu_map__empty;
+ perf_cpu_map__max;
perf_thread_map__new_dummy;
perf_thread_map__set_pid;
perf_thread_map__comm;
@@ -38,6 +39,7 @@ LIBPERF_0.0.1 {
perf_evlist__remove;
perf_evlist__next;
perf_evlist__set_maps;
+ perf_evlist__poll;
local:
*;
};
diff --git a/tools/perf/lib/tests/test-cpumap.c b/tools/perf/lib/tests/test-cpumap.c
index 76a43cfb83a1..aa34c20df07e 100644
--- a/tools/perf/lib/tests/test-cpumap.c
+++ b/tools/perf/lib/tests/test-cpumap.c
@@ -1,13 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
+#include <stdarg.h>
+#include <stdio.h>
#include <perf/cpumap.h>
#include <internal/tests.h>
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
int main(int argc, char **argv)
{
struct perf_cpu_map *cpus;
__T_START;
+ libperf_init(libperf_print);
+
cpus = perf_cpu_map__dummy_new();
if (!cpus)
return -1;
diff --git a/tools/perf/lib/tests/test-evlist.c b/tools/perf/lib/tests/test-evlist.c
index 4e1407f20ffd..e6b2ab2e2bde 100644
--- a/tools/perf/lib/tests/test-evlist.c
+++ b/tools/perf/lib/tests/test-evlist.c
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdarg.h>
#include <linux/perf_event.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
@@ -6,6 +8,12 @@
#include <perf/evsel.h>
#include <internal/tests.h>
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
static int test_stat_cpu(void)
{
struct perf_cpu_map *cpus;
@@ -177,6 +185,8 @@ int main(int argc, char **argv)
{
__T_START;
+ libperf_init(libperf_print);
+
test_stat_cpu();
test_stat_thread();
test_stat_thread_enable();
diff --git a/tools/perf/lib/tests/test-evsel.c b/tools/perf/lib/tests/test-evsel.c
index 2c648fe5617e..1b6c4285ac2b 100644
--- a/tools/perf/lib/tests/test-evsel.c
+++ b/tools/perf/lib/tests/test-evsel.c
@@ -1,10 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
+#include <stdarg.h>
+#include <stdio.h>
#include <linux/perf_event.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <perf/evsel.h>
#include <internal/tests.h>
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
static int test_stat_cpu(void)
{
struct perf_cpu_map *cpus;
@@ -116,6 +124,8 @@ int main(int argc, char **argv)
{
__T_START;
+ libperf_init(libperf_print);
+
test_stat_cpu();
test_stat_thread();
test_stat_thread_enable();
diff --git a/tools/perf/lib/tests/test-threadmap.c b/tools/perf/lib/tests/test-threadmap.c
index 10a4f4cbbdd5..8c5f47247d9e 100644
--- a/tools/perf/lib/tests/test-threadmap.c
+++ b/tools/perf/lib/tests/test-threadmap.c
@@ -1,13 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
+#include <stdarg.h>
+#include <stdio.h>
#include <perf/threadmap.h>
#include <internal/tests.h>
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return vfprintf(stderr, fmt, ap);
+}
+
int main(int argc, char **argv)
{
struct perf_thread_map *threads;
__T_START;
+ libperf_init(libperf_print);
+
threads = perf_thread_map__new_dummy();
if (!threads)
return -1;
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 1193b923e801..27f94b0bb874 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -12,6 +12,7 @@
#include "util/build-id.h"
#include "util/cache.h"
#include "util/env.h"
+#include <internal/lib.h> // page_size
#include <subcmd/exec-cmd.h>
#include "util/config.h"
#include <subcmd/run-command.h>
@@ -20,11 +21,12 @@
#include "util/bpf-loader.h"
#include "util/debug.h"
#include "util/event.h"
-#include "util/util.h"
+#include "util/util.h" // usage()
#include "ui/ui.h"
#include "perf-sys.h"
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
+#include <perf/core.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>
@@ -428,6 +430,12 @@ void pthread__unblock_sigwinch(void)
pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
+static int libperf_print(enum libperf_print_level level,
+ const char *fmt, va_list ap)
+{
+ return eprintf(level, verbose, fmt, ap);
+}
+
int main(int argc, const char **argv)
{
int err;
@@ -438,8 +446,7 @@ int main(int argc, const char **argv)
exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
pager_init(PERF_PAGER_ENVIRONMENT);
- /* The page_size is placed in util object. */
- page_size = sysconf(_SC_PAGE_SIZE);
+ libperf_init(libperf_print);
cmd = extract_argv0_path(argv[0]);
if (!cmd)
diff --git a/tools/perf/pmu-events/README b/tools/perf/pmu-events/README
index e62b09b6a844..de7efa2cebd1 100644
--- a/tools/perf/pmu-events/README
+++ b/tools/perf/pmu-events/README
@@ -30,9 +30,9 @@ the topic. Eg: "Floating-point.json".
All the topic JSON files for a CPU model/family should be in a separate
sub directory. Thus for the Silvermont X86 CPU:
- $ ls tools/perf/pmu-events/arch/x86/Silvermont_core
- Cache.json Memory.json Virtual-Memory.json
- Frontend.json Pipeline.json
+ $ ls tools/perf/pmu-events/arch/x86/silvermont
+ cache.json memory.json virtual-memory.json
+ frontend.json pipeline.json
The JSONs folder for a CPU model/family may be placed in the root arch
folder, or may be placed in a vendor sub-folder under the arch folder
@@ -94,7 +94,7 @@ users to specify events by their name:
where 'pm_1plus_ppc_cmpl' is a Power8 PMU event.
-However some errors in processing may cause the perf build to fail.
+However, some errors in processing may cause the alias build to fail.
Mapfile format
===============
@@ -119,7 +119,7 @@ where:
Header line
The header line is the first line in the file, which is
- always _IGNORED_. It can empty.
+ always _IGNORED_. It can be empty.
CPUID:
CPUID is an arch-specific char string, that can be used
@@ -138,15 +138,15 @@ where:
files, relative to the directory containing the mapfile.csv
Type:
- indicates whether the events or "core" or "uncore" events.
+ indicates whether the events are "core" or "uncore" events.
Eg:
- $ grep Silvermont tools/perf/pmu-events/arch/x86/mapfile.csv
- GenuineIntel-6-37,V13,Silvermont_core,core
- GenuineIntel-6-4D,V13,Silvermont_core,core
- GenuineIntel-6-4C,V13,Silvermont_core,core
+ $ grep silvermont tools/perf/pmu-events/arch/x86/mapfile.csv
+ GenuineIntel-6-37,v13,silvermont,core
+ GenuineIntel-6-4D,v13,silvermont,core
+ GenuineIntel-6-4C,v13,silvermont,core
i.e the three CPU models use the JSON files (i.e PMU events) listed
- in the directory 'tools/perf/pmu-events/arch/x86/Silvermont_core'.
+ in the directory 'tools/perf/pmu-events/arch/x86/silvermont'.
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json
new file mode 100644
index 000000000000..b5e5d055c70d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json
@@ -0,0 +1,14 @@
+[
+ {
+ "PublicDescription": "Mispredicted or not predicted branch speculatively executed. This event counts any predictable branch instruction which is mispredicted either due to dynamic misprediction or because the MMU is off and the branches are statically predicted not taken.",
+ "EventCode": "0x10",
+ "EventName": "BR_MIS_PRED",
+ "BriefDescription": "Mispredicted or not predicted branch speculatively executed."
+ },
+ {
+ "PublicDescription": "Predictable branch speculatively executed. This event counts all predictable branches.",
+ "EventCode": "0x12",
+ "EventName": "BR_PRED",
+ "BriefDescription": "Predictable branch speculatively executed."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json
new file mode 100644
index 000000000000..fce7309ae624
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json
@@ -0,0 +1,24 @@
+[
+ {
+ "EventCode": "0x11",
+ "EventName": "CPU_CYCLES",
+ "BriefDescription": "The number of core clock cycles."
+ },
+ {
+ "PublicDescription": "Bus access. This event counts for every beat of data transferred over the data channels between the core and the SCU. If both read and write data beats are transferred on a given cycle, this event is counted twice on that cycle. This event counts the sum of BUS_ACCESS_RD and BUS_ACCESS_WR.",
+ "EventCode": "0x19",
+ "EventName": "BUS_ACCESS",
+ "BriefDescription": "Bus access."
+ },
+ {
+ "EventCode": "0x1D",
+ "EventName": "BUS_CYCLES",
+ "BriefDescription": "Bus cycles. This event duplicates CPU_CYCLES."
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json
new file mode 100644
index 000000000000..24594081c199
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json
@@ -0,0 +1,207 @@
+[
+ {
+ "PublicDescription": "L1 instruction cache refill. This event counts any instruction fetch which misses in the cache.",
+ "EventCode": "0x01",
+ "EventName": "L1I_CACHE_REFILL",
+ "BriefDescription": "L1 instruction cache refill"
+ },
+ {
+ "PublicDescription": "L1 instruction TLB refill. This event counts any refill of the instruction L1 TLB from the L2 TLB. This includes refills that result in a translation fault.",
+ "EventCode": "0x02",
+ "EventName": "L1I_TLB_REFILL",
+ "BriefDescription": "L1 instruction TLB refill"
+ },
+ {
+ "PublicDescription": "L1 data cache refill. This event counts any load or store operation or page table walk access which causes data to be read from outside the L1, including accesses which do not allocate into L1.",
+ "EventCode": "0x03",
+ "EventName": "L1D_CACHE_REFILL",
+ "BriefDescription": "L1 data cache refill"
+ },
+ {
+ "PublicDescription": "L1 data cache access. This event counts any load or store operation or page table walk access which looks up in the L1 data cache. In particular, any access which could count the L1D_CACHE_REFILL event causes this event to count.",
+ "EventCode": "0x04",
+ "EventName": "L1D_CACHE",
+ "BriefDescription": "L1 data cache access"
+ },
+ {
+ "PublicDescription": "L1 data TLB refill. This event counts any refill of the data L1 TLB from the L2 TLB. This includes refills that result in a translation fault.",
+ "EventCode": "0x05",
+ "EventName": "L1D_TLB_REFILL",
+ "BriefDescription": "L1 data TLB refill"
+ },
+ {
+ "PublicDescription": "Level 1 instruction cache access or Level 0 Macro-op cache access. This event counts any instruction fetch which accesses the L1 instruction cache or L0 Macro-op cache.",
+ "EventCode": "0x14",
+ "EventName": "L1I_CACHE",
+ "BriefDescription": "L1 instruction cache access"
+ },
+ {
+ "PublicDescription": "L1 data cache Write-Back. This event counts any write-back of data from the L1 data cache to L2 or L3. This counts both victim line evictions and snoops, including cache maintenance operations.",
+ "EventCode": "0x15",
+ "EventName": "L1D_CACHE_WB",
+ "BriefDescription": "L1 data cache Write-Back"
+ },
+ {
+ "PublicDescription": "L2 data cache access. This event counts any transaction from L1 which looks up in the L2 cache, and any write-back from the L1 to the L2. Snoops from outside the core and cache maintenance operations are not counted.",
+ "EventCode": "0x16",
+ "EventName": "L2D_CACHE",
+ "BriefDescription": "L2 data cache access"
+ },
+ {
+ "PublicDescription": "L2 data cache refill. This event counts any cacheable transaction from L1 which causes data to be read from outside the core. L2 refills caused by stashes into L2 should not be counted",
+ "EventCode": "0x17",
+ "EventName": "L2D_CACHE_REFILL",
+ "BriefDescription": "L2 data cache refill"
+ },
+ {
+ "PublicDescription": "L2 data cache write-back. This event counts any write-back of data from the L2 cache to outside the core. This includes snoops to the L2 which return data, regardless of whether they cause an invalidation. Invalidations from the L2 which do not write data outside of the core and snoops which return data from the L1 are not counted",
+ "EventCode": "0x18",
+ "EventName": "L2D_CACHE_WB",
+ "BriefDescription": "L2 data cache write-back"
+ },
+ {
+ "PublicDescription": "L2 data cache allocation without refill. This event counts any full cache line write into the L2 cache which does not cause a linefill, including write-backs from L1 to L2 and full-line writes which do not allocate into L1.",
+ "EventCode": "0x20",
+ "EventName": "L2D_CACHE_ALLOCATE",
+ "BriefDescription": "L2 data cache allocation without refill"
+ },
+ {
+ "PublicDescription": "Level 1 data TLB access. This event counts any load or store operation which accesses the data L1 TLB. If both a load and a store are executed on a cycle, this event counts twice. This event counts regardless of whether the MMU is enabled.",
+ "EventCode": "0x25",
+ "EventName": "L1D_TLB",
+ "BriefDescription": "Level 1 data TLB access."
+ },
+ {
+ "PublicDescription": "Level 1 instruction TLB access. This event counts any instruction fetch which accesses the instruction L1 TLB.This event counts regardless of whether the MMU is enabled.",
+ "EventCode": "0x26",
+ "EventName": "L1I_TLB",
+ "BriefDescription": "Level 1 instruction TLB access"
+ },
+ {
+ "PublicDescription": "This event counts any full cache line write into the L3 cache which does not cause a linefill, including write-backs from L2 to L3 and full-line writes which do not allocate into L2",
+ "EventCode": "0x29",
+ "EventName": "L3D_CACHE_ALLOCATE",
+ "BriefDescription": "Allocation without refill"
+ },
+ {
+ "PublicDescription": "Attributable Level 3 unified cache refill. This event counts for any cacheable read transaction returning datafrom the SCU for which the data source was outside the cluster. Transactions such as ReadUnique are counted here as 'read' transactions, even though they can be generated by store instructions.",
+ "EventCode": "0x2A",
+ "EventName": "L3D_CACHE_REFILL",
+ "BriefDescription": "Attributable Level 3 unified cache refill."
+ },
+ {
+ "PublicDescription": "Attributable Level 3 unified cache access. This event counts for any cacheable read transaction returning datafrom the SCU, or for any cacheable write to the SCU.",
+ "EventCode": "0x2B",
+ "EventName": "L3D_CACHE",
+ "BriefDescription": "Attributable Level 3 unified cache access."
+ },
+ {
+ "PublicDescription": "Attributable L2 data or unified TLB refill. This event counts on anyrefill of the L2 TLB, caused by either an instruction or data access.This event does not count if the MMU is disabled.",
+ "EventCode": "0x2D",
+ "EventName": "L2D_TLB_REFILL",
+ "BriefDescription": "Attributable L2 data or unified TLB refill"
+ },
+ {
+ "PublicDescription": "Attributable L2 data or unified TLB access. This event counts on any access to the L2 TLB (caused by a refill of any of the L1 TLBs). This event does not count if the MMU is disabled.",
+ "EventCode": "0x2F",
+ "EventName": "L2D_TLB",
+ "BriefDescription": "Attributable L2 data or unified TLB access"
+ },
+ {
+ "PublicDescription": "Access to data TLB that caused a page table walk. This event counts on any data access which causes L2D_TLB_REFILL to count.",
+ "EventCode": "0x34",
+ "EventName": "DTLB_WALK",
+ "BriefDescription": "Access to data TLB that caused a page table walk."
+ },
+ {
+ "PublicDescription": "Access to instruction TLB that caused a page table walk. This event counts on any instruction access which causes L2D_TLB_REFILL to count.",
+ "EventCode": "0x35",
+ "EventName": "ITLB_WALK",
+ "BriefDescription": "Access to instruction TLB that caused a page table walk."
+ },
+ {
+ "EventCode": "0x36",
+ "EventName": "LL_CACHE_RD",
+ "BriefDescription": "Last level cache access, read"
+ },
+ {
+ "EventCode": "0x37",
+ "EventName": "LL_CACHE_MISS_RD",
+ "BriefDescription": "Last level cache miss, read"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json
new file mode 100644
index 000000000000..98d29c862320
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json
@@ -0,0 +1,52 @@
+[
+ {
+ "EventCode": "0x09",
+ "EventName": "EXC_TAKEN",
+ "BriefDescription": "Exception taken."
+ },
+ {
+ "PublicDescription": "Local memory error. This event counts any correctable or uncorrectable memory error (ECC or parity) in the protected core RAMs",
+ "EventCode": "0x1A",
+ "EventName": "MEMORY_ERROR",
+ "BriefDescription": "Local memory error."
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json
new file mode 100644
index 000000000000..c153ac706d8d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json
@@ -0,0 +1,108 @@
+[
+ {
+ "PublicDescription": "Software increment. Instruction architecturally executed (condition code check pass).",
+ "EventCode": "0x00",
+ "EventName": "SW_INCR",
+ "BriefDescription": "Software increment."
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed. This event counts all retired instructions, including those that fail their condition check.",
+ "EventCode": "0x08",
+ "EventName": "INST_RETIRED",
+ "BriefDescription": "Instruction architecturally executed."
+ },
+ {
+ "EventCode": "0x0A",
+ "EventName": "EXC_RETURN",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, exception return."
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR. This event only counts writes to CONTEXTIDR in AArch32 state, and via the CONTEXTIDR_EL1 mnemonic in AArch64 state.",
+ "EventCode": "0x0B",
+ "EventName": "CID_WRITE_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR."
+ },
+ {
+ "EventCode": "0x1B",
+ "EventName": "INST_SPEC",
+ "BriefDescription": "Operation speculatively executed"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, write to TTBR. This event only counts writes to TTBR0/TTBR1 in AArch32 state and TTBR0_EL1/TTBR1_EL1 in AArch64 state.",
+ "EventCode": "0x1C",
+ "EventName": "TTBR_WRITE_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, write to TTBR"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, branch. This event counts all branches, taken or not. This excludes exception entries, debug entries and CCFAIL branches.",
+ "EventCode": "0x21",
+ "EventName": "BR_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, branch."
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, mispredicted branch. This event counts any branch counted by BR_RETIRED which is not correctly predicted and causes a pipeline flush.",
+ "EventCode": "0x22",
+ "EventName": "BR_MIS_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, mispredicted branch."
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json
new file mode 100644
index 000000000000..b86643253f19
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json
@@ -0,0 +1,23 @@
+[
+ {
+ "PublicDescription": "Data memory access. This event counts memory accesses due to load or store instructions. This event counts the sum of MEM_ACCESS_RD and MEM_ACCESS_WR.",
+ "EventCode": "0x13",
+ "EventName": "MEM_ACCESS",
+ "BriefDescription": "Data memory access"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json
new file mode 100644
index 000000000000..8bde029a62d5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json
@@ -0,0 +1,7 @@
+[
+ {
+ "EventCode": "0x31",
+ "EventName": "REMOTE_ACCESS",
+ "BriefDescription": "Access to another socket in a multi-socket system"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json
new file mode 100644
index 000000000000..010a647f9d02
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json
@@ -0,0 +1,14 @@
+[
+ {
+ "PublicDescription": "No operation issued because of the frontend. The counter counts on any cycle when there are no fetched instructions available to dispatch.",
+ "EventCode": "0x23",
+ "EventName": "STALL_FRONTEND",
+ "BriefDescription": "No operation issued because of the frontend."
+ },
+ {
+ "PublicDescription": "No operation issued because of the backend. The counter counts on any cycle fetched instructions are not dispatched due to resource constraints.",
+ "EventCode": "0x24",
+ "EventName": "STALL_BACKEND",
+ "BriefDescription": "No operation issued because of the backend."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index 927fcddcb4aa..0d609149b82a 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -16,6 +16,8 @@
0x00000000420f1000,v1,arm/cortex-a53,core
0x00000000410fd070,v1,arm/cortex-a57-a72,core
0x00000000410fd080,v1,arm/cortex-a57-a72,core
+0x00000000410fd0b0,v1,arm/cortex-a76-n1,core
+0x00000000410fd0c0,v1,arm/cortex-a76-n1,core
0x00000000420f5160,v1,cavium/thunderx2,core
0x00000000430f0af0,v1,cavium/thunderx2,core
0x00000000480fd010,v1,hisilicon/hip08,core
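
The first mapfile column is the MIDR_EL1 value with the variant and revision fields zeroed: implementer 0x41 is Arm, and part numbers 0xd0b (Cortex-A76) and 0xd0c (Neoverse N1) both map to the shared cortex-a76-n1 directory. A decoding sketch, assuming the architectural MIDR layout:

    #include <stdio.h>

    static void decode_midr(unsigned long midr)
    {
            /* implementer bits [31:24], part number bits [15:4] */
            printf("implementer %#lx part %#lx\n",
                   (midr >> 24) & 0xff, (midr >> 4) & 0xfff);
    }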
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
index 9dc2f6b70354..b2a3df07fbc4 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -1776,30 +1776,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0xa29084",
- "EventName": "PM_L3_P0_GRP_PUMP",
- "BriefDescription": "L3 pf sent with grp scope port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x528084",
- "EventName": "PM_L3_P0_LCO_DATA",
- "BriefDescription": "lco sent with data port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x518080",
- "EventName": "PM_L3_P0_LCO_NO_DATA",
- "BriefDescription": "dataless l3 lco sent port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa4908c",
- "EventName": "PM_L3_P0_LCO_RTY",
- "BriefDescription": "L3 LCO received retry port 0",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x84908d",
"EventName": "PM_L3_PF0_ALLOC",
"BriefDescription": "lifetime, sample of PF machine 0 valid",
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
index fad4af9142cb..6221a840fcea 100644
--- a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
@@ -283,5 +283,47 @@
"BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.",
"PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.",
"UMask": "0x1"
+ },
+ {
+ "EventName": "l3_request_g1.caching_l3_cache_accesses",
+ "EventCode": "0x01",
+ "BriefDescription": "Caching: L3 cache accesses",
+ "UMask": "0x80",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_lookup_state.all_l3_req_typs",
+ "EventCode": "0x04",
+ "BriefDescription": "All L3 Request Types",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.other_l3_miss_typs",
+ "EventCode": "0x06",
+ "BriefDescription": "Other L3 Miss Request Types",
+ "UMask": "0xfe",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.request_miss",
+ "EventCode": "0x06",
+ "BriefDescription": "L3 cache misses",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_sys_fill_latency",
+ "EventCode": "0x90",
+ "BriefDescription": "L3 Cache Miss Latency. Total cycles for all transactions divided by 16. Ignores SliceMask and ThreadMask.",
+ "UMask": "0x00",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_ccx_sdp_req1.all_l3_miss_req_typs",
+ "EventCode": "0x9a",
+ "BriefDescription": "All L3 Miss Request Types. Ignores SliceMask and ThreadMask.",
+ "UMask": "0x3f",
+ "Unit": "L3PMC"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
index 7b285b0a7f35..1079544eeed5 100644
--- a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
@@ -13,7 +13,7 @@
{
"EventName": "ex_ret_brn",
"EventCode": "0xc2",
- "BriefDescription": "[Retired Branch Instructions.",
+ "BriefDescription": "Retired Branch Instructions.",
"PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
},
{
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index d413761621b0..9e37287da924 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -239,6 +239,7 @@ static struct map {
{ "hisi_sccl,ddrc", "hisi_sccl,ddrc" },
{ "hisi_sccl,hha", "hisi_sccl,hha" },
{ "hisi_sccl,l3c", "hisi_sccl,l3c" },
+ { "L3PMC", "amd_l3" },
{}
};
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index a637a4a90760..338cd9faa835 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -10,6 +10,7 @@
#include "tests.h"
#include "debug.h"
#include "parse-events.h"
+#include "util/mmap.h"
#include <errno.h>
#include <linux/string.h>
@@ -32,8 +33,8 @@ static int count_samples(struct evlist *evlist, int *sample_count,
{
int i;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- struct perf_mmap *map = &evlist->overwrite_mmap[i];
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &evlist->overwrite_mmap[i];
union perf_event *event;
perf_mmap__read_init(map);
@@ -63,9 +64,9 @@ static int do_test(struct evlist *evlist, int mmap_pages,
int err;
char sbuf[STRERR_BUFSIZE];
- err = perf_evlist__mmap(evlist, mmap_pages);
+ err = evlist__mmap(evlist, mmap_pages);
if (err < 0) {
- pr_debug("perf_evlist__mmap: %s\n",
+ pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
return TEST_FAIL;
}
@@ -75,7 +76,7 @@ static int do_test(struct evlist *evlist, int mmap_pages,
evlist__disable(evlist);
err = count_samples(evlist, sample_count, comm_count);
- perf_evlist__munmap(evlist);
+ evlist__munmap(evlist);
return err;
}
diff --git a/tools/perf/tests/bitmap.c b/tools/perf/tests/bitmap.c
index db2aadff3708..96c137360918 100644
--- a/tools/perf/tests/bitmap.c
+++ b/tools/perf/tests/bitmap.c
@@ -2,8 +2,8 @@
#include <linux/compiler.h>
#include <linux/bitmap.h>
#include <perf/cpumap.h>
+#include <internal/cpumap.h>
#include "tests.h"
-#include "cpumap.h"
#include "debug.h"
#define NBITS 100
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index fc102e4f403e..1eb0bffaed6c 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -19,6 +19,7 @@
#include "llvm.h"
#include "debug.h"
#include "parse-events.h"
+#include "util/mmap.h"
#define NR_ITERS 111
#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"
@@ -167,9 +168,9 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, opts.mmap_pages);
+ err = evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
- pr_debug("perf_evlist__mmap: %s\n",
+ pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -178,9 +179,9 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
(*func)();
evlist__disable(evlist);
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
- struct perf_mmap *md;
+ struct mmap *md;
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
diff --git a/tools/perf/tests/clang.c b/tools/perf/tests/clang.c
index f45fe11dcf50..2577d3ed1531 100644
--- a/tools/perf/tests/clang.c
+++ b/tools/perf/tests/clang.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
-#include "debug.h"
-#include "util.h"
#include "c++/clang-c.h"
#include <linux/kernel.h>
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index c1c29e08e7fb..f5764a3890b9 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -19,12 +19,13 @@
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "event.h"
#include "record.h"
+#include "util/mmap.h"
+#include "util/synthetic-events.h"
#include "thread.h"
#include "tests.h"
@@ -419,10 +420,10 @@ static int process_events(struct machine *machine, struct evlist *evlist,
struct state *state)
{
union perf_event *event;
- struct perf_mmap *md;
+ struct mmap *md;
int i, ret;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;
@@ -651,7 +652,7 @@ static int do_test_code_reading(bool try_kcore)
perf_evlist__config(evlist, &opts, NULL);
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;
@@ -685,9 +686,9 @@ static int do_test_code_reading(bool try_kcore)
break;
}
- ret = perf_evlist__mmap(evlist, UINT_MAX);
+ ret = evlist__mmap(evlist, UINT_MAX);
if (ret < 0) {
- pr_debug("perf_evlist__mmap failed\n");
+ pr_debug("evlist__mmap failed\n");
goto out_put;
}
diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c
index 39493de50117..8a0d236202b0 100644
--- a/tools/perf/tests/cpumap.c
+++ b/tools/perf/tests/cpumap.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include "cpumap.h"
#include "event.h"
+#include "util/synthetic-events.h"
#include <string.h>
#include <linux/bitops.h>
#include <perf/cpumap.h>
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index a4874d4ce7ef..627c1aaf1c9e 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -10,7 +10,6 @@
#include <sys/resource.h>
#include <api/fs/fs.h>
#include "dso.h"
-#include "util.h"
#include "machine.h"
#include "symbol.h"
#include "tests.h"
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 4125255ff637..4f4ecbcbe87e 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -15,6 +15,7 @@
#include "symbol.h"
#include "thread.h"
#include "callchain.h"
+#include "util/synthetic-events.h"
#if defined (__x86_64__) || defined (__i386__) || defined (__powerpc__)
#include "arch-tests.h"
diff --git a/tools/perf/tests/event-times.c b/tools/perf/tests/event-times.c
index d824a726906c..1ee8704e2284 100644
--- a/tools/perf/tests/event-times.c
+++ b/tools/perf/tests/event-times.c
@@ -9,7 +9,6 @@
#include "tests.h"
#include "evlist.h"
#include "evsel.h"
-#include "util.h"
#include "debug.h"
#include "parse-events.h"
#include "thread_map.h"
@@ -17,7 +16,7 @@
static int attach__enable_on_exec(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__last(evlist);
+ struct evsel *evsel = evlist__last(evlist);
struct target target = {
.uid = UINT_MAX,
};
@@ -59,7 +58,7 @@ static int detach__enable_on_exec(struct evlist *evlist)
static int attach__current_disabled(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__last(evlist);
+ struct evsel *evsel = evlist__last(evlist);
struct perf_thread_map *threads;
int err;
@@ -85,7 +84,7 @@ static int attach__current_disabled(struct evlist *evlist)
static int attach__current_enabled(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__last(evlist);
+ struct evsel *evsel = evlist__last(evlist);
struct perf_thread_map *threads;
int err;
@@ -105,14 +104,14 @@ static int attach__current_enabled(struct evlist *evlist)
static int detach__disable(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__last(evlist);
+ struct evsel *evsel = evlist__last(evlist);
return evsel__enable(evsel);
}
static int attach__cpu_disabled(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__last(evlist);
+ struct evsel *evsel = evlist__last(evlist);
struct perf_cpu_map *cpus;
int err;
@@ -141,7 +140,7 @@ static int attach__cpu_disabled(struct evlist *evlist)
static int attach__cpu_enabled(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__last(evlist);
+ struct evsel *evsel = evlist__last(evlist);
struct perf_cpu_map *cpus;
int err;
@@ -181,7 +180,7 @@ static int test_times(int (attach)(struct evlist *),
goto out_err;
}
- evsel = perf_evlist__last(evlist);
+ evsel = evlist__last(evlist);
evsel->core.attr.read_format |=
PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index cac4290e233a..c727379cf20e 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -2,10 +2,12 @@
#include <linux/compiler.h>
#include <perf/cpumap.h>
#include <string.h>
+#include "cpumap.h"
#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "machine.h"
+#include "util/synthetic-events.h"
#include "tool.h"
#include "tests.h"
#include "debug.h"
@@ -90,12 +92,12 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
evlist = perf_evlist__new_default();
TEST_ASSERT_VAL("failed to get evlist", evlist);
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
- TEST_ASSERT_VAL("failed to allos ids",
- !perf_evsel__alloc_id(evsel, 1, 1));
+ TEST_ASSERT_VAL("failed to allocate ids",
+ !perf_evsel__alloc_id(&evsel->core, 1, 1));
- perf_evlist__id_add(evlist, evsel, 0, 0, 123);
+ perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123);
evsel->unit = strdup("KRAVA");
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index 5330f106a6ee..956205bf9326 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -34,7 +34,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
}
idx = 0;
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index de110d8f169b..6f34d08b84e5 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -2,6 +2,7 @@
#include <inttypes.h>
#include "util/debug.h"
#include "util/dso.h"
+#include "util/event.h" // struct perf_sample
#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
@@ -10,6 +11,7 @@
#include "util/thread.h"
#include "tests/hists_common.h"
#include <linux/kernel.h>
+#include <linux/perf_event.h>
static struct {
u32 pid;
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index fa55b7bad3af..6367c8f6ca22 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -721,7 +721,7 @@ int test__hists_cumulate(struct test *test __maybe_unused, int subtest __maybe_u
if (verbose > 1)
machine__fprintf(machine, stderr);
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
for (i = 0; i < ARRAY_SIZE(testcases); i++) {
err = testcases[i](evsel, machine);
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 8be4d0b61e3a..a024d3f3a412 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -8,6 +8,7 @@
#include "machine.h"
#include "parse-events.h"
#include "hists_common.h"
+#include "util/mmap.h"
#include <errno.h>
#include <linux/kernel.h>
@@ -310,8 +311,8 @@ int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unuse
print_hists_in(hists);
}
- first = perf_evlist__first(evlist);
- evsel = perf_evlist__last(evlist);
+ first = evlist__first(evlist);
+ evsel = evlist__last(evlist);
first_hists = evsel__hists(first);
hists = evsel__hists(evsel);
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index 3f6dfa212260..38f804ff6452 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -608,7 +608,7 @@ int test__hists_output(struct test *test __maybe_unused, int subtest __maybe_unu
if (verbose > 1)
machine__fprintf(machine, stderr);
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
for (i = 0; i < ARRAY_SIZE(testcases); i++) {
err = testcases[i](evsel, machine);
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index 9f0762d987fa..92c7d591bcac 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -12,8 +12,8 @@
#include "evsel.h"
#include "record.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "tests.h"
+#include "util/mmap.h"
#define CHECK__(x) { \
while ((x) < 0) { \
@@ -32,11 +32,11 @@
static int find_comm(struct evlist *evlist, const char *comm)
{
union perf_event *event;
- struct perf_mmap *md;
+ struct mmap *md;
int i, found;
found = 0;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;
@@ -93,7 +93,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
perf_evlist__config(evlist, &opts, NULL);
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;
@@ -105,7 +105,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
goto out_err;
}
- CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
+ CHECK__(evlist__mmap(evlist, UINT_MAX));
/*
* First, test that a 'comm' event can be found when the event is
@@ -132,7 +132,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
evlist__enable(evlist);
- evsel = perf_evlist__last(evlist);
+ evsel = evlist__last(evlist);
CHECK__(evsel__disable(evsel));
@@ -143,7 +143,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
found = find_comm(evlist, comm);
if (found != 1) {
- pr_debug("Seconf time, failed to find tracking event.\n");
+ pr_debug("Second time, failed to find tracking event.\n");
goto out_err;
}
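[The keep-tracking hunks switch the ring-buffer type from struct perf_mmap to struct mmap and take the map count from evlist->core.nr_mmaps. A minimal sketch of the resulting read loop, assuming the perf_mmap__read_init/read_event/consume/read_done helpers keep the signatures already used in this file:

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];

		if (perf_mmap__read_init(md) < 0) /* empty or busy ring buffer */
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
			/* ... inspect event->header.type, match the comm, etc. ... */
			perf_mmap__consume(md); /* mark the event as handled */
		}
		perf_mmap__read_done(md);
	}
]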
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
index 022e4c9cf092..ae6cda81c209 100644
--- a/tools/perf/tests/llvm.c
+++ b/tools/perf/tests/llvm.c
@@ -7,7 +7,6 @@
#include "llvm.h"
#include "tests.h"
#include "debug.h"
-#include "util.h"
#ifdef HAVE_LIBBPF_SUPPORT
static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 70c48475896d..c850d1664c56 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -100,7 +100,7 @@ make_install_info := install-info
make_install_pdf := install-pdf
make_install_prefix := install prefix=/tmp/krava
make_install_prefix_slash := install prefix=/tmp/krava/
-make_static := LDFLAGS=-static
+make_static := LDFLAGS=-static NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX32=1 NO_JVMTI=1
# all the NO_* variable combined
make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
@@ -327,6 +327,10 @@ make_kernelsrc_tools:
(make -C ../../tools $(PARALLEL_OPT) $(K_O_OPT) perf) > $@ 2>&1 && \
test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
+make_libperf:
+ @echo "- make -C lib";
+ make -C lib clean >$@ 2>&1; make -C lib >>$@ 2>&1 && rm $@
+
FEATURES_DUMP_FILE := $(FULL_O)/BUILD_TEST_FEATURE_DUMP
FEATURES_DUMP_FILE_STATIC := $(FULL_O)/BUILD_TEST_FEATURE_DUMP_STATIC
@@ -365,5 +369,5 @@ $(foreach t,$(run),$(if $(findstring make_static,$(t)),\
$(eval $(t) := $($(t)) FEATURES_DUMP=$(FEATURES_DUMP_FILE))))
endif
-.PHONY: all $(run) $(run_O) tarpkg clean make_kernelsrc make_kernelsrc_tools
+.PHONY: all $(run) $(run_O) tarpkg clean make_kernelsrc make_kernelsrc_tools make_libperf
endif # ifndef MK
diff --git a/tools/perf/tests/mem2node.c b/tools/perf/tests/mem2node.c
index 7672ade70f20..a258bd51f1a4 100644
--- a/tools/perf/tests/mem2node.c
+++ b/tools/perf/tests/mem2node.c
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
-#include "cpumap.h"
+#include <internal/cpumap.h>
#include "debug.h"
#include "env.h"
#include "mem2node.h"
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 85e1d7337dc0..3a22dce991ba 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -10,8 +10,8 @@
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "tests.h"
+#include "util/mmap.h"
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -43,7 +43,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
expected_nr_events[nsyscalls], i, j;
struct evsel *evsels[nsyscalls], *evsel;
char sbuf[STRERR_BUFSIZE];
- struct perf_mmap *md;
+ struct mmap *md;
threads = thread_map__new(-1, getpid(), UINT_MAX);
if (threads == NULL) {
@@ -53,7 +53,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
cpus = perf_cpu_map__new(NULL);
if (cpus == NULL) {
- pr_debug("cpu_map__new\n");
+ pr_debug("perf_cpu_map__new\n");
goto out_free_threads;
}
@@ -100,7 +100,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
expected_nr_events[i] = 1 + rand() % 127;
}
- if (perf_evlist__mmap(evlist, 128) < 0) {
+ if (evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 360d70deb855..8d9d4cbff76d 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -8,13 +8,15 @@
#include <stdlib.h>
#include <stdio.h>
#include "debug.h"
+#include "event.h"
#include "tests.h"
#include "machine.h"
#include "thread_map.h"
#include "map.h"
#include "symbol.h"
+#include "util/synthetic-events.h"
#include "thread.h"
-#include "util.h"
+#include <internal/lib.h> // page_size
#define THREADS 4
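[Here util.h is dropped in favour of <internal/lib.h>, which per the added comment is where page_size now comes from. A sketch of the dependency, assuming libperf declares the variable roughly as below; map_len() is an illustrative helper, not part of the patch:

#include <internal/lib.h> // page_size

/* libperf presumably carries something like:
 *   extern unsigned int page_size;
 * initialized from sysconf(_SC_PAGE_SIZE), so files that used to pull
 * page_size in via util.h only need this header. */
static size_t map_len(int pages)
{
	return (size_t)(pages + 1) * page_size; /* data pages plus control page */
}
]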
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index 9171f77cd9cd..93c176523e38 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -14,7 +14,8 @@
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
-#include "cpumap.h"
+#include <perf/cpumap.h>
+#include <internal/cpumap.h>
#include "debug.h"
#include "stat.h"
#include "util/counts.h"
@@ -37,7 +38,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
cpus = perf_cpu_map__new(NULL);
if (cpus == NULL) {
- pr_debug("cpu_map__new\n");
+ pr_debug("perf_cpu_map__new\n");
goto out_thread_map_delete;
}
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index b71167b43dda..2b5c46813053 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -11,6 +11,7 @@
#include "record.h"
#include "tests.h"
#include "debug.h"
+#include "util/mmap.h"
#include <errno.h>
#ifndef O_DIRECTORY
@@ -69,9 +70,9 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, UINT_MAX);
+ err = evlist__mmap(evlist, UINT_MAX);
if (err < 0) {
- pr_debug("perf_evlist__mmap: %s\n",
+ pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -86,9 +87,9 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
while (1) {
int before = nr_events;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
- struct perf_mmap *md;
+ struct mmap *md;
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
@@ -126,7 +127,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
}
if (nr_events == before)
- perf_evlist__poll(evlist, 10);
+ evlist__poll(evlist, 10);
if (++nr_polls > 5) {
pr_debug("%s: no events!\n", __func__);
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 02ba696fb87f..25e0ed2eedfc 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -6,7 +6,6 @@
#include "tests.h"
#include "debug.h"
#include "pmu.h"
-#include "util.h"
#include <dirent.h>
#include <errno.h>
#include <sys/types.h>
@@ -47,7 +46,7 @@ static bool kvm_s390_create_vm_valid(void)
static int test__checkevent_tracepoint(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
@@ -78,7 +77,7 @@ static int test__checkevent_tracepoint_multi(struct evlist *evlist)
static int test__checkevent_raw(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
@@ -88,7 +87,7 @@ static int test__checkevent_raw(struct evlist *evlist)
static int test__checkevent_numeric(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);
@@ -98,7 +97,7 @@ static int test__checkevent_numeric(struct evlist *evlist)
static int test__checkevent_symbolic_name(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -109,7 +108,7 @@ static int test__checkevent_symbolic_name(struct evlist *evlist)
static int test__checkevent_symbolic_name_config(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
@@ -130,7 +129,7 @@ static int test__checkevent_symbolic_name_config(struct evlist *evlist)
static int test__checkevent_symbolic_alias(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
@@ -141,7 +140,7 @@ static int test__checkevent_symbolic_alias(struct evlist *evlist)
static int test__checkevent_genhw(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
@@ -151,7 +150,7 @@ static int test__checkevent_genhw(struct evlist *evlist)
static int test__checkevent_breakpoint(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
@@ -165,7 +164,7 @@ static int test__checkevent_breakpoint(struct evlist *evlist)
static int test__checkevent_breakpoint_x(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
@@ -178,7 +177,7 @@ static int test__checkevent_breakpoint_x(struct evlist *evlist)
static int test__checkevent_breakpoint_r(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type",
@@ -193,7 +192,7 @@ static int test__checkevent_breakpoint_r(struct evlist *evlist)
static int test__checkevent_breakpoint_w(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type",
@@ -208,7 +207,7 @@ static int test__checkevent_breakpoint_w(struct evlist *evlist)
static int test__checkevent_breakpoint_rw(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type",
@@ -223,7 +222,7 @@ static int test__checkevent_breakpoint_rw(struct evlist *evlist)
static int test__checkevent_tracepoint_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
@@ -254,7 +253,7 @@ test__checkevent_tracepoint_multi_modifier(struct evlist *evlist)
static int test__checkevent_raw_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
@@ -266,7 +265,7 @@ static int test__checkevent_raw_modifier(struct evlist *evlist)
static int test__checkevent_numeric_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -278,7 +277,7 @@ static int test__checkevent_numeric_modifier(struct evlist *evlist)
static int test__checkevent_symbolic_name_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -290,7 +289,7 @@ static int test__checkevent_symbolic_name_modifier(struct evlist *evlist)
static int test__checkevent_exclude_host_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
@@ -300,7 +299,7 @@ static int test__checkevent_exclude_host_modifier(struct evlist *evlist)
static int test__checkevent_exclude_guest_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
@@ -310,7 +309,7 @@ static int test__checkevent_exclude_guest_modifier(struct evlist *evlist)
static int test__checkevent_symbolic_alias_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -322,7 +321,7 @@ static int test__checkevent_symbolic_alias_modifier(struct evlist *evlist)
static int test__checkevent_genhw_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
@@ -334,7 +333,7 @@ static int test__checkevent_genhw_modifier(struct evlist *evlist)
static int test__checkevent_exclude_idle_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
@@ -349,7 +348,7 @@ static int test__checkevent_exclude_idle_modifier(struct evlist *evlist)
static int test__checkevent_exclude_idle_modifier_1(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
@@ -364,7 +363,7 @@ static int test__checkevent_exclude_idle_modifier_1(struct evlist *evlist)
static int test__checkevent_breakpoint_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
@@ -379,7 +378,7 @@ static int test__checkevent_breakpoint_modifier(struct evlist *evlist)
static int test__checkevent_breakpoint_x_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
@@ -393,7 +392,7 @@ static int test__checkevent_breakpoint_x_modifier(struct evlist *evlist)
static int test__checkevent_breakpoint_r_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -407,7 +406,7 @@ static int test__checkevent_breakpoint_r_modifier(struct evlist *evlist)
static int test__checkevent_breakpoint_w_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -421,7 +420,7 @@ static int test__checkevent_breakpoint_w_modifier(struct evlist *evlist)
static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
@@ -436,7 +435,7 @@ static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist)
static int test__checkevent_pmu(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
@@ -454,7 +453,7 @@ static int test__checkevent_pmu(struct evlist *evlist)
static int test__checkevent_list(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
@@ -493,7 +492,7 @@ static int test__checkevent_list(struct evlist *evlist)
static int test__checkevent_pmu_name(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
/* cpu/config=1,name=krava/u */
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
@@ -514,7 +513,7 @@ static int test__checkevent_pmu_name(struct evlist *evlist)
static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
/* cpu/config=1,call-graph=fp,time,period=100000/ */
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
@@ -547,7 +546,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)
static int test__checkevent_pmu_events(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
@@ -565,7 +564,7 @@ static int test__checkevent_pmu_events(struct evlist *evlist)
static int test__checkevent_pmu_events_mix(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
/* pmu-event:u */
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
@@ -643,7 +642,7 @@ static int test__group1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* instructions:k */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
@@ -685,7 +684,7 @@ static int test__group2(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* faults + :ku modifier */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_SW_PAGE_FAULTS == evsel->core.attr.config);
@@ -740,7 +739,7 @@ static int test__group3(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
/* group1 syscalls:sys_enter_openat:H */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong sample_type",
PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
@@ -832,7 +831,7 @@ static int test__group4(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* cycles:u + p */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
@@ -876,7 +875,7 @@ static int test__group5(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
/* cycles + G */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
@@ -962,7 +961,7 @@ static int test__group_gh1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* cycles + :H group modifier */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
@@ -1002,7 +1001,7 @@ static int test__group_gh2(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* cycles + :G group modifier */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
@@ -1042,7 +1041,7 @@ static int test__group_gh3(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* cycles:G + :u group modifier */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
@@ -1082,7 +1081,7 @@ static int test__group_gh4(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* cycles:G + :uG group modifier */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
@@ -1121,7 +1120,7 @@ static int test__leader_sample1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
/* cycles - sampling group leader */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
@@ -1174,7 +1173,7 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
/* instructions - sampling group leader */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
@@ -1208,7 +1207,7 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
static int test__checkevent_pinned_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -1226,7 +1225,7 @@ static int test__pinned_group(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
/* cycles - group leader */
- evsel = leader = perf_evlist__first(evlist);
+ evsel = leader = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
@@ -1252,7 +1251,7 @@ static int test__pinned_group(struct evlist *evlist)
static int test__checkevent_breakpoint_len(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
@@ -1267,7 +1266,7 @@ static int test__checkevent_breakpoint_len(struct evlist *evlist)
static int test__checkevent_breakpoint_len_w(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
@@ -1283,7 +1282,7 @@ static int test__checkevent_breakpoint_len_w(struct evlist *evlist)
static int
test__checkevent_breakpoint_len_rw_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -1295,7 +1294,7 @@ test__checkevent_breakpoint_len_rw_modifier(struct evlist *evlist)
static int test__checkevent_precise_max_modifier(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
@@ -1306,7 +1305,7 @@ static int test__checkevent_precise_max_modifier(struct evlist *evlist)
static int test__checkevent_config_symbol(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "insn") == 0);
return 0;
@@ -1314,7 +1313,7 @@ static int test__checkevent_config_symbol(struct evlist *evlist)
static int test__checkevent_config_raw(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "rawpmu") == 0);
return 0;
@@ -1322,7 +1321,7 @@ static int test__checkevent_config_raw(struct evlist *evlist)
static int test__checkevent_config_num(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "numpmu") == 0);
return 0;
@@ -1330,7 +1329,7 @@ static int test__checkevent_config_num(struct evlist *evlist)
static int test__checkevent_config_cache(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "cachepmu") == 0);
return 0;
@@ -1343,7 +1342,7 @@ static bool test__intel_pt_valid(void)
static int test__intel_pt(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "intel_pt//u") == 0);
return 0;
@@ -1351,7 +1350,7 @@ static int test__intel_pt(struct evlist *evlist)
static int test__checkevent_complex_name(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong complex name parsing", strcmp(evsel->name, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks") == 0);
return 0;
@@ -1359,7 +1358,7 @@ static int test__checkevent_complex_name(struct evlist *evlist)
static int test__sym_event_slash(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES);
@@ -1369,7 +1368,7 @@ static int test__sym_event_slash(struct evlist *evlist)
static int test__sym_event_dc(struct evlist *evlist)
{
- struct evsel *evsel = perf_evlist__first(evlist);
+ struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES);
diff --git a/tools/perf/tests/parse-no-sample-id-all.c b/tools/perf/tests/parse-no-sample-id-all.c
index 8284752a60c8..adf3c9c4a416 100644
--- a/tools/perf/tests/parse-no-sample-id-all.c
+++ b/tools/perf/tests/parse-no-sample-id-all.c
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <stddef.h>
@@ -8,7 +7,6 @@
#include "event.h"
#include "evlist.h"
#include "header.h"
-#include "util.h"
#include "debug.h"
static int process_event(struct evlist **pevlist, union perf_event *event)
diff --git a/tools/perf/tests/perf-hooks.c b/tools/perf/tests/perf-hooks.c
index a693bcf017ea..dbc27199c65e 100644
--- a/tools/perf/tests/perf-hooks.c
+++ b/tools/perf/tests/perf-hooks.c
@@ -4,7 +4,6 @@
#include "tests.h"
#include "debug.h"
-#include "util.h"
#include "perf-hooks.h"
static void sigsegv_handler(int sig __maybe_unused)
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index e1b42292cf7f..437426be29e9 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -11,6 +11,7 @@
#include "debug.h"
#include "record.h"
#include "tests.h"
+#include "util/mmap.h"
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
@@ -103,7 +104,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
/*
* Config the evsels, setting attr->comm on the first one, etc.
*/
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
perf_evsel__set_sample_bit(evsel, CPU);
perf_evsel__set_sample_bit(evsel, TID);
perf_evsel__set_sample_bit(evsel, TIME);
@@ -143,9 +144,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
* fds in the same CPU to be injected in the same mmap ring buffer
* (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
*/
- err = perf_evlist__mmap(evlist, opts.mmap_pages);
+ err = evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
- pr_debug("perf_evlist__mmap: %s\n",
+ pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -164,9 +165,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
while (1) {
int before = total_events;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
- struct perf_mmap *md;
+ struct mmap *md;
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
@@ -286,7 +287,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
* perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
*/
if (total_events == before && false)
- perf_evlist__poll(evlist, -1);
+ evlist__poll(evlist, -1);
sleep(1);
if (++wakeups > 5) {
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 14a78898d79e..74379ff1f7fa 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "parse-events.h"
#include "pmu.h"
-#include "util.h"
#include "tests.h"
#include <errno.h>
#include <stdio.h>
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 5fcc06817076..3a02426db9a6 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -9,10 +9,10 @@
#include "map_symbol.h"
#include "branch.h"
-#include "util.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
+#include "util/synthetic-events.h"
#include "tests.h"
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
index cf1bd57d3023..60f0e9ee04fb 100644
--- a/tools/perf/tests/sdt.c
+++ b/tools/perf/tests/sdt.c
@@ -3,6 +3,7 @@
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
+#include <unistd.h>
#include <sys/epoll.h>
#include <util/symbol.h>
#include <linux/filter.h>
diff --git a/tools/perf/tests/stat.c b/tools/perf/tests/stat.c
index cc10b4116c9f..c1911501c39c 100644
--- a/tools/perf/tests/stat.c
+++ b/tools/perf/tests/stat.c
@@ -5,6 +5,7 @@
#include "stat.h"
#include "counts.h"
#include "debug.h"
+#include "util/synthetic-events.h"
static bool has_term(struct perf_record_stat_config *config,
u64 tag, u64 val)
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 97694a040986..84519df87f30 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -12,6 +12,7 @@
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/cpumap.h"
+#include "util/mmap.h"
#include "util/thread_map.h"
#include <perf/evlist.h>
@@ -42,7 +43,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
};
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
- struct perf_mmap *md;
+ struct mmap *md;
attr.sample_freq = 500;
@@ -82,7 +83,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, 128);
+ err = evlist__mmap(evlist, 128);
if (err < 0) {
pr_debug("failed to mmap event: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 1a60fa1219f5..ffa592e0020e 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -14,9 +14,9 @@
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "record.h"
#include "tests.h"
+#include "util/mmap.h"
static int spin_sleep(void)
{
@@ -144,7 +144,7 @@ static int process_sample_event(struct evlist *evlist,
return err;
/*
* Check for no missing sched_switch events i.e. that the
- * evsel->system_wide flag has worked.
+ * evsel->core.system_wide flag has worked.
*/
if (switch_tracking->tids[cpu] != -1 &&
switch_tracking->tids[cpu] != prev_tid) {
@@ -264,10 +264,10 @@ static int process_events(struct evlist *evlist,
unsigned pos, cnt = 0;
LIST_HEAD(events);
struct event_node *events_array, *node;
- struct perf_mmap *md;
+ struct mmap *md;
int i, ret;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;
@@ -316,7 +316,7 @@ out_free_nodes:
*
* This function implements a test that checks that sched_switch events and
* tracking events can be recorded for a workload (current process) using the
- * evsel->system_wide and evsel->tracking flags (respectively) with other events
+ * evsel->core.system_wide and evsel->tracking flags (respectively) with other events
* sometimes enabled or disabled.
*/
int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_unused)
@@ -367,7 +367,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
goto out_err;
}
- cpu_clocks_evsel = perf_evlist__last(evlist);
+ cpu_clocks_evsel = evlist__last(evlist);
/* Second event */
err = parse_events(evlist, "cycles:u", NULL);
@@ -376,7 +376,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
goto out_err;
}
- cycles_evsel = perf_evlist__last(evlist);
+ cycles_evsel = evlist__last(evlist);
/* Third event */
if (!perf_evlist__can_select_event(evlist, sched_switch)) {
@@ -391,22 +391,22 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
goto out_err;
}
- switch_evsel = perf_evlist__last(evlist);
+ switch_evsel = evlist__last(evlist);
perf_evsel__set_sample_bit(switch_evsel, CPU);
perf_evsel__set_sample_bit(switch_evsel, TIME);
- switch_evsel->system_wide = true;
+ switch_evsel->core.system_wide = true;
switch_evsel->no_aux_samples = true;
switch_evsel->immediate = true;
/* Test moving an event to the front */
- if (cycles_evsel == perf_evlist__first(evlist)) {
+ if (cycles_evsel == evlist__first(evlist)) {
pr_debug("cycles event already at front");
goto out_err;
}
perf_evlist__to_front(evlist, cycles_evsel);
- if (cycles_evsel != perf_evlist__first(evlist)) {
+ if (cycles_evsel != evlist__first(evlist)) {
pr_debug("Failed to move cycles event to front");
goto out_err;
}
@@ -421,7 +421,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
goto out_err;
}
- tracking_evsel = perf_evlist__last(evlist);
+ tracking_evsel = evlist__last(evlist);
perf_evlist__set_tracking_event(evlist, tracking_evsel);
@@ -434,7 +434,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
perf_evlist__config(evlist, &opts, NULL);
/* Check moved event is still at the front */
- if (cycles_evsel != perf_evlist__first(evlist)) {
+ if (cycles_evsel != evlist__first(evlist)) {
pr_debug("Front event no longer at front");
goto out_err;
}
@@ -461,9 +461,9 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
goto out;
}
- err = perf_evlist__mmap(evlist, UINT_MAX);
+ err = evlist__mmap(evlist, UINT_MAX);
if (err) {
- pr_debug("perf_evlist__mmap failed!\n");
+ pr_debug("evlist__mmap failed!\n");
goto out_err;
}
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index f610e8c0a083..bce3a4cb4c89 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -4,12 +4,13 @@
#include "evsel.h"
#include "target.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "tests.h"
+#include "util/mmap.h"
#include <errno.h>
#include <signal.h>
#include <linux/string.h>
+#include <perf/cpumap.h>
#include <perf/evlist.h>
static int exited;
@@ -51,7 +52,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
char sbuf[STRERR_BUFSIZE];
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
- struct perf_mmap *md;
+ struct mmap *md;
signal(SIGCHLD, sig_handler);
@@ -87,7 +88,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
goto out_delete_evlist;
}
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
evsel->core.attr.task = 1;
#ifdef __s390x__
evsel->core.attr.sample_freq = 1000000;
@@ -106,7 +107,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
goto out_delete_evlist;
}
- if (perf_evlist__mmap(evlist, 128) < 0) {
+ if (evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
@@ -129,7 +130,7 @@ retry:
out_init:
if (!exited || !nr_exit) {
- perf_evlist__poll(evlist, -1);
+ evlist__poll(evlist, -1);
goto retry;
}
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index 39168c57943b..28f51c4bd373 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -8,6 +8,7 @@
#include "thread_map.h"
#include "debug.h"
#include "event.h"
+#include "util/synthetic-events.h"
#include <linux/zalloc.h>
#include <perf/event.h>
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index a4f9f5182b47..4a800499d7c3 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -3,11 +3,12 @@
#include <stdlib.h>
#include <stdio.h>
#include <perf/cpumap.h>
+#include "cpumap.h"
#include "tests.h"
-#include "util.h"
#include "session.h"
#include "evlist.h"
#include "debug.h"
+#include <linux/err.h>
#define TEMPL "/tmp/perf-test-XXXXXX"
#define DATA_SIZE 10
@@ -39,7 +40,7 @@ static int session_write_header(char *path)
};
session = perf_session__new(&data, false, NULL);
- TEST_ASSERT_VAL("can't get session", session);
+ TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
session->evlist = perf_evlist__new_default();
TEST_ASSERT_VAL("can't get evlist", session->evlist);
@@ -70,7 +71,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
int i;
session = perf_session__new(&data, false, NULL);
- TEST_ASSERT_VAL("can't get session", session);
+ TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
/* On platforms with large numbers of CPUs process_cpu_topology()
* might issue an error while reading the perf.data file section
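[The topology test now validates the session with IS_ERR(), which implies perf_session__new() reports failure through an ERR_PTR-encoded pointer rather than NULL. A sketch of the resulting calling convention; the error path shown is this editor's assumption:

#include <linux/err.h>

	struct perf_session *session = perf_session__new(&data, false, NULL);

	if (IS_ERR(session)) {
		pr_debug("perf_session__new failed: %ld\n", PTR_ERR(session)); /* decode errno */
		return PTR_ERR(session);
	}
]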
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 01f434c067c6..aa296ffea6d1 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -7,7 +7,7 @@
#include "dso.h"
#include "map.h"
#include "symbol.h"
-#include "util.h"
+#include <internal/lib.h> // page_size
#include "tests.h"
#include "debug.h"
#include "machine.h"
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index f93d40b1c203..781afe42e90e 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include "../util/util.h"
#include "../util/string2.h"
#include "../util/config.h"
#include "libslang.h"
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index ac74ed2c23a0..82207db8f97c 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -2,7 +2,6 @@
#include "../browser.h"
#include "../helpline.h"
#include "../ui.h"
-#include "../util.h"
#include "../../util/annotate.h"
#include "../../util/debug.h"
#include "../../util/dso.h"
diff --git a/tools/perf/ui/browsers/header.c b/tools/perf/ui/browsers/header.c
index 0f59a7001479..57e6e4332f74 100644
--- a/tools/perf/ui/browsers/header.c
+++ b/tools/perf/ui/browsers/header.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include "util/debug.h"
#include "ui/browser.h"
#include "ui/keysyms.h"
#include "ui/ui.h"
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 589168ca9f62..7a7187e069b4 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -3319,13 +3319,13 @@ browse_hists:
switch (key) {
case K_TAB:
if (pos->core.node.next == &evlist->core.entries)
- pos = perf_evlist__first(evlist);
+ pos = evlist__first(evlist);
else
pos = perf_evsel__next(pos);
goto browse_hists;
case K_UNTAB:
if (pos->core.node.prev == &evlist->core.entries)
- pos = perf_evlist__last(evlist);
+ pos = evlist__last(evlist);
else
pos = perf_evsel__prev(pos);
goto browse_hists;
@@ -3417,7 +3417,7 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
single_entry:
if (nr_entries == 1) {
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
return perf_evsel__hists_browse(first, nr_entries, help,
false, hbt, min_pcnt,
diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index 893b065971f6..3d49b916c9e4 100644
--- a/tools/perf/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -5,7 +5,6 @@
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
-#include "../../util/util.h"
#include "../../util/debug.h"
#include "../../util/map.h"
#include "../../util/dso.h"
diff --git a/tools/perf/ui/browsers/res_sample.c b/tools/perf/ui/browsers/res_sample.c
index f16a38fea45e..76d356a18790 100644
--- a/tools/perf/ui/browsers/res_sample.c
+++ b/tools/perf/ui/browsers/res_sample.c
@@ -7,7 +7,7 @@
#include "config.h"
#include "time-utils.h"
#include "../util.h"
-#include "../../util/util.h"
+#include "../../util/util.h" // perf_exe()
#include "../../perf.h"
#include <stdlib.h>
#include <string.h>
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index 586a21acc13d..fc733a6354d4 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "../../builtin.h"
#include "../../perf.h"
-#include "../../util/util.h"
+#include "../../util/util.h" // perf_exe()
+#include "../util.h"
#include "../../util/hist.h"
#include "../../util/debug.h"
#include "../../util/symbol.h"
diff --git a/tools/perf/ui/gtk/helpline.c b/tools/perf/ui/gtk/helpline.c
index e166da9ec767..e40a006aead8 100644
--- a/tools/perf/ui/gtk/helpline.c
+++ b/tools/perf/ui/gtk/helpline.c
@@ -6,7 +6,6 @@
#include "gtk.h"
#include "../ui.h"
#include "../helpline.h"
-#include "../../util/debug.h"
static void gtk_helpline_pop(void)
{
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 6c2efc10bf5c..ed1a97b2c4b0 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -8,6 +8,7 @@
#include "../string2.h"
#include "gtk.h"
#include <signal.h>
+#include <stdlib.h>
#include <linux/string.h>
#define MAX_COLUMNS 32
diff --git a/tools/perf/ui/gtk/progress.c b/tools/perf/ui/gtk/progress.c
index b6ad8857da78..eea6fcde518a 100644
--- a/tools/perf/ui/gtk/progress.c
+++ b/tools/perf/ui/gtk/progress.c
@@ -3,7 +3,6 @@
#include "gtk.h"
#include "../progress.h"
-#include "util.h"
static GtkWidget *dialog;
static GtkWidget *progress;
diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c
index 1a2616b97b5c..f5eee4d66873 100644
--- a/tools/perf/ui/gtk/setup.c
+++ b/tools/perf/ui/gtk/setup.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
-#include "../../util/debug.h"
+#include <linux/compiler.h>
+#include "../util.h"
extern struct perf_error_ops perf_gtk_eops;
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c
index c2c558958b9c..c47f5c387838 100644
--- a/tools/perf/ui/gtk/util.c
+++ b/tools/perf/ui/gtk/util.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include "../util.h"
-#include "../../util/debug.h"
#include "gtk.h"
#include <stdlib.h>
diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c
index 54bcd08df87e..911182b3f5e6 100644
--- a/tools/perf/ui/helpline.c
+++ b/tools/perf/ui/helpline.c
@@ -3,10 +3,8 @@
#include <stdlib.h>
#include <string.h>
-#include "../util/debug.h"
#include "helpline.h"
#include "ui.h"
-#include "../util/util.h"
char ui_helpline__current[512];
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 3e533de7d852..f73675500061 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -8,7 +8,6 @@
#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
-#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
index c7a86b4be9f5..700335cde618 100644
--- a/tools/perf/ui/setup.c
+++ b/tools/perf/ui/setup.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <pthread.h>
#include <dlfcn.h>
+#include <unistd.h>
#include <subcmd/pager.h>
#include "../util/debug.h"
#include "../util/hist.h"
-#include "../util/util.h"
#include "ui.h"
pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 832ca6cfbe30..5365606e9dad 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -5,6 +5,7 @@
#include "../../util/callchain.h"
#include "../../util/debug.h"
+#include "../../util/event.h"
#include "../../util/hist.h"
#include "../../util/map.h"
#include "../../util/map_groups.h"
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
index 5f188f678c55..298d6af82fdd 100644
--- a/tools/perf/ui/tui/helpline.c
+++ b/tools/perf/ui/tui/helpline.c
@@ -6,7 +6,6 @@
#include <linux/kernel.h>
#include <linux/string.h>
-#include "../../util/debug.h"
#include "../helpline.h"
#include "../ui.h"
#include "../libslang.h"
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 56651a4f5aa0..e9bfe856a5de 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -2,13 +2,13 @@
#include <signal.h>
#include <stdbool.h>
#include <stdlib.h>
+#include <unistd.h>
#include <linux/kernel.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
#include "../../util/debug.h"
-#include "../../util/util.h"
#include "../../perf.h"
#include "../browser.h"
#include "../helpline.h"
diff --git a/tools/perf/ui/tui/util.c b/tools/perf/ui/tui/util.c
index 087d9ab054c8..b98dd0e31dc1 100644
--- a/tools/perf/ui/tui/util.c
+++ b/tools/perf/ui/tui/util.c
@@ -5,7 +5,6 @@
#include <stdlib.h>
#include <sys/ttydefaults.h>
-#include "../../util/debug.h"
#include "../browser.h"
#include "../keysyms.h"
#include "../helpline.h"
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 0b4d8e0d474c..8dcfca1a882f 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -3,6 +3,7 @@ perf-y += block-range.o
perf-y += build-id.o
perf-y += cacheline.o
perf-y += config.o
+perf-y += copyfile.o
perf-y += ctype.o
perf-y += db-export.o
perf-y += env.o
@@ -10,6 +11,7 @@ perf-y += event.o
perf-y += evlist.o
perf-y += evsel.o
perf-y += evsel_fprintf.o
+perf-y += perf_event_attr_fprintf.o
perf-y += evswitch.o
perf-y += find_bit.o
perf-y += get_current_dir_name.o
@@ -86,6 +88,7 @@ perf-y += stat-display.o
perf-y += record.o
perf-y += srcline.o
perf-y += srccode.o
+perf-y += synthetic-events.o
perf-y += data.o
perf-y += tsc.o
perf-y += cloexec.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 1748f528b6e9..e830eadfca2a 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -14,7 +14,7 @@
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
-#include "util.h"
+#include "util.h" // hex_width()
#include "ui/ui.h"
#include "sort.h"
#include "build-id.h"
@@ -34,6 +34,7 @@
#include "bpf-event.h"
#include "block-range.h"
#include "string2.h"
+#include "util/event.h"
#include "arch/common.h"
#include <regex.h>
#include <pthread.h>
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 8a7340f6a2a2..53be12b23ff4 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -16,7 +16,6 @@
#include <linux/log2.h>
#include <linux/zalloc.h>
-#include "cpumap.h"
#include "color.h"
#include "evsel.h"
#include "machine.h"
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 6f25224a3def..8470dfe9fe97 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -31,8 +31,8 @@
#include "map.h"
#include "pmu.h"
#include "evsel.h"
-#include "cpumap.h"
#include "symbol.h"
+#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"
@@ -50,10 +50,12 @@
#include "intel-bts.h"
#include "arm-spe.h"
#include "s390-cpumsf.h"
-#include "util.h"
+#include "util/mmap.h"
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include "symbol/kallsyms.h"
+#include <internal/lib.h>
static bool auxtrace__dont_decode(struct perf_session *session)
{
@@ -1226,7 +1228,7 @@ int perf_event__process_auxtrace_error(struct perf_session *session,
return 0;
}
-static int __auxtrace_mmap__read(struct perf_mmap *map,
+static int __auxtrace_mmap__read(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
bool snapshot, size_t snapshot_size)
@@ -1337,13 +1339,13 @@ static int __auxtrace_mmap__read(struct perf_mmap *map,
return 1;
}
-int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
+int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn)
{
return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}
-int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
+int auxtrace_mmap__read_snapshot(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size)
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 37e70dc01436..f201f36bc35f 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -11,21 +11,22 @@
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
+#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>
-#include "event.h"
-
union perf_event;
struct perf_session;
struct evlist;
struct perf_tool;
-struct perf_mmap;
+struct mmap;
+struct perf_sample;
struct option;
struct record_opts;
+struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
@@ -444,14 +445,14 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
bool per_cpu);
typedef int (*process_auxtrace_t)(struct perf_tool *tool,
- struct perf_mmap *map,
+ struct mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2);
-int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
+int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn);
-int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
+int auxtrace_mmap__read_snapshot(struct mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size);
@@ -524,10 +525,6 @@ void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
const char *msg, u64 timestamp);
-int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
- struct perf_tool *tool,
- struct perf_session *session,
- perf_event__handler_t process);
int perf_event__process_auxtrace_info(struct perf_session *session,
union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
@@ -604,15 +601,6 @@ void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}
-static inline int
-perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
- struct perf_tool *tool __maybe_unused,
- struct perf_session *session __maybe_unused,
- perf_event__handler_t process __maybe_unused)
-{
- return -EINVAL;
-}
-
static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
struct evlist *evlist __maybe_unused,
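
The auxtrace.h edits above swap a heavyweight #include "event.h" for bare forward declarations of the struct types the header only ever touches through pointers. C needs a complete type only where a size or a member is required, so "struct foo;" is enough for prototypes. A minimal sketch of the idiom, using hypothetical names rather than the perf types:

/* widget.h: pointer-only interface, no full definition needed */
struct widget;                          /* forward declaration */
int widget_bump(struct widget *w);

/* widget.c: the complete type lives where members are accessed */
struct widget {
	int count;
};

int widget_bump(struct widget *w)
{
	return w ? ++w->count : -1;
}

Keeping the definition out of the header cuts rebuild fan-out: changing the widget internals no longer recompiles every file that merely passes the pointer around.
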
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 7a3d4b125323..f7ed5d122e22 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -16,6 +16,7 @@
#include "map.h"
#include "evlist.h"
#include "record.h"
+#include "util/synthetic-events.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index a01c2fd68c03..81fdc88e6c1a 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -6,9 +6,9 @@
#include <linux/rbtree.h>
#include <pthread.h>
#include <api/fd/array.h>
-#include "event.h"
#include <stdio.h>
+struct bpf_prog_info;
struct machine;
union perf_event;
struct perf_env;
@@ -33,11 +33,6 @@ struct btf_node {
#ifdef HAVE_LIBBPF_SUPPORT
int machine__process_bpf(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
-
-int perf_event__synthesize_bpf_events(struct perf_session *session,
- perf_event__handler_t process,
- struct machine *machine,
- struct record_opts *opts);
int bpf_event__add_sb_event(struct evlist **evlist,
struct perf_env *env);
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
@@ -51,14 +46,6 @@ static inline int machine__process_bpf(struct machine *machine __maybe_unused,
return 0;
}
-static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
- perf_event__handler_t process __maybe_unused,
- struct machine *machine __maybe_unused,
- struct record_opts *opts __maybe_unused)
-{
- return 0;
-}
-
static inline int bpf_event__add_sb_event(struct evlist **evlist __maybe_unused,
struct perf_env *env __maybe_unused)
{
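
bpf-event.h keeps perf's usual compile-time feature split: under HAVE_LIBBPF_SUPPORT the real prototypes are declared, and otherwise static inline no-op stubs with identical signatures let every caller compile and link unchanged. A minimal sketch of the convention, with an illustrative feature macro and function name:

/* feature.h */
#ifdef HAVE_FOO_SUPPORT
int foo_process(int fd);        /* real implementation lives in foo.c */
#else
static inline int foo_process(int fd)
{
	(void)fd;               /* perf spells this __maybe_unused */
	return 0;               /* compiled-out feature succeeds as a no-op */
}
#endif

Returning a benign value from the stub means call sites need no #ifdefs of their own.
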
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 37283e865352..10c187b8b8ea 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -1568,7 +1568,7 @@ struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
return ERR_PTR(-err);
}
- evsel = perf_evlist__last(evlist);
+ evsel = evlist__last(evlist);
}
bpf__for_each_map_named(map, obj, tmp, name) {
diff --git a/tools/perf/util/branch.c b/tools/perf/util/branch.c
index 9d1e090084a2..2285b1eb3128 100644
--- a/tools/perf/util/branch.c
+++ b/tools/perf/util/branch.c
@@ -1,5 +1,3 @@
-#include "util/util.h"
-#include "util/debug.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include <linux/kernel.h>
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 06f66dad0b79..88e00d268f6f 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -1,8 +1,15 @@
#ifndef _PERF_BRANCH_H
#define _PERF_BRANCH_H 1
-
+/*
+ * The linux/stddef.h isn't needed here, but is needed for __always_inline used

+ * in files included from uapi/linux/perf_event.h such as
+ * /usr/include/linux/swab.h and /usr/include/linux/byteorder/little_endian.h,
+ * detected in at least musl libc, used in Alpine Linux. -acme
+ */
#include <stdio.h>
#include <stdint.h>
+#include <linux/compiler.h>
+#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <linux/types.h>
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index e5fb77755d9e..c076fc7fe025 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -7,12 +7,13 @@
* Copyright (C) 2009, 2010 Red Hat Inc.
* Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <[email protected]>
*/
-#include "util.h"
+#include "util.h" // lsdir(), mkdir_p(), rm_rf()
#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include "util/copyfile.h"
#include "dso.h"
#include "build-id.h"
#include "event.h"
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index c14646c1f2eb..9a9b56ed3f0a 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -23,6 +23,7 @@
#include "debug.h"
#include "dso.h"
+#include "event.h"
#include "hist.h"
#include "sort.h"
#include "machine.h"
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index b042ceef4114..83398e5bbe4b 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -4,12 +4,15 @@
#include <linux/list.h>
#include <linux/rbtree.h>
-#include "event.h"
#include "map_symbol.h"
#include "branch.h"
+struct addr_location;
struct evsel;
+struct ip_callchain;
struct map;
+struct perf_sample;
+struct thread;
#define HELP_PAD "\t\t\t\t"
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index 4e904fcb2783..a12872f2856a 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <sched.h>
-#include "util.h"
+#include "util.h" // for sched_getcpu()
#include "../perf-sys.h"
#include "cloexec.h"
#include "event.h"
diff --git a/tools/perf/util/copyfile.c b/tools/perf/util/copyfile.c
new file mode 100644
index 000000000000..3fa0db136667
--- /dev/null
+++ b/tools/perf/util/copyfile.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/copyfile.h"
+#include "util/namespaces.h"
+#include <internal/lib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi)
+{
+ int err = -1;
+ char *line = NULL;
+ size_t n;
+ FILE *from_fp, *to_fp;
+ struct nscookie nsc;
+
+ nsinfo__mountns_enter(nsi, &nsc);
+ from_fp = fopen(from, "r");
+ nsinfo__mountns_exit(&nsc);
+ if (from_fp == NULL)
+ goto out;
+
+ to_fp = fopen(to, "w");
+ if (to_fp == NULL)
+ goto out_fclose_from;
+
+ while (getline(&line, &n, from_fp) > 0)
+ if (fputs(line, to_fp) == EOF)
+ goto out_fclose_to;
+ err = 0;
+out_fclose_to:
+ fclose(to_fp);
+ free(line);
+out_fclose_from:
+ fclose(from_fp);
+out:
+ return err;
+}
+
+int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
+{
+ void *ptr;
+ loff_t pgoff;
+
+ pgoff = off_in & ~(page_size - 1);
+ off_in -= pgoff;
+
+ ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff);
+ if (ptr == MAP_FAILED)
+ return -1;
+
+ while (size) {
+ ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out);
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
+ break;
+
+ size -= ret;
+ off_in += ret;
+ off_out += ret;
+ }
+ munmap(ptr, off_in + size);
+
+ return size ? -1 : 0;
+}
+
+static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
+ struct nsinfo *nsi)
+{
+ int fromfd, tofd;
+ struct stat st;
+ int err;
+ char *tmp = NULL, *ptr = NULL;
+ struct nscookie nsc;
+
+ nsinfo__mountns_enter(nsi, &nsc);
+ err = stat(from, &st);
+ nsinfo__mountns_exit(&nsc);
+ if (err)
+ goto out;
+ err = -1;
+
+ /* extra 'x' at the end is to reserve space for '.' */
+ if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) {
+ tmp = NULL;
+ goto out;
+ }
+ ptr = strrchr(tmp, '/');
+ if (!ptr)
+ goto out;
+ ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1);
+ *ptr = '.';
+
+ tofd = mkstemp(tmp);
+ if (tofd < 0)
+ goto out;
+
+ if (fchmod(tofd, mode))
+ goto out_close_to;
+
+ if (st.st_size == 0) { /* /proc? do it slowly... */
+ err = slow_copyfile(from, tmp, nsi);
+ goto out_close_to;
+ }
+
+ nsinfo__mountns_enter(nsi, &nsc);
+ fromfd = open(from, O_RDONLY);
+ nsinfo__mountns_exit(&nsc);
+ if (fromfd < 0)
+ goto out_close_to;
+
+ err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size);
+
+ close(fromfd);
+out_close_to:
+ close(tofd);
+ if (!err)
+ err = link(tmp, to);
+ unlink(tmp);
+out:
+ free(tmp);
+ return err;
+}
+
+int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi)
+{
+ return copyfile_mode_ns(from, to, 0755, nsi);
+}
+
+int copyfile_mode(const char *from, const char *to, mode_t mode)
+{
+ return copyfile_mode_ns(from, to, mode, NULL);
+}
+
+int copyfile(const char *from, const char *to)
+{
+ return copyfile_mode(from, to, 0755);
+}
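
slow_copyfile() above exists because procfs and sysfs files typically report st_size == 0 even though reads return data, so the mmap()-based copyfile_offset() fast path cannot size its mapping from stat(). A standalone sketch demonstrating the behavior, independent of the patch:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;
	char buf[256];
	FILE *fp;

	if (stat("/proc/self/status", &st))
		return 1;
	/* st.st_size is 0 here, yet line-by-line reads succeed */
	printf("st_size=%lld\n", (long long)st.st_size);

	fp = fopen("/proc/self/status", "r");
	if (!fp)
		return 1;
	while (fgets(buf, sizeof(buf), fp))
		fputs(buf, stdout);
	fclose(fp);
	return 0;
}

Hence the split in copyfile_mode_ns(): st_size == 0 routes through the getline()/fputs() loop, everything else through the page-aligned mmap() + pwrite() copy.
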
diff --git a/tools/perf/util/copyfile.h b/tools/perf/util/copyfile.h
new file mode 100644
index 000000000000..e85d2f22f3cc
--- /dev/null
+++ b/tools/perf/util/copyfile.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef PERF_COPYFILE_H_
+#define PERF_COPYFILE_H_
+
+#include <linux/types.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+struct nsinfo;
+
+int copyfile(const char *from, const char *to);
+int copyfile_mode(const char *from, const char *to, mode_t mode);
+int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
+int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size);
+
+#endif // PERF_COPYFILE_H_
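
With the helpers split out of util.c, a user of the API includes only this header. A hedged usage sketch; the wrapper function and paths are illustrative, not from the perf sources:

#include "util/copyfile.h"
#include <stdio.h>

static int cache_binary(const char *src, const char *dst)
{
	/*
	 * copyfile() installs via mkstemp() + fchmod() + link(), so
	 * readers of dst never observe a partially written file.
	 */
	if (copyfile(src, dst)) {
		fprintf(stderr, "failed to copy %s to %s\n", src, dst);
		return -1;
	}
	return 0;
}

copyfile_ns() takes the same arguments plus a struct nsinfo * so the source can be opened inside a target process's mount namespace, which is how callers such as the build-id cache can deal with containerized workloads.
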
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 37d7c492b155..cd92a99eb89d 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -17,7 +17,6 @@
#include "cs-etm.h"
#include "cs-etm-decoder.h"
#include "intlist.h"
-#include "util.h"
/* use raw logging */
#ifdef CS_DEBUG_RAW
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 707afdbd9529..4ba0f871f086 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -35,7 +35,7 @@
#include "thread.h"
#include "thread-stack.h"
#include <tools/libc_compat.h>
-#include "util.h"
+#include "util/synthetic-events.h"
#define MAX_TIMESTAMP (~0ULL)
@@ -1298,7 +1298,7 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
attr.read_format = evsel->core.attr.read_format;
/* create new id val to be a fixed offset from evsel id */
- id = evsel->id[0] + 1000000000;
+ id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 0c268449959c..dbc772bfb04e 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -30,6 +30,7 @@
#include "machine.h"
#include "config.h"
#include <linux/ctype.h>
+#include <linux/err.h>
#define pr_N(n, fmt, ...) \
eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
@@ -1619,8 +1620,10 @@ int bt_convert__perf2ctf(const char *input, const char *path,
err = -1;
/* perf.data session */
session = perf_session__new(&data, 0, &c.tool);
- if (!session)
+ if (IS_ERR(session)) {
+ err = PTR_ERR(session);
goto free_writer;
+ }
if (c.queue_size) {
ordered_events__set_alloc_size(&session->ordered_events,
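
The conversion above is part of a tree-wide switch: perf_session__new() now returns an error pointer rather than NULL, so callers test with IS_ERR() and propagate the precise errno via PTR_ERR() instead of collapsing every failure into -1. A minimal sketch of the error-pointer convention from tools/include's <linux/err.h>, with an illustrative constructor:

#include <linux/err.h>          /* ERR_PTR(), IS_ERR(), PTR_ERR() */
#include <errno.h>
#include <stdlib.h>

struct session { int fd; };

static struct session *session_new(void)
{
	struct session *s = malloc(sizeof(*s));

	if (!s)
		return ERR_PTR(-ENOMEM);  /* encode errno in the pointer */
	return s;
}

static int session_open(struct session **out)
{
	struct session *s = session_new();

	if (IS_ERR(s))
		return PTR_ERR(s);        /* recover the negative errno */
	*out = s;
	return 0;
}

The trick relies on the top of the address space never holding valid objects, so small negative values cast to pointers remain distinguishable from real allocations.
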
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index e75c3a279fe8..88fba2ba549f 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -13,9 +13,10 @@
#include <dirent.h>
#include "data.h"
-#include "util.h"
+#include "util.h" // rm_rf_perf_data()
#include "debug.h"
#include "header.h"
+#include <internal/lib.h>
static void close_dir(struct perf_data_file *files, int nr)
{
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index a1b59bd35519..e55114f0336f 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -17,7 +17,6 @@
#include "event.h"
#include "debug.h"
#include "print_binary.h"
-#include "util.h"
#include "target.h"
#include "ui/helpline.h"
#include "ui/ui.h"
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index b2deee987ffa..d25ae1c4cee9 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -3,9 +3,9 @@
#ifndef __PERF_DEBUG_H
#define __PERF_DEBUG_H
+#include <stdarg.h>
#include <stdbool.h>
#include <linux/compiler.h>
-#include "../ui/util.h"
extern int verbose;
extern bool quiet, dump_trace;
diff --git a/tools/perf/util/demangle-java.c b/tools/perf/util/demangle-java.c
index 763328c151e9..6fb7f34c0814 100644
--- a/tools/perf/util/demangle-java.c
+++ b/tools/perf/util/demangle-java.c
@@ -3,7 +3,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include "debug.h"
#include "symbol.h"
#include "demangle-java.h"
diff --git a/tools/perf/util/demangle-rust.c b/tools/perf/util/demangle-rust.c
index 423afbbd386b..a659fc69f73a 100644
--- a/tools/perf/util/demangle-rust.c
+++ b/tools/perf/util/demangle-rust.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
-#include "util.h"
#include "debug.h"
#include "demangle-rust.h"
diff --git a/tools/perf/util/dwarf-regs.c b/tools/perf/util/dwarf-regs.c
index db55eddce8cd..1b49ecee5aff 100644
--- a/tools/perf/util/dwarf-regs.c
+++ b/tools/perf/util/dwarf-regs.c
@@ -5,7 +5,6 @@
* Written by: Masami Hiramatsu <[email protected]>
*/
-#include <util.h>
#include <debug.h>
#include <dwarf-regs.h>
#include <elf.h>
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d8e083d42610..db40906e2937 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -4,9 +4,10 @@
#include <linux/types.h>
#include <linux/rbtree.h>
-#include "cpumap.h"
#include "rwsem.h"
+struct perf_cpu_map;
+
struct cpu_topology_map {
int socket_id;
int die_id;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index f4afbb858ebb..fc1e5a991008 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,16 +1,16 @@
-#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <perf/cpumap.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
-#include <api/fs/fs.h>
#include <linux/perf_event.h>
#include <linux/zalloc.h>
+#include "cpumap.h"
#include "dso.h"
#include "event.h"
#include "debug.h"
@@ -24,6 +24,7 @@
#include "time-utils.h"
#include <linux/ctype.h>
#include "map.h"
+#include "util/namespaces.h"
#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
@@ -33,8 +34,6 @@
#include "tool.h"
#include "../perf.h"
-#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
-
static const char *perf_event__names[] = {
[0] = "TOTAL",
[PERF_RECORD_MMAP] = "MMAP",
@@ -75,18 +74,6 @@ static const char *perf_event__names[] = {
[PERF_RECORD_COMPRESSED] = "COMPRESSED",
};
-static const char *perf_ns__names[] = {
- [NET_NS_INDEX] = "net",
- [UTS_NS_INDEX] = "uts",
- [IPC_NS_INDEX] = "ipc",
- [PID_NS_INDEX] = "pid",
- [USER_NS_INDEX] = "user",
- [MNT_NS_INDEX] = "mnt",
- [CGROUP_NS_INDEX] = "cgroup",
-};
-
-unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
-
const char *perf_event__name(unsigned int id)
{
if (id >= ARRAY_SIZE(perf_event__names))
@@ -96,775 +83,6 @@ const char *perf_event__name(unsigned int id)
return perf_event__names[id];
}
-static const char *perf_ns__name(unsigned int id)
-{
- if (id >= ARRAY_SIZE(perf_ns__names))
- return "UNKNOWN";
- return perf_ns__names[id];
-}
-
-int perf_tool__process_synth_event(struct perf_tool *tool,
- union perf_event *event,
- struct machine *machine,
- perf_event__handler_t process)
-{
- struct perf_sample synth_sample = {
- .pid = -1,
- .tid = -1,
- .time = -1,
- .stream_id = -1,
- .cpu = -1,
- .period = 1,
- .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
- };
-
- return process(tool, event, &synth_sample, machine);
-};
-
-/*
- * Assumes that the first 4095 bytes of /proc/pid/status contain
- * the comm, tgid and ppid.
- */
-static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
- pid_t *tgid, pid_t *ppid)
-{
- char filename[PATH_MAX];
- char bf[4096];
- int fd;
- size_t size = 0;
- ssize_t n;
- char *name, *tgids, *ppids;
-
- *tgid = -1;
- *ppid = -1;
-
- snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
-
- fd = open(filename, O_RDONLY);
- if (fd < 0) {
- pr_debug("couldn't open %s\n", filename);
- return -1;
- }
-
- n = read(fd, bf, sizeof(bf) - 1);
- close(fd);
- if (n <= 0) {
- pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
- pid);
- return -1;
- }
- bf[n] = '\0';
-
- name = strstr(bf, "Name:");
- tgids = strstr(bf, "Tgid:");
- ppids = strstr(bf, "PPid:");
-
- if (name) {
- char *nl;
-
- name = skip_spaces(name + 5); /* strlen("Name:") */
- nl = strchr(name, '\n');
- if (nl)
- *nl = '\0';
-
- size = strlen(name);
- if (size >= len)
- size = len - 1;
- memcpy(comm, name, size);
- comm[size] = '\0';
- } else {
- pr_debug("Name: string not found for pid %d\n", pid);
- }
-
- if (tgids) {
- tgids += 5; /* strlen("Tgid:") */
- *tgid = atoi(tgids);
- } else {
- pr_debug("Tgid: string not found for pid %d\n", pid);
- }
-
- if (ppids) {
- ppids += 5; /* strlen("PPid:") */
- *ppid = atoi(ppids);
- } else {
- pr_debug("PPid: string not found for pid %d\n", pid);
- }
-
- return 0;
-}
-
-static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
- struct machine *machine,
- pid_t *tgid, pid_t *ppid)
-{
- size_t size;
-
- *ppid = -1;
-
- memset(&event->comm, 0, sizeof(event->comm));
-
- if (machine__is_host(machine)) {
- if (perf_event__get_comm_ids(pid, event->comm.comm,
- sizeof(event->comm.comm),
- tgid, ppid) != 0) {
- return -1;
- }
- } else {
- *tgid = machine->pid;
- }
-
- if (*tgid < 0)
- return -1;
-
- event->comm.pid = *tgid;
- event->comm.header.type = PERF_RECORD_COMM;
-
- size = strlen(event->comm.comm) + 1;
- size = PERF_ALIGN(size, sizeof(u64));
- memset(event->comm.comm + size, 0, machine->id_hdr_size);
- event->comm.header.size = (sizeof(event->comm) -
- (sizeof(event->comm.comm) - size) +
- machine->id_hdr_size);
- event->comm.tid = pid;
-
- return 0;
-}
-
-pid_t perf_event__synthesize_comm(struct perf_tool *tool,
- union perf_event *event, pid_t pid,
- perf_event__handler_t process,
- struct machine *machine)
-{
- pid_t tgid, ppid;
-
- if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
- return -1;
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
- return -1;
-
- return tgid;
-}
-
-static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
- struct perf_ns_link_info *ns_link_info)
-{
- struct stat64 st;
- char proc_ns[128];
-
- sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
- if (stat64(proc_ns, &st) == 0) {
- ns_link_info->dev = st.st_dev;
- ns_link_info->ino = st.st_ino;
- }
-}
-
-int perf_event__synthesize_namespaces(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid,
- perf_event__handler_t process,
- struct machine *machine)
-{
- u32 idx;
- struct perf_ns_link_info *ns_link_info;
-
- if (!tool || !tool->namespace_events)
- return 0;
-
- memset(&event->namespaces, 0, (sizeof(event->namespaces) +
- (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
- machine->id_hdr_size));
-
- event->namespaces.pid = tgid;
- event->namespaces.tid = pid;
-
- event->namespaces.nr_namespaces = NR_NAMESPACES;
-
- ns_link_info = event->namespaces.link_info;
-
- for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
- perf_event__get_ns_link_info(pid, perf_ns__name(idx),
- &ns_link_info[idx]);
-
- event->namespaces.header.type = PERF_RECORD_NAMESPACES;
-
- event->namespaces.header.size = (sizeof(event->namespaces) +
- (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
- machine->id_hdr_size);
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
- return -1;
-
- return 0;
-}
-
-static int perf_event__synthesize_fork(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid, pid_t ppid,
- perf_event__handler_t process,
- struct machine *machine)
-{
- memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
-
- /*
- * for main thread set parent to ppid from status file. For other
- * threads set parent pid to main thread, i.e., assume the main thread
- * spawns all threads in a process
- */
- if (tgid == pid) {
- event->fork.ppid = ppid;
- event->fork.ptid = ppid;
- } else {
- event->fork.ppid = tgid;
- event->fork.ptid = tgid;
- }
- event->fork.pid = tgid;
- event->fork.tid = pid;
- event->fork.header.type = PERF_RECORD_FORK;
- event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
-
- event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
- return -1;
-
- return 0;
-}
-
-int perf_event__synthesize_mmap_events(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data)
-{
- char filename[PATH_MAX];
- FILE *fp;
- unsigned long long t;
- bool truncation = false;
- unsigned long long timeout = proc_map_timeout * 1000000ULL;
- int rc = 0;
- const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
- int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
-
- if (machine__is_default_guest(machine))
- return 0;
-
- snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
- machine->root_dir, pid, pid);
-
- fp = fopen(filename, "r");
- if (fp == NULL) {
- /*
- * We raced with a task exiting - just return:
- */
- pr_debug("couldn't open %s\n", filename);
- return -1;
- }
-
- event->header.type = PERF_RECORD_MMAP2;
- t = rdclock();
-
- while (1) {
- char bf[BUFSIZ];
- char prot[5];
- char execname[PATH_MAX];
- char anonstr[] = "//anon";
- unsigned int ino;
- size_t size;
- ssize_t n;
-
- if (fgets(bf, sizeof(bf), fp) == NULL)
- break;
-
- if ((rdclock() - t) > timeout) {
- pr_warning("Reading %s time out. "
- "You may want to increase "
- "the time limit by --proc-map-timeout\n",
- filename);
- truncation = true;
- goto out;
- }
-
- /* ensure null termination since stack will be reused. */
- strcpy(execname, "");
-
- /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
- n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n",
- &event->mmap2.start, &event->mmap2.len, prot,
- &event->mmap2.pgoff, &event->mmap2.maj,
- &event->mmap2.min,
- &ino, execname);
-
- /*
- * Anon maps don't have the execname.
- */
- if (n < 7)
- continue;
-
- event->mmap2.ino = (u64)ino;
-
- /*
- * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
- */
- if (machine__is_host(machine))
- event->header.misc = PERF_RECORD_MISC_USER;
- else
- event->header.misc = PERF_RECORD_MISC_GUEST_USER;
-
- /* map protection and flags bits */
- event->mmap2.prot = 0;
- event->mmap2.flags = 0;
- if (prot[0] == 'r')
- event->mmap2.prot |= PROT_READ;
- if (prot[1] == 'w')
- event->mmap2.prot |= PROT_WRITE;
- if (prot[2] == 'x')
- event->mmap2.prot |= PROT_EXEC;
-
- if (prot[3] == 's')
- event->mmap2.flags |= MAP_SHARED;
- else
- event->mmap2.flags |= MAP_PRIVATE;
-
- if (prot[2] != 'x') {
- if (!mmap_data || prot[0] != 'r')
- continue;
-
- event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
- }
-
-out:
- if (truncation)
- event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
-
- if (!strcmp(execname, ""))
- strcpy(execname, anonstr);
-
- if (hugetlbfs_mnt_len &&
- !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
- strcpy(execname, anonstr);
- event->mmap2.flags |= MAP_HUGETLB;
- }
-
- size = strlen(execname) + 1;
- memcpy(event->mmap2.filename, execname, size);
- size = PERF_ALIGN(size, sizeof(u64));
- event->mmap2.len -= event->mmap.start;
- event->mmap2.header.size = (sizeof(event->mmap2) -
- (sizeof(event->mmap2.filename) - size));
- memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
- event->mmap2.header.size += machine->id_hdr_size;
- event->mmap2.pid = tgid;
- event->mmap2.tid = pid;
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
- rc = -1;
- break;
- }
-
- if (truncation)
- break;
- }
-
- fclose(fp);
- return rc;
-}
-
-int perf_event__synthesize_modules(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
-{
- int rc = 0;
- struct map *pos;
- struct maps *maps = machine__kernel_maps(machine);
- union perf_event *event = zalloc((sizeof(event->mmap) +
- machine->id_hdr_size));
- if (event == NULL) {
- pr_debug("Not enough memory synthesizing mmap event "
- "for kernel modules\n");
- return -1;
- }
-
- event->header.type = PERF_RECORD_MMAP;
-
- /*
- * kernel uses 0 for user space maps, see kernel/perf_event.c
- * __perf_event_mmap
- */
- if (machine__is_host(machine))
- event->header.misc = PERF_RECORD_MISC_KERNEL;
- else
- event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-
- for (pos = maps__first(maps); pos; pos = map__next(pos)) {
- size_t size;
-
- if (!__map__is_kmodule(pos))
- continue;
-
- size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
- event->mmap.header.type = PERF_RECORD_MMAP;
- event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size));
- memset(event->mmap.filename + size, 0, machine->id_hdr_size);
- event->mmap.header.size += machine->id_hdr_size;
- event->mmap.start = pos->start;
- event->mmap.len = pos->end - pos->start;
- event->mmap.pid = machine->pid;
-
- memcpy(event->mmap.filename, pos->dso->long_name,
- pos->dso->long_name_len + 1);
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
- rc = -1;
- break;
- }
- }
-
- free(event);
- return rc;
-}
-
-static int __event__synthesize_thread(union perf_event *comm_event,
- union perf_event *mmap_event,
- union perf_event *fork_event,
- union perf_event *namespaces_event,
- pid_t pid, int full,
- perf_event__handler_t process,
- struct perf_tool *tool,
- struct machine *machine,
- bool mmap_data)
-{
- char filename[PATH_MAX];
- DIR *tasks;
- struct dirent *dirent;
- pid_t tgid, ppid;
- int rc = 0;
-
- /* special case: only send one comm event using passed in pid */
- if (!full) {
- tgid = perf_event__synthesize_comm(tool, comm_event, pid,
- process, machine);
-
- if (tgid == -1)
- return -1;
-
- if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
- tgid, process, machine) < 0)
- return -1;
-
- /*
- * send mmap only for thread group leader
- * see thread__init_map_groups
- */
- if (pid == tgid &&
- perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
- process, machine, mmap_data))
- return -1;
-
- return 0;
- }
-
- if (machine__is_default_guest(machine))
- return 0;
-
- snprintf(filename, sizeof(filename), "%s/proc/%d/task",
- machine->root_dir, pid);
-
- tasks = opendir(filename);
- if (tasks == NULL) {
- pr_debug("couldn't open %s\n", filename);
- return 0;
- }
-
- while ((dirent = readdir(tasks)) != NULL) {
- char *end;
- pid_t _pid;
-
- _pid = strtol(dirent->d_name, &end, 10);
- if (*end)
- continue;
-
- rc = -1;
- if (perf_event__prepare_comm(comm_event, _pid, machine,
- &tgid, &ppid) != 0)
- break;
-
- if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
- ppid, process, machine) < 0)
- break;
-
- if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
- tgid, process, machine) < 0)
- break;
-
- /*
- * Send the prepared comm event
- */
- if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
- break;
-
- rc = 0;
- if (_pid == pid) {
- /* process the parent's maps too */
- rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
- process, machine, mmap_data);
- if (rc)
- break;
- }
- }
-
- closedir(tasks);
- return rc;
-}
-
-int perf_event__synthesize_thread_map(struct perf_tool *tool,
- struct perf_thread_map *threads,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data)
-{
- union perf_event *comm_event, *mmap_event, *fork_event;
- union perf_event *namespaces_event;
- int err = -1, thread, j;
-
- comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
- if (comm_event == NULL)
- goto out;
-
- mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
- if (mmap_event == NULL)
- goto out_free_comm;
-
- fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
- if (fork_event == NULL)
- goto out_free_mmap;
-
- namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
- (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
- machine->id_hdr_size);
- if (namespaces_event == NULL)
- goto out_free_fork;
-
- err = 0;
- for (thread = 0; thread < threads->nr; ++thread) {
- if (__event__synthesize_thread(comm_event, mmap_event,
- fork_event, namespaces_event,
- perf_thread_map__pid(threads, thread), 0,
- process, tool, machine,
- mmap_data)) {
- err = -1;
- break;
- }
-
- /*
- * comm.pid is set to thread group id by
- * perf_event__synthesize_comm
- */
- if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
- bool need_leader = true;
-
- /* is thread group leader in thread_map? */
- for (j = 0; j < threads->nr; ++j) {
- if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
- need_leader = false;
- break;
- }
- }
-
- /* if not, generate events for it */
- if (need_leader &&
- __event__synthesize_thread(comm_event, mmap_event,
- fork_event, namespaces_event,
- comm_event->comm.pid, 0,
- process, tool, machine,
- mmap_data)) {
- err = -1;
- break;
- }
- }
- }
- free(namespaces_event);
-out_free_fork:
- free(fork_event);
-out_free_mmap:
- free(mmap_event);
-out_free_comm:
- free(comm_event);
-out:
- return err;
-}
-
-static int __perf_event__synthesize_threads(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data,
- struct dirent **dirent,
- int start,
- int num)
-{
- union perf_event *comm_event, *mmap_event, *fork_event;
- union perf_event *namespaces_event;
- int err = -1;
- char *end;
- pid_t pid;
- int i;
-
- comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
- if (comm_event == NULL)
- goto out;
-
- mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
- if (mmap_event == NULL)
- goto out_free_comm;
-
- fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
- if (fork_event == NULL)
- goto out_free_mmap;
-
- namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
- (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
- machine->id_hdr_size);
- if (namespaces_event == NULL)
- goto out_free_fork;
-
- for (i = start; i < start + num; i++) {
- if (!isdigit(dirent[i]->d_name[0]))
- continue;
-
- pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
- /* only interested in proper numerical dirents */
- if (*end)
- continue;
- /*
- * We may race with an exiting thread, so don't stop just because
- * one thread couldn't be synthesized.
- */
- __event__synthesize_thread(comm_event, mmap_event, fork_event,
- namespaces_event, pid, 1, process,
- tool, machine, mmap_data);
- }
- err = 0;
-
- free(namespaces_event);
-out_free_fork:
- free(fork_event);
-out_free_mmap:
- free(mmap_event);
-out_free_comm:
- free(comm_event);
-out:
- return err;
-}
-
-struct synthesize_threads_arg {
- struct perf_tool *tool;
- perf_event__handler_t process;
- struct machine *machine;
- bool mmap_data;
- struct dirent **dirent;
- int num;
- int start;
-};
-
-static void *synthesize_threads_worker(void *arg)
-{
- struct synthesize_threads_arg *args = arg;
-
- __perf_event__synthesize_threads(args->tool, args->process,
- args->machine, args->mmap_data,
- args->dirent,
- args->start, args->num);
- return NULL;
-}
-
-int perf_event__synthesize_threads(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data,
- unsigned int nr_threads_synthesize)
-{
- struct synthesize_threads_arg *args = NULL;
- pthread_t *synthesize_threads = NULL;
- char proc_path[PATH_MAX];
- struct dirent **dirent;
- int num_per_thread;
- int m, n, i, j;
- int thread_nr;
- int base = 0;
- int err = -1;
-
-
- if (machine__is_default_guest(machine))
- return 0;
-
- snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
- n = scandir(proc_path, &dirent, 0, alphasort);
- if (n < 0)
- return err;
-
- if (nr_threads_synthesize == UINT_MAX)
- thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
- else
- thread_nr = nr_threads_synthesize;
-
- if (thread_nr <= 1) {
- err = __perf_event__synthesize_threads(tool, process,
- machine, mmap_data,
- dirent, base, n);
- goto free_dirent;
- }
- if (thread_nr > n)
- thread_nr = n;
-
- synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
- if (synthesize_threads == NULL)
- goto free_dirent;
-
- args = calloc(sizeof(*args), thread_nr);
- if (args == NULL)
- goto free_threads;
-
- num_per_thread = n / thread_nr;
- m = n % thread_nr;
- for (i = 0; i < thread_nr; i++) {
- args[i].tool = tool;
- args[i].process = process;
- args[i].machine = machine;
- args[i].mmap_data = mmap_data;
- args[i].dirent = dirent;
- }
- for (i = 0; i < m; i++) {
- args[i].num = num_per_thread + 1;
- args[i].start = i * args[i].num;
- }
- if (i != 0)
- base = args[i-1].start + args[i-1].num;
- for (j = i; j < thread_nr; j++) {
- args[j].num = num_per_thread;
- args[j].start = base + (j - i) * args[i].num;
- }
-
- for (i = 0; i < thread_nr; i++) {
- if (pthread_create(&synthesize_threads[i], NULL,
- synthesize_threads_worker, &args[i]))
- goto out_join;
- }
- err = 0;
-out_join:
- for (i = 0; i < thread_nr; i++)
- pthread_join(synthesize_threads[i], NULL);
- free(args);
-free_threads:
- free(synthesize_threads);
-free_dirent:
- for (i = 0; i < n; i++)
- zfree(&dirent[i]);
- free(dirent);
-
- return err;
-}
-
struct process_symbol_args {
const char *name;
u64 start;
@@ -899,327 +117,6 @@ int kallsyms__get_function_start(const char *kallsyms_filename,
return 0;
}
-int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
- perf_event__handler_t process __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- return 0;
-}
-
-static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
-{
- size_t size;
- struct map *map = machine__kernel_map(machine);
- struct kmap *kmap;
- int err;
- union perf_event *event;
-
- if (map == NULL)
- return -1;
-
- kmap = map__kmap(map);
- if (!kmap->ref_reloc_sym)
- return -1;
-
- /*
- * We should get this from /sys/kernel/sections/.text, but until that is
- * available use this, and once it is, keep this as a fallback for older
- * kernels.
- */
- event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
- if (event == NULL) {
- pr_debug("Not enough memory synthesizing mmap event "
- "for kernel modules\n");
- return -1;
- }
-
- if (machine__is_host(machine)) {
- /*
- * kernel uses PERF_RECORD_MISC_USER for user space maps,
- * see kernel/perf_event.c __perf_event_mmap
- */
- event->header.misc = PERF_RECORD_MISC_KERNEL;
- } else {
- event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- }
-
- size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
- "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
- size = PERF_ALIGN(size, sizeof(u64));
- event->mmap.header.type = PERF_RECORD_MMAP;
- event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
- event->mmap.pgoff = kmap->ref_reloc_sym->addr;
- event->mmap.start = map->start;
- event->mmap.len = map->end - event->mmap.start;
- event->mmap.pid = machine->pid;
-
- err = perf_tool__process_synth_event(tool, event, machine, process);
- free(event);
-
- return err;
-}
-
-int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
-{
- int err;
-
- err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
- if (err < 0)
- return err;
-
- return perf_event__synthesize_extra_kmaps(tool, process, machine);
-}
-
-int perf_event__synthesize_thread_map2(struct perf_tool *tool,
- struct perf_thread_map *threads,
- perf_event__handler_t process,
- struct machine *machine)
-{
- union perf_event *event;
- int i, err, size;
-
- size = sizeof(event->thread_map);
- size += threads->nr * sizeof(event->thread_map.entries[0]);
-
- event = zalloc(size);
- if (!event)
- return -ENOMEM;
-
- event->header.type = PERF_RECORD_THREAD_MAP;
- event->header.size = size;
- event->thread_map.nr = threads->nr;
-
- for (i = 0; i < threads->nr; i++) {
- struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
- char *comm = perf_thread_map__comm(threads, i);
-
- if (!comm)
- comm = (char *) "";
-
- entry->pid = perf_thread_map__pid(threads, i);
- strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
- }
-
- err = process(tool, event, NULL, machine);
-
- free(event);
- return err;
-}
-
-static void synthesize_cpus(struct cpu_map_entries *cpus,
- struct perf_cpu_map *map)
-{
- int i;
-
- cpus->nr = map->nr;
-
- for (i = 0; i < map->nr; i++)
- cpus->cpu[i] = map->map[i];
-}
-
-static void synthesize_mask(struct perf_record_record_cpu_map *mask,
- struct perf_cpu_map *map, int max)
-{
- int i;
-
- mask->nr = BITS_TO_LONGS(max);
- mask->long_size = sizeof(long);
-
- for (i = 0; i < map->nr; i++)
- set_bit(map->map[i], mask->mask);
-}
-
-static size_t cpus_size(struct perf_cpu_map *map)
-{
- return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
-}
-
-static size_t mask_size(struct perf_cpu_map *map, int *max)
-{
- int i;
-
- *max = 0;
-
- for (i = 0; i < map->nr; i++) {
- /* bit position of the cpu is + 1 */
- int bit = map->map[i] + 1;
-
- if (bit > *max)
- *max = bit;
- }
-
- return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
-}
-
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
-{
- size_t size_cpus, size_mask;
- bool is_dummy = perf_cpu_map__empty(map);
-
- /*
- * Both array and mask data have variable size based
- * on the number of cpus and their actual values.
- * The size of the 'struct perf_record_cpu_map_data' is:
- *
- * array = size of 'struct cpu_map_entries' +
- * number of cpus * sizeof(u64)
- *
- * mask = size of 'struct perf_record_record_cpu_map' +
- * maximum cpu bit converted to size of longs
- *
- * and finally + the size of 'struct perf_record_cpu_map_data'.
- */
- size_cpus = cpus_size(map);
- size_mask = mask_size(map, max);
-
- if (is_dummy || (size_cpus < size_mask)) {
- *size += size_cpus;
- *type = PERF_CPU_MAP__CPUS;
- } else {
- *size += size_mask;
- *type = PERF_CPU_MAP__MASK;
- }
-
- *size += sizeof(struct perf_record_cpu_map_data);
- *size = PERF_ALIGN(*size, sizeof(u64));
- return zalloc(*size);
-}
-
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
- u16 type, int max)
-{
- data->type = type;
-
- switch (type) {
- case PERF_CPU_MAP__CPUS:
- synthesize_cpus((struct cpu_map_entries *) data->data, map);
- break;
- case PERF_CPU_MAP__MASK:
- synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
- default:
- break;
- };
-}
-
-static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
-{
- size_t size = sizeof(struct perf_record_cpu_map);
- struct perf_record_cpu_map *event;
- int max;
- u16 type;
-
- event = cpu_map_data__alloc(map, &size, &type, &max);
- if (!event)
- return NULL;
-
- event->header.type = PERF_RECORD_CPU_MAP;
- event->header.size = size;
- event->data.type = type;
-
- cpu_map_data__synthesize(&event->data, map, type, max);
- return event;
-}
-
-int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct perf_cpu_map *map,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_record_cpu_map *event;
- int err;
-
- event = cpu_map_event__new(map);
- if (!event)
- return -ENOMEM;
-
- err = process(tool, (union perf_event *) event, NULL, machine);
-
- free(event);
- return err;
-}
-
-int perf_event__synthesize_stat_config(struct perf_tool *tool,
- struct perf_stat_config *config,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_record_stat_config *event;
- int size, i = 0, err;
-
- size = sizeof(*event);
- size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
-
- event = zalloc(size);
- if (!event)
- return -ENOMEM;
-
- event->header.type = PERF_RECORD_STAT_CONFIG;
- event->header.size = size;
- event->nr = PERF_STAT_CONFIG_TERM__MAX;
-
-#define ADD(__term, __val) \
- event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \
- event->data[i].val = __val; \
- i++;
-
- ADD(AGGR_MODE, config->aggr_mode)
- ADD(INTERVAL, config->interval)
- ADD(SCALE, config->scale)
-
- WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
- "stat config terms unbalanced\n");
-#undef ADD
-
- err = process(tool, (union perf_event *) event, NULL, machine);
-
- free(event);
- return err;
-}
-
-int perf_event__synthesize_stat(struct perf_tool *tool,
- u32 cpu, u32 thread, u64 id,
- struct perf_counts_values *count,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_record_stat event;
-
- event.header.type = PERF_RECORD_STAT;
- event.header.size = sizeof(event);
- event.header.misc = 0;
-
- event.id = id;
- event.cpu = cpu;
- event.thread = thread;
- event.val = count->val;
- event.ena = count->ena;
- event.run = count->run;
-
- return process(tool, (union perf_event *) &event, NULL, machine);
-}
-
-int perf_event__synthesize_stat_round(struct perf_tool *tool,
- u64 evtime, u64 type,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_record_stat_round event;
-
- event.header.type = PERF_RECORD_STAT_ROUND;
- event.header.size = sizeof(event);
- event.header.misc = 0;
-
- event.time = evtime;
- event.type = type;
-
- return process(tool, (union perf_event *) &event, NULL, machine);
-}
-
void perf_event__read_stat_config(struct perf_stat_config *config,
struct perf_record_stat_config *event)
{
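
Among the code moved out of event.c is cpu_map_data__alloc(), which picks between two wire encodings for a cpu map: a u16-per-cpu array (PERF_CPU_MAP__CPUS) or a bitmask sized to the highest cpu number (PERF_CPU_MAP__MASK), whichever is smaller. A standalone sketch of that size comparison, with the header overhead omitted and illustrative numbers:

#include <stdio.h>

#define BITS_PER_LONG	 (8 * (int)sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	int nr_cpus = 4, max_bit = 255 + 1;	/* cpus 252..255 online */
	size_t array = nr_cpus * sizeof(unsigned short);
	size_t mask  = BITS_TO_LONGS(max_bit) * sizeof(long);

	/* 8 bytes vs 32 bytes: sparse, high-numbered maps favor the array */
	printf("array=%zu mask=%zu -> %s\n", array, mask,
	       array < mask ? "PERF_CPU_MAP__CPUS" : "PERF_CPU_MAP__MASK");
	return 0;
}

A dense map flips the result: 256 contiguous cpus cost 512 bytes as an array but only 32 as a mask.
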
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 47ad81d47b1a..a0a0c91cde4a 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -279,54 +279,13 @@ enum {
void perf_event__print_totals(void);
-struct perf_tool;
-struct perf_thread_map;
struct perf_cpu_map;
+struct perf_record_stat_config;
struct perf_stat_config;
-struct perf_counts_values;
-
-typedef int (*perf_event__handler_t)(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine);
+struct perf_tool;
-int perf_event__synthesize_thread_map(struct perf_tool *tool,
- struct perf_thread_map *threads,
- perf_event__handler_t process,
- struct machine *machine, bool mmap_data);
-int perf_event__synthesize_thread_map2(struct perf_tool *tool,
- struct perf_thread_map *threads,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct perf_cpu_map *cpus,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_threads(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine, bool mmap_data,
- unsigned int nr_threads_synthesize);
-int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_stat_config(struct perf_tool *tool,
- struct perf_stat_config *config,
- perf_event__handler_t process,
- struct machine *machine);
void perf_event__read_stat_config(struct perf_stat_config *config,
struct perf_record_stat_config *event);
-int perf_event__synthesize_stat(struct perf_tool *tool,
- u32 cpu, u32 thread, u64 id,
- struct perf_counts_values *count,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_stat_round(struct perf_tool *tool,
- u64 time, u64 type,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_modules(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
int perf_event__process_comm(struct perf_tool *tool,
union perf_event *event,
@@ -380,10 +339,6 @@ int perf_event__process_bpf(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
-int perf_tool__process_synth_event(struct perf_tool *tool,
- union perf_event *event,
- struct machine *machine,
- perf_event__handler_t process);
int perf_event__process(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -405,34 +360,6 @@ void thread__resolve(struct thread *thread, struct addr_location *al,
const char *perf_event__name(unsigned int id);
-size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
- u64 read_format);
-int perf_event__synthesize_sample(union perf_event *event, u64 type,
- u64 read_format,
- const struct perf_sample *sample);
-
-pid_t perf_event__synthesize_comm(struct perf_tool *tool,
- union perf_event *event, pid_t pid,
- perf_event__handler_t process,
- struct machine *machine);
-
-int perf_event__synthesize_namespaces(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid,
- perf_event__handler_t process,
- struct machine *machine);
-
-int perf_event__synthesize_mmap_events(struct perf_tool *tool,
- union perf_event *event,
- pid_t pid, pid_t tgid,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data);
-
-int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
-
size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 095924aa186b..d277a98e62df 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -10,13 +10,14 @@
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
+#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
-#include "util.h"
+#include <internal/lib.h> // page_size
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
@@ -49,18 +50,14 @@ int sigqueue(pid_t pid, int sig, const union sigval value);
#endif
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
-#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
- int i;
-
- for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
- INIT_HLIST_HEAD(&evlist->heads[i]);
perf_evlist__init(&evlist->core);
perf_evlist__set_maps(&evlist->core, cpus, threads);
- fdarray__init(&evlist->pollfd, 64);
+ fdarray__init(&evlist->core.pollfd, 64);
evlist->workload.pid = -1;
evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}
@@ -108,7 +105,7 @@ struct evlist *perf_evlist__new_dummy(void)
*/
void perf_evlist__set_id_pos(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
evlist->id_pos = first->id_pos;
evlist->is_pos = first->is_pos;
@@ -124,7 +121,7 @@ static void perf_evlist__update_id_pos(struct evlist *evlist)
perf_evlist__set_id_pos(evlist);
}
-static void perf_evlist__purge(struct evlist *evlist)
+static void evlist__purge(struct evlist *evlist)
{
struct evsel *pos, *n;
@@ -137,11 +134,11 @@ static void perf_evlist__purge(struct evlist *evlist)
evlist->core.nr_entries = 0;
}
-void perf_evlist__exit(struct evlist *evlist)
+void evlist__exit(struct evlist *evlist)
{
zfree(&evlist->mmap);
zfree(&evlist->overwrite_mmap);
- fdarray__exit(&evlist->pollfd);
+ fdarray__exit(&evlist->core.pollfd);
}
void evlist__delete(struct evlist *evlist)
@@ -149,14 +146,14 @@ void evlist__delete(struct evlist *evlist)
if (evlist == NULL)
return;
- perf_evlist__munmap(evlist);
+ evlist__munmap(evlist);
evlist__close(evlist);
perf_cpu_map__put(evlist->core.cpus);
perf_thread_map__put(evlist->core.threads);
evlist->core.cpus = NULL;
evlist->core.threads = NULL;
- perf_evlist__purge(evlist);
- perf_evlist__exit(evlist);
+ evlist__purge(evlist);
+ evlist__exit(evlist);
free(evlist);
}
@@ -318,7 +315,7 @@ int perf_evlist__add_newtp(struct evlist *evlist,
static int perf_evlist__nr_threads(struct evlist *evlist,
struct evsel *evsel)
{
- if (evsel->system_wide)
+ if (evsel->core.system_wide)
return 1;
else
return perf_thread_map__nr(evlist->core.threads);
@@ -401,128 +398,29 @@ int perf_evlist__enable_event_idx(struct evlist *evlist,
return perf_evlist__enable_event_thread(evlist, evsel, idx);
}
-int perf_evlist__alloc_pollfd(struct evlist *evlist)
+int evlist__add_pollfd(struct evlist *evlist, int fd)
{
- int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
- int nr_threads = perf_thread_map__nr(evlist->core.threads);
- int nfds = 0;
- struct evsel *evsel;
-
- evlist__for_each_entry(evlist, evsel) {
- if (evsel->system_wide)
- nfds += nr_cpus;
- else
- nfds += nr_cpus * nr_threads;
- }
-
- if (fdarray__available_entries(&evlist->pollfd) < nfds &&
- fdarray__grow(&evlist->pollfd, nfds) < 0)
- return -ENOMEM;
-
- return 0;
-}
-
-static int __perf_evlist__add_pollfd(struct evlist *evlist, int fd,
- struct perf_mmap *map, short revent)
-{
- int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
- /*
- * Save the idx so that when we filter out fds POLLHUP'ed we can
- * close the associated evlist->mmap[] entry.
- */
- if (pos >= 0) {
- evlist->pollfd.priv[pos].ptr = map;
-
- fcntl(fd, F_SETFL, O_NONBLOCK);
- }
-
- return pos;
-}
-
-int perf_evlist__add_pollfd(struct evlist *evlist, int fd)
-{
- return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
+ return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
}
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
void *arg __maybe_unused)
{
- struct perf_mmap *map = fda->priv[fd].ptr;
+ struct mmap *map = fda->priv[fd].ptr;
if (map)
perf_mmap__put(map);
}
-int perf_evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
+int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
- return fdarray__filter(&evlist->pollfd, revents_and_mask,
+ return fdarray__filter(&evlist->core.pollfd, revents_and_mask,
perf_evlist__munmap_filtered, NULL);
}
-int perf_evlist__poll(struct evlist *evlist, int timeout)
+int evlist__poll(struct evlist *evlist, int timeout)
{
- return fdarray__poll(&evlist->pollfd, timeout);
-}
-
-static void perf_evlist__id_hash(struct evlist *evlist,
- struct evsel *evsel,
- int cpu, int thread, u64 id)
-{
- int hash;
- struct perf_sample_id *sid = SID(evsel, cpu, thread);
-
- sid->id = id;
- sid->evsel = evsel;
- hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
- hlist_add_head(&sid->node, &evlist->heads[hash]);
-}
-
-void perf_evlist__id_add(struct evlist *evlist, struct evsel *evsel,
- int cpu, int thread, u64 id)
-{
- perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
- evsel->id[evsel->ids++] = id;
-}
-
-int perf_evlist__id_add_fd(struct evlist *evlist,
- struct evsel *evsel,
- int cpu, int thread, int fd)
-{
- u64 read_data[4] = { 0, };
- int id_idx = 1; /* The first entry is the counter value */
- u64 id;
- int ret;
-
- ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
- if (!ret)
- goto add;
-
- if (errno != ENOTTY)
- return -1;
-
- /* Legacy way to get event id. All hail to old kernels! */
-
- /*
- * This way does not work with group format read, so bail
- * out in that case.
- */
- if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
- return -1;
-
- if (!(evsel->core.attr.read_format & PERF_FORMAT_ID) ||
- read(fd, &read_data, sizeof(read_data)) == -1)
- return -1;
-
- if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
- ++id_idx;
- if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
- ++id_idx;
-
- id = read_data[id_idx];
-
- add:
- perf_evlist__id_add(evlist, evsel, cpu, thread, id);
- return 0;
+ return perf_evlist__poll(&evlist->core, timeout);
}
static void perf_evlist__set_sid_idx(struct evlist *evlist,
@@ -535,7 +433,7 @@ static void perf_evlist__set_sid_idx(struct evlist *evlist,
sid->cpu = evlist->core.cpus->map[cpu];
else
sid->cpu = -1;
- if (!evsel->system_wide && evlist->core.threads && thread >= 0)
+ if (!evsel->core.system_wide && evlist->core.threads && thread >= 0)
sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
else
sid->tid = -1;
@@ -548,7 +446,7 @@ struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
int hash;
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
- head = &evlist->heads[hash];
+ head = &evlist->core.heads[hash];
hlist_for_each_entry(sid, head, node)
if (sid->id == id)
@@ -562,14 +460,14 @@ struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
struct perf_sample_id *sid;
if (evlist->core.nr_entries == 1 || !id)
- return perf_evlist__first(evlist);
+ return evlist__first(evlist);
sid = perf_evlist__id2sid(evlist, id);
if (sid)
- return sid->evsel;
+ return container_of(sid->evsel, struct evsel, core);
if (!perf_evlist__sample_id_all(evlist))
- return perf_evlist__first(evlist);
+ return evlist__first(evlist);
return NULL;
}
@@ -584,7 +482,7 @@ struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
sid = perf_evlist__id2sid(evlist, id);
if (sid)
- return sid->evsel;
+ return container_of(sid->evsel, struct evsel, core);
return NULL;
}
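
sid->evsel now points at the libperf-level struct perf_evsel embedded inside the tool-level struct evsel, and container_of() recovers the outer object by subtracting the member's offset from the member's address. A minimal standalone sketch with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core  { int fd; };                 /* stand-in for perf_evsel */
struct evsel { const char *name; struct core core; };

int main(void)
{
	struct evsel ev = { .name = "cycles" };
	struct core *c = &ev.core;            /* what the core layer hands back */
	struct evsel *outer = container_of(c, struct evsel, core);

	printf("%s\n", outer->name);          /* prints "cycles" */
	return 0;
}

This is the standard kernel layering pattern: the generic layer stores and returns pointers to the embedded core struct, and each tool wraps it back up on the way out.
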
@@ -613,7 +511,7 @@ static int perf_evlist__event2id(struct evlist *evlist,
struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
union perf_event *event)
{
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
struct hlist_head *head;
struct perf_sample_id *sid;
int hash;
@@ -634,11 +532,11 @@ struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
return first;
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
- head = &evlist->heads[hash];
+ head = &evlist->core.heads[hash];
hlist_for_each_entry(sid, head, node) {
if (sid->id == id)
- return sid->evsel;
+ return container_of(sid->evsel, struct evsel, core);
}
return NULL;
}
@@ -650,8 +548,8 @@ static int perf_evlist__set_paused(struct evlist *evlist, bool value)
if (!evlist->overwrite_mmap)
return 0;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- int fd = evlist->overwrite_mmap[i].fd;
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ int fd = evlist->overwrite_mmap[i].core.fd;
int err;
if (fd < 0)
@@ -673,42 +571,42 @@ static int perf_evlist__resume(struct evlist *evlist)
return perf_evlist__set_paused(evlist, false);
}
-static void perf_evlist__munmap_nofree(struct evlist *evlist)
+static void evlist__munmap_nofree(struct evlist *evlist)
{
int i;
if (evlist->mmap)
- for (i = 0; i < evlist->nr_mmaps; i++)
+ for (i = 0; i < evlist->core.nr_mmaps; i++)
perf_mmap__munmap(&evlist->mmap[i]);
if (evlist->overwrite_mmap)
- for (i = 0; i < evlist->nr_mmaps; i++)
+ for (i = 0; i < evlist->core.nr_mmaps; i++)
perf_mmap__munmap(&evlist->overwrite_mmap[i]);
}
-void perf_evlist__munmap(struct evlist *evlist)
+void evlist__munmap(struct evlist *evlist)
{
- perf_evlist__munmap_nofree(evlist);
+ evlist__munmap_nofree(evlist);
zfree(&evlist->mmap);
zfree(&evlist->overwrite_mmap);
}
-static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
- bool overwrite)
+static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
+ bool overwrite)
{
int i;
- struct perf_mmap *map;
+ struct mmap *map;
- evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
+ evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
if (perf_cpu_map__empty(evlist->core.cpus))
- evlist->nr_mmaps = perf_thread_map__nr(evlist->core.threads);
- map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+ evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
+ map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
if (!map)
return NULL;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- map[i].fd = -1;
- map[i].overwrite = overwrite;
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ map[i].core.fd = -1;
+ map[i].core.overwrite = overwrite;
/*
* When the perf_mmap() call is made we grab one refcount, plus
* one extra to let perf_mmap__consume() get the last
@@ -718,7 +616,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
* Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
* thus does perf_mmap__get() on it.
*/
- refcount_set(&map[i].refcnt, 0);
+ refcount_set(&map[i].core.refcnt, 0);
}
return map;
}
@@ -732,7 +630,7 @@ perf_evlist__should_poll(struct evlist *evlist __maybe_unused,
return true;
}
-static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
+static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
struct mmap_params *mp, int cpu_idx,
int thread, int *_output, int *_output_overwrite)
{
@@ -741,7 +639,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx);
evlist__for_each_entry(evlist, evsel) {
- struct perf_mmap *maps = evlist->mmap;
+ struct mmap *maps = evlist->mmap;
int *output = _output;
int fd;
int cpu;
@@ -752,7 +650,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
maps = evlist->overwrite_mmap;
if (!maps) {
- maps = perf_evlist__alloc_mmap(evlist, true);
+ maps = evlist__alloc_mmap(evlist, true);
if (!maps)
return -1;
evlist->overwrite_mmap = maps;
@@ -762,7 +660,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
mp->prot &= ~PROT_WRITE;
}
- if (evsel->system_wide && thread)
+ if (evsel->core.system_wide && thread)
continue;
cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
@@ -792,14 +690,14 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
* other events, so it should not need to be polled anyway.
* Therefore don't add it for polling.
*/
- if (!evsel->system_wide &&
- __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
+ if (!evsel->core.system_wide &&
+ perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) {
perf_mmap__put(&maps[idx]);
return -1;
}
if (evsel->core.attr.read_format & PERF_FORMAT_ID) {
- if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
+ if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, cpu, thread,
fd) < 0)
return -1;
perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
@@ -810,7 +708,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
return 0;
}
-static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
+static int evlist__mmap_per_cpu(struct evlist *evlist,
struct mmap_params *mp)
{
int cpu, thread;
@@ -826,7 +724,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
true);
for (thread = 0; thread < nr_threads; thread++) {
- if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
+ if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
thread, &output, &output_overwrite))
goto out_unmap;
}
@@ -835,11 +733,11 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
return 0;
out_unmap:
- perf_evlist__munmap_nofree(evlist);
+ evlist__munmap_nofree(evlist);
return -1;
}
-static int perf_evlist__mmap_per_thread(struct evlist *evlist,
+static int evlist__mmap_per_thread(struct evlist *evlist,
struct mmap_params *mp)
{
int thread;
@@ -853,7 +751,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
false);
- if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
+ if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
&output, &output_overwrite))
goto out_unmap;
}
@@ -861,7 +759,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
return 0;
out_unmap:
- perf_evlist__munmap_nofree(evlist);
+ evlist__munmap_nofree(evlist);
return -1;
}
@@ -888,7 +786,7 @@ unsigned long perf_event_mlock_kb_in_pages(void)
return pages;
}
-size_t perf_evlist__mmap_size(unsigned long pages)
+size_t evlist__mmap_size(unsigned long pages)
{
if (pages == UINT_MAX)
pages = perf_event_mlock_kb_in_pages();
@@ -971,7 +869,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
}
/**
- * perf_evlist__mmap_ex - Create mmaps to receive events.
+ * evlist__mmap_ex - Create mmaps to receive events.
* @evlist: list of events
* @pages: map length in pages
* @overwrite: overwrite older events?
@@ -979,7 +877,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
* @auxtrace_overwrite: overwrite older auxtrace data?
*
* If @overwrite is %false the user needs to signal event consumption using
- * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
+ * perf_mmap__write_tail(). Using evlist__mmap_read() does this
* automatically.
*
* Similarly, if @auxtrace_overwrite is %false the user needs to signal data
@@ -987,7 +885,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*
* Return: %0 on success, negative error code otherwise.
*/
-int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
+int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
int comp_level)
@@ -1004,36 +902,36 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
.comp_level = comp_level };
if (!evlist->mmap)
- evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
+ evlist->mmap = evlist__alloc_mmap(evlist, false);
if (!evlist->mmap)
return -ENOMEM;
- if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+ if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(&evlist->core) < 0)
return -ENOMEM;
- evlist->mmap_len = perf_evlist__mmap_size(pages);
- pr_debug("mmap size %zuB\n", evlist->mmap_len);
- mp.mask = evlist->mmap_len - page_size - 1;
+ evlist->core.mmap_len = evlist__mmap_size(pages);
+ pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
+ mp.mask = evlist->core.mmap_len - page_size - 1;
- auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
+ auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
auxtrace_pages, auxtrace_overwrite);
evlist__for_each_entry(evlist, evsel) {
if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
- evsel->sample_id == NULL &&
- perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+ evsel->core.sample_id == NULL &&
+ perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr) < 0)
return -ENOMEM;
}
if (perf_cpu_map__empty(cpus))
- return perf_evlist__mmap_per_thread(evlist, &mp);
+ return evlist__mmap_per_thread(evlist, &mp);
- return perf_evlist__mmap_per_cpu(evlist, &mp);
+ return evlist__mmap_per_cpu(evlist, &mp);
}
-int perf_evlist__mmap(struct evlist *evlist, unsigned int pages)
+int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
+ return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}
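
Caller-side, the renamed entry points are used the same way perf_evlist__start_sb_thread() does further down; a rough sketch, assuming an out_delete_evlist label exists in the caller:

	/* default-sized ring buffers; UINT_MAX picks perf_event_mlock_kb */
	if (evlist__mmap(evlist, UINT_MAX))
		goto out_delete_evlist;

	/* or spell out the auxtrace and compression parameters explicitly */
	if (evlist__mmap_ex(evlist, UINT_MAX, 0, false, 0, PERF_AFFINITY_SYS, 1, 0))
		goto out_delete_evlist;
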
int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
@@ -1225,7 +1123,7 @@ u64 perf_evlist__combined_branch_type(struct evlist *evlist)
bool perf_evlist__valid_read_format(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist), *pos = first;
+ struct evsel *first = evlist__first(evlist), *pos = first;
u64 read_format = first->core.attr.read_format;
u64 sample_type = first->core.attr.sample_type;
@@ -1243,15 +1141,9 @@ bool perf_evlist__valid_read_format(struct evlist *evlist)
return true;
}
-u64 perf_evlist__read_format(struct evlist *evlist)
-{
- struct evsel *first = perf_evlist__first(evlist);
- return first->core.attr.read_format;
-}
-
u16 perf_evlist__id_hdr_size(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
struct perf_sample *data;
u64 sample_type;
u16 size = 0;
@@ -1284,7 +1176,7 @@ out:
bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist), *pos = first;
+ struct evsel *first = evlist__first(evlist), *pos = first;
evlist__for_each_entry_continue(evlist, pos) {
if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
@@ -1296,7 +1188,7 @@ bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
bool perf_evlist__sample_id_all(struct evlist *evlist)
{
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
return first->core.attr.sample_id_all;
}
@@ -1529,19 +1421,6 @@ int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
}
-size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp)
-{
- struct evsel *evsel;
- size_t printed = 0;
-
- evlist__for_each_entry(evlist, evsel) {
- printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
- perf_evsel__name(evsel));
- }
-
- return printed + fprintf(fp, "\n");
-}
-
int perf_evlist__strerror_open(struct evlist *evlist,
int err, char *buf, size_t size)
{
@@ -1571,7 +1450,7 @@ int perf_evlist__strerror_open(struct evlist *evlist,
"Hint:\tThe current value is %d.", value);
break;
case EINVAL: {
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
int max_freq;
if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
@@ -1599,7 +1478,7 @@ out_default:
int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
- int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
+ int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
switch (err) {
case EPERM:
@@ -1633,7 +1512,7 @@ void perf_evlist__to_front(struct evlist *evlist,
struct evsel *evsel, *n;
LIST_HEAD(move);
- if (move_evsel == perf_evlist__first(evlist))
+ if (move_evsel == evlist__first(evlist))
return;
evlist__for_each_entry_safe(evlist, n, evsel) {
@@ -1754,7 +1633,7 @@ bool perf_evlist__exclude_kernel(struct evlist *evlist)
void perf_evlist__force_leader(struct evlist *evlist)
{
if (!evlist->nr_groups) {
- struct evsel *leader = perf_evlist__first(evlist);
+ struct evsel *leader = evlist__first(evlist);
perf_evlist__set_leader(evlist);
leader->forced_leader = true;
@@ -1780,7 +1659,7 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
is_open = false;
if (c2->leader == leader) {
if (is_open)
- evsel__close(c2);
+ perf_evsel__close(&c2->core);
c2->leader = c2;
c2->core.nr_members = 0;
}
@@ -1844,10 +1723,10 @@ static void *perf_evlist__poll_thread(void *arg)
draining = true;
if (!draining)
- perf_evlist__poll(evlist, 1000);
+ evlist__poll(evlist, 1000);
- for (i = 0; i < evlist->nr_mmaps; i++) {
- struct perf_mmap *map = &evlist->mmap[i];
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &evlist->mmap[i];
union perf_event *event;
if (perf_mmap__read_init(map))
@@ -1889,7 +1768,7 @@ int perf_evlist__start_sb_thread(struct evlist *evlist,
goto out_delete_evlist;
}
- if (perf_evlist__mmap(evlist, UINT_MAX))
+ if (evlist__mmap(evlist, UINT_MAX))
goto out_delete_evlist;
evlist__for_each_entry(evlist, counter) {
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index a55f0f2546e5..7cfe75522ba5 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -7,11 +7,11 @@
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
-#include <stdio.h>
#include <internal/evlist.h>
+#include <internal/evsel.h>
#include "events_stats.h"
#include "evsel.h"
-#include "mmap.h"
+#include <pthread.h>
#include <signal.h>
#include <unistd.h>
@@ -20,16 +20,38 @@ struct thread_map;
struct perf_cpu_map;
struct record_opts;
-#define PERF_EVLIST__HLIST_BITS 8
-#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+/*
+ * State machine of bkw_mmap_state:
+ *
+ * .________________(forbid)_____________.
+ * |                                     V
+ * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
+ *                     ^  ^              |   ^               |
+ *                     |  |__(forbid)____/   |___(forbid)___/|
+ *                     |                                     |
+ *                      \_________________(3)_______________/
+ *
+ * NOTREADY     : Backward ring buffers are not ready
+ * RUNNING      : Backward ring buffers are recording
+ * DATA_PENDING : We are required to collect data from backward ring buffers
+ * EMPTY        : We have collected data from backward ring buffers.
+ *
+ * (0): Setup backward ring buffer
+ * (1): Pause ring buffers for reading
+ * (2): Read from ring buffers
+ * (3): Resume ring buffers for recording
+ */
+enum bkw_mmap_state {
+ BKW_MMAP_NOTREADY,
+ BKW_MMAP_RUNNING,
+ BKW_MMAP_DATA_PENDING,
+ BKW_MMAP_EMPTY,
+};
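
The diagram permits exactly the four numbered transitions and nothing else. A hypothetical checker, not part of this patch, that encodes the legal edges:

static bool bkw_mmap_state__legal(enum bkw_mmap_state old,
				  enum bkw_mmap_state new)
{
	switch (old) {
	case BKW_MMAP_NOTREADY:     return new == BKW_MMAP_RUNNING;      /* (0) setup */
	case BKW_MMAP_RUNNING:      return new == BKW_MMAP_DATA_PENDING; /* (1) pause for reading */
	case BKW_MMAP_DATA_PENDING: return new == BKW_MMAP_EMPTY;        /* (2) read */
	case BKW_MMAP_EMPTY:        return new == BKW_MMAP_RUNNING;      /* (3) resume */
	default:                    return false;
	}
}
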
struct evlist {
struct perf_evlist core;
- struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
int nr_groups;
- int nr_mmaps;
bool enabled;
- size_t mmap_len;
int id_pos;
int is_pos;
u64 combined_sample_type;
@@ -38,9 +60,8 @@ struct evlist {
int cork_fd;
pid_t pid;
} workload;
- struct fdarray pollfd;
- struct perf_mmap *mmap;
- struct perf_mmap *overwrite_mmap;
+ struct mmap *mmap;
+ struct mmap *overwrite_mmap;
struct evsel *selected;
struct events_stats stats;
struct perf_env *env;
@@ -65,7 +86,7 @@ struct evlist *perf_evlist__new_default(void);
struct evlist *perf_evlist__new_dummy(void);
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
-void perf_evlist__exit(struct evlist *evlist);
+void evlist__exit(struct evlist *evlist);
void evlist__delete(struct evlist *evlist);
void evlist__add(struct evlist *evlist, struct evsel *entry);
@@ -119,17 +140,10 @@ struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
const char *name);
-void perf_evlist__id_add(struct evlist *evlist, struct evsel *evsel,
- int cpu, int thread, u64 id);
-int perf_evlist__id_add_fd(struct evlist *evlist,
- struct evsel *evsel,
- int cpu, int thread, int fd);
-
-int perf_evlist__add_pollfd(struct evlist *evlist, int fd);
-int perf_evlist__alloc_pollfd(struct evlist *evlist);
-int perf_evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);
+int evlist__add_pollfd(struct evlist *evlist, int fd);
+int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);
-int perf_evlist__poll(struct evlist *evlist, int timeout);
+int evlist__poll(struct evlist *evlist, int timeout);
struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id);
struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
@@ -139,7 +153,7 @@ struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id);
void perf_evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state);
-void perf_evlist__mmap_consume(struct evlist *evlist, int idx);
+void evlist__mmap_consume(struct evlist *evlist, int idx);
int evlist__open(struct evlist *evlist);
void evlist__close(struct evlist *evlist);
@@ -170,14 +184,14 @@ int perf_evlist__parse_mmap_pages(const struct option *opt,
unsigned long perf_event_mlock_kb_in_pages(void);
-int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
+int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks,
int affinity, int flush, int comp_level);
-int perf_evlist__mmap(struct evlist *evlist, unsigned int pages);
-void perf_evlist__munmap(struct evlist *evlist);
+int evlist__mmap(struct evlist *evlist, unsigned int pages);
+void evlist__munmap(struct evlist *evlist);
-size_t perf_evlist__mmap_size(unsigned long pages);
+size_t evlist__mmap_size(unsigned long pages);
void evlist__disable(struct evlist *evlist);
void evlist__enable(struct evlist *evlist);
@@ -195,7 +209,6 @@ int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);
void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct evlist *evlist);
-u64 perf_evlist__read_format(struct evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct evlist *evlist);
u64 perf_evlist__combined_sample_type(struct evlist *evlist);
u64 perf_evlist__combined_branch_type(struct evlist *evlist);
@@ -221,17 +234,19 @@ static inline bool perf_evlist__empty(struct evlist *evlist)
return list_empty(&evlist->core.entries);
}
-static inline struct evsel *perf_evlist__first(struct evlist *evlist)
+static inline struct evsel *evlist__first(struct evlist *evlist)
{
- return list_entry(evlist->core.entries.next, struct evsel, core.node);
+ struct perf_evsel *evsel = perf_evlist__first(&evlist->core);
+
+ return container_of(evsel, struct evsel, core);
}
-static inline struct evsel *perf_evlist__last(struct evlist *evlist)
+static inline struct evsel *evlist__last(struct evlist *evlist)
{
- return list_entry(evlist->core.entries.prev, struct evsel, core.node);
-}
+ struct perf_evsel *evsel = perf_evlist__last(&evlist->core);
-size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp);
+ return container_of(evsel, struct evsel, core);
+}
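
Both accessors use the embed-and-recover idiom: struct evsel embeds struct perf_evsel as its core member, and container_of() converts a pointer to that member back into the wrapper. The pattern in isolation, with stand-in types:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core  { int id; };                       /* stand-in for struct perf_evsel */
struct outer { struct core core; char *name; }; /* stand-in for struct evsel */

static struct outer *outer__from_core(struct core *c)
{
	return container_of(c, struct outer, core); /* recover the wrapper from the embedded member */
}
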
int perf_evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 85825384f9e8..5591af81a070 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -30,8 +30,10 @@
#include "counts.h"
#include "event.h"
#include "evsel.h"
+#include "util/evsel_config.h"
+#include "util/evsel_fprintf.h"
#include "evlist.h"
-#include "cpumap.h"
+#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
@@ -45,6 +47,7 @@
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
+#include <internal/lib.h>
#include <linux/ctype.h>
@@ -1226,36 +1229,6 @@ int evsel__disable(struct evsel *evsel)
return err;
}
-int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads)
-{
- if (ncpus == 0 || nthreads == 0)
- return 0;
-
- if (evsel->system_wide)
- nthreads = 1;
-
- evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
- if (evsel->sample_id == NULL)
- return -ENOMEM;
-
- evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
- if (evsel->id == NULL) {
- xyarray__delete(evsel->sample_id);
- evsel->sample_id = NULL;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void perf_evsel__free_id(struct evsel *evsel)
-{
- xyarray__delete(evsel->sample_id);
- evsel->sample_id = NULL;
- zfree(&evsel->id);
- evsel->ids = 0;
-}
-
static void perf_evsel__free_config_terms(struct evsel *evsel)
{
struct perf_evsel_config_term *term, *h;
@@ -1272,7 +1245,7 @@ void perf_evsel__exit(struct evsel *evsel)
assert(evsel->evlist == NULL);
perf_evsel__free_counts(evsel);
perf_evsel__free_fd(&evsel->core);
- perf_evsel__free_id(evsel);
+ perf_evsel__free_id(&evsel->core);
perf_evsel__free_config_terms(evsel);
cgroup__put(evsel->cgrp);
perf_cpu_map__put(evsel->core.cpus);
@@ -1472,152 +1445,6 @@ static int get_group_fd(struct evsel *evsel, int cpu, int thread)
return fd;
}
-struct bit_names {
- int bit;
- const char *name;
-};
-
-static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
-{
- bool first_bit = true;
- int i = 0;
-
- do {
- if (value & bits[i].bit) {
- buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
- first_bit = false;
- }
- } while (bits[++i].name != NULL);
-}
-
-static void __p_sample_type(char *buf, size_t size, u64 value)
-{
-#define bit_name(n) { PERF_SAMPLE_##n, #n }
- struct bit_names bits[] = {
- bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
- bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
- bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
- bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
- bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
- bit_name(WEIGHT), bit_name(PHYS_ADDR),
- { .name = NULL, }
- };
-#undef bit_name
- __p_bits(buf, size, value, bits);
-}
-
-static void __p_branch_sample_type(char *buf, size_t size, u64 value)
-{
-#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
- struct bit_names bits[] = {
- bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
- bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
- bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
- bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
- bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
- { .name = NULL, }
- };
-#undef bit_name
- __p_bits(buf, size, value, bits);
-}
-
-static void __p_read_format(char *buf, size_t size, u64 value)
-{
-#define bit_name(n) { PERF_FORMAT_##n, #n }
- struct bit_names bits[] = {
- bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
- bit_name(ID), bit_name(GROUP),
- { .name = NULL, }
- };
-#undef bit_name
- __p_bits(buf, size, value, bits);
-}
-
-#define BUF_SIZE 1024
-
-#define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
-#define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
-#define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
-#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
-#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
-#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
-
-#define PRINT_ATTRn(_n, _f, _p) \
-do { \
- if (attr->_f) { \
- _p(attr->_f); \
- ret += attr__fprintf(fp, _n, buf, priv);\
- } \
-} while (0)
-
-#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
-
-int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
- attr__fprintf_f attr__fprintf, void *priv)
-{
- char buf[BUF_SIZE];
- int ret = 0;
-
- PRINT_ATTRf(type, p_unsigned);
- PRINT_ATTRf(size, p_unsigned);
- PRINT_ATTRf(config, p_hex);
- PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
- PRINT_ATTRf(sample_type, p_sample_type);
- PRINT_ATTRf(read_format, p_read_format);
-
- PRINT_ATTRf(disabled, p_unsigned);
- PRINT_ATTRf(inherit, p_unsigned);
- PRINT_ATTRf(pinned, p_unsigned);
- PRINT_ATTRf(exclusive, p_unsigned);
- PRINT_ATTRf(exclude_user, p_unsigned);
- PRINT_ATTRf(exclude_kernel, p_unsigned);
- PRINT_ATTRf(exclude_hv, p_unsigned);
- PRINT_ATTRf(exclude_idle, p_unsigned);
- PRINT_ATTRf(mmap, p_unsigned);
- PRINT_ATTRf(comm, p_unsigned);
- PRINT_ATTRf(freq, p_unsigned);
- PRINT_ATTRf(inherit_stat, p_unsigned);
- PRINT_ATTRf(enable_on_exec, p_unsigned);
- PRINT_ATTRf(task, p_unsigned);
- PRINT_ATTRf(watermark, p_unsigned);
- PRINT_ATTRf(precise_ip, p_unsigned);
- PRINT_ATTRf(mmap_data, p_unsigned);
- PRINT_ATTRf(sample_id_all, p_unsigned);
- PRINT_ATTRf(exclude_host, p_unsigned);
- PRINT_ATTRf(exclude_guest, p_unsigned);
- PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
- PRINT_ATTRf(exclude_callchain_user, p_unsigned);
- PRINT_ATTRf(mmap2, p_unsigned);
- PRINT_ATTRf(comm_exec, p_unsigned);
- PRINT_ATTRf(use_clockid, p_unsigned);
- PRINT_ATTRf(context_switch, p_unsigned);
- PRINT_ATTRf(write_backward, p_unsigned);
- PRINT_ATTRf(namespaces, p_unsigned);
- PRINT_ATTRf(ksymbol, p_unsigned);
- PRINT_ATTRf(bpf_event, p_unsigned);
- PRINT_ATTRf(aux_output, p_unsigned);
-
- PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
- PRINT_ATTRf(bp_type, p_unsigned);
- PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
- PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
- PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
- PRINT_ATTRf(sample_regs_user, p_hex);
- PRINT_ATTRf(sample_stack_user, p_unsigned);
- PRINT_ATTRf(clockid, p_signed);
- PRINT_ATTRf(sample_regs_intr, p_hex);
- PRINT_ATTRf(aux_watermark, p_unsigned);
- PRINT_ATTRf(sample_max_stack, p_unsigned);
-
- return ret;
-}
-
-static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
- void *priv __maybe_unused)
-{
- return fprintf(fp, " %-32s %s\n", name, val);
-}
-
static void perf_evsel__remove_fd(struct evsel *pos,
int nr_cpus, int nr_threads,
int thread_idx)
@@ -1662,7 +1489,7 @@ static bool ignore_missing_thread(struct evsel *evsel,
return false;
/* The system wide setup does not work with threads. */
- if (evsel->system_wide)
+ if (evsel->core.system_wide)
return false;
/* The -ESRCH is perf event syscall errno for pid's not found. */
@@ -1688,6 +1515,12 @@ static bool ignore_missing_thread(struct evsel *evsel,
return true;
}
+static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
+ void *priv __maybe_unused)
+{
+ return fprintf(fp, " %-32s %s\n", name, val);
+}
+
static void display_attr(struct perf_event_attr *attr)
{
if (verbose >= 2) {
@@ -1771,7 +1604,7 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
threads = empty_thread_map;
}
- if (evsel->system_wide)
+ if (evsel->core.system_wide)
nthreads = 1;
else
nthreads = threads->nr;
@@ -1818,7 +1651,7 @@ retry_sample_id:
for (thread = 0; thread < nthreads; thread++) {
int fd, group_fd;
- if (!evsel->cgrp && !evsel->system_wide)
+ if (!evsel->cgrp && !evsel->core.system_wide)
pid = perf_thread_map__pid(threads, thread);
group_fd = get_group_fd(evsel, cpu, thread);
@@ -1991,7 +1824,7 @@ out_close:
void evsel__close(struct evsel *evsel)
{
perf_evsel__close(&evsel->core);
- perf_evsel__free_id(evsel);
+ perf_evsel__free_id(&evsel->core);
}
int perf_evsel__open_per_cpu(struct evsel *evsel,
@@ -2419,283 +2252,6 @@ int perf_evsel__parse_sample_timestamp(struct evsel *evsel,
return 0;
}
-size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
- u64 read_format)
-{
- size_t sz, result = sizeof(struct perf_record_sample);
-
- if (type & PERF_SAMPLE_IDENTIFIER)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_IP)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_TID)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_TIME)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_ADDR)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_ID)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_STREAM_ID)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_CPU)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_PERIOD)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_READ) {
- result += sizeof(u64);
- if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
- result += sizeof(u64);
- if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
- result += sizeof(u64);
- /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
- if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- result += sz;
- } else {
- result += sizeof(u64);
- }
- }
-
- if (type & PERF_SAMPLE_CALLCHAIN) {
- sz = (sample->callchain->nr + 1) * sizeof(u64);
- result += sz;
- }
-
- if (type & PERF_SAMPLE_RAW) {
- result += sizeof(u32);
- result += sample->raw_size;
- }
-
- if (type & PERF_SAMPLE_BRANCH_STACK) {
- sz = sample->branch_stack->nr * sizeof(struct branch_entry);
- sz += sizeof(u64);
- result += sz;
- }
-
- if (type & PERF_SAMPLE_REGS_USER) {
- if (sample->user_regs.abi) {
- result += sizeof(u64);
- sz = hweight64(sample->user_regs.mask) * sizeof(u64);
- result += sz;
- } else {
- result += sizeof(u64);
- }
- }
-
- if (type & PERF_SAMPLE_STACK_USER) {
- sz = sample->user_stack.size;
- result += sizeof(u64);
- if (sz) {
- result += sz;
- result += sizeof(u64);
- }
- }
-
- if (type & PERF_SAMPLE_WEIGHT)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_DATA_SRC)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_TRANSACTION)
- result += sizeof(u64);
-
- if (type & PERF_SAMPLE_REGS_INTR) {
- if (sample->intr_regs.abi) {
- result += sizeof(u64);
- sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
- result += sz;
- } else {
- result += sizeof(u64);
- }
- }
-
- if (type & PERF_SAMPLE_PHYS_ADDR)
- result += sizeof(u64);
-
- return result;
-}
-
-int perf_event__synthesize_sample(union perf_event *event, u64 type,
- u64 read_format,
- const struct perf_sample *sample)
-{
- __u64 *array;
- size_t sz;
- /*
- * used for cross-endian analysis. See git commit 65014ab3
- * for why this goofiness is needed.
- */
- union u64_swap u;
-
- array = event->sample.array;
-
- if (type & PERF_SAMPLE_IDENTIFIER) {
- *array = sample->id;
- array++;
- }
-
- if (type & PERF_SAMPLE_IP) {
- *array = sample->ip;
- array++;
- }
-
- if (type & PERF_SAMPLE_TID) {
- u.val32[0] = sample->pid;
- u.val32[1] = sample->tid;
- *array = u.val64;
- array++;
- }
-
- if (type & PERF_SAMPLE_TIME) {
- *array = sample->time;
- array++;
- }
-
- if (type & PERF_SAMPLE_ADDR) {
- *array = sample->addr;
- array++;
- }
-
- if (type & PERF_SAMPLE_ID) {
- *array = sample->id;
- array++;
- }
-
- if (type & PERF_SAMPLE_STREAM_ID) {
- *array = sample->stream_id;
- array++;
- }
-
- if (type & PERF_SAMPLE_CPU) {
- u.val32[0] = sample->cpu;
- u.val32[1] = 0;
- *array = u.val64;
- array++;
- }
-
- if (type & PERF_SAMPLE_PERIOD) {
- *array = sample->period;
- array++;
- }
-
- if (type & PERF_SAMPLE_READ) {
- if (read_format & PERF_FORMAT_GROUP)
- *array = sample->read.group.nr;
- else
- *array = sample->read.one.value;
- array++;
-
- if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
- *array = sample->read.time_enabled;
- array++;
- }
-
- if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
- *array = sample->read.time_running;
- array++;
- }
-
- /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
- if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- memcpy(array, sample->read.group.values, sz);
- array = (void *)array + sz;
- } else {
- *array = sample->read.one.id;
- array++;
- }
- }
-
- if (type & PERF_SAMPLE_CALLCHAIN) {
- sz = (sample->callchain->nr + 1) * sizeof(u64);
- memcpy(array, sample->callchain, sz);
- array = (void *)array + sz;
- }
-
- if (type & PERF_SAMPLE_RAW) {
- u.val32[0] = sample->raw_size;
- *array = u.val64;
- array = (void *)array + sizeof(u32);
-
- memcpy(array, sample->raw_data, sample->raw_size);
- array = (void *)array + sample->raw_size;
- }
-
- if (type & PERF_SAMPLE_BRANCH_STACK) {
- sz = sample->branch_stack->nr * sizeof(struct branch_entry);
- sz += sizeof(u64);
- memcpy(array, sample->branch_stack, sz);
- array = (void *)array + sz;
- }
-
- if (type & PERF_SAMPLE_REGS_USER) {
- if (sample->user_regs.abi) {
- *array++ = sample->user_regs.abi;
- sz = hweight64(sample->user_regs.mask) * sizeof(u64);
- memcpy(array, sample->user_regs.regs, sz);
- array = (void *)array + sz;
- } else {
- *array++ = 0;
- }
- }
-
- if (type & PERF_SAMPLE_STACK_USER) {
- sz = sample->user_stack.size;
- *array++ = sz;
- if (sz) {
- memcpy(array, sample->user_stack.data, sz);
- array = (void *)array + sz;
- *array++ = sz;
- }
- }
-
- if (type & PERF_SAMPLE_WEIGHT) {
- *array = sample->weight;
- array++;
- }
-
- if (type & PERF_SAMPLE_DATA_SRC) {
- *array = sample->data_src;
- array++;
- }
-
- if (type & PERF_SAMPLE_TRANSACTION) {
- *array = sample->transaction;
- array++;
- }
-
- if (type & PERF_SAMPLE_REGS_INTR) {
- if (sample->intr_regs.abi) {
- *array++ = sample->intr_regs.abi;
- sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
- memcpy(array, sample->intr_regs.regs, sz);
- array = (void *)array + sz;
- } else {
- *array++ = 0;
- }
- }
-
- if (type & PERF_SAMPLE_PHYS_ADDR) {
- *array = sample->phys_addr;
- array++;
- }
-
- return 0;
-}
-
struct tep_format_field *perf_evsel__field(struct evsel *evsel, const char *name)
{
return tep_find_field(evsel->tp_format, name);
@@ -2811,9 +2367,11 @@ bool perf_evsel__fallback(struct evsel *evsel, int err,
if (evsel->name)
free(evsel->name);
evsel->name = new_name;
- scnprintf(msg, msgsize,
-"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
+ scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
+ "to fall back to excluding kernel and hypervisor "
+ "samples", paranoid);
evsel->core.attr.exclude_kernel = 1;
+ evsel->core.attr.exclude_hv = 1;
return true;
}
@@ -2966,7 +2524,7 @@ static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
thread++) {
int fd = FD(evsel, cpu, thread);
- if (perf_evlist__id_add_fd(evlist, evsel,
+ if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
cpu, thread, fd) < 0)
return -1;
}
@@ -2980,7 +2538,7 @@ int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
struct perf_cpu_map *cpus = evsel->core.cpus;
struct perf_thread_map *threads = evsel->core.threads;
- if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
+ if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
return -ENOMEM;
return store_evsel_ids(evsel, evlist);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 68321d10eb2d..ddc5ee6f6592 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -4,7 +4,6 @@
#include <linux/list.h>
#include <stdbool.h>
-#include <stdio.h>
#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/types.h>
@@ -13,79 +12,11 @@
#include "symbol_conf.h"
#include <internal/cpumap.h>
-struct addr_location;
-struct evsel;
-union perf_event;
-
-/*
- * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
- * more than one entry in the evlist.
- */
-struct perf_sample_id {
- struct hlist_node node;
- u64 id;
- struct evsel *evsel;
- /*
- * 'idx' will be used for AUX area sampling. A sample will have AUX area
- * data that will be queued for decoding, where there are separate
- * queues for each CPU (per-cpu tracing) or task (per-thread tracing).
- * The sample ID can be used to lookup 'idx' which is effectively the
- * queue number.
- */
- int idx;
- int cpu;
- pid_t tid;
-
- /* Holds total ID period value for PERF_SAMPLE_READ processing. */
- u64 period;
-};
-
+struct bpf_object;
struct cgroup;
-
-/*
- * The 'struct perf_evsel_config_term' is used to pass event
- * specific configuration data to perf_evsel__config routine.
- * It is allocated within event parsing and attached to
- * perf_evsel::config_terms list head.
-*/
-enum term_type {
- PERF_EVSEL__CONFIG_TERM_PERIOD,
- PERF_EVSEL__CONFIG_TERM_FREQ,
- PERF_EVSEL__CONFIG_TERM_TIME,
- PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
- PERF_EVSEL__CONFIG_TERM_STACK_USER,
- PERF_EVSEL__CONFIG_TERM_INHERIT,
- PERF_EVSEL__CONFIG_TERM_MAX_STACK,
- PERF_EVSEL__CONFIG_TERM_MAX_EVENTS,
- PERF_EVSEL__CONFIG_TERM_OVERWRITE,
- PERF_EVSEL__CONFIG_TERM_DRV_CFG,
- PERF_EVSEL__CONFIG_TERM_BRANCH,
- PERF_EVSEL__CONFIG_TERM_PERCORE,
- PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT,
-};
-
-struct perf_evsel_config_term {
- struct list_head list;
- enum term_type type;
- union {
- u64 period;
- u64 freq;
- bool time;
- char *callgraph;
- char *drv_cfg;
- u64 stack_user;
- int max_stack;
- bool inherit;
- bool overwrite;
- char *branch;
- unsigned long max_events;
- bool percore;
- bool aux_output;
- } val;
- bool weak;
-};
-
+struct perf_counts;
struct perf_stat_evsel;
+union perf_event;
typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
@@ -94,10 +25,6 @@ enum perf_tool_event {
PERF_TOOL_DURATION_TIME = 1,
};
-struct bpf_object;
-struct perf_counts;
-struct xyarray;
-
/** struct evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
@@ -117,12 +44,9 @@ struct evsel {
struct perf_evsel core;
struct evlist *evlist;
char *filter;
- struct xyarray *sample_id;
- u64 *id;
struct perf_counts *counts;
struct perf_counts *prev_raw_counts;
int idx;
- u32 ids;
unsigned long max_events;
unsigned long nr_events_printed;
char *name;
@@ -146,7 +70,6 @@ struct evsel {
bool disabled;
bool no_aux_samples;
bool immediate;
- bool system_wide;
bool tracking;
bool per_pkg;
bool precise_max;
@@ -179,11 +102,6 @@ struct evsel {
} side_band;
};
-union u64_swap {
- u64 val64;
- u32 val32[2];
-};
-
struct perf_missing_features {
bool sample_id_all;
bool exclude_guest;
@@ -282,8 +200,6 @@ const char *perf_evsel__name(struct evsel *evsel);
const char *perf_evsel__group_name(struct evsel *evsel);
int perf_evsel__group_desc(struct evsel *evsel, char *buf, size_t size);
-int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads);
-
void __perf_evsel__set_sample_bit(struct evsel *evsel,
enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct evsel *evsel,
@@ -439,37 +355,6 @@ static inline bool perf_evsel__is_clock(struct evsel *evsel)
perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}
-struct perf_attr_details {
- bool freq;
- bool verbose;
- bool event_group;
- bool force;
- bool trace_fields;
-};
-
-int perf_evsel__fprintf(struct evsel *evsel,
- struct perf_attr_details *details, FILE *fp);
-
-#define EVSEL__PRINT_IP (1<<0)
-#define EVSEL__PRINT_SYM (1<<1)
-#define EVSEL__PRINT_DSO (1<<2)
-#define EVSEL__PRINT_SYMOFFSET (1<<3)
-#define EVSEL__PRINT_ONELINE (1<<4)
-#define EVSEL__PRINT_SRCLINE (1<<5)
-#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
-#define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7)
-#define EVSEL__PRINT_SKIP_IGNORED (1<<8)
-
-struct callchain_cursor;
-
-int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
- unsigned int print_opts,
- struct callchain_cursor *cursor, FILE *fp);
-
-int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
- int left_alignment, unsigned int print_opts,
- struct callchain_cursor *cursor, FILE *fp);
-
bool perf_evsel__fallback(struct evsel *evsel, int err,
char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct evsel *evsel, struct target *target,
@@ -502,11 +387,6 @@ static inline bool evsel__has_callchain(const struct evsel *evsel)
return (evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0;
}
-typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
-
-int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
- attr__fprintf_f attr__fprintf, void *priv);
-
struct perf_env *perf_evsel__env(struct evsel *evsel);
int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
diff --git a/tools/perf/util/evsel_config.h b/tools/perf/util/evsel_config.h
new file mode 100644
index 000000000000..8a7648037c18
--- /dev/null
+++ b/tools/perf/util/evsel_config.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __PERF_EVSEL_CONFIG_H
+#define __PERF_EVSEL_CONFIG_H 1
+
+#include <linux/types.h>
+#include <stdbool.h>
+
+/*
+ * The 'struct perf_evsel_config_term' is used to pass event-specific
+ * configuration data to the perf_evsel__config() routine. It is allocated
+ * within event parsing and attached to the perf_evsel::config_terms
+ * list head.
+ */
+enum evsel_term_type {
+ PERF_EVSEL__CONFIG_TERM_PERIOD,
+ PERF_EVSEL__CONFIG_TERM_FREQ,
+ PERF_EVSEL__CONFIG_TERM_TIME,
+ PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
+ PERF_EVSEL__CONFIG_TERM_STACK_USER,
+ PERF_EVSEL__CONFIG_TERM_INHERIT,
+ PERF_EVSEL__CONFIG_TERM_MAX_STACK,
+ PERF_EVSEL__CONFIG_TERM_MAX_EVENTS,
+ PERF_EVSEL__CONFIG_TERM_OVERWRITE,
+ PERF_EVSEL__CONFIG_TERM_DRV_CFG,
+ PERF_EVSEL__CONFIG_TERM_BRANCH,
+ PERF_EVSEL__CONFIG_TERM_PERCORE,
+ PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT,
+};
+
+struct perf_evsel_config_term {
+ struct list_head list;
+ enum evsel_term_type type;
+ union {
+ u64 period;
+ u64 freq;
+ bool time;
+ char *callgraph;
+ char *drv_cfg;
+ u64 stack_user;
+ int max_stack;
+ bool inherit;
+ bool overwrite;
+ char *branch;
+ unsigned long max_events;
+ bool percore;
+ bool aux_output;
+ } val;
+ bool weak;
+};
+#endif // __PERF_EVSEL_CONFIG_H
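
A consumer of this header typically allocates a term, tags the union, and splices it onto an evsel's config_terms list. A hedged sketch — the helper name and list-head argument are illustrative, not API added by this patch:

#include <errno.h>
#include <stdlib.h>
#include <linux/list.h>
#include "util/evsel_config.h"

static int add_period_term(struct list_head *config_terms, u64 period)
{
	struct perf_evsel_config_term *term = calloc(1, sizeof(*term));

	if (!term)
		return -ENOMEM;

	term->type = PERF_EVSEL__CONFIG_TERM_PERIOD;
	term->val.period = period;
	list_add_tail(&term->list, config_terms); /* perf_evsel::config_terms */
	return 0;
}
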
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 496fec01f5d1..028df7afb0dc 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -4,6 +4,8 @@
#include <stdbool.h>
#include <traceevent/event-parse.h>
#include "evsel.h"
+#include "util/evsel_fprintf.h"
+#include "util/event.h"
#include "callchain.h"
#include "map.h"
#include "strlist.h"
@@ -101,7 +103,7 @@ out:
int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
unsigned int print_opts, struct callchain_cursor *cursor,
- FILE *fp)
+ struct strlist *bt_stop_list, FILE *fp)
{
int printed = 0;
struct callchain_cursor_node *node;
@@ -174,10 +176,8 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
printed += fprintf(fp, "\n");
/* Add srccode here too? */
- if (symbol_conf.bt_stop_list &&
- node->sym &&
- strlist__has_entry(symbol_conf.bt_stop_list,
- node->sym->name)) {
+ if (bt_stop_list && node->sym &&
+ strlist__has_entry(bt_stop_list, node->sym->name)) {
break;
}
@@ -192,7 +192,7 @@ next:
int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
int left_alignment, unsigned int print_opts,
- struct callchain_cursor *cursor, FILE *fp)
+ struct callchain_cursor *cursor, struct strlist *bt_stop_list, FILE *fp)
{
int printed = 0;
int print_ip = print_opts & EVSEL__PRINT_IP;
@@ -203,8 +203,8 @@ int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
if (cursor != NULL) {
- printed += sample__fprintf_callchain(sample, left_alignment,
- print_opts, cursor, fp);
+ printed += sample__fprintf_callchain(sample, left_alignment, print_opts,
+ cursor, bt_stop_list, fp);
} else {
printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
diff --git a/tools/perf/util/evsel_fprintf.h b/tools/perf/util/evsel_fprintf.h
new file mode 100644
index 000000000000..47e6c8456bb1
--- /dev/null
+++ b/tools/perf/util/evsel_fprintf.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __PERF_EVSEL_FPRINTF_H
+#define __PERF_EVSEL_FPRINTF_H 1
+
+#include <stdio.h>
+#include <stdbool.h>
+
+struct evsel;
+
+struct perf_attr_details {
+ bool freq;
+ bool verbose;
+ bool event_group;
+ bool force;
+ bool trace_fields;
+};
+
+int perf_evsel__fprintf(struct evsel *evsel,
+ struct perf_attr_details *details, FILE *fp);
+
+#define EVSEL__PRINT_IP (1<<0)
+#define EVSEL__PRINT_SYM (1<<1)
+#define EVSEL__PRINT_DSO (1<<2)
+#define EVSEL__PRINT_SYMOFFSET (1<<3)
+#define EVSEL__PRINT_ONELINE (1<<4)
+#define EVSEL__PRINT_SRCLINE (1<<5)
+#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
+#define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7)
+#define EVSEL__PRINT_SKIP_IGNORED (1<<8)
+
+struct addr_location;
+struct perf_event_attr;
+struct perf_sample;
+struct callchain_cursor;
+struct strlist;
+
+int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
+ unsigned int print_opts, struct callchain_cursor *cursor,
+ struct strlist *bt_stop_list, FILE *fp);
+
+int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
+ int left_alignment, unsigned int print_opts,
+ struct callchain_cursor *cursor,
+ struct strlist *bt_stop_list, FILE *fp);
+
+typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
+
+int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
+ attr__fprintf_f attr__fprintf, void *priv);
+#endif // __PERF_EVSEL_FPRINTF_H
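
Callers compose a print_opts mask from the EVSEL__PRINT_* bits and now pass the backtrace stop list explicitly; for example, assuming the surrounding sample, al and cursor objects already exist:

	unsigned int print_opts = EVSEL__PRINT_IP | EVSEL__PRINT_SYM |
				  EVSEL__PRINT_DSO | EVSEL__PRINT_SKIP_IGNORED;

	/* cursor and bt_stop_list may both be NULL */
	sample__fprintf_sym(sample, al, /*left_alignment=*/8, print_opts,
			    cursor, /*bt_stop_list=*/NULL, stdout);
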
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index b72440bf9a79..d4137559be05 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -35,6 +35,9 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#elif defined(__sparc__)
#define GEN_ELF_ARCH EM_SPARC
#define GEN_ELF_CLASS ELFCLASS32
+#elif defined(__s390x__)
+#define GEN_ELF_ARCH EM_S390
+#define GEN_ELF_CLASS ELFCLASS64
#else
#error "unsupported architecture"
#endif
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b0c34dda30a0..86d9396cb131 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -25,6 +25,7 @@
#include "dso.h"
#include "evlist.h"
#include "evsel.h"
+#include "util/evsel_fprintf.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
@@ -42,11 +43,12 @@
#include "tool.h"
#include "time-utils.h"
#include "units.h"
-#include "util.h"
+#include "util/util.h" // perf_exe()
#include "cputopo.h"
#include "bpf-event.h"
#include <linux/ctype.h>
+#include <internal/lib.h>
/*
* magic2 = "PERFILE2"
@@ -70,15 +72,6 @@ struct perf_file_attr {
struct perf_file_section ids;
};
-struct feat_fd {
- struct perf_header *ph;
- int fd;
- void *buf; /* Either buf != NULL or fd >= 0 */
- ssize_t offset;
- size_t size;
- struct evsel *events;
-};
-
void perf_header__set_feat(struct perf_header *header, int feat)
{
set_bit(feat, header->adds_features);
@@ -524,7 +517,7 @@ static int write_event_desc(struct feat_fd *ff,
* copy into an nri to be independent of the
* type of ids,
*/
- nri = evsel->ids;
+ nri = evsel->core.ids;
ret = do_write(ff, &nri, sizeof(nri));
if (ret < 0)
return ret;
@@ -538,7 +531,7 @@ static int write_event_desc(struct feat_fd *ff,
/*
* write unique ids for this event
*/
- ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
+ ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
if (ret < 0)
return ret;
}
@@ -1081,7 +1074,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
if (sysfs__read_str(file, &cache->map, &len)) {
- zfree(&cache->map);
+ zfree(&cache->size);
zfree(&cache->type);
return -1;
}
@@ -1598,7 +1591,7 @@ static void free_event_desc(struct evsel *events)
for (evsel = events; evsel->core.attr.size; evsel++) {
zfree(&evsel->name);
- zfree(&evsel->id);
+ zfree(&evsel->core.id);
}
free(events);
@@ -1664,8 +1657,8 @@ static struct evsel *read_event_desc(struct feat_fd *ff)
id = calloc(nr, sizeof(*id));
if (!id)
goto error;
- evsel->ids = nr;
- evsel->id = id;
+ evsel->core.ids = nr;
+ evsel->core.id = id;
for (j = 0 ; j < nr; j++) {
if (do_read_u64(ff, id))
@@ -1707,9 +1700,9 @@ static void print_event_desc(struct feat_fd *ff, FILE *fp)
for (evsel = events; evsel->core.attr.size; evsel++) {
fprintf(fp, "# event : name = %s, ", evsel->name);
- if (evsel->ids) {
+ if (evsel->core.ids) {
fprintf(fp, ", id = {");
- for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
+ for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
if (j)
fputc(',', fp);
fprintf(fp, " %"PRIu64, *id);
@@ -2823,15 +2816,6 @@ static int process_compressed(struct feat_fd *ff,
return 0;
}
-struct feature_ops {
- int (*write)(struct feat_fd *ff, struct evlist *evlist);
- void (*print)(struct feat_fd *ff, FILE *fp);
- int (*process)(struct feat_fd *ff, void *data);
- const char *name;
- bool full_only;
- bool synthesize;
-};
-
#define FEAT_OPR(n, func, __full_only) \
[HEADER_##n] = { \
.name = __stringify(n), \
@@ -2858,8 +2842,10 @@ struct feature_ops {
#define process_branch_stack NULL
#define process_stat NULL
+// Only used in util/synthetic-events.c
+const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
-static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
+const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPN(TRACING_DATA, tracing_data, false),
FEAT_OPN(BUILD_ID, build_id, false),
FEAT_OPR(HOSTNAME, hostname, false),
@@ -3083,7 +3069,7 @@ int perf_session__write_header(struct perf_session *session,
evlist__for_each_entry(session->evlist, evsel) {
evsel->id_offset = lseek(fd, 0, SEEK_CUR);
- err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
+ err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
if (err < 0) {
pr_debug("failed to write perf header\n");
return err;
@@ -3097,7 +3083,7 @@ int perf_session__write_header(struct perf_session *session,
.attr = evsel->core.attr,
.ids = {
.offset = evsel->id_offset,
- .size = evsel->ids * sizeof(u64),
+ .size = evsel->core.ids * sizeof(u64),
}
};
err = do_write(&ff, &f_attr, sizeof(f_attr));
@@ -3624,7 +3610,7 @@ int perf_session__read_header(struct perf_session *session)
* for allocating the perf_sample_id table we fake 1 cpu and
* hattr->ids threads.
*/
- if (perf_evsel__alloc_id(evsel, 1, nr_ids))
+ if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
goto out_delete_evlist;
lseek(fd, f_attr.ids.offset, SEEK_SET);
@@ -3633,7 +3619,7 @@ int perf_session__read_header(struct perf_session *session)
if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
goto out_errno;
- perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
+ perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
}
lseek(fd, tmp, SEEK_SET);
@@ -3656,105 +3642,6 @@ out_delete_evlist:
return -ENOMEM;
}
-int perf_event__synthesize_attr(struct perf_tool *tool,
- struct perf_event_attr *attr, u32 ids, u64 *id,
- perf_event__handler_t process)
-{
- union perf_event *ev;
- size_t size;
- int err;
-
- size = sizeof(struct perf_event_attr);
- size = PERF_ALIGN(size, sizeof(u64));
- size += sizeof(struct perf_event_header);
- size += ids * sizeof(u64);
-
- ev = zalloc(size);
-
- if (ev == NULL)
- return -ENOMEM;
-
- ev->attr.attr = *attr;
- memcpy(ev->attr.id, id, ids * sizeof(u64));
-
- ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
- ev->attr.header.size = (u16)size;
-
- if (ev->attr.header.size == size)
- err = process(tool, ev, NULL, NULL);
- else
- err = -E2BIG;
-
- free(ev);
-
- return err;
-}
-
-int perf_event__synthesize_features(struct perf_tool *tool,
- struct perf_session *session,
- struct evlist *evlist,
- perf_event__handler_t process)
-{
- struct perf_header *header = &session->header;
- struct feat_fd ff;
- struct perf_record_header_feature *fe;
- size_t sz, sz_hdr;
- int feat, ret;
-
- sz_hdr = sizeof(fe->header);
- sz = sizeof(union perf_event);
- /* get a nice alignment */
- sz = PERF_ALIGN(sz, page_size);
-
- memset(&ff, 0, sizeof(ff));
-
- ff.buf = malloc(sz);
- if (!ff.buf)
- return -ENOMEM;
-
- ff.size = sz - sz_hdr;
- ff.ph = &session->header;
-
- for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
- if (!feat_ops[feat].synthesize) {
- pr_debug("No record header feature for header :%d\n", feat);
- continue;
- }
-
- ff.offset = sizeof(*fe);
-
- ret = feat_ops[feat].write(&ff, evlist);
- if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
- pr_debug("Error writing feature\n");
- continue;
- }
- /* ff.buf may have changed due to realloc in do_write() */
- fe = ff.buf;
- memset(fe, 0, sizeof(*fe));
-
- fe->feat_id = feat;
- fe->header.type = PERF_RECORD_HEADER_FEATURE;
- fe->header.size = ff.offset;
-
- ret = process(tool, ff.buf, NULL, NULL);
- if (ret) {
- free(ff.buf);
- return ret;
- }
- }
-
- /* Send HEADER_LAST_FEATURE mark. */
- fe = ff.buf;
- fe->feat_id = HEADER_LAST_FEATURE;
- fe->header.type = PERF_RECORD_HEADER_FEATURE;
- fe->header.size = sizeof(*fe);
-
- ret = process(tool, ff.buf, NULL, NULL);
-
- free(ff.buf);
- return ret;
-}
-
int perf_event__process_feature(struct perf_session *session,
union perf_event *event)
{
@@ -3797,113 +3684,6 @@ int perf_event__process_feature(struct perf_session *session,
return 0;
}
-static struct perf_record_event_update *
-event_update_event__new(size_t size, u64 type, u64 id)
-{
- struct perf_record_event_update *ev;
-
- size += sizeof(*ev);
- size = PERF_ALIGN(size, sizeof(u64));
-
- ev = zalloc(size);
- if (ev) {
- ev->header.type = PERF_RECORD_EVENT_UPDATE;
- ev->header.size = (u16)size;
- ev->type = type;
- ev->id = id;
- }
- return ev;
-}
-
-int
-perf_event__synthesize_event_update_unit(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process)
-{
- struct perf_record_event_update *ev;
- size_t size = strlen(evsel->unit);
- int err;
-
- ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
- if (ev == NULL)
- return -ENOMEM;
-
- strlcpy(ev->data, evsel->unit, size + 1);
- err = process(tool, (union perf_event *)ev, NULL, NULL);
- free(ev);
- return err;
-}
-
-int
-perf_event__synthesize_event_update_scale(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process)
-{
- struct perf_record_event_update *ev;
- struct perf_record_event_update_scale *ev_data;
- int err;
-
- ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
- if (ev == NULL)
- return -ENOMEM;
-
- ev_data = (struct perf_record_event_update_scale *)ev->data;
- ev_data->scale = evsel->scale;
- err = process(tool, (union perf_event*) ev, NULL, NULL);
- free(ev);
- return err;
-}
-
-int
-perf_event__synthesize_event_update_name(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process)
-{
- struct perf_record_event_update *ev;
- size_t len = strlen(evsel->name);
- int err;
-
- ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
- if (ev == NULL)
- return -ENOMEM;
-
- strlcpy(ev->data, evsel->name, len + 1);
- err = process(tool, (union perf_event*) ev, NULL, NULL);
- free(ev);
- return err;
-}
-
-int
-perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process)
-{
- size_t size = sizeof(struct perf_record_event_update);
- struct perf_record_event_update *ev;
- int max, err;
- u16 type;
-
- if (!evsel->core.own_cpus)
- return 0;
-
- ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
- if (!ev)
- return -ENOMEM;
-
- ev->header.type = PERF_RECORD_EVENT_UPDATE;
- ev->header.size = (u16)size;
- ev->type = PERF_EVENT_UPDATE__CPUS;
- ev->id = evsel->id[0];
-
- cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
- evsel->core.own_cpus,
- type, max);
-
- err = process(tool, (union perf_event*) ev, NULL, NULL);
- free(ev);
- return err;
-}
-
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
struct perf_record_event_update *ev = &event->event_update;
@@ -3943,93 +3723,6 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
return ret;
}
-int perf_event__synthesize_attrs(struct perf_tool *tool,
- struct evlist *evlist,
- perf_event__handler_t process)
-{
- struct evsel *evsel;
- int err = 0;
-
- evlist__for_each_entry(evlist, evsel) {
- err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids,
- evsel->id, process);
- if (err) {
- pr_debug("failed to create perf header attribute\n");
- return err;
- }
- }
-
- return err;
-}
-
-static bool has_unit(struct evsel *counter)
-{
- return counter->unit && *counter->unit;
-}
-
-static bool has_scale(struct evsel *counter)
-{
- return counter->scale != 1;
-}
-
-int perf_event__synthesize_extra_attr(struct perf_tool *tool,
- struct evlist *evsel_list,
- perf_event__handler_t process,
- bool is_pipe)
-{
- struct evsel *counter;
- int err;
-
- /*
- * Synthesize other events stuff not carried within
- * attr event - unit, scale, name
- */
- evlist__for_each_entry(evsel_list, counter) {
- if (!counter->supported)
- continue;
-
- /*
- * Synthesize unit and scale only if it's defined.
- */
- if (has_unit(counter)) {
- err = perf_event__synthesize_event_update_unit(tool, counter, process);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel unit.\n");
- return err;
- }
- }
-
- if (has_scale(counter)) {
- err = perf_event__synthesize_event_update_scale(tool, counter, process);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel counter.\n");
- return err;
- }
- }
-
- if (counter->core.own_cpus) {
- err = perf_event__synthesize_event_update_cpus(tool, counter, process);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel cpus.\n");
- return err;
- }
- }
-
- /*
- * Name is needed only for pipe output,
- * perf.data carries event names.
- */
- if (is_pipe) {
- err = perf_event__synthesize_event_update_name(tool, counter, process);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel name.\n");
- return err;
- }
- }
- }
- return 0;
-}
-
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct evlist **pevlist)
@@ -4058,11 +3751,11 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
* for allocating the perf_sample_id table we fake 1 cpu and
* hattr->ids threads.
*/
- if (perf_evsel__alloc_id(evsel, 1, n_ids))
+ if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
return -ENOMEM;
for (i = 0; i < n_ids; i++) {
- perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
+ perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
}
return 0;
@@ -4114,55 +3807,6 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
return 0;
}
-int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
- struct evlist *evlist,
- perf_event__handler_t process)
-{
- union perf_event ev;
- struct tracing_data *tdata;
- ssize_t size = 0, aligned_size = 0, padding;
- struct feat_fd ff;
- int err __maybe_unused = 0;
-
- /*
- * We are going to store the size of the data followed
- * by the data contents. Since the fd descriptor is a pipe,
- * we cannot seek back to store the size of the data once
- * we know it. Instead we:
- *
- * - write the tracing data to the temp file
- * - get/write the data size to pipe
- * - write the tracing data from the temp file
- * to the pipe
- */
- tdata = tracing_data_get(&evlist->core.entries, fd, true);
- if (!tdata)
- return -1;
-
- memset(&ev, 0, sizeof(ev));
-
- ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
- size = tdata->size;
- aligned_size = PERF_ALIGN(size, sizeof(u64));
- padding = aligned_size - size;
- ev.tracing_data.header.size = sizeof(ev.tracing_data);
- ev.tracing_data.size = aligned_size;
-
- process(tool, &ev, NULL, NULL);
-
- /*
- * The put function will copy all the tracing data
- * stored in temp file to the pipe.
- */
- tracing_data_put(tdata);
-
- ff = (struct feat_fd){ .fd = fd };
- if (write_padded(&ff, NULL, 0, padding))
- return -1;
-
- return aligned_size;
-}
-
int perf_event__process_tracing_data(struct perf_session *session,
union perf_event *event)
{
@@ -4202,34 +3846,6 @@ int perf_event__process_tracing_data(struct perf_session *session,
return size_read + padding;
}
-int perf_event__synthesize_build_id(struct perf_tool *tool,
- struct dso *pos, u16 misc,
- perf_event__handler_t process,
- struct machine *machine)
-{
- union perf_event ev;
- size_t len;
- int err = 0;
-
- if (!pos->hit)
- return err;
-
- memset(&ev, 0, sizeof(ev));
-
- len = pos->long_name_len + 1;
- len = PERF_ALIGN(len, NAME_ALIGN);
- memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
- ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
- ev.build_id.header.misc = misc;
- ev.build_id.pid = machine->pid;
- ev.build_id.header.size = sizeof(ev.build_id) + len;
- memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
-
- err = process(tool, &ev, NULL, machine);
-
- return err;
-}
-
int perf_event__process_build_id(struct perf_session *session,
union perf_event *event)
{
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 3e48ae3c49b1..ca53a929e9fd 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -5,10 +5,10 @@
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <sys/types.h>
+#include <stdio.h> // FILE
#include <stdbool.h>
#include <linux/bitmap.h>
#include <linux/types.h>
-#include "event.h"
#include "env.h"
#include "pmu.h"
@@ -92,8 +92,28 @@ struct perf_header {
struct perf_env env;
};
+struct feat_fd {
+ struct perf_header *ph;
+ int fd;
+ void *buf; /* Either buf != NULL or fd >= 0 */
+ ssize_t offset;
+ size_t size;
+ struct evsel *events;
+};
+
+struct perf_header_feature_ops {
+ int (*write)(struct feat_fd *ff, struct evlist *evlist);
+ void (*print)(struct feat_fd *ff, FILE *fp);
+ int (*process)(struct feat_fd *ff, void *data);
+ const char *name;
+ bool full_only;
+ bool synthesize;
+};
+
struct evlist;
struct perf_session;
+struct perf_tool;
+union perf_event;
int perf_session__read_header(struct perf_session *session);
int perf_session__write_header(struct perf_session *session,
@@ -115,54 +135,16 @@ int perf_header__process_sections(struct perf_header *header, int fd,
int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
-int perf_event__synthesize_features(struct perf_tool *tool,
- struct perf_session *session,
- struct evlist *evlist,
- perf_event__handler_t process);
-
-int perf_event__synthesize_extra_attr(struct perf_tool *tool,
- struct evlist *evsel_list,
- perf_event__handler_t process,
- bool is_pipe);
-
int perf_event__process_feature(struct perf_session *session,
union perf_event *event);
-
-int perf_event__synthesize_attr(struct perf_tool *tool,
- struct perf_event_attr *attr, u32 ids, u64 *id,
- perf_event__handler_t process);
-int perf_event__synthesize_attrs(struct perf_tool *tool,
- struct evlist *evlist,
- perf_event__handler_t process);
-int perf_event__synthesize_event_update_unit(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process);
-int perf_event__synthesize_event_update_scale(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process);
-int perf_event__synthesize_event_update_name(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process);
-int perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
- struct evsel *evsel,
- perf_event__handler_t process);
int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
struct evlist **pevlist);
int perf_event__process_event_update(struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist);
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp);
-
-int perf_event__synthesize_tracing_data(struct perf_tool *tool,
- int fd, struct evlist *evlist,
- perf_event__handler_t process);
int perf_event__process_tracing_data(struct perf_session *session,
union perf_event *event);
-
-int perf_event__synthesize_build_id(struct perf_tool *tool,
- struct dso *pos, u16 misc,
- perf_event__handler_t process,
- struct machine *machine);
int perf_event__process_build_id(struct perf_session *session,
union perf_event *event);
bool is_perf_magic(u64 magic);
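
The new struct perf_header_feature_ops centralizes the per-feature write/print/process callbacks that were previously private to header.c. A minimal sketch of what one table entry built on it could look like — the callback names here are illustrative, not the actual entries in util/header.c:

    static int write_example(struct feat_fd *ff, struct evlist *evlist)
    {
            (void)ff; (void)evlist;
            return 0;       /* serialize the feature into ff->fd or ff->buf */
    }

    static void print_example(struct feat_fd *ff, FILE *fp)
    {
            (void)ff;
            fprintf(fp, "# example feature\n");
    }

    static int process_example(struct feat_fd *ff, void *data)
    {
            (void)ff; (void)data;
            return 0;       /* parse the feature section on read */
    }

    static const struct perf_header_feature_ops example_feat_ops = {
            .write      = write_example,
            .print      = print_example,
            .process    = process_example,
            .name       = "example",
            .full_only  = false,    /* also shown in brief header output */
            .synthesize = true,     /* emitted as an event in pipe mode */
    };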
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 34803e33dc80..6a186b668303 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -15,6 +15,7 @@ struct addr_location;
struct map_symbol;
struct mem_info;
struct branch_info;
+struct branch_stack;
struct block_info;
struct symbol;
struct ui_progress;
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index aacffa2b0362..34cb380d19a3 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -14,7 +14,6 @@
#include <linux/log2.h>
#include <linux/zalloc.h>
-#include "cpumap.h"
#include "color.h"
#include "evsel.h"
#include "evlist.h"
@@ -29,6 +28,7 @@
#include "auxtrace.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-bts.h"
+#include "util/synthetic-events.h"
#define MAX_TIMESTAMP (~0ULL)
@@ -768,7 +768,7 @@ static int intel_bts_synth_events(struct intel_bts *bts,
int err;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->core.attr.type == bts->pmu_type && evsel->ids) {
+ if (evsel->core.attr.type == bts->pmu_type && evsel->core.ids) {
found = true;
break;
}
@@ -795,7 +795,7 @@ static int intel_bts_synth_events(struct intel_bts *bts,
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
- id = evsel->id[0] + 1000000000;
+ id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 9b56fb74bedf..a1c9eb6d4f40 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -33,6 +33,7 @@
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
+#include "util/synthetic-events.h"
#include "time-utils.h"
#include "../arch/x86/include/uapi/asm/perf_regs.h"
@@ -1704,7 +1705,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
struct intel_pt *pt = ptq->pt;
struct evsel *evsel = pt->pebs_evsel;
u64 sample_type = evsel->core.attr.sample_type;
- u64 id = evsel->id[0];
+ u64 id = evsel->core.id[0];
u8 cpumode;
if (intel_pt_skip_event(pt))
@@ -2719,7 +2720,7 @@ static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->id && evsel->id[0] == id) {
+ if (evsel->core.id && evsel->core.id[0] == id) {
if (evsel->name)
zfree(&evsel->name);
evsel->name = strdup(name);
@@ -2734,7 +2735,7 @@ static struct evsel *intel_pt_evsel(struct intel_pt *pt,
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- if (evsel->core.attr.type == pt->pmu_type && evsel->ids)
+ if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
return evsel;
}
@@ -2775,7 +2776,7 @@ static int intel_pt_synth_events(struct intel_pt *pt,
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
- id = evsel->id[0] + 1000000000;
+ id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
@@ -2902,7 +2903,7 @@ static void intel_pt_setup_pebs_events(struct intel_pt *pt)
return;
evlist__for_each_entry(pt->session->evlist, evsel) {
- if (evsel->core.attr.aux_output && evsel->id) {
+ if (evsel->core.attr.aux_output && evsel->core.id) {
pt->sample_pebs = true;
pt->pebs_evsel = evsel;
return;
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index b80f29bfc7bb..1bdf4c6ea3e5 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -15,7 +15,6 @@
#include <linux/stringify.h>
#include "build-id.h"
-#include "util.h"
#include "event.h"
#include "debug.h"
#include "evlist.h"
@@ -27,7 +26,6 @@
#include "jit.h"
#include "jitdump.h"
#include "genelf.h"
-#include "../builtin.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
@@ -779,7 +777,7 @@ jit_process(struct perf_session *session,
* track sample_type to compute id_all layout
* perf sets the same sample type to all events as of now
*/
- first = perf_evlist__first(session->evlist);
+ first = evlist__first(session->evlist);
jd.sample_type = first->core.attr.sample_type;
*nbytes = 0;
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index 46913637085b..6f0fa05b62b6 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -2,6 +2,8 @@
#ifndef __PERF_KVM_STAT_H
#define __PERF_KVM_STAT_H
+#ifdef HAVE_KVM_STAT_SUPPORT
+
#include "tool.h"
#include "stat.h"
#include "record.h"
@@ -144,5 +146,7 @@ extern const int decode_str_len;
extern const char *kvm_exit_reason;
extern const char *kvm_entry_trace;
extern const char *kvm_exit_trace;
+#endif /* HAVE_KVM_STAT_SUPPORT */
+extern int kvm_add_default_arch_event(int *argc, const char **argv);
#endif /* __PERF_KVM_STAT_H */
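
kvm_add_default_arch_event() is now declared outside the HAVE_KVM_STAT_SUPPORT guard, so every build needs a definition. A plausible fallback — hedged, the actual default lives in the perf builtins — is a weak no-op that architectures override:

    #include <linux/compiler.h>

    /* Illustrative weak default; arch code overrides this to inject a
     * default kvm event into the command line before parsing. */
    int __weak kvm_add_default_arch_event(int *argc __maybe_unused,
                                          const char **argv __maybe_unused)
    {
            return 0;
    }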
diff --git a/tools/perf/util/libunwind/arm64.c b/tools/perf/util/libunwind/arm64.c
index 66756e6be111..6b4e5a0892f8 100644
--- a/tools/perf/util/libunwind/arm64.c
+++ b/tools/perf/util/libunwind/arm64.c
@@ -22,7 +22,6 @@
#define LIBUNWIND__ARCH_REG_SP PERF_REG_ARM64_SP
#include "unwind.h"
-#include "debug.h"
#include "libunwind-aarch64.h"
#include <../../../../arch/arm64/include/uapi/asm/perf_regs.h>
#include "../../arch/arm64/util/unwind-libunwind.c"
diff --git a/tools/perf/util/libunwind/x86_32.c b/tools/perf/util/libunwind/x86_32.c
index c5e568188e19..21c216c40a3b 100644
--- a/tools/perf/util/libunwind/x86_32.c
+++ b/tools/perf/util/libunwind/x86_32.c
@@ -22,7 +22,6 @@
#define LIBUNWIND__ARCH_REG_SP PERF_REG_X86_SP
#include "unwind.h"
-#include "debug.h"
#include "libunwind-x86.h"
#include <../../../../arch/x86/include/uapi/asm/perf_regs.h>
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 55fb4b3b1157..8d04e3d070b1 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -8,6 +8,7 @@
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
+#include <unistd.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c
index 397447066033..39062df02629 100644
--- a/tools/perf/util/lzma.c
+++ b/tools/perf/util/lzma.c
@@ -7,10 +7,10 @@
#include <sys/stat.h>
#include <fcntl.h>
#include "compress.h"
-#include "util.h"
#include "debug.h"
#include <string.h>
#include <unistd.h>
+#include <internal/lib.h>
#define BUFSIZE 8192
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index b4749d3eed08..70a9f8716a4b 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -32,6 +32,7 @@
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
+#include <internal/lib.h> // page_size
#include <linux/ctype.h>
#include <symbol/kallsyms.h>
@@ -2609,21 +2610,6 @@ int machines__for_each_thread(struct machines *machines,
return rc;
}
-int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
- struct target *target, struct perf_thread_map *threads,
- perf_event__handler_t process, bool data_mmap,
- unsigned int nr_threads_synthesize)
-{
- if (target__has_task(target))
- return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
- else if (target__has_cpu(target))
- return perf_event__synthesize_threads(tool, process,
- machine, data_mmap,
- nr_threads_synthesize);
- /* command specified */
- return 0;
-}
-
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index ffd391a925a6..18e13c0ccd6a 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -6,7 +6,6 @@
#include <linux/rbtree.h>
#include "map_groups.h"
#include "dsos.h"
-#include "event.h"
#include "rwsem.h"
struct addr_location;
@@ -252,20 +251,6 @@ int machines__for_each_thread(struct machines *machines,
int (*fn)(struct thread *thread, void *p),
void *priv);
-int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
- struct target *target, struct perf_thread_map *threads,
- perf_event__handler_t process, bool data_mmap,
- unsigned int nr_threads_synthesize);
-static inline
-int machine__synthesize_threads(struct machine *machine, struct target *target,
- struct perf_thread_map *threads, bool data_mmap,
- unsigned int nr_threads_synthesize)
-{
- return __machine__synthesize_threads(machine, NULL, target, threads,
- perf_event__process, data_mmap,
- nr_threads_synthesize);
-}
-
pid_t machine__get_current_tid(struct machine *machine, int cpu);
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
pid_t tid);
diff --git a/tools/perf/util/memswap.h b/tools/perf/util/memswap.h
index 1e29ff903ca9..2c38e8c2d548 100644
--- a/tools/perf/util/memswap.h
+++ b/tools/perf/util/memswap.h
@@ -2,6 +2,13 @@
#ifndef PERF_MEMSWAP_H_
#define PERF_MEMSWAP_H_
+#include <linux/types.h>
+
+union u64_swap {
+ u64 val64;
+ u32 val32[2];
+};
+
void mem_bswap_64(void *src, int byte_size);
void mem_bswap_32(void *src, int byte_size);
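
union u64_swap moves into memswap.h so byte-order fix-ups can view one u64 as two u32 halves. A small sketch of the pattern, assuming the tools-style u64/u32 types from linux/types.h:

    #include <byteswap.h>
    #include <linux/types.h>
    #include "memswap.h"

    /* Swap each 32-bit half in place; used when a perf.data file was
     * recorded on a machine with the opposite endianness. */
    static u64 swap_u32_halves(u64 val)
    {
            union u64_swap u;

            u.val64 = val;
            u.val32[0] = bswap_32(u.val32[0]);
            u.val32[1] = bswap_32(u.val32[1]);
            return u.val64;
    }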
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 33c5b5495482..a35dc57d5995 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -12,6 +12,7 @@
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
+#include <unistd.h> // sysconf()
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
@@ -20,25 +21,25 @@
#include "event.h"
#include "mmap.h"
#include "../perf.h"
-#include "util.h" /* page_size */
+#include <internal/lib.h> /* page_size */
-size_t perf_mmap__mmap_len(struct perf_mmap *map)
+size_t perf_mmap__mmap_len(struct mmap *map)
{
- return map->mask + 1 + page_size;
+ return map->core.mask + 1 + page_size;
}
/* When check_messup is true, 'end' must point to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map,
+static union perf_event *perf_mmap__read(struct mmap *map,
u64 *startp, u64 end)
{
- unsigned char *data = map->base + page_size;
+ unsigned char *data = map->core.base + page_size;
union perf_event *event = NULL;
int diff = end - *startp;
if (diff >= (int)sizeof(event->header)) {
size_t size;
- event = (union perf_event *)&data[*startp & map->mask];
+ event = (union perf_event *)&data[*startp & map->core.mask];
size = event->header.size;
if (size < sizeof(event->header) || diff < (int)size)
@@ -48,20 +49,20 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
* Event straddles the mmap boundary -- header should always
* be inside due to u64 alignment of output.
*/
- if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+ if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
unsigned int offset = *startp;
unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = map->event_copy;
+ void *dst = map->core.event_copy;
do {
- cpy = min(map->mask + 1 - (offset & map->mask), len);
- memcpy(dst, &data[offset & map->mask], cpy);
+ cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
+ memcpy(dst, &data[offset & map->core.mask], cpy);
offset += cpy;
dst += cpy;
len -= cpy;
} while (len);
- event = (union perf_event *)map->event_copy;
+ event = (union perf_event *)map->core.event_copy;
}
*startp += size;
@@ -82,55 +83,55 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
* }
* perf_mmap__read_done()
*/
-union perf_event *perf_mmap__read_event(struct perf_mmap *map)
+union perf_event *perf_mmap__read_event(struct mmap *map)
{
union perf_event *event;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!refcount_read(&map->refcnt))
+ if (!refcount_read(&map->core.refcnt))
return NULL;
/* non-overwrite mode doesn't pause the ring buffer */
- if (!map->overwrite)
- map->end = perf_mmap__read_head(map);
+ if (!map->core.overwrite)
+ map->core.end = perf_mmap__read_head(map);
- event = perf_mmap__read(map, &map->start, map->end);
+ event = perf_mmap__read(map, &map->core.start, map->core.end);
- if (!map->overwrite)
- map->prev = map->start;
+ if (!map->core.overwrite)
+ map->core.prev = map->core.start;
return event;
}
-static bool perf_mmap__empty(struct perf_mmap *map)
+static bool perf_mmap__empty(struct mmap *map)
{
- return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
+ return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
}
-void perf_mmap__get(struct perf_mmap *map)
+void perf_mmap__get(struct mmap *map)
{
- refcount_inc(&map->refcnt);
+ refcount_inc(&map->core.refcnt);
}
-void perf_mmap__put(struct perf_mmap *map)
+void perf_mmap__put(struct mmap *map)
{
- BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
+ BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);
- if (refcount_dec_and_test(&map->refcnt))
+ if (refcount_dec_and_test(&map->core.refcnt))
perf_mmap__munmap(map);
}
-void perf_mmap__consume(struct perf_mmap *map)
+void perf_mmap__consume(struct mmap *map)
{
- if (!map->overwrite) {
- u64 old = map->prev;
+ if (!map->core.overwrite) {
+ u64 old = map->core.prev;
perf_mmap__write_tail(map, old);
}
- if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
+ if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
perf_mmap__put(map);
}
@@ -161,13 +162,13 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
}
#ifdef HAVE_AIO_SUPPORT
-static int perf_mmap__aio_enabled(struct perf_mmap *map)
+static int perf_mmap__aio_enabled(struct mmap *map)
{
return map->aio.nr_cblocks > 0;
}
#ifdef HAVE_LIBNUMA_SUPPORT
-static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
@@ -179,7 +180,7 @@ static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
return 0;
}
-static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+static void perf_mmap__aio_free(struct mmap *map, int idx)
{
if (map->aio.data[idx]) {
munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
@@ -187,7 +188,7 @@ static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
}
}
-static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
+static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
void *data;
size_t mmap_len;
@@ -207,7 +208,7 @@ static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affi
return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
-static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
if (map->aio.data[idx] == NULL)
@@ -216,19 +217,19 @@ static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
return 0;
}
-static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+static void perf_mmap__aio_free(struct mmap *map, int idx)
{
zfree(&(map->aio.data[idx]));
}
-static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
+static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
int cpu __maybe_unused, int affinity __maybe_unused)
{
return 0;
}
#endif
-static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
+static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
int delta_max, i, prio, ret;
@@ -256,7 +257,7 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
- ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
+ ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
if (ret == -1)
return -1;
/*
@@ -282,7 +283,7 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
return 0;
}
-static void perf_mmap__aio_munmap(struct perf_mmap *map)
+static void perf_mmap__aio_munmap(struct mmap *map)
{
int i;
@@ -294,34 +295,34 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
-static int perf_mmap__aio_enabled(struct perf_mmap *map __maybe_unused)
+static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
return 0;
}
-static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
+static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
struct mmap_params *mp __maybe_unused)
{
return 0;
}
-static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
+static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif
-void perf_mmap__munmap(struct perf_mmap *map)
+void perf_mmap__munmap(struct mmap *map)
{
perf_mmap__aio_munmap(map);
if (map->data != NULL) {
munmap(map->data, perf_mmap__mmap_len(map));
map->data = NULL;
}
- if (map->base != NULL) {
- munmap(map->base, perf_mmap__mmap_len(map));
- map->base = NULL;
- map->fd = -1;
- refcount_set(&map->refcnt, 0);
+ if (map->core.base != NULL) {
+ munmap(map->core.base, perf_mmap__mmap_len(map));
+ map->core.base = NULL;
+ map->core.fd = -1;
+ refcount_set(&map->core.refcnt, 0);
}
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
@@ -343,16 +344,16 @@ static void build_node_mask(int node, cpu_set_t *mask)
}
}
-static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
CPU_ZERO(&map->affinity_mask);
if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
- build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
+ build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
else if (mp->affinity == PERF_AFFINITY_CPU)
- CPU_SET(map->cpu, &map->affinity_mask);
+ CPU_SET(map->core.cpu, &map->affinity_mask);
}
-int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
+int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
/*
* The last one will be done at perf_mmap__consume(), so that we
@@ -367,23 +368,23 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
* evlist layer can't just drop it when filtering events in
* perf_evlist__filter_pollfd().
*/
- refcount_set(&map->refcnt, 2);
- map->prev = 0;
- map->mask = mp->mask;
- map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+ refcount_set(&map->core.refcnt, 2);
+ map->core.prev = 0;
+ map->core.mask = mp->mask;
+ map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
MAP_SHARED, fd, 0);
- if (map->base == MAP_FAILED) {
+ if (map->core.base == MAP_FAILED) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
- map->base = NULL;
+ map->core.base = NULL;
return -1;
}
- map->fd = fd;
- map->cpu = cpu;
+ map->core.fd = fd;
+ map->core.cpu = cpu;
perf_mmap__setup_affinity_mask(map, mp);
- map->flush = mp->flush;
+ map->core.flush = mp->flush;
map->comp_level = mp->comp_level;
@@ -399,7 +400,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
}
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
- &mp->auxtrace_mp, map->base, fd))
+ &mp->auxtrace_mp, map->core.base, fd))
return -1;
return perf_mmap__aio_mmap(map, mp);
@@ -440,25 +441,25 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
/*
* Report the start and end of the available data in ringbuffer
*/
-static int __perf_mmap__read_init(struct perf_mmap *md)
+static int __perf_mmap__read_init(struct mmap *md)
{
u64 head = perf_mmap__read_head(md);
- u64 old = md->prev;
- unsigned char *data = md->base + page_size;
+ u64 old = md->core.prev;
+ unsigned char *data = md->core.base + page_size;
unsigned long size;
- md->start = md->overwrite ? head : old;
- md->end = md->overwrite ? old : head;
+ md->core.start = md->core.overwrite ? head : old;
+ md->core.end = md->core.overwrite ? old : head;
- if ((md->end - md->start) < md->flush)
+ if ((md->core.end - md->core.start) < md->core.flush)
return -EAGAIN;
- size = md->end - md->start;
- if (size > (unsigned long)(md->mask) + 1) {
- if (!md->overwrite) {
+ size = md->core.end - md->core.start;
+ if (size > (unsigned long)(md->core.mask) + 1) {
+ if (!md->core.overwrite) {
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
- md->prev = head;
+ md->core.prev = head;
perf_mmap__consume(md);
return -EAGAIN;
}
@@ -467,29 +468,29 @@ static int __perf_mmap__read_init(struct perf_mmap *md)
* Backward ring buffer is full. We still have a chance to read
* most of data from it.
*/
- if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
+ if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
return -EINVAL;
}
return 0;
}
-int perf_mmap__read_init(struct perf_mmap *map)
+int perf_mmap__read_init(struct mmap *map)
{
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!refcount_read(&map->refcnt))
+ if (!refcount_read(&map->core.refcnt))
return -ENOENT;
return __perf_mmap__read_init(map);
}
-int perf_mmap__push(struct perf_mmap *md, void *to,
- int push(struct perf_mmap *map, void *to, void *buf, size_t size))
+int perf_mmap__push(struct mmap *md, void *to,
+ int push(struct mmap *map, void *to, void *buf, size_t size))
{
u64 head = perf_mmap__read_head(md);
- unsigned char *data = md->base + page_size;
+ unsigned char *data = md->core.base + page_size;
unsigned long size;
void *buf;
int rc = 0;
@@ -498,12 +499,12 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
if (rc < 0)
return (rc == -EAGAIN) ? 1 : -1;
- size = md->end - md->start;
+ size = md->core.end - md->core.start;
- if ((md->start & md->mask) + size != (md->end & md->mask)) {
- buf = &data[md->start & md->mask];
- size = md->mask + 1 - (md->start & md->mask);
- md->start += size;
+ if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
+ buf = &data[md->core.start & md->core.mask];
+ size = md->core.mask + 1 - (md->core.start & md->core.mask);
+ md->core.start += size;
if (push(md, to, buf, size) < 0) {
rc = -1;
@@ -511,16 +512,16 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
}
}
- buf = &data[md->start & md->mask];
- size = md->end - md->start;
- md->start += size;
+ buf = &data[md->core.start & md->core.mask];
+ size = md->core.end - md->core.start;
+ md->core.start += size;
if (push(md, to, buf, size) < 0) {
rc = -1;
goto out;
}
- md->prev = head;
+ md->core.prev = head;
perf_mmap__consume(md);
out:
return rc;
@@ -529,16 +530,16 @@ out:
/*
* Mandatory for overwrite mode
* The direction of overwrite mode is backward.
- * The last perf_mmap__read() will set tail to map->prev.
- * Need to correct the map->prev to head which is the end of next read.
+ * The last perf_mmap__read() will set tail to map->core.prev.
+ * Need to correct the map->core.prev to head which is the end of next read.
*/
-void perf_mmap__read_done(struct perf_mmap *map)
+void perf_mmap__read_done(struct mmap *map)
{
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!refcount_read(&map->refcnt))
+ if (!refcount_read(&map->core.refcnt))
return;
- map->prev = perf_mmap__read_head(map);
+ map->core.prev = perf_mmap__read_head(map);
}
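
Throughout this file the indexing math is unchanged, only moved under map->core: the ring size is a power of two, so 'mask' is size - 1 and 'offset & mask' locates a byte inside the ring. A standalone sketch of the wrap-around copy that perf_mmap__read() performs for events straddling the boundary:

    #include <stddef.h>
    #include <string.h>

    static void ring_copy(unsigned char *dst, const unsigned char *ring,
                          size_t mask, size_t offset, size_t len)
    {
            while (len) {
                    size_t pos = offset & mask;     /* position in the ring */
                    size_t room = mask + 1 - pos;   /* bytes until the wrap */
                    size_t cpy = len < room ? len : room;

                    memcpy(dst, ring + pos, cpy);
                    dst += cpy;
                    offset += cpy;
                    len -= cpy;
            }
    }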
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index 3857a49e8f96..e567c1c875bd 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -1,6 +1,7 @@
#ifndef __PERF_MMAP_H
#define __PERF_MMAP_H 1
+#include <internal/mmap.h>
#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/types.h>
@@ -15,22 +16,13 @@
struct aiocb;
/**
- * struct perf_mmap - perf's ring buffer mmap details
+ * struct mmap - perf's ring buffer mmap details
*
* @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
*/
-struct perf_mmap {
- void *base;
- int mask;
- int fd;
- int cpu;
- refcount_t refcnt;
- u64 prev;
- u64 start;
- u64 end;
- bool overwrite;
+struct mmap {
+ struct perf_mmap core;
struct auxtrace_mmap auxtrace_mmap;
- char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
#ifdef HAVE_AIO_SUPPORT
struct {
void **data;
@@ -40,71 +32,42 @@ struct perf_mmap {
} aio;
#endif
cpu_set_t affinity_mask;
- u64 flush;
void *data;
int comp_level;
};
-/*
- * State machine of bkw_mmap_state:
- *
- * .________________(forbid)_____________.
- * | V
- * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
- * ^ ^ | ^ |
- * | |__(forbid)____/ |___(forbid)___/|
- * | |
- * \_________________(3)_______________/
- *
- * NOTREADY : Backward ring buffers are not ready
- * RUNNING : Backward ring buffers are recording
- * DATA_PENDING : We are required to collect data from backward ring buffers
- * EMPTY : We have collected data from backward ring buffers.
- *
- * (0): Setup backward ring buffer
- * (1): Pause ring buffers for reading
- * (2): Read from ring buffers
- * (3): Resume ring buffers for recording
- */
-enum bkw_mmap_state {
- BKW_MMAP_NOTREADY,
- BKW_MMAP_RUNNING,
- BKW_MMAP_DATA_PENDING,
- BKW_MMAP_EMPTY,
-};
-
struct mmap_params {
int prot, mask, nr_cblocks, affinity, flush, comp_level;
struct auxtrace_mmap_params auxtrace_mp;
};
-int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
-void perf_mmap__munmap(struct perf_mmap *map);
+int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
+void perf_mmap__munmap(struct mmap *map);
-void perf_mmap__get(struct perf_mmap *map);
-void perf_mmap__put(struct perf_mmap *map);
+void perf_mmap__get(struct mmap *map);
+void perf_mmap__put(struct mmap *map);
-void perf_mmap__consume(struct perf_mmap *map);
+void perf_mmap__consume(struct mmap *map);
-static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
+static inline u64 perf_mmap__read_head(struct mmap *mm)
{
- return ring_buffer_read_head(mm->base);
+ return ring_buffer_read_head(mm->core.base);
}
-static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
+static inline void perf_mmap__write_tail(struct mmap *md, u64 tail)
{
- ring_buffer_write_tail(md->base, tail);
+ ring_buffer_write_tail(md->core.base, tail);
}
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
+union perf_event *perf_mmap__read_forward(struct mmap *map);
-union perf_event *perf_mmap__read_event(struct perf_mmap *map);
+union perf_event *perf_mmap__read_event(struct mmap *map);
-int perf_mmap__push(struct perf_mmap *md, void *to,
- int push(struct perf_mmap *map, void *to, void *buf, size_t size));
+int perf_mmap__push(struct mmap *md, void *to,
+ int push(struct mmap *map, void *to, void *buf, size_t size));
-size_t perf_mmap__mmap_len(struct perf_mmap *map);
+size_t perf_mmap__mmap_len(struct mmap *map);
-int perf_mmap__read_init(struct perf_mmap *md);
-void perf_mmap__read_done(struct perf_mmap *map);
+int perf_mmap__read_init(struct mmap *md);
+void perf_mmap__read_done(struct mmap *map);
#endif /*__PERF_MMAP_H */
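
The header now only wraps the libperf struct perf_mmap; the consumer protocol documented above perf_mmap__read_event() is unchanged. A minimal sketch of that loop with the renamed types, assuming this header is included:

    static void drain_one_mmap(struct mmap *map)
    {
            union perf_event *event;

            if (perf_mmap__read_init(map) < 0)
                    return;         /* nothing new, or the map was unmapped */

            while ((event = perf_mmap__read_event(map)) != NULL) {
                    /* ... hand 'event' to a delivery callback here ... */
                    perf_mmap__consume(map);
            }

            perf_mmap__read_done(map);
    }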
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index 99be15dd2b6b..285d6f30d912 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -17,8 +17,26 @@
#include <string.h>
#include <unistd.h>
#include <asm/bug.h>
+#include <linux/kernel.h>
#include <linux/zalloc.h>
+static const char *perf_ns__names[] = {
+ [NET_NS_INDEX] = "net",
+ [UTS_NS_INDEX] = "uts",
+ [IPC_NS_INDEX] = "ipc",
+ [PID_NS_INDEX] = "pid",
+ [USER_NS_INDEX] = "user",
+ [MNT_NS_INDEX] = "mnt",
+ [CGROUP_NS_INDEX] = "cgroup",
+};
+
+const char *perf_ns__name(unsigned int id)
+{
+ if (id >= ARRAY_SIZE(perf_ns__names))
+ return "UNKNOWN";
+ return perf_ns__names[id];
+}
+
struct namespaces *namespaces__new(struct perf_record_namespaces *event)
{
struct namespaces *namespaces;
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index 40edef56cb52..4b33f684eddd 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -66,4 +66,6 @@ static inline void __nsinfo__zput(struct nsinfo **nsip)
#define nsinfo__zput(nsi) __nsinfo__zput(&nsi)
+const char *perf_ns__name(unsigned int id);
+
#endif /* __PERF_NAMESPACES_H */
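
perf_ns__name() gives a bounds-checked index-to-name lookup over the namespace table. A usage sketch with a hypothetical caller:

    #include <stdio.h>
    #include "namespaces.h"

    /* Print one namespace link from a PERF_RECORD_NAMESPACES entry;
     * out-of-range indexes come back as "UNKNOWN". */
    static void print_ns_link(unsigned int idx, unsigned long long inode)
    {
            printf("%u/%s: %llu\n", idx, perf_ns__name(idx), inode);
    }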
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5ec21d21113c..b5e2adef49de 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -30,11 +30,12 @@
#include "parse-events-flex.h"
#include "pmu.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "probe-file.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "metricgroup.h"
+#include "util/evsel_config.h"
+#include "util/event.h"
#define MAX_NAME_LEN 100
@@ -335,7 +336,7 @@ __add_event(struct list_head *list, int *idx,
(*idx)++;
evsel->core.cpus = perf_cpu_map__get(cpus);
evsel->core.own_cpus = perf_cpu_map__get(cpus);
- evsel->system_wide = pmu ? pmu->is_uncore : false;
+ evsel->core.system_wide = pmu ? pmu->is_uncore : false;
evsel->auto_merge_stats = auto_merge_stats;
if (name)
@@ -1936,7 +1937,7 @@ int parse_events(struct evlist *evlist, const char *str,
perf_evlist__splice_list_tail(evlist, &parse_state.list);
evlist->nr_groups += parse_state.nr_groups;
- last = perf_evlist__last(evlist);
+ last = evlist__last(evlist);
last->cmdline_group_boundary = true;
return 0;
@@ -2050,7 +2051,7 @@ foreach_evsel_in_last_glob(struct evlist *evlist,
* So no need to WARN here, let *func do this.
*/
if (evlist->core.nr_entries > 0)
- last = perf_evlist__last(evlist);
+ last = evlist__last(evlist);
do {
err = (*func)(last, arg);
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index f1c36ed1cf36..48126ae4cd13 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -9,13 +9,11 @@
#define YYDEBUG 1
#include <fnmatch.h>
+#include <stdio.h>
#include <linux/compiler.h>
-#include <linux/list.h>
#include <linux/types.h>
-#include "util.h"
#include "pmu.h"
#include "evsel.h"
-#include "debug.h"
#include "parse-events.h"
#include "parse-events-bison.h"
diff --git a/tools/perf/util/perf-hooks.c b/tools/perf/util/perf-hooks.c
index e635c594f773..7a0ab3507bd5 100644
--- a/tools/perf/util/perf-hooks.c
+++ b/tools/perf/util/perf-hooks.c
@@ -12,7 +12,6 @@
#include <setjmp.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include "util/util.h"
#include "util/debug.h"
#include "util/perf-hooks.h"
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
new file mode 100644
index 000000000000..d4ad3f04923a
--- /dev/null
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/perf_event.h>
+#include "util/evsel_fprintf.h"
+
+struct bit_names {
+ int bit;
+ const char *name;
+};
+
+static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
+{
+ bool first_bit = true;
+ int i = 0;
+
+ do {
+ if (value & bits[i].bit) {
+ buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
+ first_bit = false;
+ }
+ } while (bits[++i].name != NULL);
+}
+
+static void __p_sample_type(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
+ bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
+ bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
+ bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
+ bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
+ bit_name(WEIGHT), bit_name(PHYS_ADDR),
+ { .name = NULL, }
+ };
+#undef bit_name
+ __p_bits(buf, size, value, bits);
+}
+
+static void __p_branch_sample_type(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
+ bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
+ bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
+ bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
+ bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
+ { .name = NULL, }
+ };
+#undef bit_name
+ __p_bits(buf, size, value, bits);
+}
+
+static void __p_read_format(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_FORMAT_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
+ bit_name(ID), bit_name(GROUP),
+ { .name = NULL, }
+ };
+#undef bit_name
+ __p_bits(buf, size, value, bits);
+}
+
+#define BUF_SIZE 1024
+
+#define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
+#define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
+#define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
+#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
+#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
+#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
+
+#define PRINT_ATTRn(_n, _f, _p) \
+do { \
+ if (attr->_f) { \
+ _p(attr->_f); \
+ ret += attr__fprintf(fp, _n, buf, priv);\
+ } \
+} while (0)
+
+#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
+
+int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
+ attr__fprintf_f attr__fprintf, void *priv)
+{
+ char buf[BUF_SIZE];
+ int ret = 0;
+
+ PRINT_ATTRf(type, p_unsigned);
+ PRINT_ATTRf(size, p_unsigned);
+ PRINT_ATTRf(config, p_hex);
+ PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
+ PRINT_ATTRf(sample_type, p_sample_type);
+ PRINT_ATTRf(read_format, p_read_format);
+
+ PRINT_ATTRf(disabled, p_unsigned);
+ PRINT_ATTRf(inherit, p_unsigned);
+ PRINT_ATTRf(pinned, p_unsigned);
+ PRINT_ATTRf(exclusive, p_unsigned);
+ PRINT_ATTRf(exclude_user, p_unsigned);
+ PRINT_ATTRf(exclude_kernel, p_unsigned);
+ PRINT_ATTRf(exclude_hv, p_unsigned);
+ PRINT_ATTRf(exclude_idle, p_unsigned);
+ PRINT_ATTRf(mmap, p_unsigned);
+ PRINT_ATTRf(comm, p_unsigned);
+ PRINT_ATTRf(freq, p_unsigned);
+ PRINT_ATTRf(inherit_stat, p_unsigned);
+ PRINT_ATTRf(enable_on_exec, p_unsigned);
+ PRINT_ATTRf(task, p_unsigned);
+ PRINT_ATTRf(watermark, p_unsigned);
+ PRINT_ATTRf(precise_ip, p_unsigned);
+ PRINT_ATTRf(mmap_data, p_unsigned);
+ PRINT_ATTRf(sample_id_all, p_unsigned);
+ PRINT_ATTRf(exclude_host, p_unsigned);
+ PRINT_ATTRf(exclude_guest, p_unsigned);
+ PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
+ PRINT_ATTRf(exclude_callchain_user, p_unsigned);
+ PRINT_ATTRf(mmap2, p_unsigned);
+ PRINT_ATTRf(comm_exec, p_unsigned);
+ PRINT_ATTRf(use_clockid, p_unsigned);
+ PRINT_ATTRf(context_switch, p_unsigned);
+ PRINT_ATTRf(write_backward, p_unsigned);
+ PRINT_ATTRf(namespaces, p_unsigned);
+ PRINT_ATTRf(ksymbol, p_unsigned);
+ PRINT_ATTRf(bpf_event, p_unsigned);
+ PRINT_ATTRf(aux_output, p_unsigned);
+
+ PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
+ PRINT_ATTRf(bp_type, p_unsigned);
+ PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
+ PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
+ PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
+ PRINT_ATTRf(sample_regs_user, p_hex);
+ PRINT_ATTRf(sample_stack_user, p_unsigned);
+ PRINT_ATTRf(clockid, p_signed);
+ PRINT_ATTRf(sample_regs_intr, p_hex);
+ PRINT_ATTRf(aux_watermark, p_unsigned);
+ PRINT_ATTRf(sample_max_stack, p_unsigned);
+
+ return ret;
+}
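
perf_event_attr__fprintf() renders each set field through a caller-supplied callback. The signature below is inferred from the attr__fprintf(fp, name, buf, priv) call in PRINT_ATTRn and is only a sketch:

    static int print_attr_field(FILE *fp, const char *name,
                                const char *val, void *priv __maybe_unused)
    {
            /* one "name value" line per attribute field that is set */
            return fprintf(fp, "  %-20s %s\n", name, val);
    }

    /* e.g.: perf_event_attr__fprintf(stdout, &evsel->core.attr,
     *                                print_attr_field, NULL); */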
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index fb597fa94234..5608da82ad23 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -20,7 +20,6 @@
#include "debug.h"
#include "pmu.h"
#include "parse-events.h"
-#include "cpumap.h"
#include "header.h"
#include "pmu-events/pmu-events.h"
#include "string2.h"
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index b8e0967c5c21..91cab5f669d2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2331,6 +2331,7 @@ void clear_probe_trace_event(struct probe_trace_event *tev)
}
}
zfree(&tev->args);
+ tev->nargs = 0;
}
struct kprobe_blacklist_node {
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index d13db55a2feb..b659466ea498 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -16,6 +16,7 @@
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
+#include "build-id.h"
#include "dso.h"
#include "color.h"
#include "symbol.h"
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 505905fc21c5..cd9f95e5044e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1245,6 +1245,17 @@ static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf,
return n;
}
+static bool trace_event_finder_overlap(struct trace_event_finder *tf)
+{
+ int i;
+
+ for (i = 0; i < tf->ntevs; i++) {
+ if (tf->pf.addr == tf->tevs[i].point.address)
+ return true;
+ }
+ return false;
+}
+
/* Add a found probe point into trace event list */
static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
{
@@ -1255,6 +1266,14 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
struct perf_probe_arg *args = NULL;
int ret, i;
+ /*
+ * In some cases (e.g. different columns assigned to the same address)
+ * this callback can be called again with an address that has already
+ * been processed. Skip such duplicates.
+ */
+ if (trace_event_finder_overlap(tf))
+ return 0;
+
/* Check number of tevs */
if (tf->ntevs == tf->max_tevs) {
pr_warning("Too many( > %d) probe point found.\n",
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index c6dd478956f1..9af183860fbd 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,6 +10,7 @@ util/python.c
util/cap.c
util/evlist.c
util/evsel.c
+util/perf_event_attr_fprintf.c
util/cpumap.c
util/memswap.c
util/mmap.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 07ca4535e6f7..53f31053a27a 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -6,17 +6,15 @@
#include <linux/err.h>
#include <perf/cpumap.h>
#include <traceevent/event-parse.h>
-#include "debug.h"
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
#include "event.h"
-#include "cpumap.h"
#include "print_binary.h"
#include "thread_map.h"
#include "trace-event.h"
#include "mmap.h"
-#include "util.h"
+#include <internal/lib.h>
#include "../perf-sys.h"
#if PY_MAJOR_VERSION < 3
@@ -61,6 +59,8 @@ int parse_callchain_record(const char *arg __maybe_unused,
*/
int verbose;
+int eprintf(int level, int var, const char *fmt, ...);
+
int eprintf(int level, int var, const char *fmt, ...)
{
va_list args;
@@ -884,7 +884,7 @@ static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
- perf_evlist__exit(&pevlist->evlist);
+ evlist__exit(&pevlist->evlist);
Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}
@@ -899,7 +899,7 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
&pages, &overwrite))
return NULL;
- if (perf_evlist__mmap(evlist, pages) < 0) {
+ if (evlist__mmap(evlist, pages) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
@@ -918,7 +918,7 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
return NULL;
- n = perf_evlist__poll(evlist, timeout);
+ n = evlist__poll(evlist, timeout);
if (n < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
@@ -935,17 +935,17 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
PyObject *list = PyList_New(0);
int i;
- for (i = 0; i < evlist->pollfd.nr; ++i) {
+ for (i = 0; i < evlist->core.pollfd.nr; ++i) {
PyObject *file;
#if PY_MAJOR_VERSION < 3
- FILE *fp = fdopen(evlist->pollfd.entries[i].fd, "r");
+ FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r");
if (fp == NULL)
goto free_list;
file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
- file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1,
+ file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
NULL, NULL, NULL, 0);
#endif
if (file == NULL)
@@ -984,14 +984,14 @@ static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
return Py_BuildValue("i", evlist->core.nr_entries);
}
-static struct perf_mmap *get_md(struct evlist *evlist, int cpu)
+static struct mmap *get_md(struct evlist *evlist, int cpu)
{
int i;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- struct perf_mmap *md = &evlist->mmap[i];
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ struct mmap *md = &evlist->mmap[i];
- if (md->cpu == cpu)
+ if (md->core.cpu == cpu)
return md;
}
@@ -1005,7 +1005,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
union perf_event *event;
int sample_id_all = 1, cpu;
static char *kwlist[] = { "cpu", "sample_id_all", NULL };
- struct perf_mmap *md;
+ struct mmap *md;
int err;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 286fe816c0f3..8579505c29a4 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -2,7 +2,6 @@
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
-#include "cpumap.h"
#include "parse-events.h"
#include <errno.h>
#include <limits.h>
@@ -10,7 +9,6 @@
#include <api/fs/fs.h>
#include <subcmd/parse-options.h>
#include <perf/cpumap.h>
-#include "util.h"
#include "cloexec.h"
#include "record.h"
#include "../perf-sys.h"
@@ -32,7 +30,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
if (parse_events(evlist, str, NULL))
goto out_delete;
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
while (1) {
fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
@@ -173,7 +171,7 @@ void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
use_sample_identifier = perf_can_sample_identifier();
sample_id = true;
} else if (evlist->core.nr_entries > 1) {
- struct evsel *first = perf_evlist__first(evlist);
+ struct evsel *first = evlist__first(evlist);
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.sample_type == first->core.attr.sample_type)
@@ -278,7 +276,7 @@ bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
if (err)
goto out_delete;
- evsel = perf_evlist__last(temp_evlist);
+ evsel = evlist__last(temp_evlist);
if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
diff --git a/tools/perf/util/rwsem.c b/tools/perf/util/rwsem.c
index 5e52e7baa7b6..f3d29d8ddc99 100644
--- a/tools/perf/util/rwsem.c
+++ b/tools/perf/util/rwsem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include "rwsem.h"
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 24a99909d8b3..6785cd87aa4d 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -151,7 +151,6 @@
#include <sys/stat.h>
#include <sys/types.h>
-#include "cpumap.h"
#include "color.h"
#include "evsel.h"
#include "evlist.h"
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
index 4d9593e331ea..05b43ab4eeef 100644
--- a/tools/perf/util/s390-sample-raw.c
+++ b/tools/perf/util/s390-sample-raw.c
@@ -22,7 +22,6 @@
#include <asm/byteorder.h>
#include "debug.h"
-#include "util.h"
#include "session.h"
#include "evlist.h"
#include "color.h"
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 666a56e88d8e..5d341efc3237 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -37,7 +37,6 @@
#include "../dso.h"
#include "../callchain.h"
#include "../evsel.h"
-#include "../util.h"
#include "../event.h"
#include "../thread.h"
#include "../comm.h"
@@ -49,7 +48,6 @@
#include "map.h"
#include "symbol.h"
#include "thread_map.h"
-#include "cpumap.h"
#include "print_binary.h"
#include "stat.h"
#include "mem-events.h"
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index e9e4a04f15db..061bb4d6a3f5 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -22,7 +22,6 @@
#include "symbol.h"
#include "session.h"
#include "tool.h"
-#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
@@ -30,10 +29,11 @@
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
-#include "util.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
+#include <internal/lib.h>
+#include <linux/err.h>
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
@@ -187,6 +187,7 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
struct perf_session *perf_session__new(struct perf_data *data,
bool repipe, struct perf_tool *tool)
{
+ int ret = -ENOMEM;
struct perf_session *session = zalloc(sizeof(*session));
if (!session)
@@ -201,13 +202,15 @@ struct perf_session *perf_session__new(struct perf_data *data,
perf_env__init(&session->header.env);
if (data) {
- if (perf_data__open(data))
+ ret = perf_data__open(data);
+ if (ret < 0)
goto out_delete;
session->data = data;
if (perf_data__is_read(data)) {
- if (perf_session__open(session) < 0)
+ ret = perf_session__open(session);
+ if (ret < 0)
goto out_delete;
/*
@@ -222,8 +225,11 @@ struct perf_session *perf_session__new(struct perf_data *data,
perf_evlist__init_trace_event_sample_raw(session->evlist);
/* Open the directory data. */
- if (data->is_dir && perf_data__open_dir(data))
+ if (data->is_dir) {
+ ret = perf_data__open_dir(data);
+ if (ret)
goto out_delete;
+ }
}
} else {
session->machines.host.env = &perf_env;
@@ -256,7 +262,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
out_delete:
perf_session__delete(session);
out:
- return NULL;
+ return ERR_PTR(ret);
}
static void perf_session__delete_threads(struct perf_session *session)
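
perf_session__new() now returns ERR_PTR(ret) on failure instead of NULL, so every caller must switch to the IS_ERR()/PTR_ERR() convention. A hedged caller sketch:

    #include <linux/err.h>

    static int open_session(struct perf_data *data, struct perf_tool *tool)
    {
            struct perf_session *session = perf_session__new(data, false, tool);

            if (IS_ERR(session))
                    return PTR_ERR(session);   /* NULL checks no longer work */

            /* ... use the session ... */
            perf_session__delete(session);
            return 0;
    }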
@@ -1317,6 +1323,7 @@ static int deliver_sample_value(struct evlist *evlist,
struct machine *machine)
{
struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
+ struct evsel *evsel;
if (sid) {
sample->id = v->id;
@@ -1336,7 +1343,8 @@ static int deliver_sample_value(struct evlist *evlist,
if (!sample->period)
return 0;
- return tool->sample(tool, event, sample, sid->evsel, machine);
+ evsel = container_of(sid->evsel, struct evsel, core);
+ return tool->sample(tool, event, sample, evsel, machine);
}
static int deliver_sample_group(struct evlist *evlist,
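
perf_sample_id now stores the libperf struct perf_evsel, and tool code recovers the embedding struct evsel via container_of(), which works because 'core' is a member of the wrapping struct. A minimal sketch:

    #include <linux/kernel.h>   /* container_of() in the tools headers */

    static struct evsel *evsel_from_core(struct perf_evsel *core_evsel)
    {
            return container_of(core_evsel, struct evsel, core);
    }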
@@ -2412,73 +2420,3 @@ int perf_event__process_id_index(struct perf_session *session,
}
return 0;
}
-
-int perf_event__synthesize_id_index(struct perf_tool *tool,
- perf_event__handler_t process,
- struct evlist *evlist,
- struct machine *machine)
-{
- union perf_event *ev;
- struct evsel *evsel;
- size_t nr = 0, i = 0, sz, max_nr, n;
- int err;
-
- pr_debug2("Synthesizing id index\n");
-
- max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
- sizeof(struct id_index_entry);
-
- evlist__for_each_entry(evlist, evsel)
- nr += evsel->ids;
-
- n = nr > max_nr ? max_nr : nr;
- sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
- ev = zalloc(sz);
- if (!ev)
- return -ENOMEM;
-
- ev->id_index.header.type = PERF_RECORD_ID_INDEX;
- ev->id_index.header.size = sz;
- ev->id_index.nr = n;
-
- evlist__for_each_entry(evlist, evsel) {
- u32 j;
-
- for (j = 0; j < evsel->ids; j++) {
- struct id_index_entry *e;
- struct perf_sample_id *sid;
-
- if (i >= n) {
- err = process(tool, ev, NULL, machine);
- if (err)
- goto out_err;
- nr -= n;
- i = 0;
- }
-
- e = &ev->id_index.entries[i++];
-
- e->id = evsel->id[j];
-
- sid = perf_evlist__id2sid(evlist, e->id);
- if (!sid) {
- free(ev);
- return -ENOENT;
- }
-
- e->idx = sid->idx;
- e->cpu = sid->cpu;
- e->tid = sid->tid;
- }
- }
-
- sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
- ev->id_index.header.size = sz;
- ev->id_index.nr = nr;
-
- err = process(tool, ev, NULL, machine);
-out_err:
- free(ev);
-
- return err;
-}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index b7aa076ab6fd..b4c9428c18f0 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -138,9 +138,4 @@ int perf_session__deliver_synth_event(struct perf_session *session,
int perf_event__process_id_index(struct perf_session *session,
union perf_event *event);
-int perf_event__synthesize_id_index(struct perf_tool *tool,
- perf_event__handler_t process,
- struct evlist *evlist,
- struct machine *machine);
-
#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index a2308eb77681..43d1d410854a 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2329,7 +2329,7 @@ static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
if (nr > evlist->core.nr_entries)
return NULL;
- evsel = perf_evlist__first(evlist);
+ evsel = evlist__first(evlist);
while (--nr > 0)
evsel = perf_evsel__next(evsel);
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
index adfcf1ff464c..d84ed8b6caaa 100644
--- a/tools/perf/util/srccode.c
+++ b/tools/perf/util/srccode.c
@@ -15,7 +15,7 @@
#include <string.h>
#include "srccode.h"
#include "debug.h"
-#include "util.h"
+#include <internal/lib.h> // page_size
#define MAXSRCCACHE (32*1024*1024)
#define MAXSRCFILES 64
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 70c87fdb2a43..2c41d47f6f83 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -738,6 +738,8 @@ static void generic_metric(struct perf_stat_config *config,
char *n, *pn;
expr__ctx_init(&pctx);
+ /* Must be first id entry */
+ expr__add_id(&pctx, name, avg);
for (i = 0; metric_events[i]; i++) {
struct saved_value *v;
struct stats *stats;
@@ -776,8 +778,6 @@ static void generic_metric(struct perf_stat_config *config,
expr__add_id(&pctx, n, avg_stats(stats)*scale);
}
- expr__add_id(&pctx, name, avg);
-
if (!metric_events[i]) {
const char *p = metric_expr;
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 8f1ea27f976f..ebdd130557fb 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -4,6 +4,7 @@
#include <math.h>
#include <string.h>
#include "counts.h"
+#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
@@ -161,6 +162,15 @@ static void perf_evsel__free_prev_raw_counts(struct evsel *evsel)
evsel->prev_raw_counts = NULL;
}
+static void perf_evsel__reset_prev_raw_counts(struct evsel *evsel)
+{
+ if (evsel->prev_raw_counts) {
+ evsel->prev_raw_counts->aggr.val = 0;
+ evsel->prev_raw_counts->aggr.ena = 0;
+ evsel->prev_raw_counts->aggr.run = 0;
+ }
+}
+
static int perf_evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
int ncpus = perf_evsel__nr_cpus(evsel);
@@ -211,6 +221,14 @@ void perf_evlist__reset_stats(struct evlist *evlist)
}
}
+void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel)
+ perf_evsel__reset_prev_raw_counts(evsel);
+}
+
static void zero_per_pkg(struct evsel *counter)
{
if (counter->per_pkg_mask)
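
perf_evlist__reset_prev_raw_counts() zeroes the saved aggregate of every event, so repeated measurement passes start from a clean baseline. A hypothetical interval loop:

    /* Hypothetical: re-arm counters between intervals so the next
     * read computes deltas against zero rather than stale totals. */
    static void start_next_interval(struct evlist *evlist)
    {
            perf_evlist__reset_prev_raw_counts(evlist);
            /* ... enable events, wait the interval, read and report ... */
    }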
@@ -318,7 +336,7 @@ static int process_counter_maps(struct perf_stat_config *config,
int ncpus = perf_evsel__nr_cpus(counter);
int cpu, thread;
- if (counter->system_wide)
+ if (counter->core.system_wide)
nthreads = 1;
for (thread = 0; thread < nthreads; thread++) {
@@ -493,45 +511,3 @@ int create_perf_stat_counter(struct evsel *evsel,
return perf_evsel__open_per_thread(evsel, evsel->core.threads);
}
-
-int perf_stat_synthesize_config(struct perf_stat_config *config,
- struct perf_tool *tool,
- struct evlist *evlist,
- perf_event__handler_t process,
- bool attrs)
-{
- int err;
-
- if (attrs) {
- err = perf_event__synthesize_attrs(tool, evlist, process);
- if (err < 0) {
- pr_err("Couldn't synthesize attrs.\n");
- return err;
- }
- }
-
- err = perf_event__synthesize_extra_attr(tool, evlist, process,
- attrs);
-
- err = perf_event__synthesize_thread_map2(tool, evlist->core.threads,
- process, NULL);
- if (err < 0) {
- pr_err("Couldn't synthesize thread map.\n");
- return err;
- }
-
- err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus,
- process, NULL);
- if (err < 0) {
- pr_err("Couldn't synthesize thread map.\n");
- return err;
- }
-
- err = perf_event__synthesize_stat_config(tool, config, process, NULL);
- if (err < 0) {
- pr_err("Couldn't synthesize config.\n");
- return err;
- }
-
- return 0;
-}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 14fe3e548229..edbeb2f63e8d 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -7,8 +7,9 @@
#include <sys/types.h>
#include <sys/resource.h>
#include "rblist.h"
-#include "event.h"
+struct perf_cpu_map;
+struct perf_stat_config;
struct timespec;
struct stats {
@@ -192,6 +193,7 @@ void perf_stat__collect_metric_expr(struct evlist *);
int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw);
void perf_evlist__free_stats(struct evlist *evlist);
void perf_evlist__reset_stats(struct evlist *evlist);
+void perf_evlist__reset_prev_raw_counts(struct evlist *evlist);
int perf_stat_process_counter(struct perf_stat_config *config,
struct evsel *counter);
@@ -210,11 +212,6 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
int create_perf_stat_counter(struct evsel *evsel,
struct perf_stat_config *config,
struct target *target);
-int perf_stat_synthesize_config(struct perf_stat_config *config,
- struct perf_tool *tool,
- struct evlist *evlist,
- perf_event__handler_t process,
- bool attrs);
void
perf_evlist__print_counters(struct evlist *evlist,
struct perf_stat_config *config,
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 582f4a69cd48..96f941e01681 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -17,11 +17,11 @@
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
+#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "env.h"
#include "svghelper.h"
-#include "cpumap.h"
static u64 first_time, last_time;
static u64 turbo_frequency, max_freq;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 9428639872a6..66f4be1df573 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -7,6 +7,7 @@
#include <unistd.h>
#include <inttypes.h>
+#include "dso.h"
#include "map.h"
#include "map_groups.h"
#include "symbol.h"
@@ -16,10 +17,12 @@
#include "machine.h"
#include "vdso.h"
#include "debug.h"
-#include "util.h"
+#include "util/copyfile.h"
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <symbol/kallsyms.h>
+#include <internal/lib.h>
#ifndef EM_AARCH64
#define EM_AARCH64 183 /* ARM 64 bit */
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 7e2813ec9498..d6e99af263ec 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -1,8 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
#include "dso.h"
#include "symbol.h"
#include "symsrc.h"
-#include "util.h"
#include <errno.h>
#include <unistd.h>
@@ -13,6 +11,7 @@
#include <byteswap.h>
#include <sys/stat.h>
#include <linux/zalloc.h>
+#include <internal/lib.h>
static bool check_need_swap(int file_endian)
{
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 765c75df2904..a8f80e427674 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -19,7 +19,7 @@
#include "build-id.h"
#include "cap.h"
#include "dso.h"
-#include "util.h"
+#include "util.h" // lsdir()
#include "debug.h"
#include "event.h"
#include "machine.h"
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
new file mode 100644
index 000000000000..807cbca403a7
--- /dev/null
+++ b/tools/perf/util/synthetic-events.c
@@ -0,0 +1,1884 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "util/debug.h"
+#include "util/dso.h"
+#include "util/event.h"
+#include "util/evlist.h"
+#include "util/machine.h"
+#include "util/map.h"
+#include "util/map_symbol.h"
+#include "util/branch.h"
+#include "util/memswap.h"
+#include "util/namespaces.h"
+#include "util/session.h"
+#include "util/stat.h"
+#include "util/symbol.h"
+#include "util/synthetic-events.h"
+#include "util/target.h"
+#include "util/time-utils.h"
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <linux/perf_event.h>
+#include <asm/bug.h>
+#include <perf/evsel.h>
+#include <internal/cpumap.h>
+#include <perf/cpumap.h>
+#include <internal/lib.h> // page_size
+#include <internal/threadmap.h>
+#include <perf/threadmap.h>
+#include <symbol/kallsyms.h>
+#include <dirent.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
+#include <api/fs/fs.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
+
+unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
+
+int perf_tool__process_synth_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct machine *machine,
+ perf_event__handler_t process)
+{
+ struct perf_sample synth_sample = {
+ .pid = -1,
+ .tid = -1,
+ .time = -1,
+ .stream_id = -1,
+ .cpu = -1,
+ .period = 1,
+ .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
+ };
+
+ return process(tool, event, &synth_sample, machine);
+}
+
+/*
+ * Assumes that the first 4095 bytes of /proc/pid/status contain
+ * the comm, tgid and ppid.
+ */
+static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
+ pid_t *tgid, pid_t *ppid)
+{
+ char filename[PATH_MAX];
+ char bf[4096];
+ int fd;
+ size_t size = 0;
+ ssize_t n;
+ char *name, *tgids, *ppids;
+
+ *tgid = -1;
+ *ppid = -1;
+
+ snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0) {
+ pr_debug("couldn't open %s\n", filename);
+ return -1;
+ }
+
+ n = read(fd, bf, sizeof(bf) - 1);
+ close(fd);
+ if (n <= 0) {
+ pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
+ pid);
+ return -1;
+ }
+ bf[n] = '\0';
+
+ name = strstr(bf, "Name:");
+ tgids = strstr(bf, "Tgid:");
+ ppids = strstr(bf, "PPid:");
+
+ if (name) {
+ char *nl;
+
+ name = skip_spaces(name + 5); /* strlen("Name:") */
+ nl = strchr(name, '\n');
+ if (nl)
+ *nl = '\0';
+
+ size = strlen(name);
+ if (size >= len)
+ size = len - 1;
+ memcpy(comm, name, size);
+ comm[size] = '\0';
+ } else {
+ pr_debug("Name: string not found for pid %d\n", pid);
+ }
+
+ if (tgids) {
+ tgids += 5; /* strlen("Tgid:") */
+ *tgid = atoi(tgids);
+ } else {
+ pr_debug("Tgid: string not found for pid %d\n", pid);
+ }
+
+ if (ppids) {
+ ppids += 5; /* strlen("PPid:") */
+ *ppid = atoi(ppids);
+ } else {
+ pr_debug("PPid: string not found for pid %d\n", pid);
+ }
+
+ return 0;
+}
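
For reference outside the patch, the same Name:/Tgid:/PPid: extraction can be sketched with plain libc line-by-line scanning; the field keys are the real /proc/<pid>/status ones, while read_status_ids() and its structure are illustrative only:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>

    static int read_status_ids(pid_t pid, char *comm, size_t len,
                               pid_t *tgid, pid_t *ppid)
    {
            char path[64], line[256];
            FILE *fp;

            *tgid = *ppid = -1;
            snprintf(path, sizeof(path), "/proc/%d/status", (int)pid);
            fp = fopen(path, "r");
            if (!fp)
                    return -1;

            /* status is line-oriented, one "Key:\tvalue" pair per line */
            while (fgets(line, sizeof(line), fp)) {
                    if (!strncmp(line, "Name:", 5)) {
                            char *p = line + 5;

                            while (*p == ' ' || *p == '\t')
                                    p++;
                            p[strcspn(p, "\n")] = '\0';
                            snprintf(comm, len, "%s", p);
                    } else if (!strncmp(line, "Tgid:", 5)) {
                            *tgid = (pid_t)atoi(line + 5);
                    } else if (!strncmp(line, "PPid:", 5)) {
                            *ppid = (pid_t)atoi(line + 5);
                    }
            }
            fclose(fp);
            return *tgid < 0 ? -1 : 0;
    }
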
+
+static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
+ struct machine *machine,
+ pid_t *tgid, pid_t *ppid)
+{
+ size_t size;
+
+ *ppid = -1;
+
+ memset(&event->comm, 0, sizeof(event->comm));
+
+ if (machine__is_host(machine)) {
+ if (perf_event__get_comm_ids(pid, event->comm.comm,
+ sizeof(event->comm.comm),
+ tgid, ppid) != 0) {
+ return -1;
+ }
+ } else {
+ *tgid = machine->pid;
+ }
+
+ if (*tgid < 0)
+ return -1;
+
+ event->comm.pid = *tgid;
+ event->comm.header.type = PERF_RECORD_COMM;
+
+ size = strlen(event->comm.comm) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
+ event->comm.header.size = (sizeof(event->comm) -
+ (sizeof(event->comm.comm) - size) +
+ machine->id_hdr_size);
+ event->comm.tid = pid;
+
+ return 0;
+}
+
+pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ pid_t tgid, ppid;
+
+ if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
+ return -1;
+
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
+ return -1;
+
+ return tgid;
+}
+
+static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
+ struct perf_ns_link_info *ns_link_info)
+{
+ struct stat64 st;
+ char proc_ns[128];
+
+ sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
+ if (stat64(proc_ns, &st) == 0) {
+ ns_link_info->dev = st.st_dev;
+ ns_link_info->ino = st.st_ino;
+ }
+}
+
+int perf_event__synthesize_namespaces(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ u32 idx;
+ struct perf_ns_link_info *ns_link_info;
+
+ if (!tool || !tool->namespace_events)
+ return 0;
+
+ memset(&event->namespaces, 0, (sizeof(event->namespaces) +
+ (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
+ machine->id_hdr_size));
+
+ event->namespaces.pid = tgid;
+ event->namespaces.tid = pid;
+
+ event->namespaces.nr_namespaces = NR_NAMESPACES;
+
+ ns_link_info = event->namespaces.link_info;
+
+ for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
+ perf_event__get_ns_link_info(pid, perf_ns__name(idx),
+ &ns_link_info[idx]);
+
+ event->namespaces.header.type = PERF_RECORD_NAMESPACES;
+
+ event->namespaces.header.size = (sizeof(event->namespaces) +
+ (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
+ machine->id_hdr_size);
+
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
+ return -1;
+
+ return 0;
+}
+
+static int perf_event__synthesize_fork(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid, pid_t ppid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
+
+ /*
+ * For the main thread, set the parent to the ppid from the status
+ * file. For other threads, set the parent pid to the main thread,
+ * i.e., assume the main thread spawns all threads in a process.
+ */
+ if (tgid == pid) {
+ event->fork.ppid = ppid;
+ event->fork.ptid = ppid;
+ } else {
+ event->fork.ppid = tgid;
+ event->fork.ptid = tgid;
+ }
+ event->fork.pid = tgid;
+ event->fork.tid = pid;
+ event->fork.header.type = PERF_RECORD_FORK;
+ event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
+
+ event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
+
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
+ return -1;
+
+ return 0;
+}
+
+int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data)
+{
+ char filename[PATH_MAX];
+ FILE *fp;
+ unsigned long long t;
+ bool truncation = false;
+ unsigned long long timeout = proc_map_timeout * 1000000ULL;
+ int rc = 0;
+ const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
+ int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
+ machine->root_dir, pid, pid);
+
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ /*
+ * We raced with a task exiting - just return:
+ */
+ pr_debug("couldn't open %s\n", filename);
+ return -1;
+ }
+
+ event->header.type = PERF_RECORD_MMAP2;
+ t = rdclock();
+
+ while (1) {
+ char bf[BUFSIZ];
+ char prot[5];
+ char execname[PATH_MAX];
+ char anonstr[] = "//anon";
+ unsigned int ino;
+ size_t size;
+ ssize_t n;
+
+ if (fgets(bf, sizeof(bf), fp) == NULL)
+ break;
+
+ if ((rdclock() - t) > timeout) {
+ pr_warning("Reading %s time out. "
+ "You may want to increase "
+ "the time limit by --proc-map-timeout\n",
+ filename);
+ truncation = true;
+ goto out;
+ }
+
+ /* ensure null termination since stack will be reused. */
+ strcpy(execname, "");
+
+ /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
+ n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n",
+ &event->mmap2.start, &event->mmap2.len, prot,
+ &event->mmap2.pgoff, &event->mmap2.maj,
+ &event->mmap2.min,
+ &ino, execname);
+
+ /*
+ * Anon maps don't have the execname.
+ */
+ if (n < 7)
+ continue;
+
+ event->mmap2.ino = (u64)ino;
+
+ /*
+ * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
+ */
+ if (machine__is_host(machine))
+ event->header.misc = PERF_RECORD_MISC_USER;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_USER;
+
+ /* map protection and flags bits */
+ event->mmap2.prot = 0;
+ event->mmap2.flags = 0;
+ if (prot[0] == 'r')
+ event->mmap2.prot |= PROT_READ;
+ if (prot[1] == 'w')
+ event->mmap2.prot |= PROT_WRITE;
+ if (prot[2] == 'x')
+ event->mmap2.prot |= PROT_EXEC;
+
+ if (prot[3] == 's')
+ event->mmap2.flags |= MAP_SHARED;
+ else
+ event->mmap2.flags |= MAP_PRIVATE;
+
+ if (prot[2] != 'x') {
+ if (!mmap_data || prot[0] != 'r')
+ continue;
+
+ event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
+ }
+
+out:
+ if (truncation)
+ event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
+
+ if (!strcmp(execname, ""))
+ strcpy(execname, anonstr);
+
+ if (hugetlbfs_mnt_len &&
+ !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
+ strcpy(execname, anonstr);
+ event->mmap2.flags |= MAP_HUGETLB;
+ }
+
+ size = strlen(execname) + 1;
+ memcpy(event->mmap2.filename, execname, size);
+ size = PERF_ALIGN(size, sizeof(u64));
+ event->mmap2.len -= event->mmap2.start;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size));
+ memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+ event->mmap2.header.size += machine->id_hdr_size;
+ event->mmap2.pid = tgid;
+ event->mmap2.tid = pid;
+
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
+ rc = -1;
+ break;
+ }
+
+ if (truncation)
+ break;
+ }
+
+ fclose(fp);
+ return rc;
+}
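
A standalone sketch of the maps-line parse above, assuming only libc. The second hex field in /proc/<pid>/maps is the map's end address, which is why the code above subtracts the start to obtain a length, and anonymous maps carry no trailing filename, so fewer than seven converted fields means the line is skipped:

    #include <stdio.h>

    int main(void)
    {
            const char *bf = "00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat";
            unsigned long long start, end, pgoff;
            unsigned int maj, min, ino;
            char prot[5], execname[4096] = "";
            int n;

            n = sscanf(bf, "%llx-%llx %4s %llx %x:%x %u %[^\n]",
                       &start, &end, prot, &pgoff, &maj, &min, &ino, execname);

            /* 7 fields parsed but no name => anonymous mapping */
            printf("fields=%d start=%#llx len=%#llx prot=%s file=%s\n",
                   n, start, end - start, prot,
                   n == 8 ? execname : "//anon");
            return 0;
    }
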
+
+int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
+ struct machine *machine)
+{
+ int rc = 0;
+ struct map *pos;
+ struct maps *maps = machine__kernel_maps(machine);
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for kernel modules\n");
+ return -1;
+ }
+
+ event->header.type = PERF_RECORD_MMAP;
+
+ /*
+ * kernel uses 0 for user space maps, see kernel/perf_event.c
+ * __perf_event_mmap
+ */
+ if (machine__is_host(machine))
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+
+ for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+ size_t size;
+
+ if (!__map__is_kmodule(pos))
+ continue;
+
+ size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
+ event->mmap.start = pos->start;
+ event->mmap.len = pos->end - pos->start;
+ event->mmap.pid = machine->pid;
+
+ memcpy(event->mmap.filename, pos->dso->long_name,
+ pos->dso->long_name_len + 1);
+ if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
+ rc = -1;
+ break;
+ }
+ }
+
+ free(event);
+ return rc;
+}
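
Each synthesized record is trimmed so the filename field holds just the u64-aligned module name rather than the full PATH_MAX buffer. A toy computation of that PERF_ALIGN step (the module name is made up):

    #include <stdio.h>

    #define ALIGN_U64(x) (((x) + 7UL) & ~7UL)

    int main(void)
    {
            unsigned long len = sizeof("nf_conntrack"); /* 12 chars + NUL = 13 */

            printf("aligned filename field: %lu bytes\n", ALIGN_U64(len)); /* 16 */
            return 0;
    }
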
+
+static int __event__synthesize_thread(union perf_event *comm_event,
+ union perf_event *mmap_event,
+ union perf_event *fork_event,
+ union perf_event *namespaces_event,
+ pid_t pid, int full, perf_event__handler_t process,
+ struct perf_tool *tool, struct machine *machine, bool mmap_data)
+{
+ char filename[PATH_MAX];
+ DIR *tasks;
+ struct dirent *dirent;
+ pid_t tgid, ppid;
+ int rc = 0;
+
+ /* special case: only send one comm event using passed in pid */
+ if (!full) {
+ tgid = perf_event__synthesize_comm(tool, comm_event, pid,
+ process, machine);
+
+ if (tgid == -1)
+ return -1;
+
+ if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
+ tgid, process, machine) < 0)
+ return -1;
+
+ /*
+ * Send mmap only for the thread group leader;
+ * see thread__init_map_groups.
+ */
+ if (pid == tgid &&
+ perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine, mmap_data))
+ return -1;
+
+ return 0;
+ }
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/task",
+ machine->root_dir, pid);
+
+ tasks = opendir(filename);
+ if (tasks == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
+
+ while ((dirent = readdir(tasks)) != NULL) {
+ char *end;
+ pid_t _pid;
+
+ _pid = strtol(dirent->d_name, &end, 10);
+ if (*end)
+ continue;
+
+ rc = -1;
+ if (perf_event__prepare_comm(comm_event, _pid, machine,
+ &tgid, &ppid) != 0)
+ break;
+
+ if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
+ ppid, process, machine) < 0)
+ break;
+
+ if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
+ tgid, process, machine) < 0)
+ break;
+
+ /*
+ * Send the prepared comm event
+ */
+ if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
+ break;
+
+ rc = 0;
+ if (_pid == pid) {
+ /* process the parent's maps too */
+ rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine, mmap_data);
+ if (rc)
+ break;
+ }
+ }
+
+ closedir(tasks);
+ return rc;
+}
+
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct perf_thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data)
+{
+ union perf_event *comm_event, *mmap_event, *fork_event;
+ union perf_event *namespaces_event;
+ int err = -1, thread, j;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
+
+ namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
+ (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
+ machine->id_hdr_size);
+ if (namespaces_event == NULL)
+ goto out_free_fork;
+
+ err = 0;
+ for (thread = 0; thread < threads->nr; ++thread) {
+ if (__event__synthesize_thread(comm_event, mmap_event,
+ fork_event, namespaces_event,
+ perf_thread_map__pid(threads, thread), 0,
+ process, tool, machine,
+ mmap_data)) {
+ err = -1;
+ break;
+ }
+
+ /*
+ * comm.pid is set to thread group id by
+ * perf_event__synthesize_comm
+ */
+ if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
+ bool need_leader = true;
+
+ /* is thread group leader in thread_map? */
+ for (j = 0; j < threads->nr; ++j) {
+ if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
+ need_leader = false;
+ break;
+ }
+ }
+
+ /* if not, generate events for it */
+ if (need_leader &&
+ __event__synthesize_thread(comm_event, mmap_event,
+ fork_event, namespaces_event,
+ comm_event->comm.pid, 0,
+ process, tool, machine,
+ mmap_data)) {
+ err = -1;
+ break;
+ }
+ }
+ }
+ free(namespaces_event);
+out_free_fork:
+ free(fork_event);
+out_free_mmap:
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
+}
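
The leader fix-up covers thread maps that omit the thread-group leader: perf_event__synthesize_comm() reports the tgid back in comm.pid, and if no map entry matches it, events are synthesized for the leader as well. A reduced sketch of that membership scan, with made-up pids:

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
            int map[] = { 2001, 2002 };   /* threads the user asked for */
            int leader = 2000;            /* tgid reported back in comm.pid */
            bool need_leader = true;

            for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                    if (map[i] == leader)
                            need_leader = false;

            printf("synthesize leader %d too: %s\n", leader,
                   need_leader ? "yes" : "no");
            return 0;
    }
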
+
+static int __perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data,
+ struct dirent **dirent,
+ int start,
+ int num)
+{
+ union perf_event *comm_event, *mmap_event, *fork_event;
+ union perf_event *namespaces_event;
+ int err = -1;
+ char *end;
+ pid_t pid;
+ int i;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
+
+ namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
+ (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
+ machine->id_hdr_size);
+ if (namespaces_event == NULL)
+ goto out_free_fork;
+
+ for (i = start; i < start + num; i++) {
+ if (!isdigit(dirent[i]->d_name[0]))
+ continue;
+
+ pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
+ /* only interested in proper numerical dirents */
+ if (*end)
+ continue;
+ /*
+ * We may race with an exiting thread, so don't stop just because
+ * one thread couldn't be synthesized.
+ */
+ __event__synthesize_thread(comm_event, mmap_event, fork_event,
+ namespaces_event, pid, 1, process,
+ tool, machine, mmap_data);
+ }
+ err = 0;
+
+ free(namespaces_event);
+out_free_fork:
+ free(fork_event);
+out_free_mmap:
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
+}
+
+struct synthesize_threads_arg {
+ struct perf_tool *tool;
+ perf_event__handler_t process;
+ struct machine *machine;
+ bool mmap_data;
+ struct dirent **dirent;
+ int num;
+ int start;
+};
+
+static void *synthesize_threads_worker(void *arg)
+{
+ struct synthesize_threads_arg *args = arg;
+
+ __perf_event__synthesize_threads(args->tool, args->process,
+ args->machine, args->mmap_data,
+ args->dirent,
+ args->start, args->num);
+ return NULL;
+}
+
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data,
+ unsigned int nr_threads_synthesize)
+{
+ struct synthesize_threads_arg *args = NULL;
+ pthread_t *synthesize_threads = NULL;
+ char proc_path[PATH_MAX];
+ struct dirent **dirent;
+ int num_per_thread;
+ int m, n, i, j;
+ int thread_nr;
+ int base = 0;
+ int err = -1;
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
+ n = scandir(proc_path, &dirent, 0, alphasort);
+ if (n < 0)
+ return err;
+
+ if (nr_threads_synthesize == UINT_MAX)
+ thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
+ else
+ thread_nr = nr_threads_synthesize;
+
+ if (thread_nr <= 1) {
+ err = __perf_event__synthesize_threads(tool, process,
+ machine, mmap_data,
+ dirent, base, n);
+ goto free_dirent;
+ }
+ if (thread_nr > n)
+ thread_nr = n;
+
+ synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
+ if (synthesize_threads == NULL)
+ goto free_dirent;
+
+ args = calloc(thread_nr, sizeof(*args));
+ if (args == NULL)
+ goto free_threads;
+
+ num_per_thread = n / thread_nr;
+ m = n % thread_nr;
+ for (i = 0; i < thread_nr; i++) {
+ args[i].tool = tool;
+ args[i].process = process;
+ args[i].machine = machine;
+ args[i].mmap_data = mmap_data;
+ args[i].dirent = dirent;
+ }
+ for (i = 0; i < m; i++) {
+ args[i].num = num_per_thread + 1;
+ args[i].start = i * args[i].num;
+ }
+ if (i != 0)
+ base = args[i-1].start + args[i-1].num;
+ for (j = i; j < thread_nr; j++) {
+ args[j].num = num_per_thread;
+ args[j].start = base + (j - i) * args[i].num;
+ }
+
+ for (i = 0; i < thread_nr; i++) {
+ if (pthread_create(&synthesize_threads[i], NULL,
+ synthesize_threads_worker, &args[i]))
+ goto out_join;
+ }
+ err = 0;
+out_join:
+ for (i = 0; i < thread_nr; i++)
+ pthread_join(synthesize_threads[i], NULL);
+ free(args);
+free_threads:
+ free(synthesize_threads);
+free_dirent:
+ for (i = 0; i < n; i++)
+ zfree(&dirent[i]);
+ free(dirent);
+
+ return err;
+}
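
The split hands the first n % thread_nr workers one extra directory entry each, so all n entries are covered exactly once. A standalone check of that arithmetic, in reduced form:

    #include <stdio.h>

    int main(void)
    {
            int n = 10, t = 4;                 /* 10 /proc entries, 4 workers */
            int per = n / t, m = n % t, start = 0;

            for (int i = 0; i < t; i++) {
                    int num = per + (i < m ? 1 : 0);

                    printf("worker %d: entries [%d, %d)\n", i, start, start + num);
                    start += num;
            }
            return 0;   /* prints [0,3) [3,6) [6,8) [8,10) */
    }
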
+
+int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return 0;
+}
+
+static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ size_t size;
+ struct map *map = machine__kernel_map(machine);
+ struct kmap *kmap;
+ int err;
+ union perf_event *event;
+
+ if (map == NULL)
+ return -1;
+
+ kmap = map__kmap(map);
+ if (!kmap->ref_reloc_sym)
+ return -1;
+
+ /*
+ * We should get this from /sys/kernel/sections/.text, but until that
+ * is available use this, and once it is, keep this as a fallback for
+ * older kernels.
+ */
+ event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for kernel modules\n");
+ return -1;
+ }
+
+ if (machine__is_host(machine)) {
+ /*
+ * kernel uses PERF_RECORD_MISC_USER for user space maps,
+ * see kernel/perf_event.c __perf_event_mmap
+ */
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ } else {
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ }
+
+ size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
+ "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
+ event->mmap.pgoff = kmap->ref_reloc_sym->addr;
+ event->mmap.start = map->start;
+ event->mmap.len = map->end - event->mmap.start;
+ event->mmap.pid = machine->pid;
+
+ err = perf_tool__process_synth_event(tool, event, machine, process);
+ free(event);
+
+ return err;
+}
+
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ int err;
+
+ err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
+ if (err < 0)
+ return err;
+
+ return perf_event__synthesize_extra_kmaps(tool, process, machine);
+}
+
+int perf_event__synthesize_thread_map2(struct perf_tool *tool,
+ struct perf_thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ union perf_event *event;
+ int i, err, size;
+
+ size = sizeof(event->thread_map);
+ size += threads->nr * sizeof(event->thread_map.entries[0]);
+
+ event = zalloc(size);
+ if (!event)
+ return -ENOMEM;
+
+ event->header.type = PERF_RECORD_THREAD_MAP;
+ event->header.size = size;
+ event->thread_map.nr = threads->nr;
+
+ for (i = 0; i < threads->nr; i++) {
+ struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
+ char *comm = perf_thread_map__comm(threads, i);
+
+ if (!comm)
+ comm = (char *) "";
+
+ entry->pid = perf_thread_map__pid(threads, i);
+ strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
+ }
+
+ err = process(tool, event, NULL, machine);
+
+ free(event);
+ return err;
+}
+
+static void synthesize_cpus(struct cpu_map_entries *cpus,
+ struct perf_cpu_map *map)
+{
+ int i;
+
+ cpus->nr = map->nr;
+
+ for (i = 0; i < map->nr; i++)
+ cpus->cpu[i] = map->map[i];
+}
+
+static void synthesize_mask(struct perf_record_record_cpu_map *mask,
+ struct perf_cpu_map *map, int max)
+{
+ int i;
+
+ mask->nr = BITS_TO_LONGS(max);
+ mask->long_size = sizeof(long);
+
+ for (i = 0; i < map->nr; i++)
+ set_bit(map->map[i], mask->mask);
+}
+
+static size_t cpus_size(struct perf_cpu_map *map)
+{
+ return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
+}
+
+static size_t mask_size(struct perf_cpu_map *map, int *max)
+{
+ int i;
+
+ *max = 0;
+
+ for (i = 0; i < map->nr; i++) {
+ /* bit position of the cpu is its id + 1 */
+ int bit = map->map[i] + 1;
+
+ if (bit > *max)
+ *max = bit;
+ }
+
+ return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
+}
+
+void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
+{
+ size_t size_cpus, size_mask;
+ bool is_dummy = perf_cpu_map__empty(map);
+
+ /*
+ * Both array and mask data have variable size based
+ * on the number of cpus and their actual values.
+ * The size of the 'struct perf_record_cpu_map_data' is:
+ *
+ * array = size of 'struct cpu_map_entries' +
+ * number of cpus * sizeof(u64)
+ *
+ * mask = size of 'struct perf_record_record_cpu_map' +
+ * maximum cpu bit converted to size of longs
+ *
+ * and finally + the size of 'struct perf_record_cpu_map_data'.
+ */
+ size_cpus = cpus_size(map);
+ size_mask = mask_size(map, max);
+
+ if (is_dummy || (size_cpus < size_mask)) {
+ *size += size_cpus;
+ *type = PERF_CPU_MAP__CPUS;
+ } else {
+ *size += size_mask;
+ *type = PERF_CPU_MAP__MASK;
+ }
+
+ *size += sizeof(struct perf_record_cpu_map_data);
+ *size = PERF_ALIGN(*size, sizeof(u64));
+ return zalloc(*size);
+}
+
+void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
+ u16 type, int max)
+{
+ data->type = type;
+
+ switch (type) {
+ case PERF_CPU_MAP__CPUS:
+ synthesize_cpus((struct cpu_map_entries *) data->data, map);
+ break;
+ case PERF_CPU_MAP__MASK:
+ synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
+ break;
+ default:
+ break;
+ };
+}
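
Whether the CPUs travel as an explicit u16 array or as a bitmask is decided purely on encoded size in cpu_map_data__alloc() above: the array costs roughly two bytes per present CPU, the mask one bit per possible CPU id up to the highest. A back-of-the-envelope sketch, assuming 64-bit longs and illustrative numbers:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            int nr = 4, max_bit = 1024;   /* 4 cpus, highest id 1023 (+1) */
            size_t array = nr * sizeof(unsigned short);
            size_t mask  = ((max_bit + 63) / 64) * sizeof(long);

            printf("array=%zu bytes, mask=%zu bytes -> %s\n", array, mask,
                   array < mask ? "PERF_CPU_MAP__CPUS" : "PERF_CPU_MAP__MASK");
            return 0;
    }
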
+
+static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
+{
+ size_t size = sizeof(struct perf_record_cpu_map);
+ struct perf_record_cpu_map *event;
+ int max;
+ u16 type;
+
+ event = cpu_map_data__alloc(map, &size, &type, &max);
+ if (!event)
+ return NULL;
+
+ event->header.type = PERF_RECORD_CPU_MAP;
+ event->header.size = size;
+ event->data.type = type;
+
+ cpu_map_data__synthesize(&event->data, map, type, max);
+ return event;
+}
+
+int perf_event__synthesize_cpu_map(struct perf_tool *tool,
+ struct perf_cpu_map *map,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct perf_record_cpu_map *event;
+ int err;
+
+ event = cpu_map_event__new(map);
+ if (!event)
+ return -ENOMEM;
+
+ err = process(tool, (union perf_event *) event, NULL, machine);
+
+ free(event);
+ return err;
+}
+
+int perf_event__synthesize_stat_config(struct perf_tool *tool,
+ struct perf_stat_config *config,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct perf_record_stat_config *event;
+ int size, i = 0, err;
+
+ size = sizeof(*event);
+ size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
+
+ event = zalloc(size);
+ if (!event)
+ return -ENOMEM;
+
+ event->header.type = PERF_RECORD_STAT_CONFIG;
+ event->header.size = size;
+ event->nr = PERF_STAT_CONFIG_TERM__MAX;
+
+#define ADD(__term, __val) \
+ event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \
+ event->data[i].val = __val; \
+ i++;
+
+ ADD(AGGR_MODE, config->aggr_mode)
+ ADD(INTERVAL, config->interval)
+ ADD(SCALE, config->scale)
+
+ WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
+ "stat config terms unbalanced\n");
+#undef ADD
+
+ err = process(tool, (union perf_event *) event, NULL, machine);
+
+ free(event);
+ return err;
+}
+
+int perf_event__synthesize_stat(struct perf_tool *tool,
+ u32 cpu, u32 thread, u64 id,
+ struct perf_counts_values *count,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct perf_record_stat event;
+
+ event.header.type = PERF_RECORD_STAT;
+ event.header.size = sizeof(event);
+ event.header.misc = 0;
+
+ event.id = id;
+ event.cpu = cpu;
+ event.thread = thread;
+ event.val = count->val;
+ event.ena = count->ena;
+ event.run = count->run;
+
+ return process(tool, (union perf_event *) &event, NULL, machine);
+}
+
+int perf_event__synthesize_stat_round(struct perf_tool *tool,
+ u64 evtime, u64 type,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ struct perf_record_stat_round event;
+
+ event.header.type = PERF_RECORD_STAT_ROUND;
+ event.header.size = sizeof(event);
+ event.header.misc = 0;
+
+ event.time = evtime;
+ event.type = type;
+
+ return process(tool, (union perf_event *) &event, NULL, machine);
+}
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
+{
+ size_t sz, result = sizeof(struct perf_record_sample);
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_IP)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TIME)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ADDR)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_STREAM_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_CPU)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_PERIOD)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_READ) {
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ result += sizeof(u64);
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ result += sizeof(u32);
+ result += sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ result += sizeof(u64);
+ sz = hweight64(sample->user_regs.mask) * sizeof(u64);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ result += sizeof(u64);
+ if (sz) {
+ result += sz;
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_DATA_SRC)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TRANSACTION)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_REGS_INTR) {
+ if (sample->intr_regs.abi) {
+ result += sizeof(u64);
+ sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_PHYS_ADDR)
+ result += sizeof(u64);
+
+ return result;
+}
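
Most fixed-size sample fields contribute exactly one u64, so for a simple type mask the total is the header plus eight bytes per set bit. A worked toy instance (the bit values are stand-ins, not the real PERF_SAMPLE_* encodings):

    #include <stdio.h>
    #include <stdint.h>

    /* stand-ins for PERF_SAMPLE_* bits, not the real uapi values */
    #define S_IP     (1ULL << 0)
    #define S_TID    (1ULL << 1)
    #define S_TIME   (1ULL << 2)
    #define S_PERIOD (1ULL << 3)

    int main(void)
    {
            uint64_t type = S_IP | S_TID | S_TIME | S_PERIOD;
            size_t sz = 8;   /* stand-in for sizeof(struct perf_event_header) */

            for (uint64_t bit = 1; bit; bit <<= 1)
                    if (type & bit)
                            sz += sizeof(uint64_t);

            printf("sample size = %zu bytes\n", sz);   /* 8 + 4*8 = 40 */
            return 0;
    }
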
+
+int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
+ const struct perf_sample *sample)
+{
+ __u64 *array;
+ size_t sz;
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union u64_swap u;
+
+ array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_IP) {
+ *array = sample->ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ *array = sample->addr;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ u.val32[1] = 0;
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ *array = sample->period;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ if (read_format & PERF_FORMAT_GROUP)
+ *array = sample->read.group.nr;
+ else
+ *array = sample->read.one.value;
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ *array = sample->read.time_enabled;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ *array = sample->read.time_running;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ memcpy(array, sample->read.group.values, sz);
+ array = (void *)array + sz;
+ } else {
+ *array = sample->read.one.id;
+ array++;
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ memcpy(array, sample->callchain, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u.val32[0] = sample->raw_size;
+ *array = u.val64;
+ array = (void *)array + sizeof(u32);
+
+ memcpy(array, sample->raw_data, sample->raw_size);
+ array = (void *)array + sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ memcpy(array, sample->branch_stack, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ *array++ = sample->user_regs.abi;
+ sz = hweight64(sample->user_regs.mask) * sizeof(u64);
+ memcpy(array, sample->user_regs.regs, sz);
+ array = (void *)array + sz;
+ } else {
+ *array++ = 0;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ *array++ = sz;
+ if (sz) {
+ memcpy(array, sample->user_stack.data, sz);
+ array = (void *)array + sz;
+ *array++ = sz;
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT) {
+ *array = sample->weight;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_DATA_SRC) {
+ *array = sample->data_src;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ *array = sample->transaction;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_REGS_INTR) {
+ if (sample->intr_regs.abi) {
+ *array++ = sample->intr_regs.abi;
+ sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
+ memcpy(array, sample->intr_regs.regs, sz);
+ array = (void *)array + sz;
+ } else {
+ *array++ = 0;
+ }
+ }
+
+ if (type & PERF_SAMPLE_PHYS_ADDR) {
+ *array = sample->phys_addr;
+ array++;
+ }
+
+ return 0;
+}
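
The u64_swap union used above packs two 32-bit fields (pid/tid, or cpu plus padding) into one 64-bit slot of the sample array without manual shifting; it is also the piece that needs care for cross-endian analysis. A minimal demonstration:

    #include <stdio.h>
    #include <stdint.h>

    union u64_swap {
            uint64_t val64;
            uint32_t val32[2];
    };

    int main(void)
    {
            union u64_swap u;

            u.val32[0] = 1234;   /* pid */
            u.val32[1] = 5678;   /* tid */
            printf("packed slot: %#llx\n", (unsigned long long)u.val64);
            return 0;
    }
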
+
+int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
+ struct evlist *evlist, struct machine *machine)
+{
+ union perf_event *ev;
+ struct evsel *evsel;
+ size_t nr = 0, i = 0, sz, max_nr, n;
+ int err;
+
+ pr_debug2("Synthesizing id index\n");
+
+ max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
+ sizeof(struct id_index_entry);
+
+ evlist__for_each_entry(evlist, evsel)
+ nr += evsel->core.ids;
+
+ n = nr > max_nr ? max_nr : nr;
+ sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
+ ev = zalloc(sz);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->id_index.header.type = PERF_RECORD_ID_INDEX;
+ ev->id_index.header.size = sz;
+ ev->id_index.nr = n;
+
+ evlist__for_each_entry(evlist, evsel) {
+ u32 j;
+
+ for (j = 0; j < evsel->core.ids; j++) {
+ struct id_index_entry *e;
+ struct perf_sample_id *sid;
+
+ if (i >= n) {
+ err = process(tool, ev, NULL, machine);
+ if (err)
+ goto out_err;
+ nr -= n;
+ i = 0;
+ }
+
+ e = &ev->id_index.entries[i++];
+
+ e->id = evsel->core.id[j];
+
+ sid = perf_evlist__id2sid(evlist, e->id);
+ if (!sid) {
+ free(ev);
+ return -ENOENT;
+ }
+
+ e->idx = sid->idx;
+ e->cpu = sid->cpu;
+ e->tid = sid->tid;
+ }
+ }
+
+ sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
+ ev->id_index.header.size = sz;
+ ev->id_index.nr = nr;
+
+ err = process(tool, ev, NULL, machine);
+out_err:
+ free(ev);
+
+ return err;
+}
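
Because the record's size field is a u16, the index is streamed in chunks of at most max_nr entries: flush whenever the buffer fills, then send the final partial chunk with its true count. The chunking skeleton, reduced to its arithmetic:

    #include <stdio.h>

    static void emit(int count)
    {
            printf("emit chunk of %d entries\n", count);
    }

    int main(void)
    {
            int total = 10, n = 4;   /* 10 ids, at most 4 entries per record */
            int i = 0;

            for (int id = 0; id < total; id++) {
                    if (i == n) {    /* buffer full: flush and restart */
                            emit(n);
                            i = 0;
                    }
                    i++;
            }
            emit(i);   /* trailing partial chunk: 2 entries */
            return 0;
    }
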
+
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+ struct target *target, struct perf_thread_map *threads,
+ perf_event__handler_t process, bool data_mmap,
+ unsigned int nr_threads_synthesize)
+{
+ if (target__has_task(target))
+ return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
+ else if (target__has_cpu(target))
+ return perf_event__synthesize_threads(tool, process,
+ machine, data_mmap,
+ nr_threads_synthesize);
+ /* command specified */
+ return 0;
+}
+
+int machine__synthesize_threads(struct machine *machine, struct target *target,
+ struct perf_thread_map *threads, bool data_mmap,
+ unsigned int nr_threads_synthesize)
+{
+ return __machine__synthesize_threads(machine, NULL, target, threads,
+ perf_event__process, data_mmap,
+ nr_threads_synthesize);
+}
+
+static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
+{
+ struct perf_record_event_update *ev;
+
+ size += sizeof(*ev);
+ size = PERF_ALIGN(size, sizeof(u64));
+
+ ev = zalloc(size);
+ if (ev) {
+ ev->header.type = PERF_RECORD_EVENT_UPDATE;
+ ev->header.size = (u16)size;
+ ev->type = type;
+ ev->id = id;
+ }
+ return ev;
+}
+
+int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
+ perf_event__handler_t process)
+{
+ size_t size = strlen(evsel->unit);
+ struct perf_record_event_update *ev;
+ int err;
+
+ ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
+ if (ev == NULL)
+ return -ENOMEM;
+
+ strlcpy(ev->data, evsel->unit, size + 1);
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
+ perf_event__handler_t process)
+{
+ struct perf_record_event_update *ev;
+ struct perf_record_event_update_scale *ev_data;
+ int err;
+
+ ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
+ if (ev == NULL)
+ return -ENOMEM;
+
+ ev_data = (struct perf_record_event_update_scale *)ev->data;
+ ev_data->scale = evsel->scale;
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
+ perf_event__handler_t process)
+{
+ struct perf_record_event_update *ev;
+ size_t len = strlen(evsel->name);
+ int err;
+
+ ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
+ if (ev == NULL)
+ return -ENOMEM;
+
+ strlcpy(ev->data, evsel->name, len + 1);
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
+ perf_event__handler_t process)
+{
+ size_t size = sizeof(struct perf_record_event_update);
+ struct perf_record_event_update *ev;
+ int max, err;
+ u16 type;
+
+ if (!evsel->core.own_cpus)
+ return 0;
+
+ ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->header.type = PERF_RECORD_EVENT_UPDATE;
+ ev->header.size = (u16)size;
+ ev->type = PERF_EVENT_UPDATE__CPUS;
+ ev->id = evsel->core.id[0];
+
+ cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
+ evsel->core.own_cpus, type, max);
+
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+}
+
+int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
+ perf_event__handler_t process)
+{
+ struct evsel *evsel;
+ int err = 0;
+
+ evlist__for_each_entry(evlist, evsel) {
+ err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
+ evsel->core.id, process);
+ if (err) {
+ pr_debug("failed to create perf header attribute\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static bool has_unit(struct evsel *evsel)
+{
+ return evsel->unit && *evsel->unit;
+}
+
+static bool has_scale(struct evsel *evsel)
+{
+ return evsel->scale != 1;
+}
+
+int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
+ perf_event__handler_t process, bool is_pipe)
+{
+ struct evsel *evsel;
+ int err;
+
+ /*
+ * Synthesize other event details not carried within the
+ * attr event - unit, scale, name.
+ */
+ evlist__for_each_entry(evsel_list, evsel) {
+ if (!evsel->supported)
+ continue;
+
+ /*
+ * Synthesize unit and scale only if it's defined.
+ */
+ if (has_unit(evsel)) {
+ err = perf_event__synthesize_event_update_unit(tool, evsel, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel unit.\n");
+ return err;
+ }
+ }
+
+ if (has_scale(evsel)) {
+ err = perf_event__synthesize_event_update_scale(tool, evsel, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel evsel.\n");
+ return err;
+ }
+ }
+
+ if (evsel->core.own_cpus) {
+ err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel cpus.\n");
+ return err;
+ }
+ }
+
+ /*
+ * Name is needed only for pipe output;
+ * perf.data carries event names.
+ */
+ if (is_pipe) {
+ err = perf_event__synthesize_event_update_name(tool, evsel, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel name.\n");
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
+ u32 ids, u64 *id, perf_event__handler_t process)
+{
+ union perf_event *ev;
+ size_t size;
+ int err;
+
+ size = sizeof(struct perf_event_attr);
+ size = PERF_ALIGN(size, sizeof(u64));
+ size += sizeof(struct perf_event_header);
+ size += ids * sizeof(u64);
+
+ ev = zalloc(size);
+
+ if (ev == NULL)
+ return -ENOMEM;
+
+ ev->attr.attr = *attr;
+ memcpy(ev->attr.id, id, ids * sizeof(u64));
+
+ ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
+ ev->attr.header.size = (u16)size;
+
+ if (ev->attr.header.size == size)
+ err = process(tool, ev, NULL, NULL);
+ else
+ err = -E2BIG;
+
+ free(ev);
+
+ return err;
+}
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
+ perf_event__handler_t process)
+{
+ union perf_event ev;
+ struct tracing_data *tdata;
+ ssize_t size = 0, aligned_size = 0, padding;
+ struct feat_fd ff;
+
+ /*
+ * We are going to store the size of the data followed
+ * by the data contents. Since the output fd is a pipe,
+ * we cannot seek back to store the size of the data once
+ * we know it. Instead we:
+ *
+ * - write the tracing data to the temp file
+ * - get/write the data size to pipe
+ * - write the tracing data from the temp file
+ * to the pipe
+ */
+ tdata = tracing_data_get(&evlist->core.entries, fd, true);
+ if (!tdata)
+ return -1;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
+ size = tdata->size;
+ aligned_size = PERF_ALIGN(size, sizeof(u64));
+ padding = aligned_size - size;
+ ev.tracing_data.header.size = sizeof(ev.tracing_data);
+ ev.tracing_data.size = aligned_size;
+
+ process(tool, &ev, NULL, NULL);
+
+ /*
+ * The put function will copy all the tracing data
+ * stored in the temp file to the pipe.
+ */
+ tracing_data_put(tdata);
+
+ ff = (struct feat_fd){ .fd = fd };
+ if (write_padded(&ff, NULL, 0, padding))
+ return -1;
+
+ return aligned_size;
+}
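
Since a pipe cannot be rewound to patch in a size after the fact, the announcing record carries the u64-aligned size up front and the payload is then padded to match. The framing arithmetic, standalone:

    #include <stdio.h>

    int main(void)
    {
            long size = 4093;                /* raw tracing data bytes */
            long aligned = (size + 7) & ~7L; /* PERF_ALIGN(size, sizeof(u64)) */

            printf("announce %ld, write %ld of payload + %ld of padding\n",
                   aligned, size, aligned - size);
            return 0;
    }
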
+
+int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
+ perf_event__handler_t process, struct machine *machine)
+{
+ union perf_event ev;
+ size_t len;
+
+ if (!pos->hit)
+ return 0;
+
+ memset(&ev, 0, sizeof(ev));
+
+ len = pos->long_name_len + 1;
+ len = PERF_ALIGN(len, NAME_ALIGN);
+ memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
+ ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
+ ev.build_id.header.misc = misc;
+ ev.build_id.pid = machine->pid;
+ ev.build_id.header.size = sizeof(ev.build_id) + len;
+ memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
+
+ return process(tool, &ev, NULL, machine);
+}
+
+int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
+ struct evlist *evlist, perf_event__handler_t process, bool attrs)
+{
+ int err;
+
+ if (attrs) {
+ err = perf_event__synthesize_attrs(tool, evlist, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize attrs.\n");
+ return err;
+ }
+ }
+
+ err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
+ if (err < 0) {
+ pr_err("Couldn't synthesize extra attrs.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_stat_config(tool, config, process, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize config.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+int __weak perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
+ struct perf_tool *tool __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return 0;
+}
+
+extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
+
+int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
+ struct evlist *evlist, perf_event__handler_t process)
+{
+ struct perf_header *header = &session->header;
+ struct perf_record_header_feature *fe;
+ struct feat_fd ff;
+ size_t sz, sz_hdr;
+ int feat, ret;
+
+ sz_hdr = sizeof(fe->header);
+ sz = sizeof(union perf_event);
+ /* get a nice alignment */
+ sz = PERF_ALIGN(sz, page_size);
+
+ memset(&ff, 0, sizeof(ff));
+
+ ff.buf = malloc(sz);
+ if (!ff.buf)
+ return -ENOMEM;
+
+ ff.size = sz - sz_hdr;
+ ff.ph = &session->header;
+
+ for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+ if (!feat_ops[feat].synthesize) {
+ pr_debug("No record header feature for header :%d\n", feat);
+ continue;
+ }
+
+ ff.offset = sizeof(*fe);
+
+ ret = feat_ops[feat].write(&ff, evlist);
+ if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
+ pr_debug("Error writing feature\n");
+ continue;
+ }
+ /* ff.buf may have changed due to realloc in do_write() */
+ fe = ff.buf;
+ memset(fe, 0, sizeof(*fe));
+
+ fe->feat_id = feat;
+ fe->header.type = PERF_RECORD_HEADER_FEATURE;
+ fe->header.size = ff.offset;
+
+ ret = process(tool, ff.buf, NULL, NULL);
+ if (ret) {
+ free(ff.buf);
+ return ret;
+ }
+ }
+
+ /* Send HEADER_LAST_FEATURE mark. */
+ fe = ff.buf;
+ fe->feat_id = HEADER_LAST_FEATURE;
+ fe->header.type = PERF_RECORD_HEADER_FEATURE;
+ fe->header.size = sizeof(*fe);
+
+ ret = process(tool, ff.buf, NULL, NULL);
+
+ free(ff.buf);
+ return ret;
+}
diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h
new file mode 100644
index 000000000000..baead0cdc381
--- /dev/null
+++ b/tools/perf/util/synthetic-events.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_SYNTHETIC_EVENTS_H
+#define __PERF_SYNTHETIC_EVENTS_H
+
+#include <stdbool.h>
+#include <sys/types.h> // pid_t
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct auxtrace_record;
+struct dso;
+struct evlist;
+struct evsel;
+struct machine;
+struct perf_counts_values;
+struct perf_cpu_map;
+struct perf_event_attr;
+struct perf_event_mmap_page;
+struct perf_sample;
+struct perf_session;
+struct perf_stat_config;
+struct perf_thread_map;
+struct perf_tool;
+struct record_opts;
+struct target;
+
+union perf_event;
+
+typedef int (*perf_event__handler_t)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+
+int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process);
+int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process);
+int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_cpu_map(struct perf_tool *tool, struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
+int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list, perf_event__handler_t process, bool is_pipe);
+int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session, struct evlist *evlist, perf_event__handler_t process);
+int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, struct evlist *evlist, struct machine *machine);
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, bool mmap_data);
+int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_namespaces(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, const struct perf_sample *sample);
+int perf_event__synthesize_stat_config(struct perf_tool *tool, struct perf_stat_config *config, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process, bool attrs);
+int perf_event__synthesize_stat_round(struct perf_tool *tool, u64 time, u64 type, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_stat(struct perf_tool *tool, u32 cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_thread_map2(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_thread_map(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, bool mmap_data);
+int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool mmap_data, unsigned int nr_threads_synthesize);
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist, perf_event__handler_t process);
+int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, struct perf_tool *tool, perf_event__handler_t process, struct machine *machine);
+pid_t perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine);
+
+int perf_tool__process_synth_event(struct perf_tool *tool, union perf_event *event, struct machine *machine, perf_event__handler_t process);
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format);
+
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+ struct target *target, struct perf_thread_map *threads,
+ perf_event__handler_t process, bool data_mmap,
+ unsigned int nr_threads_synthesize);
+int machine__synthesize_threads(struct machine *machine, struct target *target,
+ struct perf_thread_map *threads, bool data_mmap,
+ unsigned int nr_threads_synthesize);
+
+#ifdef HAVE_AUXTRACE_SUPPORT
+int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, struct perf_tool *tool,
+ struct perf_session *session, perf_event__handler_t process);
+
+#else // HAVE_AUXTRACE_SUPPORT
+
+#include <errno.h>
+
+static inline int
+perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
+ struct perf_tool *tool __maybe_unused,
+ struct perf_session *session __maybe_unused,
+ perf_event__handler_t process __maybe_unused)
+{
+ return -EINVAL;
+}
+#endif // HAVE_AUXTRACE_SUPPORT
+
+#ifdef HAVE_LIBBPF_SUPPORT
+int perf_event__synthesize_bpf_events(struct perf_session *session, perf_event__handler_t process,
+ struct machine *machine, struct record_opts *opts);
+#else // HAVE_LIBBPF_SUPPORT
+static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused,
+ struct record_opts *opts __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+#endif // __PERF_SYNTHETIC_EVENTS_H
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 565f7aef7e6c..a3db13dea937 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -6,8 +6,6 @@
*/
#include "target.h"
-#include "util.h"
-#include "debug.h"
#include <pwd.h>
#include <stdio.h>
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 51fb574998bb..3dce2de9d005 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -5,7 +5,6 @@
* Refactored from builtin-top.c, see that file for further copyright notes.
*/
-#include "cpumap.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
@@ -72,7 +71,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
}
if (top->evlist->core.nr_entries == 1) {
- struct evsel *first = perf_evlist__first(top->evlist);
+ struct evsel *first = evlist__first(top->evlist);
ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
(uint64_t)first->core.attr.sample_period,
opts->freq ? "Hz" : "");
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index d63d542b2cde..086e98ff42a3 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -2,7 +2,6 @@
/*
* Copyright (C) 2008,2009, Steven Rostedt <[email protected]>
*/
-#include "util.h"
#include <dirent.h>
#include <mntent.h>
#include <stdio.h>
@@ -19,6 +18,7 @@
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
+#include <internal/lib.h> // page_size
#include "trace-event.h"
#include <api/fs/tracing_path.h>
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index b6c0db068be0..8593d3c200c6 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -15,7 +15,6 @@
#include <unistd.h>
#include <errno.h>
-#include "util.h"
#include "trace-event.h"
#include "debug.h"
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index 01b9d89bf5bf..b3ee651e3d91 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -14,7 +14,6 @@
#include <api/fs/fs.h>
#include "trace-event.h"
#include "machine.h"
-#include "util.h"
/*
* global trace_event object used by trace_event__tp_format
diff --git a/tools/perf/util/tsc.h b/tools/perf/util/tsc.h
index e0c3af34ac8d..3c5a632ee57c 100644
--- a/tools/perf/util/tsc.h
+++ b/tools/perf/util/tsc.h
@@ -4,13 +4,12 @@
#include <linux/types.h>
-#include "event.h"
-
struct perf_tsc_conversion {
u16 time_shift;
u32 time_mult;
u64 time_zero;
};
+
struct perf_event_mmap_page;
int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
@@ -20,13 +19,4 @@ u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
u64 rdtsc(void);
-struct perf_event_mmap_page;
-struct perf_tool;
-struct machine;
-
-int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
- struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
-
-#endif
+#endif // __PERF_TSC_H
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 9ece188ae48a..15f6e46d7124 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -17,7 +17,6 @@
#include "event.h"
#include "perf_regs.h"
#include "callchain.h"
-#include "util.h"
static char *debuginfo_path;
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index ebdbb056510c..1800887b2255 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -37,7 +37,6 @@
#include "unwind.h"
#include "map.h"
#include "symbol.h"
-#include "util.h"
#include "debug.h"
#include "asm/bug.h"
#include "dso.h"
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
index 3949a60b00ae..196438ee4c9d 100644
--- a/tools/perf/util/usage.c
+++ b/tools/perf/util/usage.c
@@ -8,7 +8,6 @@
* Copyright (C) Linus Torvalds, 2005
*/
#include "util.h"
-#include "debug.h"
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 32322a20a68b..5eda6e19c947 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -2,9 +2,7 @@
#include "util.h"
#include "debug.h"
#include "event.h"
-#include "namespaces.h"
#include <api/fs/fs.h>
-#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <dirent.h>
@@ -41,8 +39,6 @@ void perf_set_multithreaded(void)
perf_singlethreaded = false;
}
-unsigned int page_size;
-
int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
@@ -234,138 +230,6 @@ out:
return list;
}
-static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi)
-{
- int err = -1;
- char *line = NULL;
- size_t n;
- FILE *from_fp, *to_fp;
- struct nscookie nsc;
-
- nsinfo__mountns_enter(nsi, &nsc);
- from_fp = fopen(from, "r");
- nsinfo__mountns_exit(&nsc);
- if (from_fp == NULL)
- goto out;
-
- to_fp = fopen(to, "w");
- if (to_fp == NULL)
- goto out_fclose_from;
-
- while (getline(&line, &n, from_fp) > 0)
- if (fputs(line, to_fp) == EOF)
- goto out_fclose_to;
- err = 0;
-out_fclose_to:
- fclose(to_fp);
- free(line);
-out_fclose_from:
- fclose(from_fp);
-out:
- return err;
-}
-
-int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
-{
- void *ptr;
- loff_t pgoff;
-
- pgoff = off_in & ~(page_size - 1);
- off_in -= pgoff;
-
- ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff);
- if (ptr == MAP_FAILED)
- return -1;
-
- while (size) {
- ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out);
- if (ret < 0 && errno == EINTR)
- continue;
- if (ret <= 0)
- break;
-
- size -= ret;
- off_in += ret;
- off_out += ret;
- }
- munmap(ptr, off_in + size);
-
- return size ? -1 : 0;
-}
-
-static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
- struct nsinfo *nsi)
-{
- int fromfd, tofd;
- struct stat st;
- int err;
- char *tmp = NULL, *ptr = NULL;
- struct nscookie nsc;
-
- nsinfo__mountns_enter(nsi, &nsc);
- err = stat(from, &st);
- nsinfo__mountns_exit(&nsc);
- if (err)
- goto out;
- err = -1;
-
- /* extra 'x' at the end is to reserve space for '.' */
- if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) {
- tmp = NULL;
- goto out;
- }
- ptr = strrchr(tmp, '/');
- if (!ptr)
- goto out;
- ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1);
- *ptr = '.';
-
- tofd = mkstemp(tmp);
- if (tofd < 0)
- goto out;
-
- if (fchmod(tofd, mode))
- goto out_close_to;
-
- if (st.st_size == 0) { /* /proc? do it slowly... */
- err = slow_copyfile(from, tmp, nsi);
- goto out_close_to;
- }
-
- nsinfo__mountns_enter(nsi, &nsc);
- fromfd = open(from, O_RDONLY);
- nsinfo__mountns_exit(&nsc);
- if (fromfd < 0)
- goto out_close_to;
-
- err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size);
-
- close(fromfd);
-out_close_to:
- close(tofd);
- if (!err)
- err = link(tmp, to);
- unlink(tmp);
-out:
- free(tmp);
- return err;
-}
-
-int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi)
-{
- return copyfile_mode_ns(from, to, 0755, nsi);
-}
-
-int copyfile_mode(const char *from, const char *to, mode_t mode)
-{
- return copyfile_mode_ns(from, to, mode, NULL);
-}
-
-int copyfile(const char *from, const char *to)
-{
- return copyfile_mode(from, to, 0755);
-}
-
size_t hex_width(u64 v)
{
size_t n = 1;
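
The copyfile helpers deleted here leave util.c (their declarations leave util.h below), evidently relocated elsewhere in this series since perf still has callers. For reference, the heart of the removed copyfile_offset() is an mmap-the-source, pwrite-the-destination loop; the following is a condensed, self-contained sketch of that pattern, substituting sysconf() for the removed page_size global:

#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <stdint.h>

/* Sketch: copy 'size' bytes from ifd@off_in to ofd@off_out by mapping
 * the source read-only and pwrite()ing it out, as the removed
 * copyfile_offset() did. The mmap offset must be page-aligned, so the
 * sub-page remainder of off_in is carried as an offset into the map. */
static int copy_offset_sketch(int ifd, off_t off_in, int ofd,
			      off_t off_out, uint64_t size)
{
	long psz = sysconf(_SC_PAGESIZE);
	off_t pgoff = off_in & ~((off_t)psz - 1);
	void *ptr;

	off_in -= pgoff;
	ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff);
	if (ptr == MAP_FAILED)
		return -1;

	while (size) {
		ssize_t ret = pwrite(ofd, (char *)ptr + off_in, size, off_out);

		if (ret < 0 && errno == EINTR)
			continue;
		if (ret <= 0)
			break;
		size -= ret;	/* off_in + size stays the mapping length */
		off_in += ret;
		off_out += ret;
	}
	munmap(ptr, off_in + size);
	return size ? -1 : 0;
}
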
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 45a5c6f20197..9969b8b46f7c 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -11,14 +11,12 @@
#include <stddef.h>
#include <linux/compiler.h>
#include <sys/types.h>
-#include <internal/lib.h>
/* General helper functions */
void usage(const char *err) __noreturn;
void die(const char *err, ...) __noreturn __printf(1, 2);
struct dirent;
-struct nsinfo;
struct strlist;
int mkdir_p(char *path, mode_t mode);
@@ -26,15 +24,9 @@ int rm_rf(const char *path);
int rm_rf_perf_data(const char *path);
struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *));
bool lsdir_no_dot_filter(const char *name, struct dirent *d);
-int copyfile(const char *from, const char *to);
-int copyfile_mode(const char *from, const char *to, mode_t mode);
-int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
-int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size);
size_t hex_width(u64 v);
-extern unsigned int page_size;
-
int sysctl__max_stack(void);
int fetch_kernel_version(unsigned int *puint,
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index e5e6599603f4..ba4b4395f35d 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -11,7 +11,7 @@
#include "vdso.h"
#include "dso.h"
-#include "util.h"
+#include <internal/lib.h>
#include "map.h"
#include "symbol.h"
#include "machine.h"
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
index 59d456f716e9..78d2297c1b67 100644
--- a/tools/perf/util/zlib.c
+++ b/tools/perf/util/zlib.c
@@ -7,11 +7,9 @@
#include <sys/mman.h>
#include <zlib.h>
#include <linux/compiler.h>
+#include <internal/lib.h>
#include "util/compress.h"
-#include "util/util.h"
-#include "util/debug.h"
-
#define CHUNK_SIZE 16384
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 59753b3917bb..2a9890c8395a 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -38,6 +38,7 @@ static int fact_avx = 0xFF;
static unsigned long long fact_trl;
static int out_format_json;
static int cmd_help;
+static int force_online_offline;
/* clos related */
static int current_clos = -1;
@@ -138,14 +139,14 @@ int out_format_is_json(void)
int get_physical_package_id(int cpu)
{
return parse_int_file(
- 1, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id",
+ 0, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id",
cpu);
}
int get_physical_core_id(int cpu)
{
return parse_int_file(
- 1, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
+ 0, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
}
int get_physical_die_id(int cpu)
@@ -165,6 +166,26 @@ int get_topo_max_cpus(void)
return topo_max_cpus;
}
+static void set_cpu_online_offline(int cpu, int state)
+{
+ char buffer[128];
+ int fd;
+
+ snprintf(buffer, sizeof(buffer),
+ "/sys/devices/system/cpu/cpu%d/online", cpu);
+
+ fd = open(buffer, O_WRONLY);
+ if (fd < 0)
+ err(-1, "%s open failed", buffer);
+
+ if (state)
+ write(fd, "1\n", 2);
+ else
+ write(fd, "0\n", 2);
+
+ close(fd);
+}
+
#define MAX_PACKAGE_COUNT 8
#define MAX_DIE_PER_PACKAGE 2
static void for_each_online_package_in_set(void (*callback)(int, void *, void *,
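
set_cpu_online_offline() above drives the standard CPU-hotplug sysfs attribute, writing "1" or "0" to /sys/devices/system/cpu/cpuN/online. A standalone variant of the same pattern that also checks the write() result, which the version above ignores, might look like this (the helper name is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch: toggle CPU hotplug state via sysfs, same interface as
 * set_cpu_online_offline() above, but with the write result checked. */
static int cpu_set_online_sketch(int cpu, int online)
{
	char path[128];
	int fd, ret;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, online ? "1\n" : "0\n", 2) == 2 ? 0 : -1;
	close(fd);
	return ret;
}
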
@@ -402,6 +423,9 @@ void set_cpu_mask_from_punit_coremask(int cpu, unsigned long long core_mask,
int j;
for (j = 0; j < topo_max_cpus; ++j) {
+ if (!CPU_ISSET_S(j, present_cpumask_size, present_cpumask))
+ continue;
+
if (cpu_map[j].pkg_id == pkg_id &&
cpu_map[j].die_id == die_id &&
cpu_map[j].punit_cpu_core == i) {
@@ -484,7 +508,7 @@ int isst_send_mbox_command(unsigned int cpu, unsigned char command,
int write = 0;
int clos_id, core_id, ret = 0;
- debug_printf("CLOS %d\n", cpu);
+ debug_printf("CPU %d\n", cpu);
if (parameter & BIT(MBOX_CMD_WRITE_BIT)) {
value = req_data;
@@ -649,8 +673,8 @@ static void exec_on_get_ctdp_cpu(int cpu, void *arg1, void *arg2, void *arg3,
if (ret)
perror("get_tdp_*");
else
- isst_display_result(cpu, outf, "perf-profile", (char *)arg3,
- *(unsigned int *)arg4);
+ isst_ctdp_display_core_info(cpu, outf, arg3,
+ *(unsigned int *)arg4);
}
#define _get_tdp_level(desc, suffix, object, help) \
@@ -733,9 +757,34 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
ret = isst_set_tdp_level(cpu, tdp_level);
if (ret)
perror("set_tdp_level_for_cpu");
- else
+ else {
isst_display_result(cpu, outf, "perf-profile", "set_tdp_level",
ret);
+ if (force_online_offline) {
+ struct isst_pkg_ctdp_level_info ctdp_level;
+ int pkg_id = get_physical_package_id(cpu);
+ int die_id = get_physical_die_id(cpu);
+
+ fprintf(stderr, "Option is set to online/offline\n");
+ ctdp_level.core_cpumask_size =
+ alloc_cpu_set(&ctdp_level.core_cpumask);
+ isst_get_coremask_info(cpu, tdp_level, &ctdp_level);
+ if (ctdp_level.cpu_count) {
+ int i, max_cpus = get_topo_max_cpus();
+ for (i = 0; i < max_cpus; ++i) {
+ if (pkg_id != get_physical_package_id(i) || die_id != get_physical_die_id(i))
+ continue;
+ if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) {
+ fprintf(stderr, "online cpu %d\n", i);
+ set_cpu_online_offline(i, 1);
+ } else {
+ fprintf(stderr, "offline cpu %d\n", i);
+ set_cpu_online_offline(i, 0);
+ }
+ }
+ }
+ }
+ }
}
static void set_tdp_level(void)
@@ -744,6 +793,8 @@ static void set_tdp_level(void)
fprintf(stderr, "Set Config TDP level\n");
fprintf(stderr,
"\t Arguments: -l|--level : Specify tdp level\n");
+ fprintf(stderr,
+ "\t Optional Arguments: -o | online : online/offline for the tdp level\n");
exit(0);
}
@@ -1082,6 +1133,40 @@ static void dump_clos_config(void)
isst_ctdp_display_information_end(outf);
}
+static void get_clos_info_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int enable, ret, prio_type;
+
+ ret = isst_clos_get_clos_information(cpu, &enable, &prio_type);
+ if (ret)
+ perror("isst_clos_get_info");
+ else
+ isst_clos_display_clos_information(cpu, outf, enable, prio_type);
+}
+
+static void dump_clos_info(void)
+{
+ if (cmd_help) {
+ fprintf(stderr,
+ "Print Intel Speed Select Technology core power information\n");
+ fprintf(stderr, "\tSpecify targeted cpu id with [--cpu|-c]\n");
+ exit(0);
+ }
+
+ if (!max_target_cpus) {
+ fprintf(stderr,
+ "Invalid target cpu. Specify with [-c|--cpu]\n");
+ exit(0);
+ }
+
+ isst_ctdp_display_information_start(outf);
+ for_each_online_target_cpu_in_set(get_clos_info_for_cpu, NULL,
+ NULL, NULL, NULL);
+ isst_ctdp_display_information_end(outf);
+}
+
static void set_clos_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
void *arg4)
{
@@ -1198,7 +1283,7 @@ static void get_clos_assoc_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
if (ret)
perror("isst_clos_get_assoc_status");
else
- isst_display_result(cpu, outf, "core-power", "get-assoc", clos);
+ isst_clos_display_assoc_information(cpu, outf, clos);
}
static void get_clos_assoc(void)
@@ -1208,13 +1293,17 @@ static void get_clos_assoc(void)
fprintf(stderr, "\tSpecify targeted cpu id with [--cpu|-c]\n");
exit(0);
}
- if (max_target_cpus)
- for_each_online_target_cpu_in_set(get_clos_assoc_for_cpu, NULL,
- NULL, NULL, NULL);
- else {
+
+ if (!max_target_cpus) {
fprintf(stderr,
"Invalid target cpu. Specify with [-c|--cpu]\n");
+ exit(0);
}
+
+ isst_ctdp_display_information_start(outf);
+ for_each_online_target_cpu_in_set(get_clos_assoc_for_cpu, NULL,
+ NULL, NULL, NULL);
+ isst_ctdp_display_information_end(outf);
}
static struct process_cmd_struct isst_cmds[] = {
@@ -1231,10 +1320,11 @@ static struct process_cmd_struct isst_cmds[] = {
{ "turbo-freq", "info", dump_fact_config },
{ "turbo-freq", "enable", set_fact_enable },
{ "turbo-freq", "disable", set_fact_disable },
- { "core-power", "info", dump_clos_config },
+ { "core-power", "info", dump_clos_info },
{ "core-power", "enable", set_clos_enable },
{ "core-power", "disable", set_clos_disable },
{ "core-power", "config", set_clos_config },
+ { "core-power", "get-config", dump_clos_config },
{ "core-power", "assoc", set_clos_assoc },
{ "core-power", "get-assoc", get_clos_assoc },
{ NULL, NULL, NULL }
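
The isst_cmds[] table gains a get-config verb and reroutes core-power info to the new dump_clos_info(). For reference, consuming such a NULL-terminated feature/verb table is a simple linear scan; a hedged sketch of the pattern follows (names illustrative, not the tool's actual dispatcher):

#include <stdio.h>
#include <string.h>

/* Sketch: linear dispatch over a NULL-terminated feature/command
 * table like isst_cmds[] above. */
struct cmd_entry_sketch {
	const char *feature;
	const char *command;
	void (*handler)(void);
};

static int dispatch_sketch(const struct cmd_entry_sketch *tbl,
			   const char *feature, const char *command)
{
	for (; tbl->feature; tbl++) {
		if (!strcmp(tbl->feature, feature) &&
		    !strcmp(tbl->command, command)) {
			tbl->handler();
			return 0;
		}
	}
	fprintf(stderr, "unknown command: %s %s\n", feature, command);
	return -1;
}
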
@@ -1316,6 +1406,7 @@ static void parse_cmd_args(int argc, int start, char **argv)
static struct option long_options[] = {
{ "bucket", required_argument, 0, 'b' },
{ "level", required_argument, 0, 'l' },
+ { "online", required_argument, 0, 'o' },
{ "trl-type", required_argument, 0, 'r' },
{ "trl", required_argument, 0, 't' },
{ "help", no_argument, 0, 'h' },
@@ -1332,7 +1423,7 @@ static void parse_cmd_args(int argc, int start, char **argv)
option_index = start;
optind = start + 1;
- while ((opt = getopt_long(argc, argv, "b:l:t:c:d:e:n:m:p:w:h",
+ while ((opt = getopt_long(argc, argv, "b:l:t:c:d:e:n:m:p:w:ho",
long_options, &option_index)) != -1) {
switch (opt) {
case 'b':
@@ -1344,6 +1435,9 @@ static void parse_cmd_args(int argc, int start, char **argv)
case 'l':
tdp_level = atoi(optarg);
break;
+ case 'o':
+ force_online_offline = 1;
+ break;
case 't':
sscanf(optarg, "0x%llx", &fact_trl);
break;
@@ -1362,7 +1456,6 @@ static void parse_cmd_args(int argc, int start, char **argv)
/* CLOS related */
case 'c':
current_clos = atoi(optarg);
- printf("clos %d\n", current_clos);
break;
case 'd':
clos_desired = atoi(optarg);
@@ -1433,6 +1526,7 @@ static void core_power_help(void)
printf("\tenable\n");
printf("\tdisable\n");
printf("\tconfig\n");
+ printf("\tget-config\n");
printf("\tassoc\n");
printf("\tget-assoc\n");
}
diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
index 0bf341ad9697..6dee5332c9d3 100644
--- a/tools/power/x86/intel-speed-select/isst-core.c
+++ b/tools/power/x86/intel-speed-select/isst-core.c
@@ -619,6 +619,31 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
return 0;
}
+int isst_clos_get_clos_information(int cpu, int *enable, int *type)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
+ &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d CLOS_PM_QOS_CONFIG resp:%x\n", cpu, resp);
+
+ if (resp & BIT(1))
+ *enable = 1;
+ else
+ *enable = 0;
+
+ if (resp & BIT(2))
+ *type = 1;
+ else
+ *type = 0;
+
+ return 0;
+}
+
int isst_pm_qos_config(int cpu, int enable_clos, int priority_type)
{
unsigned int req, resp;
diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
index df4aa99c4e92..40346d534f78 100644
--- a/tools/power/x86/intel-speed-select/isst-display.c
+++ b/tools/power/x86/intel-speed-select/isst-display.c
@@ -287,6 +287,26 @@ static void _isst_fact_display_information(int cpu, FILE *outf, int level,
format_and_print(outf, base_level + 2, header, value);
}
+void isst_ctdp_display_core_info(int cpu, FILE *outf, char *prefix,
+ unsigned int val)
+{
+ char header[256];
+ char value[256];
+
+ snprintf(header, sizeof(header), "package-%d",
+ get_physical_package_id(cpu));
+ format_and_print(outf, 1, header, NULL);
+ snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
+ format_and_print(outf, 2, header, NULL);
+ snprintf(header, sizeof(header), "cpu-%d", cpu);
+ format_and_print(outf, 3, header, NULL);
+
+ snprintf(value, sizeof(value), "%u", val);
+ format_and_print(outf, 4, prefix, value);
+
+ format_and_print(outf, 1, NULL, NULL);
+}
+
void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
struct isst_pkg_ctdp *pkg_dev)
{
@@ -503,6 +523,57 @@ void isst_clos_display_information(int cpu, FILE *outf, int clos,
format_and_print(outf, 1, NULL, NULL);
}
+void isst_clos_display_clos_information(int cpu, FILE *outf,
+ int clos_enable, int type)
+{
+ char header[256];
+ char value[256];
+
+ snprintf(header, sizeof(header), "package-%d",
+ get_physical_package_id(cpu));
+ format_and_print(outf, 1, header, NULL);
+ snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
+ format_and_print(outf, 2, header, NULL);
+ snprintf(header, sizeof(header), "cpu-%d", cpu);
+ format_and_print(outf, 3, header, NULL);
+
+ snprintf(header, sizeof(header), "core-power");
+ format_and_print(outf, 4, header, NULL);
+
+ snprintf(header, sizeof(header), "enable-status");
+ snprintf(value, sizeof(value), "%d", clos_enable);
+ format_and_print(outf, 5, header, value);
+
+ snprintf(header, sizeof(header), "priority-type");
+ snprintf(value, sizeof(value), "%d", type);
+ format_and_print(outf, 5, header, value);
+
+ format_and_print(outf, 1, NULL, NULL);
+}
+
+void isst_clos_display_assoc_information(int cpu, FILE *outf, int clos)
+{
+ char header[256];
+ char value[256];
+
+ snprintf(header, sizeof(header), "package-%d",
+ get_physical_package_id(cpu));
+ format_and_print(outf, 1, header, NULL);
+ snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
+ format_and_print(outf, 2, header, NULL);
+ snprintf(header, sizeof(header), "cpu-%d", cpu);
+ format_and_print(outf, 3, header, NULL);
+
+ snprintf(header, sizeof(header), "get-assoc");
+ format_and_print(outf, 4, header, NULL);
+
+ snprintf(header, sizeof(header), "clos");
+ snprintf(value, sizeof(value), "%d", clos);
+ format_and_print(outf, 5, header, value);
+
+ format_and_print(outf, 1, NULL, NULL);
+}
+
void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
int result)
{
diff --git a/tools/power/x86/intel-speed-select/isst.h b/tools/power/x86/intel-speed-select/isst.h
index 2f7f62765eb6..d280b27d600d 100644
--- a/tools/power/x86/intel-speed-select/isst.h
+++ b/tools/power/x86/intel-speed-select/isst.h
@@ -187,12 +187,16 @@ extern int isst_send_msr_command(unsigned int cpu, unsigned int command,
int write, unsigned long long *req_resp);
extern int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev);
+extern int isst_get_coremask_info(int cpu, int config_index,
+ struct isst_pkg_ctdp_level_info *ctdp_level);
extern int isst_get_process_ctdp(int cpu, int tdp_level,
struct isst_pkg_ctdp *pkg_dev);
extern void isst_get_process_ctdp_complete(int cpu,
struct isst_pkg_ctdp *pkg_dev);
extern void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
struct isst_pkg_ctdp *pkg_dev);
+extern void isst_ctdp_display_core_info(int cpu, FILE *outf, char *prefix,
+ unsigned int val);
extern void isst_ctdp_display_information_start(FILE *outf);
extern void isst_ctdp_display_information_end(FILE *outf);
extern void isst_pbf_display_information(int cpu, FILE *outf, int level,
@@ -223,10 +227,14 @@ extern int isst_clos_associate(int cpu, int clos);
extern int isst_clos_get_assoc_status(int cpu, int *clos_id);
extern void isst_clos_display_information(int cpu, FILE *outf, int clos,
struct isst_clos_config *clos_config);
-
+extern void isst_clos_display_assoc_information(int cpu, FILE *outf, int clos);
extern int isst_read_reg(unsigned short reg, unsigned int *val);
extern int isst_write_reg(int reg, unsigned int val);
extern void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
int result);
+
+extern int isst_clos_get_clos_information(int cpu, int *enable, int *type);
+extern void isst_clos_display_clos_information(int cpu, FILE *outf,
+ int clos_enable, int type);
#endif
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
index 8059ce834247..61df01cdf0b2 100644
--- a/tools/testing/selftests/.gitignore
+++ b/tools/testing/selftests/.gitignore
@@ -2,3 +2,5 @@ gpiogpio-event-mon
gpiogpio-hammer
gpioinclude/
gpiolsgpio
+tpm2/SpaceTest.log
+tpm2/*.pyc
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 25b43a8c2b15..c3feccb99ff5 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -126,9 +126,9 @@ endif
# in the default INSTALL_HDR_PATH usr/include.
khdr:
ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
- make --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
+ $(MAKE) --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
else
- make --no-builtin-rules INSTALL_HDR_PATH=$$BUILD/usr \
+ $(MAKE) --no-builtin-rules INSTALL_HDR_PATH=$$BUILD/usr \
ARCH=$(ARCH) -C $(top_srcdir) headers_install
endif
@@ -136,35 +136,35 @@ all: khdr
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
mkdir $$BUILD_TARGET -p; \
- make OUTPUT=$$BUILD_TARGET -C $$TARGET;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET;\
done;
run_tests: all
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- make OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
done;
hotplug:
@for TARGET in $(TARGETS_HOTPLUG); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- make OUTPUT=$$BUILD_TARGET -C $$TARGET;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET;\
done;
run_hotplug: hotplug
@for TARGET in $(TARGETS_HOTPLUG); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- make OUTPUT=$$BUILD_TARGET -C $$TARGET run_full_test;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_full_test;\
done;
clean_hotplug:
@for TARGET in $(TARGETS_HOTPLUG); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
done;
run_pstore_crash:
- make -C pstore run_crash
+ $(MAKE) -C pstore run_crash
# Use $BUILD as the default install root. $BUILD points to the
# right output location for the following cases:
@@ -184,7 +184,7 @@ ifdef INSTALL_PATH
install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- make OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
done;
@# Ask all targets to emit their test scripts
@@ -203,7 +203,7 @@ ifdef INSTALL_PATH
echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
echo "cd $$TARGET" >> $(ALL_SCRIPT); \
echo -n "run_many" >> $(ALL_SCRIPT); \
- make -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
+ $(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
echo "" >> $(ALL_SCRIPT); \
echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
done;
@@ -216,7 +216,7 @@ endif
clean:
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
done;
.PHONY: khdr all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
index 7717c0a09686..ac738500d17f 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
@@ -28,7 +28,7 @@ if [ -z "$FEATURE" ]; then
exit_unsupported
fi
-echo "Test snapshot tigger"
+echo "Test snapshot trigger"
echo 0 > snapshot
echo 1 > events/sched/sched_process_fork/enable
( echo "forked")
diff --git a/tools/testing/selftests/livepatch/config b/tools/testing/selftests/livepatch/config
index 0dd7700464a8..ad23100cb27c 100644
--- a/tools/testing/selftests/livepatch/config
+++ b/tools/testing/selftests/livepatch/config
@@ -1 +1,3 @@
+CONFIG_LIVEPATCH=y
+CONFIG_DYNAMIC_DEBUG=y
CONFIG_TEST_LIVEPATCH=m
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 6ef7f16c4cf5..7f8b5c8982e3 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -199,6 +199,11 @@ struct seccomp_notif_sizes {
};
#endif
+#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY
+#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1
+#define PTRACE_EVENTMSG_SYSCALL_EXIT 2
+#endif
+
#ifndef seccomp
int seccomp(unsigned int op, unsigned int flags, void *args)
{
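
The guarded defines added above supply the two PTRACE_EVENTMSG_SYSCALL_* values on systems whose uapi headers predate them; at a PTRACE_SYSCALL stop they are what PTRACE_GETEVENTMSG hands back. A minimal hedged sketch of querying them (the helper name is illustrative):

#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY
#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1
#define PTRACE_EVENTMSG_SYSCALL_EXIT  2
#endif

/* Sketch: after a PTRACE_SYSCALL stop, PTRACE_GETEVENTMSG tells the
 * tracer whether the tracee is at syscall entry or exit. */
static int syscall_stop_is_entry(pid_t pid)
{
	unsigned long msg = 0;

	if (ptrace(PTRACE_GETEVENTMSG, pid, NULL, &msg) < 0)
		return -1;
	return msg == PTRACE_EVENTMSG_SYSCALL_ENTRY;
}
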
diff --git a/tools/testing/selftests/tpm2/Makefile b/tools/testing/selftests/tpm2/Makefile
index 9dd848427a7b..1a5db1eb8ed5 100644
--- a/tools/testing/selftests/tpm2/Makefile
+++ b/tools/testing/selftests/tpm2/Makefile
@@ -2,3 +2,4 @@
include ../lib.mk
TEST_PROGS := test_smoke.sh test_space.sh
+TEST_PROGS_EXTENDED := tpm2.py tpm2_tests.py
diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c
index c2333c78cf04..afff120c7be6 100644
--- a/tools/testing/selftests/watchdog/watchdog-test.c
+++ b/tools/testing/selftests/watchdog/watchdog-test.c
@@ -19,7 +19,7 @@
int fd;
const char v = 'V';
-static const char sopts[] = "bdehp:t:Tn:NL";
+static const char sopts[] = "bdehp:t:Tn:NLf:";
static const struct option lopts[] = {
{"bootstatus", no_argument, NULL, 'b'},
{"disable", no_argument, NULL, 'd'},
@@ -31,6 +31,7 @@ static const struct option lopts[] = {
{"pretimeout", required_argument, NULL, 'n'},
{"getpretimeout", no_argument, NULL, 'N'},
{"gettimeleft", no_argument, NULL, 'L'},
+ {"file", required_argument, NULL, 'f'},
{NULL, no_argument, NULL, 0x0}
};
@@ -69,16 +70,19 @@ static void term(int sig)
static void usage(char *progname)
{
printf("Usage: %s [options]\n", progname);
- printf(" -b, --bootstatus Get last boot status (Watchdog/POR)\n");
- printf(" -d, --disable Turn off the watchdog timer\n");
- printf(" -e, --enable Turn on the watchdog timer\n");
- printf(" -h, --help Print the help message\n");
- printf(" -p, --pingrate=P Set ping rate to P seconds (default %d)\n", DEFAULT_PING_RATE);
- printf(" -t, --timeout=T Set timeout to T seconds\n");
- printf(" -T, --gettimeout Get the timeout\n");
- printf(" -n, --pretimeout=T Set the pretimeout to T seconds\n");
- printf(" -N, --getpretimeout Get the pretimeout\n");
- printf(" -L, --gettimeleft Get the time left until timer expires\n");
+ printf(" -f, --file\t\tOpen watchdog device file\n");
+ printf("\t\t\tDefault is /dev/watchdog\n");
+ printf(" -b, --bootstatus\tGet last boot status (Watchdog/POR)\n");
+ printf(" -d, --disable\t\tTurn off the watchdog timer\n");
+ printf(" -e, --enable\t\tTurn on the watchdog timer\n");
+ printf(" -h, --help\t\tPrint the help message\n");
+ printf(" -p, --pingrate=P\tSet ping rate to P seconds (default %d)\n",
+ DEFAULT_PING_RATE);
+ printf(" -t, --timeout=T\tSet timeout to T seconds\n");
+ printf(" -T, --gettimeout\tGet the timeout\n");
+ printf(" -n, --pretimeout=T\tSet the pretimeout to T seconds\n");
+ printf(" -N, --getpretimeout\tGet the pretimeout\n");
+ printf(" -L, --gettimeleft\tGet the time left until timer expires\n");
printf("\n");
printf("Parameters are parsed left-to-right in real-time.\n");
printf("Example: %s -d -t 10 -p 5 -e\n", progname);
@@ -92,14 +96,20 @@ int main(int argc, char *argv[])
int ret;
int c;
int oneshot = 0;
+ char *file = "/dev/watchdog";
setbuf(stdout, NULL);
- fd = open("/dev/watchdog", O_WRONLY);
+ while ((c = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) {
+ if (c == 'f')
+ file = optarg;
+ }
+
+ fd = open(file, O_WRONLY);
if (fd == -1) {
if (errno == ENOENT)
- printf("Watchdog device not enabled.\n");
+ printf("Watchdog device (%s) not found.\n", file);
else if (errno == EACCES)
printf("Run watchdog as root.\n");
else
@@ -108,6 +118,8 @@ int main(int argc, char *argv[])
exit(-1);
}
+ optind = 0;
+
while ((c = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) {
switch (c) {
case 'b':
@@ -190,6 +202,9 @@ int main(int argc, char *argv[])
else
printf("WDIOC_GETTIMELEFT error '%s'\n", strerror(errno));
break;
+ case 'f':
+ /* Handled above */
+ break;
default:
usage(argv[0]);
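
The watchdog-test changes scan the options twice: a first getopt_long() pass only picks out -f so the right device node can be opened, then optind is reset and the full pass runs against an open fd. Note that optind = 0, not 1, is what asks glibc to fully reinitialize its scanner. A condensed sketch of the idiom, assuming glibc getopt (names illustrative):

#include <getopt.h>
#include <stdio.h>

/* Sketch: two-pass getopt_long() scan. Pass 1 extracts only the
 * option needed up front (-f); resetting optind to 0 lets pass 2
 * see every option again. */
static void two_pass_scan_sketch(int argc, char *argv[])
{
	static const struct option lopts[] = {
		{ "file", required_argument, NULL, 'f' },
		{ NULL, 0, NULL, 0 },
	};
	const char *file = "/dev/watchdog";
	int c;

	while ((c = getopt_long(argc, argv, "f:", lopts, NULL)) != -1)
		if (c == 'f')
			file = optarg;

	printf("using %s\n", file);	/* open(file, ...) would go here */
	optind = 0;			/* glibc-specific rescan request */

	while ((c = getopt_long(argc, argv, "f:", lopts, NULL)) != -1) {
		/* handle all options; 'f' was already consumed above */
	}
}
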
diff --git a/usr/Makefile b/usr/Makefile
index 6a89eb019275..e6f7cb2f81db 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -11,6 +11,9 @@ datafile_y = initramfs_data.cpio$(suffix_y)
datafile_d_y = .$(datafile_y).d
AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
+# The clean rules cannot see CONFIG_INITRAMFS_COMPRESSION, so clean up the
+# artifacts for all possible compression formats.
+clean-files += initramfs_data.cpio*
# Generate builtin.o based on initramfs_data.o
obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o