-rw-r--r-- Documentation/devicetree/bindings/clock/idt,versaclock5.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/display/brcm,bcm2835-vec.yaml | 4
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/google,cros-ec-anx7688.yaml | 82
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml | 159
-rw-r--r-- Documentation/devicetree/bindings/display/faraday,tve200.txt | 54
-rw-r--r-- Documentation/devicetree/bindings/display/faraday,tve200.yaml | 68
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/input/input.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/leds/leds-bcm6328.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/leds/leds-bcm6358.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,drif.yaml | 20
-rw-r--r-- Documentation/devicetree/bindings/net/qcom,ipa.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/nvmem/mtk-efuse.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml | 2
-rw-r--r-- Documentation/driver-api/dma-buf.rst | 9
-rw-r--r-- Documentation/gpu/driver-uapi.rst | 8
-rw-r--r-- Documentation/gpu/drm-mm.rst | 4
-rw-r--r-- Documentation/gpu/index.rst | 1
-rw-r--r-- Documentation/gpu/rfc/i915_gem_lmem.rst | 131
-rw-r--r-- Documentation/gpu/rfc/index.rst | 4
-rw-r--r-- Documentation/powerpc/syscall64-abi.rst | 10
-rw-r--r-- MAINTAINERS | 12
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/alpha/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- arch/arm/mach-npcm/Kconfig | 1
-rw-r--r-- arch/arm/mach-pxa/pxa_cplds_irqs.c | 7
-rw-r--r-- arch/arm/tools/syscall.tbl | 2
-rw-r--r-- arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774a1.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774b1.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts | 2
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774c0.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774e1.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77950.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77951.dtsi | 12
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77960.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77961.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77965.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77970.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77980.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts | 2
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77990.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/renesas/salvator-common.dtsi | 3
-rw-r--r-- arch/arm64/include/asm/unistd32.h | 3
-rw-r--r-- arch/ia64/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- arch/m68k/kernel/signal.c | 3
-rw-r--r-- arch/m68k/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- arch/microblaze/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- arch/mips/kernel/syscalls/syscall_n32.tbl | 2
-rw-r--r-- arch/mips/kernel/syscalls/syscall_n64.tbl | 2
-rw-r--r-- arch/mips/kernel/syscalls/syscall_o32.tbl | 2
-rw-r--r-- arch/openrisc/include/asm/barrier.h | 9
-rw-r--r-- arch/openrisc/kernel/setup.c | 2
-rw-r--r-- arch/openrisc/mm/init.c | 6
-rw-r--r-- arch/parisc/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- arch/powerpc/include/asm/ptrace.h | 45
-rw-r--r-- arch/powerpc/include/asm/syscall.h | 42
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 4
-rw-r--r-- arch/powerpc/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- arch/s390/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- arch/sh/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- arch/sparc/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- arch/x86/Makefile | 12
-rw-r--r-- arch/x86/entry/syscalls/syscall_32.tbl | 2
-rw-r--r-- arch/x86/entry/syscalls/syscall_64.tbl | 2
-rw-r--r-- arch/x86/events/core.c | 6
-rw-r--r-- arch/x86/events/intel/core.c | 2
-rw-r--r-- arch/x86/events/intel/lbr.c | 26
-rw-r--r-- arch/x86/events/perf_event.h | 6
-rw-r--r-- arch/x86/kernel/sev-shared.c | 1
-rw-r--r-- arch/x86/kernel/sev.c | 136
-rw-r--r-- arch/x86/kernel/signal_compat.c | 9
-rw-r--r-- arch/x86/xen/enlighten_pv.c | 8
-rw-r--r-- arch/xtensa/kernel/syscalls/syscall.tbl | 2
-rw-r--r-- block/genhd.c | 11
-rw-r--r-- drivers/android/binder.c | 2
-rw-r--r-- drivers/cdrom/gdrom.c | 13
-rw-r--r-- drivers/char/hpet.c | 2
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 1
-rw-r--r-- drivers/dma-buf/dma-buf.c | 41
-rw-r--r-- drivers/dma-buf/dma-resv.c | 211
-rw-r--r-- drivers/dma/qcom/hidma_mgmt.c | 17
-rw-r--r-- drivers/firmware/arm_scmi/notify.h | 2
-rw-r--r-- drivers/firmware/arm_scpi.c | 4
-rw-r--r-- drivers/gpio/gpio-cadence.c | 1
-rw-r--r-- drivers/gpio/gpio-tegra186.c | 11
-rw-r--r-- drivers/gpio/gpio-xilinx.c | 2
-rw-r--r-- drivers/gpu/drm/Kconfig | 16
-rw-r--r-- drivers/gpu/drm/Makefile | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aldebaran.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 55
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 388
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 169
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 61
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 60
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 121
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 146
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c | 195
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 68
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 64
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 202
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 39
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 195
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 111
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atom.c | 172
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atom.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_ih.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cz_ih.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 119
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 47
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 93
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_ih.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 87
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 68
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 240
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 63
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 309
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 577
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 70
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/transform.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/include/gpio_service_interface.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/include/link_service_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 3
-rw-r--r-- drivers/gpu/drm/amd/include/aldebaran_ip_offset.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/amd_acpi.h | 18
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/atombios.h | 10
-rw-r--r-- drivers/gpu/drm/amd/include/atomfirmware.h | 17
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 69
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu_v13_0.h | 6
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 21
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 26
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 46
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 85
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 14
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 33
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 61
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 5
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig | 23
-rw-r--r-- drivers/gpu/drm/bridge/Makefile | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.c | 181
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.h | 1
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 6
-rw-r--r-- drivers/gpu/drm/bridge/cdns-dsi.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/cros-ec-anx7688.c | 191
-rw-r--r-- drivers/gpu/drm/bridge/ite-it66121.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt8912b.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 709
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 4
-rw-r--r-- drivers/gpu/drm/drm_bufs.c | 11
-rw-r--r-- drivers/gpu/drm/drm_cache.c | 148
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 8
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_cma_helper.c | 46
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fourcc.c | 29
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 7
-rw-r--r-- drivers/gpu/drm/drm_gem_atomic_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem_cma_helper.c | 44
-rw-r--r-- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 7
-rw-r--r-- drivers/gpu/drm/drm_gem_ttm_helper.c | 6
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c | 7
-rw-r--r-- drivers/gpu/drm/drm_internal.h | 8
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c | 3
-rw-r--r-- drivers/gpu/drm/drm_lease.c | 1
-rw-r--r-- drivers/gpu/drm/drm_legacy.h | 8
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 11
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 10
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos7_drm_decon.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 4
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h | 2
-rw-r--r-- drivers/gpu/drm/hyperv/Makefile | 8
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm.h | 52
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 311
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_modeset.c | 231
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_proto.c | 485
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c | 51
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 5
-rw-r--r-- drivers/gpu/drm/i915/dma_resv_utils.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_create.c | 345
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ioctls.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 20
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_lmem.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_region.c | 22
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 13
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 159
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 26
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 13
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 31
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 95
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_execlists_submission.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_irq.c | 82
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_irq.h | 23
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.c | 91
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.h | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ppgtt.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 201
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 100
-rw-r--r-- drivers/gpu/drm/i915/gt/mock_engine.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_context.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_execlists.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rc6.c | 32
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_ring_submission.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_rps.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/shmem_utils.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 64
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.c | 124
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/hypercall.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 122
-rw-r--r-- drivers/gpu/drm/i915/gvt/mpt.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_active.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_active.h | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_active_types.h | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 30
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 25
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_mm.c | 75
-rw-r--r-- drivers/gpu/drm/i915/i915_params.h | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c | 62
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 17
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 31
-rw-r--r-- drivers/gpu/drm/i915/intel_memory_region.c | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_memory_region.h | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 12
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_active.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c | 20
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_perf.c | 3
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_vma.c | 3
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_memory_region.c | 87
-rw-r--r-- drivers/gpu/drm/i915/selftests/librapl.c | 10
-rw-r--r-- drivers/gpu/drm/i915/selftests/librapl.h | 4
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 61
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm.h | 1
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-ipu.c | 21
-rw-r--r-- drivers/gpu/drm/mcde/mcde_dsi.c | 4
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_color.c | 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_gamma.c | 4
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 4
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 50
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 56
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.h | 15
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 83
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_vmm.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nv17_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c | 6
-rw-r--r-- drivers/gpu/drm/panel/panel-elida-kd35t133.c | 8
-rw-r--r-- drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c | 6
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-sitronix-st7701.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_device.h | 1
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c | 7
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gpu.c | 1
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_regs.h | 1
-rw-r--r-- drivers/gpu/drm/pl111/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.h | 6
-rw-r--r-- drivers/gpu/drm/qxl/qxl_dumb.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_object.c | 10
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ttm.c | 5
-rw-r--r-- drivers/gpu/drm/r128/ati_pcigart.c | 2
-rw-r--r-- drivers/gpu/drm/r128/r128_drv.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_dp_mst.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 65
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mn.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 22
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_sync.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_trace.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 74
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 4
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-reg.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 45
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 10
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 1
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_lvds.c | 4
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 54
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_vop_reg.h | 1
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 8
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 35
-rw-r--r-- drivers/gpu/drm/sti/sti_hda.c | 6
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp.c | 2
-rw-r--r-- drivers/gpu/drm/sti/sti_tvout.c | 18
-rw-r--r-- drivers/gpu/drm/stm/ltdc.c | 33
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_layer.c | 7
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_layer.c | 8
-rw-r--r-- drivers/gpu/drm/tiny/simpledrm.c | 6
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 206
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 377
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 156
-rw-r--r-- drivers/gpu/drm/ttm/ttm_module.c | 35
-rw-r--r-- drivers/gpu/drm/ttm/ttm_range_manager.c | 57
-rw-r--r-- drivers/gpu/drm/ttm/ttm_resource.c | 228
-rw-r--r-- drivers/gpu/drm/ttm/ttm_sys_manager.c | 11
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 47
-rw-r--r-- drivers/gpu/drm/vboxvideo/hgsmi_base.c | 19
-rw-r--r-- drivers/gpu/drm/vboxvideo/modesetting.c | 20
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 68
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 11
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 173
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.h | 13
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi_regs.h | 3
-rw-r--r-- drivers/gpu/drm/vc4/vc4_txp.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_vec.c | 27
-rw-r--r-- drivers/gpu/drm/vgem/vgem_drv.c | 2
-rw-r--r-- drivers/gpu/drm/vgem/vgem_fence.c | 3
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.c | 4
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_kms.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 42
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 30
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 46
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 21
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 53
-rw-r--r-- drivers/gpu/vga/vgaarb.c | 31
-rw-r--r-- drivers/hwmon/lm80.c | 11
-rw-r--r-- drivers/infiniband/core/cma.c | 5
-rw-r--r-- drivers/infiniband/core/umem_dmabuf.c | 2
-rw-r--r-- drivers/infiniband/core/uverbs_std_types_device.c | 7
-rw-r--r-- drivers/infiniband/hw/mlx5/devx.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/dm.c | 3
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 1
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_comp.c | 16
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_qp.c | 7
-rw-r--r-- drivers/infiniband/sw/siw/siw_verbs.c | 11
-rw-r--r-- drivers/irqchip/Kconfig | 2
-rw-r--r-- drivers/irqchip/irq-mvebu-icu.c | 4
-rw-r--r-- drivers/irqchip/irq-mvebu-sei.c | 4
-rw-r--r-- drivers/irqchip/irq-stm32-exti.c | 4
-rw-r--r-- drivers/isdn/hardware/mISDN/hfcsusb.c | 17
-rw-r--r-- drivers/isdn/hardware/mISDN/mISDNinfineon.c | 21
-rw-r--r-- drivers/leds/leds-lp5523.c | 2
-rw-r--r-- drivers/md/dm-integrity.c | 81
-rw-r--r-- drivers/md/dm-snap.c | 6
-rw-r--r-- drivers/media/dvb-frontends/sp8870.c | 2
-rw-r--r-- drivers/media/platform/rcar_drif.c | 1
-rw-r--r-- drivers/media/usb/gspca/cpia1.c | 6
-rw-r--r-- drivers/media/usb/gspca/m5602/m5602_mt9m111.c | 16
-rw-r--r-- drivers/media/usb/gspca/m5602/m5602_po1030.c | 14
-rw-r--r-- drivers/misc/eeprom/at24.c | 6
-rw-r--r-- drivers/misc/habanalabs/common/command_submission.c | 2
-rw-r--r-- drivers/misc/habanalabs/common/firmware_if.c | 53
-rw-r--r-- drivers/misc/habanalabs/common/habanalabs.h | 23
-rw-r--r-- drivers/misc/habanalabs/common/habanalabs_drv.c | 7
-rw-r--r-- drivers/misc/habanalabs/common/sysfs.c | 4
-rw-r--r-- drivers/misc/habanalabs/gaudi/gaudi.c | 59
-rw-r--r-- drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c | 12
-rw-r--r-- drivers/misc/habanalabs/goya/goya.c | 47
-rw-r--r-- drivers/misc/habanalabs/goya/goya_hwmgr.c | 40
-rw-r--r-- drivers/misc/ics932s401.c | 2
-rw-r--r-- drivers/misc/lis3lv02d/lis3lv02d.h | 1
-rw-r--r-- drivers/mmc/host/meson-gx-mmc.c | 7
-rw-r--r-- drivers/mmc/host/sdhci-pci-gli.c | 7
-rw-r--r-- drivers/net/caif/caif_serial.c | 3
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 27
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 27
-rw-r--r-- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 8
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 32
-rw-r--r-- drivers/net/wireless/ath/ath6kl/debug.c | 5
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 8
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 19
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 42
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 9
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h | 5
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 8
-rw-r--r-- drivers/net/wireless/marvell/libertas/mesh.c | 33
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/base.c | 18
-rw-r--r-- drivers/nvme/host/fc.c | 12
-rw-r--r-- drivers/nvme/host/tcp.c | 5
-rw-r--r-- drivers/nvme/target/core.c | 2
-rw-r--r-- drivers/nvme/target/loop.c | 4
-rw-r--r-- drivers/pci/pci-driver.c | 1
-rw-r--r-- drivers/platform/mellanox/mlxbf-tmfifo.c | 11
-rw-r--r-- drivers/platform/surface/aggregator/controller.c | 3
-rw-r--r-- drivers/platform/surface/surface_dtx.c | 8
-rw-r--r-- drivers/platform/x86/Kconfig | 2
-rw-r--r-- drivers/platform/x86/dell/dell-smbios-wmi.c | 3
-rw-r--r-- drivers/platform/x86/gigabyte-wmi.c | 38
-rw-r--r-- drivers/platform/x86/hp-wireless.c | 2
-rw-r--r-- drivers/platform/x86/hp_accel.c | 22
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 13
-rw-r--r-- drivers/platform/x86/intel_int0002_vgpio.c | 80
-rw-r--r-- drivers/platform/x86/intel_punit_ipc.c | 1
-rw-r--r-- drivers/platform/x86/touchscreen_dmi.c | 43
-rw-r--r-- drivers/rapidio/rio_cm.c | 17
-rw-r--r-- drivers/scsi/BusLogic.c | 6
-rw-r--r-- drivers/scsi/BusLogic.h | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 10
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 7
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 12
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c | 3
-rw-r--r-- drivers/scsi/ufs/ufs-hisi.c | 15
-rw-r--r-- drivers/scsi/ufs/ufs-mediatek.c | 4
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 5
-rw-r--r-- drivers/tee/amdtee/amdtee_private.h | 13
-rw-r--r-- drivers/tee/amdtee/call.c | 94
-rw-r--r-- drivers/tee/amdtee/core.c | 15
-rw-r--r-- drivers/tty/serial/max310x.c | 2
-rw-r--r-- drivers/tty/serial/mvebu-uart.c | 3
-rw-r--r-- drivers/uio/uio_hv_generic.c | 12
-rw-r--r-- drivers/uio/uio_pci_generic.c | 2
-rw-r--r-- drivers/video/fbdev/aty/mach64_cursor.c | 2
-rw-r--r-- drivers/video/fbdev/hgafb.c | 21
-rw-r--r-- drivers/video/fbdev/imsttfb.c | 26
-rw-r--r-- drivers/video/fbdev/matrox/matroxfb_base.c | 5
-rw-r--r-- drivers/video/fbdev/mb862xx/mb862xxfbdrv.c | 6
-rw-r--r-- drivers/xen/xen-pciback/vpci.c | 14
-rw-r--r-- drivers/xen/xen-pciback/xenbus.c | 22
-rw-r--r-- fs/block_dev.c | 18
-rw-r--r-- fs/btrfs/compression.c | 42
-rw-r--r-- fs/btrfs/extent_io.c | 9
-rw-r--r-- fs/btrfs/inode.c | 3
-rw-r--r-- fs/btrfs/reflink.c | 5
-rw-r--r-- fs/btrfs/tree-log.c | 20
-rw-r--r-- fs/btrfs/volumes.c | 2
-rw-r--r-- fs/btrfs/zoned.c | 4
-rw-r--r-- fs/btrfs/zoned.h | 5
-rw-r--r-- fs/cifs/cifsfs.c | 2
-rw-r--r-- fs/cifs/cifsglob.h | 4
-rw-r--r-- fs/cifs/file.c | 46
-rw-r--r-- fs/cifs/fs_context.c | 2
-rw-r--r-- fs/cifs/misc.c | 23
-rw-r--r-- fs/cifs/smb2ops.c | 4
-rw-r--r-- fs/cifs/smb2pdu.c | 6
-rw-r--r-- fs/ecryptfs/crypto.c | 4
-rw-r--r-- fs/hugetlbfs/inode.c | 2
-rw-r--r-- fs/io_uring.c | 14
-rw-r--r-- fs/namespace.c | 6
-rw-r--r-- fs/quota/dquot.c | 6
-rw-r--r-- fs/signalfd.c | 23
-rw-r--r-- fs/xfs/libxfs/xfs_fs.h | 4
-rw-r--r-- fs/xfs/scrub/common.c | 4
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 98
-rw-r--r-- include/drm/drm_atomic.h | 12
-rw-r--r-- include/drm/drm_cache.h | 7
-rw-r--r-- include/drm/drm_dp_mst_helper.h | 8
-rw-r--r-- include/drm/drm_fb_cma_helper.h | 5
-rw-r--r-- include/drm/drm_fourcc.h | 9
-rw-r--r-- include/drm/drm_gem_cma_helper.h | 3
-rw-r--r-- include/drm/gpu_scheduler.h | 2
-rw-r--r-- include/drm/ttm/ttm_bo_api.h | 17
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 64
-rw-r--r-- include/drm/ttm/ttm_caching.h | 2
-rw-r--r-- include/drm/ttm/ttm_device.h | 15
-rw-r--r-- include/drm/ttm/ttm_kmap_iter.h | 61
-rw-r--r-- include/drm/ttm/ttm_range_manager.h | 42
-rw-r--r-- include/drm/ttm/ttm_resource.h | 111
-rw-r--r-- include/drm/ttm/ttm_tt.h | 29
-rw-r--r-- include/linux/bits.h | 2
-rw-r--r-- include/linux/compat.h | 10
-rw-r--r-- include/linux/const.h | 8
-rw-r--r-- include/linux/dma-resv.h | 75
-rw-r--r-- include/linux/dynamic_debug.h | 5
-rw-r--r-- include/linux/genhd.h | 2
-rw-r--r-- include/linux/minmax.h | 10
-rw-r--r-- include/linux/pci.h | 3
-rw-r--r-- include/linux/sched/signal.h | 1
-rw-r--r-- include/linux/signal.h | 1
-rw-r--r-- include/linux/surface_aggregator/device.h | 6
-rw-r--r-- include/uapi/asm-generic/siginfo.h | 15
-rw-r--r-- include/uapi/drm/amdgpu_drm.h | 15
-rw-r--r-- include/uapi/drm/drm.h | 28
-rw-r--r-- include/uapi/drm/drm_fourcc.h | 7
-rw-r--r-- include/uapi/drm/i915_drm.h | 393
-rw-r--r-- include/uapi/drm/panfrost_drm.h | 1
-rw-r--r-- include/uapi/linux/perf_event.h | 2
-rw-r--r-- include/uapi/linux/signalfd.h | 4
-rw-r--r-- include/uapi/misc/habanalabs.h | 33
-rw-r--r-- ipc/mqueue.c | 6
-rw-r--r-- ipc/msg.c | 6
-rw-r--r-- ipc/sem.c | 6
-rw-r--r-- kernel/events/core.c | 11
-rw-r--r-- kernel/kcsan/debugfs.c | 3
-rw-r--r-- kernel/locking/lockdep.c | 4
-rw-r--r-- kernel/locking/mutex-debug.c | 4
-rw-r--r-- kernel/locking/mutex-debug.h | 2
-rw-r--r-- kernel/locking/mutex.c | 18
-rw-r--r-- kernel/locking/mutex.h | 4
-rw-r--r-- kernel/module.c | 17
-rw-r--r-- kernel/signal.c | 59
-rw-r--r-- kernel/watchdog.c | 34
-rw-r--r-- lib/Makefile | 1
-rw-r--r-- lib/dynamic_debug.c | 20
-rw-r--r-- mm/gup.c | 4
-rw-r--r-- mm/internal.h | 20
-rw-r--r-- mm/shuffle.h | 4
-rw-r--r-- mm/slub.c | 1
-rw-r--r-- mm/userfaultfd.c | 28
-rw-r--r-- net/smc/smc_ism.c | 15
-rwxr-xr-x scripts/dummy-tools/gcc | 6
-rwxr-xr-x scripts/jobserver-exec | 2
-rw-r--r-- sound/firewire/Kconfig | 4
-rw-r--r-- sound/firewire/amdtp-stream-trace.h | 6
-rw-r--r-- sound/firewire/amdtp-stream.c | 42
-rw-r--r-- sound/firewire/bebob/bebob.c | 2
-rw-r--r-- sound/firewire/dice/dice-alesis.c | 2
-rw-r--r-- sound/firewire/dice/dice-pcm.c | 4
-rw-r--r-- sound/firewire/dice/dice-stream.c | 2
-rw-r--r-- sound/firewire/dice/dice-tcelectronic.c | 4
-rw-r--r-- sound/firewire/dice/dice.c | 24
-rw-r--r-- sound/firewire/dice/dice.h | 3
-rw-r--r-- sound/firewire/oxfw/oxfw.c | 1
-rw-r--r-- sound/isa/gus/gus_main.c | 13
-rw-r--r-- sound/isa/sb/sb16_main.c | 10
-rw-r--r-- sound/isa/sb/sb8.c | 10
-rw-r--r-- sound/pci/hda/patch_realtek.c | 94
-rw-r--r-- sound/pci/intel8x0.c | 7
-rw-r--r-- sound/soc/codecs/cs43130.c | 28
-rw-r--r-- sound/soc/codecs/rt5645.c | 49
-rw-r--r-- sound/usb/line6/driver.c | 4
-rw-r--r-- sound/usb/line6/pod.c | 5
-rw-r--r-- sound/usb/line6/variax.c | 6
-rw-r--r-- sound/usb/midi.c | 11
-rw-r--r-- tools/build/Makefile.build | 22
-rw-r--r-- tools/include/linux/bits.h | 2
-rw-r--r-- tools/include/linux/const.h | 8
-rw-r--r-- tools/scripts/Makefile.include | 30
-rw-r--r-- tools/testing/selftests/exec/Makefile | 6
-rw-r--r-- tools/testing/selftests/perf_events/sigtrap_threads.c | 14
-rw-r--r-- tools/testing/selftests/seccomp/seccomp_bpf.c | 27
765 files changed, 12447 insertions(+), 5593 deletions(-)
diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
index c268debe5b8d..28675b0b80f1 100644
--- a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
+++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
@@ -60,7 +60,6 @@ properties:
maxItems: 2
idt,xtal-load-femtofarads:
- $ref: /schemas/types.yaml#/definitions/uint32
minimum: 9000
maximum: 22760
description: Optional load capacitor for XTAL1 and XTAL2
@@ -84,7 +83,6 @@ patternProperties:
enum: [ 1800000, 2500000, 3300000 ]
idt,slew-percent:
description: The Slew rate control for CMOS single-ended.
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [ 80, 85, 90, 100 ]
required:
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2835-vec.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2835-vec.yaml
index d900cc57b4ec..9b24081a0dbd 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2835-vec.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2835-vec.yaml
@@ -11,7 +11,9 @@ maintainers:
properties:
compatible:
- const: brcm,bcm2835-vec
+ enum:
+ - brcm,bcm2711-vec
+ - brcm,bcm2835-vec
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/bridge/google,cros-ec-anx7688.yaml b/Documentation/devicetree/bindings/display/bridge/google,cros-ec-anx7688.yaml
new file mode 100644
index 000000000000..9f7cc6b757cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/google,cros-ec-anx7688.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/google,cros-ec-anx7688.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ChromeOS EC ANX7688 HDMI to DP Converter through Type-C Port
+
+maintainers:
+ - Nicolas Boichat <[email protected]>
+ - Enric Balletbo i Serra <[email protected]>
+
+description: |
+ ChromeOS EC ANX7688 is a display bridge that converts HDMI 2.0 to
+ DisplayPort 1.3 Ultra-HDi (4096x2160p60). It is an Analogix ANX7688 chip
+ which is connected to and operated by the ChromeOS Embedded Controller
+ (See google,cros-ec.yaml). It is accessed using I2C tunneling through
+ the EC and therefore its node should be a child of an EC I2C tunnel node
+ (See google,cros-ec-i2c-tunnel.yaml).
+
+properties:
+ compatible:
+ const: google,cros-ec-anx7688
+
+ reg:
+ maxItems: 1
+ description: I2C address of the device.
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Video port for HDMI input.
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: USB Type-c connector.
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c_tunnel_b: i2c-tunnel1 {
+ compatible = "google,cros-ec-i2c-tunnel";
+ google,remote-bus = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ anx7688: anx7688@2c {
+ compatible = "google,cros-ec-anx7688";
+ reg = <0x2c>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ anx7688_in: endpoint {
+ remote-endpoint = <&hdmi0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ anx7688_out: endpoint {
+ remote-endpoint = <&typec_connector>;
+ };
+ };
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
new file mode 100644
index 000000000000..d101233ae17f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ti,sn65dsi83.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SN65DSI83 and SN65DSI84 DSI to LVDS bridge chip
+
+maintainers:
+ - Marek Vasut <[email protected]>
+
+description: |
+ Texas Instruments SN65DSI83 1x Single-link MIPI DSI
+ to 1x Single-link LVDS
+ https://www.ti.com/lit/gpn/sn65dsi83
+ Texas Instruments SN65DSI84 1x Single-link MIPI DSI
+ to 1x Dual-link or 2x Single-link LVDS
+ https://www.ti.com/lit/gpn/sn65dsi84
+
+properties:
+ compatible:
+ enum:
+ - ti,sn65dsi83
+ - ti,sn65dsi84
+
+ reg:
+ enum:
+ - 0x2c
+ - 0x2d
+
+ enable-gpios:
+ maxItems: 1
+ description: GPIO specifier for bridge_en pin (active high).
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Video port for MIPI DSI Channel-A input
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ description: array of physical DSI data lane indexes.
+ minItems: 1
+ maxItems: 4
+ items:
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Video port for MIPI DSI Channel-B input
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ description: array of physical DSI data lane indexes.
+ minItems: 1
+ maxItems: 4
+ items:
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Video port for LVDS Channel-A output (panel or bridge).
+
+ port@3:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Video port for LVDS Channel-B output (panel or bridge).
+
+ required:
+ - port@0
+ - port@2
+
+required:
+ - compatible
+ - reg
+ - enable-gpios
+ - ports
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,sn65dsi83
+ then:
+ properties:
+ ports:
+ properties:
+ port@1: false
+ port@3: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,sn65dsi84
+ then:
+ properties:
+ ports:
+ properties:
+ port@1: false
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bridge@2d {
+ compatible = "ti,sn65dsi83";
+ reg = <0x2d>;
+
+ enable-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ endpoint {
+ remote-endpoint = <&dsi0_out>;
+ data-lanes = <1 2 3 4>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ endpoint {
+ remote-endpoint = <&panel_in_lvds>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/faraday,tve200.txt b/Documentation/devicetree/bindings/display/faraday,tve200.txt
deleted file mode 100644
index 82e3bc0b7485..000000000000
--- a/Documentation/devicetree/bindings/display/faraday,tve200.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-* Faraday TV Encoder TVE200
-
-Required properties:
-
-- compatible: must be one of:
- "faraday,tve200"
- "cortina,gemini-tvc", "faraday,tve200"
-
-- reg: base address and size of the control registers block
-
-- interrupts: contains an interrupt specifier for the interrupt
- line from the TVE200
-
-- clock-names: should contain "PCLK" for the clock line clocking the
- silicon and "TVE" for the 27MHz clock to the video driver
-
-- clocks: contains phandle and clock specifier pairs for the entries
- in the clock-names property. See
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Optional properties:
-
-- resets: contains the reset line phandle for the block
-
-Required sub-nodes:
-
-- port: describes LCD panel signals, following the common binding
- for video transmitter interfaces; see
- Documentation/devicetree/bindings/media/video-interfaces.txt
- This port should have the properties:
- reg = <0>;
- It should have one endpoint connected to a remote endpoint where
- the display is connected.
-
-Example:
-
-display-controller@6a000000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "faraday,tve200";
- reg = <0x6a000000 0x1000>;
- interrupts = <13 IRQ_TYPE_EDGE_RISING>;
- resets = <&syscon GEMINI_RESET_TVC>;
- clocks = <&syscon GEMINI_CLK_GATE_TVC>,
- <&syscon GEMINI_CLK_TVC>;
- clock-names = "PCLK", "TVE";
-
- port@0 {
- reg = <0>;
- display_out: endpoint {
- remote-endpoint = <&panel_in>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/display/faraday,tve200.yaml b/Documentation/devicetree/bindings/display/faraday,tve200.yaml
new file mode 100644
index 000000000000..e2ee77767321
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/faraday,tve200.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/faraday,tve200.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Faraday TV Encoder TVE200
+
+maintainers:
+ - Linus Walleij <[email protected]>
+
+properties:
+ compatible:
+ oneOf:
+ - const: faraday,tve200
+ - items:
+ - const: cortina,gemini-tvc
+ - const: faraday,tve200
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+
+ clock-names:
+ items:
+ - const: PCLK
+ - const: TVE
+
+ clocks:
+ minItems: 2
+
+ resets:
+ minItems: 1
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/cortina,gemini-clock.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/reset/cortina,gemini-reset.h>
+ display-controller@6a000000 {
+ compatible = "faraday,tve200";
+ reg = <0x6a000000 0x1000>;
+ interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+ resets = <&syscon GEMINI_RESET_TVC>;
+ clocks = <&syscon GEMINI_CLK_GATE_TVC>,
+ <&syscon GEMINI_CLK_TVC>;
+ clock-names = "PCLK", "TVE";
+
+ port {
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index 6f2398cdc82d..1e7894e524f9 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -102,7 +102,6 @@ patternProperties:
st,adc-channel-names:
description: List of single-ended channel names.
- $ref: /schemas/types.yaml#/definitions/string-array
st,filter-order:
description: |
diff --git a/Documentation/devicetree/bindings/input/input.yaml b/Documentation/devicetree/bindings/input/input.yaml
index 74244d21d2b3..d41d8743aad4 100644
--- a/Documentation/devicetree/bindings/input/input.yaml
+++ b/Documentation/devicetree/bindings/input/input.yaml
@@ -38,6 +38,5 @@ properties:
Duration in seconds which the key should be kept pressed for device to
reset automatically. Device with key pressed reset feature can specify
this property.
- $ref: /schemas/types.yaml#/definitions/uint32
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index cb6498108b78..36c955965d90 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -92,7 +92,6 @@ properties:
this interconnect to send RPMh commands.
qcom,bcm-voter-names:
- $ref: /schemas/types.yaml#/definitions/string-array
description: |
Names for each of the qcom,bcm-voters specified.
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
index ccebce597f37..a555d94084b7 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
@@ -4,8 +4,8 @@ This controller is present on BCM6318, BCM6328, BCM6362 and BCM63268.
In these SoCs it's possible to control LEDs both as GPIOs or by hardware.
However, on some devices there are Serial LEDs (LEDs connected to a 74x164
controller), which can either be controlled by software (exporting the 74x164
-as spi-gpio. See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
-by hardware using this driver.
+as spi-gpio. See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml),
+or by hardware using this driver.
Some of these Serial LEDs are hardware controlled (e.g. ethernet LEDs) and
exporting the 74x164 as spi-gpio prevents those LEDs to be hardware
controlled, so the only chance to keep them working is by using this driver.
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
index da5708e7b43b..6e51c6b91ee5 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
@@ -3,7 +3,7 @@ LEDs connected to Broadcom BCM6358 controller
This controller is present on BCM6358 and BCM6368.
In these SoCs there are Serial LEDs (LEDs connected to a 74x164 controller),
which can either be controlled by software (exporting the 74x164 as spi-gpio.
-See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
+See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml), or
by hardware using this driver.
Required properties:
diff --git a/Documentation/devicetree/bindings/media/renesas,drif.yaml b/Documentation/devicetree/bindings/media/renesas,drif.yaml
index f1bdaeab4053..ce505a7c006a 100644
--- a/Documentation/devicetree/bindings/media/renesas,drif.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,drif.yaml
@@ -99,32 +99,26 @@ properties:
Indicates that the channel acts as primary among the bonded channels.
port:
- type: object
+ $ref: /schemas/graph.yaml#/properties/port
+ unevaluatedProperties: false
description:
- Child port node corresponding to the data input, in accordance with the
- video interface bindings defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
- The port node must contain at least one endpoint.
+ Child port node corresponding to the data input. The port node must
+ contain at least one endpoint.
properties:
endpoint:
- type: object
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
properties:
- remote-endpoint:
- description:
- A phandle to the remote tuner endpoint subnode in remote node
- port.
-
sync-active:
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1]
description:
Indicates sync signal polarity, 0/1 for low/high respectively.
This property maps to SYNCAC bit in the hardware manual. The
default is 1 (active high).
- additionalProperties: false
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index 7443490d4cc6..5fe6d3dceb08 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -105,7 +105,6 @@ properties:
- description: Whether the IPA clock is enabled (if valid)
qcom,smem-state-names:
- $ref: /schemas/types.yaml#/definitions/string-array
description: The names of the state bits used for SMP2P output
items:
- const: ipa-clock-enabled-valid
diff --git a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
index d479ad977e24..b6791702bcfc 100644
--- a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
@@ -9,7 +9,6 @@ Required properties:
"mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173
"mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
"mediatek,mt8516-efuse", "mediatek,efuse": for MT8516
- "mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
- reg: Should contain registers location and length
= Data cells =
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
index 01dcd14e7b2a..320a232c7208 100644
--- a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
@@ -118,7 +118,7 @@ patternProperties:
description:
Specifies the Spread Spectrum Clocking mode used. It can be NO_SSC,
EXTERNAL_SSC or INTERNAL_SSC.
- Refer include/dt-bindings/phy/phy-cadence-torrent.h for the constants to be used.
+ Refer include/dt-bindings/phy/phy-cadence.h for the constants to be used.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2]
default: 0
diff --git a/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml b/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
index db1aa238cda5..b62c2431f94e 100644
--- a/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
+++ b/Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
@@ -20,7 +20,7 @@ properties:
maxItems: 1
phys:
- $ref: /schemas/types.yaml#/definitions/phandle
+ maxItems: 1
description: phandle to the USB phy
monitored-battery:
diff --git a/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml b/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
index b4c190bddd84..61802a11baf4 100644
--- a/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
@@ -49,7 +49,7 @@ properties:
maxItems: 1
memory-region:
- $ref: /schemas/types.yaml#/definitions/phandle
+ maxItems: 1
description:
phandle to a node describing reserved memory (System RAM memory)
The M core can't access all the DDR memory space on some platform,
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index 7f37ec30d9fd..7f21425d9435 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -178,6 +178,15 @@ DMA Fence Array
.. kernel-doc:: include/linux/dma-fence-array.h
:internal:
+DMA Fence Chain
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/dma-buf/dma-fence-chain.c
+ :export:
+
+.. kernel-doc:: include/linux/dma-fence-chain.h
+ :internal:
+
DMA Fence uABI/Sync File
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/gpu/driver-uapi.rst b/Documentation/gpu/driver-uapi.rst
new file mode 100644
index 000000000000..4411e6919a3d
--- /dev/null
+++ b/Documentation/gpu/driver-uapi.rst
@@ -0,0 +1,8 @@
+===============
+DRM Driver uAPI
+===============
+
+drm/i915 uAPI
+=============
+
+.. kernel-doc:: include/uapi/drm/i915_drm.h
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 21be6deadc12..d5a73fa2c9ef 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -469,8 +469,8 @@ DRM MM Range Allocator Function References
.. kernel-doc:: drivers/gpu/drm/drm_mm.c
:export:
-DRM Cache Handling
-==================
+DRM Cache Handling and Fast WC memcpy()
+=======================================
.. kernel-doc:: drivers/gpu/drm/drm_cache.c
:export:
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index ec4bc72438e4..b9c1214d8f23 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -10,6 +10,7 @@ Linux GPU Driver Developer's Guide
drm-kms
drm-kms-helpers
drm-uapi
+ driver-uapi
drm-client
drivers
backlight
diff --git a/Documentation/gpu/rfc/i915_gem_lmem.rst b/Documentation/gpu/rfc/i915_gem_lmem.rst
new file mode 100644
index 000000000000..675ba8620d66
--- /dev/null
+++ b/Documentation/gpu/rfc/i915_gem_lmem.rst
@@ -0,0 +1,131 @@
+=========================
+I915 DG1/LMEM RFC Section
+=========================
+
+Upstream plan
+=============
+For upstream, the overall plan for landing all the DG1 stuff and turning it on
+for real, with all the uAPI bits, is:
+
+* Merge basic HW enabling of DG1 (still without pciid)
+* Merge the uAPI bits behind a special CONFIG_BROKEN (or so) flag
+ * At this point we can still make changes, but importantly this lets us
+ start running IGTs which can utilize local-memory in CI
+* Convert over to TTM, make sure it all keeps working. Some of the work items:
+ * TTM shrinker for discrete
+ * dma_resv_lockitem for full dma_resv_lock, i.e. not just trylock
+ * Use TTM CPU pagefault handler
+ * Route shmem backend over to TTM SYSTEM for discrete
+ * TTM purgeable object support
+ * Move i915 buddy allocator over to TTM
+ * MMAP ioctl mode (see `I915 MMAP`_)
+ * SET/GET ioctl caching (see `I915 SET/GET CACHING`_)
+* Send RFC (with mesa-dev on cc) for final sign-off on the uAPI
+* Add pciid for DG1 and turn on uAPI for real
+
+New object placement and region query uAPI
+==========================================
+Starting from DG1 we need to give userspace the ability to allocate buffers from
+device local-memory. Currently the driver supports gem_create, which can place
+buffers in system memory via shmem, and the usual assortment of other
+interfaces, like dumb buffers and userptr.
+
+To support this new capability, while also providing a uAPI which will work
+beyond just DG1, we propose to offer three new bits of uAPI:
+
+DRM_I915_QUERY_MEMORY_REGIONS
+-----------------------------
+New query ID which allows userspace to discover the list of supported memory
+regions (like system-memory and local-memory) for a given device. We identify
+each region with a class and instance pair, which should be unique. The class
+here would be DEVICE or SYSTEM, and the instance would be zero, on platforms
+like DG1.
+
+Side note: The class/instance design is borrowed from our existing engine uAPI,
+where we describe every physical engine in terms of its class, and the
+particular instance, since we can have more than one per class.
+
+In the future we also want to expose more information which can further
+describe the capabilities of a region.
+
+.. kernel-doc:: include/uapi/drm/i915_drm.h
+ :functions: drm_i915_gem_memory_class drm_i915_gem_memory_class_instance drm_i915_memory_region_info drm_i915_query_memory_regions
+
+GEM_CREATE_EXT
+--------------
+New ioctl which is basically just gem_create but now allows userspace to provide
+a chain of possible extensions. Note that if we don't provide any extensions and
+set flags=0 then we get the exact same behaviour as gem_create.
+
+Side note: We also need to support PXP[1] in the near future, which is also
+applicable to integrated platforms, and adds its own gem_create_ext extension,
+which basically lets userspace mark a buffer as "protected".
+
+.. kernel-doc:: include/uapi/drm/i915_drm.h
+ :functions: drm_i915_gem_create_ext
+
+I915_GEM_CREATE_EXT_MEMORY_REGIONS
+----------------------------------
+Implemented as an extension for gem_create_ext, we would now allow userspace to
+optionally provide an immutable list of preferred placements at creation time,
+in priority order, for a given buffer object. For the placements we expect
+them each to use the class/instance encoding, as per the output of the regions
+query. Having the list in priority order will be useful in the future when
+placing an object, say during eviction.
+
+.. kernel-doc:: include/uapi/drm/i915_drm.h
+ :functions: drm_i915_gem_create_ext_memory_regions
+
+One fair criticism here is that this seems a little over-engineered[2]. If we
+just consider DG1 then yes, a simple gem_create.flags or something is all
+that's needed to tell the kernel to allocate the buffer in local-memory or
+whatever. However, looking to the future we need uAPI which can also support
+upcoming Xe HP multi-tile architecture in a sane way, where there can be
+multiple local-memory instances for a given device, and so using both class and
+instance in our uAPI to describe regions is desirable, although specifically
+for DG1 it's uninteresting, since we only have a single local-memory instance.
+
+Existing uAPI issues
+====================
+Some potential issues we still need to resolve.
+
+I915 MMAP
+---------
+In i915 there are multiple ways to mmap a GEM object, including mapping the same
+object using different mapping types (WC vs WB), i.e. multiple active mmaps per
+object. TTM expects at most one mmap for the lifetime of the object. If it
+turns out that we have to backpedal here, there might be some potential
+userspace fallout.
+
+I915 SET/GET CACHING
+--------------------
+In i915 we have the set/get_caching ioctls. TTM doesn't let us change this, but
+DG1 doesn't support non-snooped pcie transactions, so we can just always
+allocate as WB for smem-only buffers. If/when our hw gains support for
+non-snooped pcie transactions then we must fix this mode at allocation time as
+a new GEM extension.
+
+This is related to the mmap problem, because in general (meaning, when we're
+not running on intel cpus) the cpu mmap must not, ever, be inconsistent with
+the allocation mode.
+
+One possible idea is to let the kernel pick the mmap mode for userspace from the
+following table:
+
+smem-only: WB. Userspace does not need to call clflush.
+
+smem+lmem: We only ever allow a single mode, so simply allocate this as uncached
+memory, and always give userspace a WC mapping. GPU still does snooped access
+here (assuming we can't turn it off like on DG1), which is a bit inefficient.
+
+lmem only: always WC
+
+This means on discrete you only get a single mmap mode; all others must be
+rejected. That's probably going to be a new default mode or something like
+that.
+
+Links
+=====
+[1] https://patchwork.freedesktop.org/series/86798/
+
+[2] https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5599#note_553791
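As a rough illustration of the uAPI proposed in the RFC text above, the sketch below first runs the regions query and then creates a buffer with a priority-ordered placement list (local-memory first, system memory as fallback). It is a sketch only, not part of the patch: it assumes the structures and ioctl numbers (drm_i915_query_item, DRM_I915_QUERY_MEMORY_REGIONS, DRM_IOCTL_I915_GEM_CREATE_EXT, I915_GEM_CREATE_EXT_MEMORY_REGIONS) land in include/uapi/drm/i915_drm.h as described, and it elides all error handling.

/* Sketch only: assumes the RFC uAPI lands as described above. */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static __u32 create_lmem_first(int fd, __u64 size)
{
	/* Pass 1 sizes the region list, pass 2 fills it. */
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (__u64)(uintptr_t)&item,
	};
	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
	struct drm_i915_query_memory_regions *info = calloc(1, item.length);
	item.data_ptr = (__u64)(uintptr_t)info;
	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
	/* info->regions[] now describes each region's class/instance and sizes. */

	/* Immutable placement list in priority order, using the
	 * class/instance encoding from the regions query. */
	struct drm_i915_gem_memory_class_instance place[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
	};
	struct drm_i915_gem_create_ext_memory_regions ext = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = 2,
		.regions = (__u64)(uintptr_t)place,
	};

	/* With no extensions and flags=0 this degenerates to gem_create. */
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.extensions = (__u64)(uintptr_t)&ext,
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);

	free(info);
	return create.handle;
}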
diff --git a/Documentation/gpu/rfc/index.rst b/Documentation/gpu/rfc/index.rst
index a8621f7dab8b..05670442ca1b 100644
--- a/Documentation/gpu/rfc/index.rst
+++ b/Documentation/gpu/rfc/index.rst
@@ -15,3 +15,7 @@ host such documentation:
* Once the code has landed move all the documentation to the right places in
the main core, helper or driver sections.
+
+.. toctree::
+
+ i915_gem_lmem.rst
diff --git a/Documentation/powerpc/syscall64-abi.rst b/Documentation/powerpc/syscall64-abi.rst
index dabee3729e5a..56490c4c0c07 100644
--- a/Documentation/powerpc/syscall64-abi.rst
+++ b/Documentation/powerpc/syscall64-abi.rst
@@ -109,6 +109,16 @@ auxiliary vector.
scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
+ptrace
+------
+When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
+the system call type, which can be used to distinguish between sc and scv 0
+system calls so that the different register conventions can be accounted for.
+
+If the value of (pt_regs.trap & 0xfff0) is 0xc00 then the system call was
+performed with the sc instruction; if it is 0x3000 then the system call was
+performed with the scv 0 instruction.
+
vsyscall
========
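For illustration, the check documented above can be exercised from a ptrace-based tracer. The sketch below is not part of the patch and assumes a ppc64 tracer fetching the tracee's GPR set (which includes the trap field) via PTRACE_GETREGSET with NT_PRSTATUS:

/* Sketch: classify a syscall stop on ppc64 as sc vs scv 0. */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>		/* userspace view of struct pt_regs */

static const char *syscall_flavour(pid_t pid)
{
	struct pt_regs regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);

	switch (regs.trap & 0xfff0) {
	case 0xc00:
		return "sc";	/* error flagged in CR0.SO, positive errno in r3 */
	case 0x3000:
		return "scv 0";	/* negative errno returned directly in r3 */
	default:
		return "not a syscall trap";
	}
}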
diff --git a/MAINTAINERS b/MAINTAINERS
index 315120c4124d..bc336961ed52 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1618,8 +1618,8 @@ F: Documentation/devicetree/bindings/sound/amlogic*
F: sound/soc/meson/
ARM/Amlogic Meson SoC support
+M: Neil Armstrong <[email protected]>
M: Kevin Hilman <[email protected]>
-R: Neil Armstrong <[email protected]>
R: Jerome Brunet <[email protected]>
R: Martin Blumenstingl <[email protected]>
L: [email protected] (moderated for non-subscribers)
@@ -6084,6 +6084,14 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/hisilicon/
F: drivers/gpu/drm/hisilicon/
+DRM DRIVER FOR HYPERV SYNTHETIC VIDEO DEVICE
+M: Deepak Rawat <[email protected]>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: drivers/gpu/drm/hyperv
+
DRM DRIVERS FOR LIMA
M: Qiang Yu <[email protected]>
@@ -12195,6 +12203,7 @@ F: drivers/platform/surface/surfacepro3_button.c
MICROSOFT SURFACE SYSTEM AGGREGATOR SUBSYSTEM
M: Maximilian Luz <[email protected]>
S: Maintained
W: https://github.com/linux-surface/surface-aggregator-module
C: irc://chat.freenode.net/##linux-surface
@@ -14749,7 +14758,6 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/p54
F: drivers/net/wireless/intersil/prism54/
PROC FILESYSTEM
-R: Alexey Dobriyan <[email protected]>
S: Maintained
diff --git a/Makefile b/Makefile
index 0ed7e061c8e9..e4468353425a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Frozen Wasteland
# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 5622578742fd..3000a2e8ee21 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -482,7 +482,7 @@
550 common process_madvise sys_process_madvise
551 common epoll_pwait2 sys_epoll_pwait2
552 common mount_setattr sys_mount_setattr
-553 common quotactl_path sys_quotactl_path
+# 553 reserved for quotactl_path
554 common landlock_create_ruleset sys_landlock_create_ruleset
555 common landlock_add_rule sys_landlock_add_rule
556 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
index 658c8efb4ca1..a71cf1d189ae 100644
--- a/arch/arm/mach-npcm/Kconfig
+++ b/arch/arm/mach-npcm/Kconfig
@@ -10,6 +10,7 @@ config ARCH_WPCM450
bool "Support for WPCM450 BMC (Hermon)"
depends on ARCH_MULTI_V5
select CPU_ARM926T
+ select WPCM450_AIC
select NPCM7XX_TIMER
help
General support for WPCM450 BMC (Hermon).
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
index ec0d9b094744..bddfc7cd5d40 100644
--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -121,8 +121,13 @@ static int cplds_probe(struct platform_device *pdev)
return fpga->irq;
base_irq = platform_get_irq(pdev, 1);
- if (base_irq < 0)
+ if (base_irq < 0) {
base_irq = 0;
+ } else {
+ ret = devm_irq_alloc_descs(&pdev->dev, base_irq, base_irq, CPLDS_NB_IRQ, 0);
+ if (ret < 0)
+ return ret;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fpga->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index c7679d7db98b..28e03b5fec00 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -456,7 +456,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
index c62ddb9b2ba5..3771144a2ce4 100644
--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
@@ -14,7 +14,6 @@
ports {
port@0 {
- reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
@@ -29,7 +28,6 @@
ports {
port@0 {
- reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index d64fb8b1b86c..46f8dbf68904 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -2573,6 +2573,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2628,6 +2632,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index 5b05474dc272..d16a4be5ef77 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -2419,6 +2419,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2474,6 +2478,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
index e7b4a929bb17..2e3d1981cac4 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
@@ -33,7 +33,7 @@
status = "okay";
ports {
- port {
+ port@0 {
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 20fa3caa050e..1aef34447abd 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -1823,6 +1823,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index 8eb006cbd9af..1f51237ab0a6 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -2709,6 +2709,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2764,6 +2768,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77950.dtsi b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
index 25b87da32eeb..b643d3079db1 100644
--- a/arch/arm64/boot/dts/renesas/r8a77950.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
@@ -192,6 +192,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index 5c39152e4570..85d66d15465a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -3097,6 +3097,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3152,6 +3156,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3191,6 +3199,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 25d947a81b29..12476e354d74 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -2761,6 +2761,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2816,6 +2820,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index ab081f14af9a..d9804768425a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -2499,6 +2499,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2554,6 +2558,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 657b20d3533b..dcb9df861d74 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -2575,6 +2575,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2630,6 +2634,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index 5a5d5649332a..e8f6352c3665 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -1106,6 +1106,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 1ffa4a995a7a..7b51d464de0e 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -1439,6 +1439,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -1478,6 +1482,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
index 295d34f1d216..4715e4a4abe0 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
@@ -298,8 +298,6 @@
ports {
port@0 {
- reg = <0>;
-
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 5010f23fafcc..0eaea58f4210 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -1970,6 +1970,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index e18747df219f..453ffcef24fa 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -349,7 +349,6 @@
ports {
port@0 {
- reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1>;
@@ -364,8 +363,6 @@
ports {
port@0 {
- reg = <0>;
-
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2 3 4>;
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 7859749d6628..5dab69d2c22b 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -893,8 +893,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 1ee8e736a48e..bb11fe4c875a 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -363,7 +363,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index a4b7ee1df211..8f215e79e70e 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -623,7 +623,8 @@ static inline void siginfo_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
/* _sigfault._perf */
- BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
/* _sigpoll */
BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c);
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 0dd019dc2136..79c2d24c89dd 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -442,7 +442,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 2ac716984ca2..b11395a20c20 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -448,7 +448,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 5e0096657251..9220909526f9 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -381,7 +381,7 @@
440 n32 process_madvise sys_process_madvise
441 n32 epoll_pwait2 compat_sys_epoll_pwait2
442 n32 mount_setattr sys_mount_setattr
-443 n32 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 n32 landlock_create_ruleset sys_landlock_create_ruleset
445 n32 landlock_add_rule sys_landlock_add_rule
446 n32 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 9974f5f8e49b..9cd1c34f31b5 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -357,7 +357,7 @@
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
442 n64 mount_setattr sys_mount_setattr
-443 n64 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 n64 landlock_create_ruleset sys_landlock_create_ruleset
445 n64 landlock_add_rule sys_landlock_add_rule
446 n64 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 39d6e71e57b6..d560c467a8c6 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -430,7 +430,7 @@
440 o32 process_madvise sys_process_madvise
441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 o32 mount_setattr sys_mount_setattr
-443 o32 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 o32 landlock_create_ruleset sys_landlock_create_ruleset
445 o32 landlock_add_rule sys_landlock_add_rule
446 o32 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h
new file mode 100644
index 000000000000..7538294721be
--- /dev/null
+++ b/arch/openrisc/include/asm/barrier.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define mb() asm volatile ("l.msync" ::: "memory")
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
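Defining only mb() is sufficient here because asm-generic/barrier.h, included at the end of the new header, derives every barrier the architecture leaves undefined from the strongest one it provides. Paraphrased for illustration (not a literal copy of the generic header):

/* Paraphrased fallbacks from include/asm-generic/barrier.h: an arch
 * that defines only mb() gets the remaining barriers derived from it. */
#ifndef rmb
#define rmb()	mb()	/* read barrier falls back to the full barrier */
#endif

#ifndef wmb
#define wmb()	mb()	/* write barrier falls back to the full barrier */
#endif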
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index 2416a9f91533..c6f9e7b9f7cb 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -278,6 +278,8 @@ void calibrate_delay(void)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+
+ of_node_put(cpu);
}
void __init setup_arch(char **cmdline_p)
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index d5641198b90c..cfef61a7b6c2 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -75,7 +75,6 @@ static void __init map_ram(void)
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
- struct memblock_region *region;
v = PAGE_OFFSET;
@@ -121,7 +120,7 @@ static void __init map_ram(void)
}
printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
- region->base, region->base + region->size);
+ start, end);
}
}
@@ -129,7 +128,6 @@ void __init paging_init(void)
{
extern void tlb_init(void);
- unsigned long end;
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -145,8 +143,6 @@ void __init paging_init(void)
*/
current_pgd[smp_processor_id()] = init_mm.pgd;
- end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
-
map_ram();
zone_sizes_init();
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 5ac80b83d745..aabc37f8cae3 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -440,7 +440,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 9c9ab2746168..b476a685f066 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -19,6 +19,7 @@
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H
+#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>
@@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);
-#define kernel_stack_pointer(regs) ((regs)->gpr[1])
-static inline int is_syscall_success(struct pt_regs *regs)
-{
- return !(regs->ccr & 0x10000000);
-}
-
-static inline long regs_return_value(struct pt_regs *regs)
-{
- if (is_syscall_success(regs))
- return regs->gpr[3];
- else
- return -regs->gpr[3];
-}
-
-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
-{
- regs->gpr[3] = rc;
-}
-
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
@@ -235,6 +217,31 @@ static __always_inline void set_trap_norestart(struct pt_regs *regs)
regs->trap |= 0x1;
}
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+ if (trap_is_scv(regs))
+ return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
+ else
+ return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ if (trap_is_scv(regs))
+ return regs->gpr[3];
+
+ if (is_syscall_success(regs))
+ return regs->gpr[3];
+ else
+ return -regs->gpr[3];
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->gpr[3] = rc;
+}
+
#define arch_has_single_step() (1)
#define arch_has_block_step() (true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index fd1b518eed17..ba0f88f3a30d 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
- /*
- * If the system call failed,
- * regs->gpr[3] contains a positive ERRORCODE.
- */
- return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ if (trap_is_scv(regs)) {
+ unsigned long error = regs->gpr[3];
+
+ return IS_ERR_VALUE(error) ? error : 0;
+ } else {
+ /*
+ * If the system call failed,
+ * regs->gpr[3] contains a positive ERRORCODE.
+ */
+ return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ }
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- /*
- * In the general case it's not obvious that we must deal with CCR
- * here, as the syscall exit path will also do that for us. However
- * there are some places, eg. the signal code, which check ccr to
- * decide if the value in r3 is actually an error.
- */
- if (error) {
- regs->ccr |= 0x10000000L;
- regs->gpr[3] = error;
+ if (trap_is_scv(regs)) {
+ regs->gpr[3] = (long) error ?: val;
} else {
- regs->ccr &= ~0x10000000L;
- regs->gpr[3] = val;
+ /*
+ * In the general case it's not obvious that we must deal with
+ * CCR here, as the syscall exit path will also do that for us.
+ * However there are some places, eg. the signal code, which
+ * check ccr to decide if the value in r3 is actually an error.
+ */
+ if (error) {
+ regs->ccr |= 0x10000000L;
+ regs->gpr[3] = error;
+ } else {
+ regs->ccr &= ~0x10000000L;
+ regs->gpr[3] = val;
+ }
}
}
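Taken together, the ptrace.h and syscall.h hunks above encode two user-visible error conventions. A minimal standalone sketch of the decoding (illustration only; SO_BIT is the CR0.SO mask value the kernel sources test):

/* Decode a ppc64 syscall return the same way the hunks above do. */
#define SO_BIT	0x10000000UL	/* CR0.SO, the "summary overflow" bit */

static long decoded_return(unsigned long r3, unsigned long ccr, int is_scv)
{
	if (is_scv)
		return (long)r3;	/* scv 0: r3 already holds -errno on failure */

	/* sc: failure is flagged in CR0.SO and r3 holds a positive errno */
	return (ccr & SO_BIT) ? -(long)r3 : (long)r3;
}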
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index b779d25761cf..e42b85e4f1aa 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -369,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
apply_feature_fixups();
setup_feature_keys();
- early_ioremap_setup();
-
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ early_ioremap_setup();
+
/*
* After firmware and early platform setup code has set things up,
* we note the SPR values for configurable control/performance
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 2e68fbb57cc6..8f052ff4058c 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -522,7 +522,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 7e4a2aba366d..0690263df1dd 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index f47a0dc55445..0b91499ebdcf 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -445,7 +445,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index b9e1c0e735b7..e34cc30ef22c 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -488,7 +488,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index c77c5d8a7b3e..307529417021 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -178,11 +178,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
-ifdef CONFIG_LTO_CLANG
-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
- -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
-endif
-
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -202,7 +197,12 @@ ifdef CONFIG_RETPOLINE
endif
endif
-KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
+
+ifdef CONFIG_LTO_CLANG
+KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
+ -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs --discard-none
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 28a1423ce32e..4bbc267fb36b 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -447,7 +447,7 @@
440 i386 process_madvise sys_process_madvise
441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 i386 mount_setattr sys_mount_setattr
-443 i386 quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 i386 landlock_create_ruleset sys_landlock_create_ruleset
445 i386 landlock_add_rule sys_landlock_add_rule
446 i386 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index ecd551b08d05..ce18119ea0d0 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -364,7 +364,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 8e509325c2c3..8f71dd72ef95 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -396,10 +396,12 @@ int x86_reserve_hardware(void)
if (!atomic_inc_not_zero(&pmc_refcount)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&pmc_refcount) == 0) {
- if (!reserve_pmc_hardware())
+ if (!reserve_pmc_hardware()) {
err = -EBUSY;
- else
+ } else {
reserve_ds_buffers();
+ reserve_lbr_buffers();
+ }
}
if (!err)
atomic_inc(&pmc_refcount);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 2521d03de5e0..e28892270c58 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6253,7 +6253,7 @@ __init int intel_pmu_init(void)
* Check all LBR MSRs here.
* Disable LBR access if any LBR MSRs cannot be accessed.
*/
- if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+ if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu.lbr_nr = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 76dbab6ac9fb..4409d2cccfda 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
void intel_pmu_lbr_add(struct perf_event *event)
{
- struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!x86_pmu.lbr_nr)
@@ -696,11 +695,6 @@ void intel_pmu_lbr_add(struct perf_event *event)
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
-
- if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
- kmem_cache && !cpuc->lbr_xsave &&
- (cpuc->lbr_users != cpuc->lbr_pebs_users))
- cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
}
void release_lbr_buffers(void)
@@ -722,6 +716,26 @@ void release_lbr_buffers(void)
}
}
+void reserve_lbr_buffers(void)
+{
+ struct kmem_cache *kmem_cache;
+ struct cpu_hw_events *cpuc;
+ int cpu;
+
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+ return;
+
+ for_each_possible_cpu(cpu) {
+ cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+ kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
+ if (!kmem_cache || cpuc->lbr_xsave)
+ continue;
+
+ cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
+ cpu_to_node(cpu));
+ }
+}
+
void intel_pmu_lbr_del(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 27fa85e7d4fd..ad87cb36f7c8 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1244,6 +1244,8 @@ void reserve_ds_buffers(void);
void release_lbr_buffers(void);
+void reserve_lbr_buffers(void);
+
extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;
@@ -1393,6 +1395,10 @@ static inline void release_lbr_buffers(void)
{
}
+static inline void reserve_lbr_buffers(void)
+{
+}
+
static inline int intel_pmu_init(void)
{
return 0;
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 6ec8b3bfd76e..9f90f460a28c 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
+ ghcb->save.sw_exit_code = 0;
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 9578c82832aa..651b81cd648e 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -203,8 +203,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
if (unlikely(data->ghcb_active)) {
/* GHCB is already in use - save its contents */
- if (unlikely(data->backup_ghcb_active))
- return NULL;
+ if (unlikely(data->backup_ghcb_active)) {
+ /*
+ * Backup-GHCB is also already in use. There is no way
+ * to continue here so just kill the machine. To make
+ * panic() work, mark GHCBs inactive so that messages
+ * can be printed out.
+ */
+ data->ghcb_active = false;
+ data->backup_ghcb_active = false;
+
+ panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+ }
/* Mark backup_ghcb active before writing to it */
data->backup_ghcb_active = true;
@@ -221,24 +231,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
return ghcb;
}
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
-{
- struct sev_es_runtime_data *data;
- struct ghcb *ghcb;
-
- data = this_cpu_read(runtime_data);
- ghcb = &data->ghcb_page;
-
- if (state->ghcb) {
- /* Restore GHCB from Backup */
- *ghcb = *state->ghcb;
- data->backup_ghcb_active = false;
- state->ghcb = NULL;
- } else {
- data->ghcb_active = false;
- }
-}
-
/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);
@@ -323,31 +315,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
- if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
- memcpy(dst, buf, size);
- return ES_OK;
- }
-
+ /*
+ * This function uses __put_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __put_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __put_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_to_user() here because
+ * vc_write_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+ * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
switch (size) {
case 1:
memcpy(&d1, buf, 1);
- if (put_user(d1, target))
+ if (__put_user(d1, target))
goto fault;
break;
case 2:
memcpy(&d2, buf, 2);
- if (put_user(d2, target))
+ if (__put_user(d2, target))
goto fault;
break;
case 4:
memcpy(&d4, buf, 4);
- if (put_user(d4, target))
+ if (__put_user(d4, target))
goto fault;
break;
case 8:
memcpy(&d8, buf, 8);
- if (put_user(d8, target))
+ if (__put_user(d8, target))
goto fault;
break;
default:
@@ -378,30 +383,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
- if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
- memcpy(buf, src, size);
- return ES_OK;
- }
-
+ /*
+ * This function uses __get_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __get_user() does no
+ * sanity checks of the pointer being accessed. All that it does is
+ * to report when the access failed.
+ *
+ * Also, this function runs in atomic context, so __get_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_from_user() here because
+ * vc_read_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+ * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
switch (size) {
case 1:
- if (get_user(d1, s))
+ if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
case 2:
- if (get_user(d2, s))
+ if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
case 4:
- if (get_user(d4, s))
+ if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
case 8:
- if (get_user(d8, s))
+ if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
@@ -461,6 +479,29 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
+static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (state->ghcb) {
+ /* Restore GHCB from Backup */
+ *ghcb = *state->ghcb;
+ data->backup_ghcb_active = false;
+ state->ghcb = NULL;
+ } else {
+ /*
+ * Invalidate the GHCB so a VMGEXIT instruction issued
+ * from userspace won't appear to be valid.
+ */
+ vc_ghcb_invalidate(ghcb);
+ data->ghcb_active = false;
+ }
+}
+
void noinstr __sev_es_nmi_complete(void)
{
struct ghcb_state state;
@@ -1255,6 +1296,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
case X86_TRAP_UD:
exc_invalid_op(ctxt->regs);
break;
+ case X86_TRAP_PF:
+ write_cr2(ctxt->fi.cr2);
+ exc_page_fault(ctxt->regs, error_code);
+ break;
case X86_TRAP_AC:
exc_alignment_check(ctxt->regs, error_code);
break;
@@ -1284,7 +1329,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
*/
DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
{
- struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
irqentry_state_t irq_state;
struct ghcb_state state;
struct es_em_ctxt ctxt;
@@ -1310,16 +1354,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
*/
ghcb = sev_es_get_ghcb(&state);
- if (!ghcb) {
- /*
- * Mark GHCBs inactive so that panic() is able to print the
- * message.
- */
- data->ghcb_active = false;
- data->backup_ghcb_active = false;
-
- panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
- }
vc_ghcb_invalidate(ghcb);
result = vc_init_em_ctxt(&ctxt, regs, error_code);
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 0e5d0a7e203b..06743ec054d2 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -127,6 +127,9 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_trapno) != 0x18);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_trapno) != 0x10);
+
BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
@@ -138,8 +141,10 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
- BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x18);
- BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x18);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x20);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_data) != 0x10);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_type) != 0x14);
CHECK_CSI_OFFSET(_sigpoll);
CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 17503fed2017..e87699aa2dc8 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1273,16 +1273,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
+ /* Work out if we support NX */
+ get_cpu_cap(&boot_cpu_data);
+ x86_configure_nx();
+
/*
* Set up kernel GDT and segment registers, mainly so that
* -fstack-protector code can be executed.
*/
xen_setup_gdt(0);
- /* Work out if we support NX */
- get_cpu_cap(&boot_cpu_data);
- x86_configure_nx();
-
/* Determine virtual and physical address sizes */
get_cpu_address_sizes(&boot_cpu_data);
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 9d76d433d3d6..fd2f30227d96 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -413,7 +413,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
-443 common quotactl_path sys_quotactl_path
+# 443 reserved for quotactl_path
444 common landlock_create_ruleset sys_landlock_create_ruleset
445 common landlock_add_rule sys_landlock_add_rule
446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/block/genhd.c b/block/genhd.c
index 39ca97b0edc6..9f8cb7beaad1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -29,8 +29,6 @@
static struct kobject *block_depr;
-DECLARE_RWSEM(bdev_lookup_sem);
-
/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT (1 << MINORBITS)
static DEFINE_IDA(ext_devt_ida);
@@ -609,13 +607,8 @@ void del_gendisk(struct gendisk *disk)
blk_integrity_del(disk);
disk_del_events(disk);
- /*
- * Block lookups of the disk until all bdevs are unhashed and the
- * disk is marked as dead (GENHD_FL_UP cleared).
- */
- down_write(&bdev_lookup_sem);
-
mutex_lock(&disk->part0->bd_mutex);
+ disk->flags &= ~GENHD_FL_UP;
blk_drop_partitions(disk);
mutex_unlock(&disk->part0->bd_mutex);
@@ -629,8 +622,6 @@ void del_gendisk(struct gendisk *disk)
remove_inode_hash(disk->part0->bd_inode);
set_capacity(disk, 0);
- disk->flags &= ~GENHD_FL_UP;
- up_write(&bdev_lookup_sem);
if (!(disk->flags & GENHD_FL_HIDDEN)) {
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 61d34e1dc59c..bcec598b89f2 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4918,7 +4918,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
uint32_t enable;
if (copy_from_user(&enable, ubuf, sizeof(enable))) {
- ret = -EINVAL;
+ ret = -EFAULT;
goto err;
}
binder_inner_proc_lock(proc);
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 742b4a0932e3..c6d8c0f59722 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -744,6 +744,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
static int probe_gdrom(struct platform_device *devptr)
{
int err;
+
+ /*
+ * Ensure our "one" device is initialized properly in case of previous
+ * usages of it
+ */
+ memset(&gd, 0, sizeof(gd));
+
/* Start the device */
if (gdrom_execute_diagnostic() != 1) {
pr_warn("ATA Probe for GDROM failed\n");
@@ -830,6 +837,8 @@ static int remove_gdrom(struct platform_device *devptr)
if (gdrom_major)
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
unregister_cdrom(gd.cd_info);
+ kfree(gd.cd_info);
+ kfree(gd.toc);
return 0;
}
@@ -845,7 +854,7 @@ static struct platform_driver gdrom_driver = {
static int __init init_gdrom(void)
{
int rc;
- gd.toc = NULL;
+
rc = platform_driver_register(&gdrom_driver);
if (rc)
return rc;
@@ -861,8 +870,6 @@ static void __exit exit_gdrom(void)
{
platform_device_unregister(pd);
platform_driver_unregister(&gdrom_driver);
- kfree(gd.toc);
- kfree(gd.cd_info);
}
module_init(init_gdrom);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index ed3b7dab678d..8b55085650ad 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -984,6 +984,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
hdp->hd_phys_address = fixmem32->address;
hdp->hd_address = ioremap(fixmem32->address,
HPET_RANGE_SIZE);
+ if (!hdp->hd_address)
+ return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index facc8e6bc580..d385daf2c71c 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -442,7 +442,6 @@ static int nitrox_probe(struct pci_dev *pdev,
err = pci_request_mem_regions(pdev, nitrox_driver_name);
if (err) {
pci_disable_device(pdev);
- dev_err(&pdev->dev, "Failed to request mem regions!\n");
return err;
}
pci_set_master(pdev);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f264b70c383e..511fe0d217a0 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -234,7 +234,7 @@ retry:
shared_count = fobj->shared_count;
else
shared_count = 0;
- fence_excl = rcu_dereference(resv->fence_excl);
+ fence_excl = dma_resv_excl_fence(resv);
if (read_seqcount_retry(&resv->seq, seq)) {
rcu_read_unlock();
goto retry;
@@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_lock(attach->dmabuf->resv, NULL);
- ret = dma_buf_pin(attach);
+ ret = dmabuf->ops->pin(attach);
if (ret)
goto err_unlock;
}
@@ -786,7 +786,7 @@ err_attach:
err_unpin:
if (dma_buf_is_dynamic(attach->dmabuf))
- dma_buf_unpin(attach);
+ dmabuf->ops->unpin(attach);
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
@@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
__unmap_dma_buf(attach, attach->sgt, attach->dir);
if (dma_buf_is_dynamic(attach->dmabuf)) {
- dma_buf_unpin(attach);
+ dmabuf->ops->unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);
}
}
@@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = dma_buf_pin(attach);
+ r = attach->dmabuf->ops->pin(attach);
if (r)
return ERR_PTR(r);
}
@@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ attach->dmabuf->ops->unpin(attach);
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
attach->sgt = sg_table;
@@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
long ret;
/* Wait on any implicit rendering fences */
- ret = dma_resv_wait_timeout_rcu(resv, write, true,
- MAX_SCHEDULE_TIMEOUT);
+ ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
@@ -1349,15 +1348,14 @@ EXPORT_SYMBOL_GPL(dma_buf_vunmap);
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
- int ret;
struct dma_buf *buf_obj;
struct dma_buf_attachment *attach_obj;
struct dma_resv *robj;
struct dma_resv_list *fobj;
struct dma_fence *fence;
- unsigned seq;
int count = 0, attach_count, shared_count, i;
size_t size = 0;
+ int ret;
ret = mutex_lock_interruptible(&db_list.lock);
@@ -1383,33 +1381,24 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
buf_obj->name ?: "");
robj = buf_obj->resv;
- while (true) {
- seq = read_seqcount_begin(&robj->seq);
- rcu_read_lock();
- fobj = rcu_dereference(robj->fence);
- shared_count = fobj ? fobj->shared_count : 0;
- fence = rcu_dereference(robj->fence_excl);
- if (!read_seqcount_retry(&robj->seq, seq))
- break;
- rcu_read_unlock();
- }
-
+ fence = dma_resv_excl_fence(robj);
if (fence)
seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
dma_fence_is_signaled(fence) ? "" : "un");
+
+ fobj = rcu_dereference_protected(robj->fence,
+ dma_resv_held(robj));
+ shared_count = fobj ? fobj->shared_count : 0;
for (i = 0; i < shared_count; i++) {
- fence = rcu_dereference(fobj->shared[i]);
- if (!dma_fence_get_rcu(fence))
- continue;
+ fence = rcu_dereference_protected(fobj->shared[i],
+ dma_resv_held(robj));
seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
dma_fence_is_signaled(fence) ? "" : "un");
- dma_fence_put(fence);
}
- rcu_read_unlock();
seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 6ddbeb5dfbf6..f26c71747d43 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
*
@@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
kfree_rcu(list, rcu);
}
-#if IS_ENABLED(CONFIG_LOCKDEP)
-static int __init dma_resv_lockdep(void)
-{
- struct mm_struct *mm = mm_alloc();
- struct ww_acquire_ctx ctx;
- struct dma_resv obj;
- struct address_space mapping;
- int ret;
-
- if (!mm)
- return -ENOMEM;
-
- dma_resv_init(&obj);
- address_space_init_once(&mapping);
-
- mmap_read_lock(mm);
- ww_acquire_init(&ctx, &reservation_ww_class);
- ret = dma_resv_lock(&obj, &ctx);
- if (ret == -EDEADLK)
- dma_resv_lock_slow(&obj, &ctx);
- fs_reclaim_acquire(GFP_KERNEL);
- /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
- i_mmap_lock_write(&mapping);
- i_mmap_unlock_write(&mapping);
-#ifdef CONFIG_MMU_NOTIFIER
- lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
- __dma_fence_might_wait();
- lock_map_release(&__mmu_notifier_invalidate_range_start_map);
-#else
- __dma_fence_might_wait();
-#endif
- fs_reclaim_release(GFP_KERNEL);
- ww_mutex_unlock(&obj.lock);
- ww_acquire_fini(&ctx);
- mmap_read_unlock(mm);
-
- mmput(mm);
-
- return 0;
-}
-subsys_initcall(dma_resv_lockdep);
-#endif
-
/**
* dma_resv_init - initialize a reservation object
* @obj: the reservation object
@@ -191,14 +149,11 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
dma_resv_assert_held(obj);
- old = dma_resv_get_list(obj);
-
+ old = dma_resv_shared_list(obj);
if (old && old->shared_max) {
if ((old->shared_count + num_fences) <= old->shared_max)
return 0;
- else
- max = max(old->shared_count + num_fences,
- old->shared_max * 2);
+ max = max(old->shared_count + num_fences, old->shared_max * 2);
} else {
max = max(4ul, roundup_pow_of_two(num_fences));
}
@@ -252,6 +207,28 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
+#ifdef CONFIG_DEBUG_MUTEXES
+/**
+ * dma_resv_reset_shared_max - reset shared fences for debugging
+ * @obj: the dma_resv object to reset
+ *
+ * Reset the number of pre-reserved shared slots to test that drivers do
+ * correct slot allocation using dma_resv_reserve_shared(). See also
+ * &dma_resv_list.shared_max.
+ */
+void dma_resv_reset_shared_max(struct dma_resv *obj)
+{
+ struct dma_resv_list *fences = dma_resv_shared_list(obj);
+
+ dma_resv_assert_held(obj);
+
+ /* Test shared fence slot reservation */
+ if (fences)
+ fences->shared_max = fences->shared_count;
+}
+EXPORT_SYMBOL(dma_resv_reset_shared_max);
+#endif
+
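
The debug helper above shrinks shared_max back to shared_count, so a driver that calls dma_resv_add_shared_fence() without first reserving a slot trips the check immediately instead of only once the array happens to fill up. The contract it exercises, as a minimal sketch with hypothetical resv and fence objects:

    int ret;

    dma_resv_lock(resv, NULL);

    /* Reserving may allocate memory and can therefore fail ... */
    ret = dma_resv_reserve_shared(resv, 1);
    if (!ret)
        /* ... adding into the reserved slot cannot. */
        dma_resv_add_shared_fence(resv, fence);

    dma_resv_unlock(resv);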
/**
* dma_resv_add_shared_fence - Add a fence to a shared slot
* @obj: the reservation object
@@ -270,7 +247,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
dma_resv_assert_held(obj);
- fobj = dma_resv_get_list(obj);
+ fobj = dma_resv_shared_list(obj);
count = fobj->shared_count;
write_seqcount_begin(&obj->seq);
@@ -307,13 +284,13 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
*/
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
- struct dma_fence *old_fence = dma_resv_get_excl(obj);
+ struct dma_fence *old_fence = dma_resv_excl_fence(obj);
struct dma_resv_list *old;
u32 i = 0;
dma_resv_assert_held(obj);
- old = dma_resv_get_list(obj);
+ old = dma_resv_shared_list(obj);
if (old)
i = old->shared_count;
@@ -337,26 +314,26 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
EXPORT_SYMBOL(dma_resv_add_excl_fence);
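
Installing an exclusive fence, by contrast, needs no prior slot reservation; dma_resv_add_excl_fence() replaces the old exclusive fence and drops the shared ones. A sketch under the same assumptions as above:

    dma_resv_lock(resv, NULL);
    /* No dma_resv_reserve_shared() needed for the exclusive slot. */
    dma_resv_add_excl_fence(resv, fence);
    dma_resv_unlock(resv);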
/**
-* dma_resv_copy_fences - Copy all fences from src to dst.
-* @dst: the destination reservation object
-* @src: the source reservation object
-*
-* Copy all fences from src to dst. dst-lock must be held.
-*/
+ * dma_resv_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. dst-lock must be held.
+ */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
struct dma_resv_list *src_list, *dst_list;
struct dma_fence *old, *new;
- unsigned i;
+ unsigned int i;
dma_resv_assert_held(dst);
rcu_read_lock();
- src_list = rcu_dereference(src->fence);
+ src_list = dma_resv_shared_list(src);
retry:
if (src_list) {
- unsigned shared_count = src_list->shared_count;
+ unsigned int shared_count = src_list->shared_count;
rcu_read_unlock();
@@ -365,7 +342,7 @@ retry:
return -ENOMEM;
rcu_read_lock();
- src_list = rcu_dereference(src->fence);
+ src_list = dma_resv_shared_list(src);
if (!src_list || src_list->shared_count > shared_count) {
kfree(dst_list);
goto retry;
@@ -373,6 +350,7 @@ retry:
dst_list->shared_count = 0;
for (i = 0; i < src_list->shared_count; ++i) {
+ struct dma_fence __rcu **dst;
struct dma_fence *fence;
fence = rcu_dereference(src_list->shared[i]);
@@ -382,7 +360,7 @@ retry:
if (!dma_fence_get_rcu(fence)) {
dma_resv_list_free(dst_list);
- src_list = rcu_dereference(src->fence);
+ src_list = dma_resv_shared_list(src);
goto retry;
}
@@ -391,7 +369,8 @@ retry:
continue;
}
- rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+ dst = &dst_list->shared[dst_list->shared_count++];
+ rcu_assign_pointer(*dst, fence);
}
} else {
dst_list = NULL;
@@ -400,8 +379,8 @@ retry:
new = dma_fence_get_rcu_safe(&src->fence_excl);
rcu_read_unlock();
- src_list = dma_resv_get_list(dst);
- old = dma_resv_get_excl(dst);
+ src_list = dma_resv_shared_list(dst);
+ old = dma_resv_excl_fence(dst);
write_seqcount_begin(&dst->seq);
/* write_seqcount_begin provides the necessary memory barrier */
@@ -417,7 +396,7 @@ retry:
EXPORT_SYMBOL(dma_resv_copy_fences);
/**
- * dma_resv_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without the update-side lock held
* @obj: the reservation object
* @pfence_excl: the returned exclusive fence (or NULL)
@@ -429,10 +408,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
* exclusive fence is not specified the fence is put into the array of the
* shared fences as well. Returns either zero or -ENOMEM.
*/
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+ unsigned int *pshared_count,
+ struct dma_fence ***pshared)
{
struct dma_fence **shared = NULL;
struct dma_fence *fence_excl;
@@ -449,11 +427,11 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
rcu_read_lock();
seq = read_seqcount_begin(&obj->seq);
- fence_excl = rcu_dereference(obj->fence_excl);
+ fence_excl = dma_resv_excl_fence(obj);
if (fence_excl && !dma_fence_get_rcu(fence_excl))
goto unlock;
- fobj = rcu_dereference(obj->fence);
+ fobj = dma_resv_shared_list(obj);
if (fobj)
sz += sizeof(*shared) * fobj->shared_max;
@@ -515,27 +493,28 @@ unlock:
*pshared = shared;
return ret;
}
-EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences);
/**
- * dma_resv_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout - Wait on a reservation object's
* shared and/or exclusive fences.
* @obj: the reservation object
* @wait_all: if true, wait on all fences, else wait on just exclusive fence
* @intr: if true, do interruptible wait
* @timeout: timeout value in jiffies or zero to return immediately
*
+ * Callers are not required to hold specific locks, but may hold
+ * dma_resv_lock() already.
* RETURNS
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
*/
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
- bool wait_all, bool intr,
- unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+ unsigned long timeout)
{
- struct dma_fence *fence;
- unsigned seq, shared_count;
long ret = timeout ? timeout : 1;
+ unsigned int seq, shared_count;
+ struct dma_fence *fence;
int i;
retry:
@@ -544,7 +523,7 @@ retry:
rcu_read_lock();
i = -1;
- fence = rcu_dereference(obj->fence_excl);
+ fence = dma_resv_excl_fence(obj);
if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
if (!dma_fence_get_rcu(fence))
goto unlock_retry;
@@ -559,14 +538,15 @@ retry:
}
if (wait_all) {
- struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+ struct dma_resv_list *fobj = dma_resv_shared_list(obj);
if (fobj)
shared_count = fobj->shared_count;
for (i = 0; !fence && i < shared_count; ++i) {
- struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *lfence;
+ lfence = rcu_dereference(fobj->shared[i]);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&lfence->flags))
continue;
@@ -602,7 +582,7 @@ unlock_retry:
rcu_read_unlock();
goto retry;
}
-EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
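
A usage sketch for the renamed wait helper, following the return convention documented above (0 means the wait timed out, negative is an error, positive is the remaining timeout); resv is again a hypothetical reservation object:

    long ret;

    ret = dma_resv_wait_timeout(resv, true /* wait_all */, true /* intr */,
                                msecs_to_jiffies(5000));
    if (ret == 0)
        return -ETIMEDOUT;  /* fences still pending */
    if (ret < 0)
        return ret;         /* e.g. -ERESTARTSYS */
    /* ret > 0: all fences signaled within the timeout */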
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
@@ -622,18 +602,20 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
}
/**
- * dma_resv_test_signaled_rcu - Test if a reservation object's
- * fences have been signaled.
+ * dma_resv_test_signaled - Test if a reservation object's fences have been
+ * signaled.
* @obj: the reservation object
* @test_all: if true, test all fences, otherwise only test the exclusive
* fence
*
+ * Callers are not required to hold specific locks, but may hold
+ * dma_resv_lock() already.
* RETURNS
* true if all fences signaled, else false
*/
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
- unsigned seq, shared_count;
+ unsigned int seq, shared_count;
int ret;
rcu_read_lock();
@@ -643,16 +625,16 @@ retry:
seq = read_seqcount_begin(&obj->seq);
if (test_all) {
- unsigned i;
-
- struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+ struct dma_resv_list *fobj = dma_resv_shared_list(obj);
+ unsigned int i;
if (fobj)
shared_count = fobj->shared_count;
for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+ struct dma_fence *fence;
+ fence = rcu_dereference(fobj->shared[i]);
ret = dma_resv_test_signaled_single(fence);
if (ret < 0)
goto retry;
@@ -665,7 +647,7 @@ retry:
}
if (!shared_count) {
- struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+ struct dma_fence *fence_excl = dma_resv_excl_fence(obj);
if (fence_excl) {
ret = dma_resv_test_signaled_single(fence_excl);
@@ -680,4 +662,47 @@ retry:
rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
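
And the non-blocking counterpart, e.g. for an ioctl path that must not sleep:

    if (!dma_resv_test_signaled(resv, true /* test_all */))
        return -EBUSY;  /* buffer still busy, caller may retry */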
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+ struct mm_struct *mm = mm_alloc();
+ struct ww_acquire_ctx ctx;
+ struct dma_resv obj;
+ struct address_space mapping;
+ int ret;
+
+ if (!mm)
+ return -ENOMEM;
+
+ dma_resv_init(&obj);
+ address_space_init_once(&mapping);
+
+ mmap_read_lock(mm);
+ ww_acquire_init(&ctx, &reservation_ww_class);
+ ret = dma_resv_lock(&obj, &ctx);
+ if (ret == -EDEADLK)
+ dma_resv_lock_slow(&obj, &ctx);
+ fs_reclaim_acquire(GFP_KERNEL);
+ /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+ i_mmap_lock_write(&mapping);
+ i_mmap_unlock_write(&mapping);
+#ifdef CONFIG_MMU_NOTIFIER
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+ __dma_fence_might_wait();
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+#else
+ __dma_fence_might_wait();
+#endif
+ fs_reclaim_release(GFP_KERNEL);
+ ww_mutex_unlock(&obj.lock);
+ ww_acquire_fini(&ctx);
+ mmap_read_unlock(mm);
+
+ mmput(mm);
+
+ return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 806ca02c52d7..62026607f3f8 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -418,8 +418,23 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
- return platform_driver_register(&hidma_mgmt_driver);
+	/*
+	 * We do not check the return value here because it is assumed that
+	 * platform_driver_register() does not fail. The reason is that the
+	 * (potential) hidma_mgmt_of_populate_channels calls above are not
+	 * cleaned up if it does fail, and doing so would be quite
+	 * complicated. In particular, the various calls to
+	 * of_address_to_resource, of_irq_to_resource,
+	 * platform_device_register_full, of_dma_configure and
+	 * of_msi_configure, which in turn call further functions, would all
+	 * have to be unwound - this is not a trivial exercise.
+	 *
+	 * Currently, this module is not intended to be unloaded, and there is
+	 * no module_exit function that performs the needed cleanup. For this
+	 * reason, we have to assume success here.
+	 */
+ platform_driver_register(&hidma_mgmt_driver);
+ return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
index ce0324be6c71..4e9b627edfef 100644
--- a/drivers/firmware/arm_scmi/notify.h
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -79,8 +79,6 @@ struct scmi_protocol_events {
int scmi_notification_init(struct scmi_handle *handle);
void scmi_notification_exit(struct scmi_handle *handle);
-
-struct scmi_protocol_handle;
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
const struct scmi_protocol_handle *ph,
const struct scmi_protocol_events *ee);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index d0dee37ad522..4ceba5ef7895 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &rate, sizeof(rate));
+ if (ret)
+ return 0;
- return ret ? ret : le32_to_cpu(rate);
+ return le32_to_cpu(rate);
}
static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
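
The error-path change above matters because this helper sits behind the clk framework's recalc_rate callback, which returns an unsigned long: a negative errno squeezed through that type would be misread as an enormous rate, while 0 conventionally means "rate unknown". Roughly, with hypothetical names:

    static unsigned long my_clk_recalc_rate(struct clk_hw *hw,
                                            unsigned long parent_rate)
    {
        /* scpi_clk_get_val() now returns 0 on error; never let a
         * negative errno escape through an unsigned long.
         */
        return scpi_clk_get_val(my_clk_id);
    }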
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index a4d3239d2594..4ab3fcd9b9ba 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -278,6 +278,7 @@ static const struct of_device_id cdns_of_ids[] = {
{ .compatible = "cdns,gpio-r1p02" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, cdns_of_ids);
static struct platform_driver cdns_gpio_driver = {
.driver = {
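
The added MODULE_DEVICE_TABLE() exports the OF match table as module aliases, letting udev autoload the module when a matching devicetree node turns up; without it the driver binds only if loaded manually. The generic pattern, with placeholder names:

    static const struct of_device_id my_of_ids[] = {
        { .compatible = "vendor,my-device" },
        { /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(of, my_of_ids);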
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 1bd9e44df718..05974b760796 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -444,16 +444,6 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
-static int tegra186_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest,
- bool force)
-{
- if (data->parent_data)
- return irq_chip_set_affinity_parent(data, dest, force);
-
- return -EINVAL;
-}
-
static void tegra186_gpio_irq(struct irq_desc *desc)
{
struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
@@ -700,7 +690,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->intc.irq_unmask = tegra186_irq_unmask;
gpio->intc.irq_set_type = tegra186_irq_set_type;
gpio->intc.irq_set_wake = tegra186_irq_set_wake;
- gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
irq = &gpio->gpio.irq;
irq->chip = &gpio->intc;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index b411d3156e0b..136557e7dd3c 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -542,7 +542,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
}
/**
- * xgpio_of_probe - Probe method for the GPIO device.
+ * xgpio_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
*
* Return:
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d3a9ca4b1cec..7ff89690a976 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -98,9 +98,8 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
+ depends on FB
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -380,6 +379,19 @@ source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+config DRM_HYPERV
+ tristate "DRM Support for Hyper-V synthetic video device"
+ depends on DRM && PCI && MMU && HYPERV
+ select DRM_KMS_HELPER
+ select DRM_GEM_SHMEM_HELPER
+ help
+	  This is a KMS driver for the Hyper-V synthetic video device. Choose
+	  this option if you would like to enable the DRM driver for a Hyper-V
+	  virtual machine. Unselect the Hyper-V framebuffer driver
+	  (CONFIG_FB_HYPERV) so that the DRM driver is used by default.
+
+ If M is selected the module will be called hyperv_drm.
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a91cc7684904..a118692a6df7 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -126,3 +126,4 @@ obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
+obj-$(CONFIG_DRM_HYPERV) += hyperv/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 6331a11299d0..6cf0fe871d6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -51,9 +51,10 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
- amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
+ amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
+ amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o \
+ amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 65b1dca4b02e..148f6c3343ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -227,7 +227,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
break;
default:
break;
- };
+ }
}
/* Reinit NBIF block */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ad5f508924b8..267e9e147a6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -130,6 +130,13 @@ struct amdgpu_mgpu_info
bool pending_reset;
};
+enum amdgpu_ss {
+ AMDGPU_SS_DRV_LOAD,
+ AMDGPU_SS_DEV_D0,
+ AMDGPU_SS_DEV_D3,
+ AMDGPU_SS_DRV_UNLOAD
+};
+
struct amdgpu_watchdog_timer
{
bool timeout_fatal_disable;
@@ -268,7 +275,6 @@ struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
-struct amdgpu_atif;
struct kfd_vm_fault_info;
struct amdgpu_hive_info;
struct amdgpu_reset_context;
@@ -683,20 +689,6 @@ struct amdgpu_vram_scratch {
};
/*
- * ACPI
- */
-struct amdgpu_atcs_functions {
- bool get_ext_state;
- bool pcie_perf_req;
- bool pcie_dev_rdy;
- bool pcie_bus_width;
-};
-
-struct amdgpu_atcs {
- struct amdgpu_atcs_functions functions;
-};
-
-/*
* CGS
*/
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
@@ -825,8 +817,6 @@ struct amdgpu_device {
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct debugfs_blob_wrapper debugfs_vbios_blob;
- struct amdgpu_atif *atif;
- struct amdgpu_atcs atcs;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
@@ -1078,7 +1068,7 @@ struct amdgpu_device {
uint32_t ras_hw_enabled;
uint32_t ras_enabled;
- bool in_pci_err_recovery;
+ bool no_hw_access;
struct pci_saved_state *pci_state;
struct amdgpu_reset_control *reset_cntl;
@@ -1101,7 +1091,9 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags);
-void amdgpu_device_fini(struct amdgpu_device *adev);
+void amdgpu_device_fini_hw(struct amdgpu_device *adev);
+void amdgpu_device_fini_sw(struct amdgpu_device *adev);
+
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
@@ -1144,6 +1136,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
* Registers read & write functions.
*/
#define AMDGPU_REGS_NO_KIQ (1<<1)
+#define AMDGPU_REGS_RLC (1<<2)
#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
@@ -1280,6 +1273,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
+bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
@@ -1321,6 +1315,8 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
+void amdgpu_driver_release_kms(struct drm_device *dev);
+
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
@@ -1352,21 +1348,38 @@ struct amdgpu_afmt_acr {
struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
/* amdgpu_acpi.c */
+
+/* ATCS Device/Driver State */
+#define AMDGPU_ATCS_PSC_DEV_STATE_D0 0
+#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3
+#define AMDGPU_ATCS_PSC_DRV_STATE_OPR 0
+#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR 1
+
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_power_shift_control_supported(void);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
+int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+ u8 dev_state, bool drv_state);
+int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
-void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
- struct amdgpu_dm_backlight_caps *caps);
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev);
+void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; }
+static inline void amdgpu_acpi_detect(void) { }
+static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
+static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+ u8 dev_state, bool drv_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
+ enum amdgpu_ss ss_state) { return 0; }
#endif
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index bf2939b6eb43..84a1b4bc9bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -71,12 +71,31 @@ struct amdgpu_atif {
struct amdgpu_dm_backlight_caps backlight_caps;
};
+struct amdgpu_atcs_functions {
+ bool get_ext_state;
+ bool pcie_perf_req;
+ bool pcie_dev_rdy;
+ bool pcie_bus_width;
+ bool power_shift_control;
+};
+
+struct amdgpu_atcs {
+ acpi_handle handle;
+
+ struct amdgpu_atcs_functions functions;
+};
+
+static struct amdgpu_acpi_priv {
+ struct amdgpu_atif atif;
+ struct amdgpu_atcs atcs;
+} amdgpu_acpi_priv;
+
/* Call the ATIF method
*/
/**
* amdgpu_atif_call - call an ATIF method
*
- * @atif: acpi handle
+ * @atif: atif structure
* @function: the ATIF function to execute
* @params: ATIF function params
*
@@ -207,35 +226,6 @@ out:
return err;
}
-static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
-{
- acpi_handle handle = NULL;
- char acpi_method_name[255] = { 0 };
- struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
- acpi_status status;
-
- /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only
- * systems, ATIF is in the dGPU's namespace.
- */
- status = acpi_get_handle(dhandle, "ATIF", &handle);
- if (ACPI_SUCCESS(status))
- goto out;
-
- if (amdgpu_has_atpx()) {
- status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
- &handle);
- if (ACPI_SUCCESS(status))
- goto out;
- }
-
- DRM_DEBUG_DRIVER("No ATIF handle found\n");
- return NULL;
-out:
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
- return handle;
-}
-
/**
* amdgpu_atif_get_notification_params - determine notify configuration
*
@@ -414,7 +404,7 @@ out:
static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
{
- struct amdgpu_atif *atif = adev->atif;
+ struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -424,8 +414,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
return NOTIFY_DONE;
/* Is this actually our event? */
- if (!atif ||
- !atif->notification_cfg.enabled ||
+ if (!atif->notification_cfg.enabled ||
event->type != atif->notification_cfg.command_code) {
/* These events will generate keypresses otherwise */
if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
@@ -485,14 +474,15 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
/**
* amdgpu_atcs_call - call an ATCS method
*
- * @handle: acpi handle
+ * @atcs: atcs structure
* @function: the ATCS function to execute
* @params: ATCS function params
*
* Executes the requested ATCS function (all asics).
* Returns a pointer to the acpi output buffer.
*/
-static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
+static union acpi_object *amdgpu_atcs_call(struct amdgpu_atcs *atcs,
+ int function,
struct acpi_buffer *params)
{
acpi_status status;
@@ -516,7 +506,7 @@ static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
atcs_arg_elements[1].integer.value = 0;
}
- status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+ status = acpi_evaluate_object(atcs->handle, NULL, &atcs_arg, &buffer);
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -545,12 +535,12 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+ f->power_shift_control = mask & ATCS_SET_POWER_SHIFT_CONTROL_SUPPORTED;
}
/**
* amdgpu_atcs_verify_interface - verify ATCS
*
- * @handle: acpi handle
* @atcs: amdgpu atcs struct
*
* Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
@@ -558,15 +548,14 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
* (all asics).
* returns 0 on success, error on failure.
*/
-static int amdgpu_atcs_verify_interface(acpi_handle handle,
- struct amdgpu_atcs *atcs)
+static int amdgpu_atcs_verify_interface(struct amdgpu_atcs *atcs)
{
union acpi_object *info;
struct atcs_verify_interface output;
size_t size;
int err = 0;
- info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+ info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
@@ -603,7 +592,7 @@ out:
*/
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
{
- struct amdgpu_atcs *atcs = &adev->atcs;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
return true;
@@ -612,6 +601,18 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
}
/**
+ * amdgpu_acpi_is_power_shift_control_supported
+ *
+ * Check if the ATCS power shift control method
+ * is supported.
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_power_shift_control_supported(void)
+{
+ return amdgpu_acpi_priv.atcs.functions.power_shift_control;
+}
+
+/**
* amdgpu_acpi_pcie_notify_device_ready
*
* @adev: amdgpu_device pointer
@@ -622,19 +623,13 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
*/
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
{
- acpi_handle handle;
union acpi_object *info;
- struct amdgpu_atcs *atcs = &adev->atcs;
-
- /* Get the device handle */
- handle = ACPI_HANDLE(&adev->pdev->dev);
- if (!handle)
- return -EINVAL;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
if (!atcs->functions.pcie_dev_rdy)
return -EINVAL;
- info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
+ info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
if (!info)
return -EIO;
@@ -657,9 +652,8 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise)
{
- acpi_handle handle;
union acpi_object *info;
- struct amdgpu_atcs *atcs = &adev->atcs;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
struct atcs_pref_req_input atcs_input;
struct atcs_pref_req_output atcs_output;
struct acpi_buffer params;
@@ -669,11 +663,6 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
if (amdgpu_acpi_pcie_notify_device_ready(adev))
return -EINVAL;
- /* Get the device handle */
- handle = ACPI_HANDLE(&adev->pdev->dev);
- if (!handle)
- return -EINVAL;
-
if (!atcs->functions.pcie_perf_req)
return -EINVAL;
@@ -691,7 +680,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
params.pointer = &atcs_input;
while (retry--) {
- info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
+ info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
if (!info)
return -EIO;
@@ -725,6 +714,96 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
}
/**
+ * amdgpu_acpi_power_shift_control
+ *
+ * @adev: amdgpu_device pointer
+ * @dev_state: device acpi state
+ * @drv_state: driver state
+ *
+ * Executes the POWER_SHIFT_CONTROL method to
+ * communicate current dGPU device state and
+ * driver state to APU/SBIOS.
+ * returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+ u8 dev_state, bool drv_state)
+{
+ union acpi_object *info;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
+ struct atcs_pwr_shift_input atcs_input;
+ struct acpi_buffer params;
+
+ if (!amdgpu_acpi_is_power_shift_control_supported())
+ return -EINVAL;
+
+ atcs_input.size = sizeof(struct atcs_pwr_shift_input);
+	/* dGPU id (bits 2-0: func num, 7-3: dev num, 15-8: bus num) */
+ atcs_input.dgpu_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+ atcs_input.dev_acpi_state = dev_state;
+ atcs_input.drv_state = drv_state;
+
+ params.length = sizeof(struct atcs_pwr_shift_input);
+ params.pointer = &atcs_input;
+
+ info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_POWER_SHIFT_CONTROL, &params);
+ if (!info) {
+ DRM_ERROR("ATCS PSC update failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
+ *
+ * @dev: drm_device pointer
+ * @ss_state: current smart shift event
+ *
+ * returns 0 on success, otherwise an error number.
+ */
+int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int r;
+
+ if (!amdgpu_device_supports_smart_shift(dev))
+ return 0;
+
+ switch (ss_state) {
+	/* SBIOS triggers "stop", "enable" and "start" at D0, Driver Operational.
+	 * SBIOS triggers "stop" at D3, Driver Not Operational.
+	 * SBIOS triggers "stop" and "disable" at D0, Driver Not Operational.
+	 */
+ case AMDGPU_SS_DRV_LOAD:
+ r = amdgpu_acpi_power_shift_control(adev,
+ AMDGPU_ATCS_PSC_DEV_STATE_D0,
+ AMDGPU_ATCS_PSC_DRV_STATE_OPR);
+ break;
+ case AMDGPU_SS_DEV_D0:
+ r = amdgpu_acpi_power_shift_control(adev,
+ AMDGPU_ATCS_PSC_DEV_STATE_D0,
+ AMDGPU_ATCS_PSC_DRV_STATE_OPR);
+ break;
+ case AMDGPU_SS_DEV_D3:
+ r = amdgpu_acpi_power_shift_control(adev,
+ AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT,
+ AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
+ break;
+ case AMDGPU_SS_DRV_UNLOAD:
+ r = amdgpu_acpi_power_shift_control(adev,
+ AMDGPU_ATCS_PSC_DEV_STATE_D0,
+ AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return r;
+}
+
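
A hypothetical call site, not taken from this hunk: a driver path entering runtime suspend would report the D3 transition and treat a failure as merely worth a warning:

    r = amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3);
    if (r)
        DRM_WARN("smart shift update failed (%d)\n", r);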
+/**
* amdgpu_acpi_event - handle notify events
*
* @nb: notifier block
@@ -767,50 +846,15 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
*/
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
- acpi_handle handle, atif_handle;
- struct amdgpu_atif *atif;
- struct amdgpu_atcs *atcs = &adev->atcs;
- int ret;
-
- /* Get the device handle */
- handle = ACPI_HANDLE(&adev->pdev->dev);
-
- if (!adev->bios || !handle)
- return 0;
-
- /* Call the ATCS method */
- ret = amdgpu_atcs_verify_interface(handle, atcs);
- if (ret) {
- DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
- }
-
- /* Probe for ATIF, and initialize it if found */
- atif_handle = amdgpu_atif_probe_handle(handle);
- if (!atif_handle)
- goto out;
-
- atif = kzalloc(sizeof(*atif), GFP_KERNEL);
- if (!atif) {
- DRM_WARN("Not enough memory to initialize ATIF\n");
- goto out;
- }
- atif->handle = atif_handle;
-
- /* Call the ATIF method */
- ret = amdgpu_atif_verify_interface(atif);
- if (ret) {
- DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
- kfree(atif);
- goto out;
- }
- adev->atif = atif;
+ struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if (atif->notifications.brightness_change) {
if (amdgpu_device_has_dc_support(adev)) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
- atif->bd = dm->backlight_dev;
+ if (dm->backlight_dev)
+ atif->bd = dm->backlight_dev;
#endif
} else {
struct drm_encoder *tmp;
@@ -832,6 +876,129 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
}
#endif
+ adev->acpi_nb.notifier_call = amdgpu_acpi_event;
+ register_acpi_notifier(&adev->acpi_nb);
+
+ return 0;
+}
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
+{
+ struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+
+ caps->caps_valid = atif->backlight_caps.caps_valid;
+ caps->min_input_signal = atif->backlight_caps.min_input_signal;
+ caps->max_input_signal = atif->backlight_caps.max_input_signal;
+}
+
+/**
+ * amdgpu_acpi_fini - tear down driver acpi support
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void amdgpu_acpi_fini(struct amdgpu_device *adev)
+{
+ unregister_acpi_notifier(&adev->acpi_nb);
+}
+
+/**
+ * amdgpu_atif_pci_probe_handle - look up the ATIF handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATIF handle (all asics).
+ * Returns true if the handle is found, false if not.
+ */
+static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ acpi_handle dhandle, atif_handle;
+ acpi_status status;
+ int ret;
+
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATIF", &atif_handle);
+ if (ACPI_FAILURE(status)) {
+ return false;
+ }
+ amdgpu_acpi_priv.atif.handle = atif_handle;
+ acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+ ret = amdgpu_atif_verify_interface(&amdgpu_acpi_priv.atif);
+ if (ret) {
+ amdgpu_acpi_priv.atif.handle = 0;
+ return false;
+ }
+ return true;
+}
+
+/**
+ * amdgpu_atcs_pci_probe_handle - look up the ATCS handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATCS handle (all asics).
+ * Returns true if the handle is found, false if not.
+ */
+static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+ acpi_handle dhandle, atcs_handle;
+ acpi_status status;
+ int ret;
+
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATCS", &atcs_handle);
+ if (ACPI_FAILURE(status)) {
+ return false;
+ }
+ amdgpu_acpi_priv.atcs.handle = atcs_handle;
+ acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name);
+ ret = amdgpu_atcs_verify_interface(&amdgpu_acpi_priv.atcs);
+ if (ret) {
+ amdgpu_acpi_priv.atcs.handle = 0;
+ return false;
+ }
+ return true;
+}
+
+/*
+ * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
+ *
+ * Check if we have the ATIF/ATCS methods and populate
+ * the structures in the driver.
+ */
+void amdgpu_acpi_detect(void)
+{
+ struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
+ struct pci_dev *pdev = NULL;
+ int ret;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ if (!atif->handle)
+ amdgpu_atif_pci_probe_handle(pdev);
+ if (!atcs->handle)
+ amdgpu_atcs_pci_probe_handle(pdev);
+ }
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ if (!atif->handle)
+ amdgpu_atif_pci_probe_handle(pdev);
+ if (!atcs->handle)
+ amdgpu_atcs_pci_probe_handle(pdev);
+ }
if (atif->functions.sbios_requests && !atif->functions.system_params) {
	/* XXX check this workaround, if sbios request function is
@@ -861,37 +1028,6 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
} else {
atif->backlight_caps.caps_valid = false;
}
-
-out:
- adev->acpi_nb.notifier_call = amdgpu_acpi_event;
- register_acpi_notifier(&adev->acpi_nb);
-
- return ret;
-}
-
-void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
- struct amdgpu_dm_backlight_caps *caps)
-{
- if (!adev->atif) {
- caps->caps_valid = false;
- return;
- }
- caps->caps_valid = adev->atif->backlight_caps.caps_valid;
- caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
- caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
-}
-
-/**
- * amdgpu_acpi_fini - tear down driver acpi support
- *
- * @adev: amdgpu_device pointer
- *
- * Unregisters with the acpi notifier chain (all asics).
- */
-void amdgpu_acpi_fini(struct amdgpu_device *adev)
-{
- unregister_acpi_notifier(&adev->acpi_nb);
- kfree(adev->atif);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index bfab2f9fdd17..f9c01bdc3d4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -170,7 +170,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
}
}
-void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
+void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
{
if (adev->kfd.dev) {
kgd2kfd_device_exit(adev->kfd.dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 81264517d532..cf62f43a03da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -142,7 +142,7 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
-void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
+void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 62aa1a6f64ed..491acdf92f73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -96,8 +96,8 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
lock_srbm(kgd, 0, 0, 0, vmid);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
/* APE1 no longer exists on GFX9 */
unlock_srbm(kgd);
@@ -161,7 +161,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
lock_srbm(kgd, mec, pipe, 0, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
+ WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
@@ -239,13 +239,13 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
for (reg = hqd_base;
reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
- WREG32(reg, mqd_hqd[reg - hqd_base]);
+ WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);
/* Activate doorbell logic before triggering WPTR poll. */
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);
if (wptr) {
/* Don't read wptr with get_user because the user
@@ -274,27 +274,27 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
lower_32_bits(guessed_wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
upper_32_bits(guessed_wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
lower_32_bits((uint64_t)wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
upper_32_bits((uint64_t)wptr));
pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
(uint32_t)get_queue_mask(adev, pipe_id, queue_id));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
+ WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
(uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_RPTR,
REG_SET_FIELD(m->cp_hqd_eop_rptr,
CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
release_queue(kgd);
@@ -365,7 +365,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
break; \
(*dump)[i][0] = (addr) << 2; \
- (*dump)[i++][1] = RREG32(addr); \
+ (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
@@ -497,13 +497,13 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t low, high;
acquire_queue(kgd, pipe_id, queue_id);
- act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+ act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
high = upper_32_bits(queue_address >> 8);
- if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
- high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
+ if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
+ high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
retval = true;
}
release_queue(kgd);
@@ -621,11 +621,11 @@ loop:
preempt_enable();
#endif
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
- temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+ temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
if (time_after(jiffies, end_jiffies)) {
@@ -716,8 +716,8 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
mutex_lock(&adev->grbm_idx_mutex);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
@@ -726,7 +726,7 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SE_BROADCAST_WRITES, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index fad3b91f74f5..d39cff4a1fe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -156,16 +156,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 1:
- sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 2:
- sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
- mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 3:
- sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
- mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
}
@@ -450,7 +450,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
-#define HQD_N_REGS (19+6+7+10)
+#define HQD_N_REGS (19+6+7+12)
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 928e8d57cd08..668a28b80a62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
if (!ef)
return -EINVAL;
- old = dma_resv_get_list(resv);
+ old = dma_resv_shared_list(resv);
if (!old)
return 0;
@@ -621,14 +621,13 @@ kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
ret = amdgpu_gem_object_create(adev, bo_size, 1,
AMDGPU_GEM_DOMAIN_CPU,
- 0, ttm_bo_type_sg,
- mem->bo->tbo.base.resv,
+ AMDGPU_GEM_CREATE_PREEMPTIBLE,
+ ttm_bo_type_sg, mem->bo->tbo.base.resv,
&gobj);
+ amdgpu_bo_unreserve(mem->bo);
if (ret)
return ret;
- amdgpu_bo_unreserve(mem->bo);
-
*bo = gem_to_amdgpu_bo(gobj);
(*bo)->parent = amdgpu_bo_ref(mem->bo);
@@ -640,14 +639,16 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_bo **bo)
{
struct drm_gem_object *gobj;
+ int ret;
if (!mem->dmabuf) {
mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DRM_RDWR : 0);
if (IS_ERR(mem->dmabuf)) {
+ ret = PTR_ERR(mem->dmabuf);
mem->dmabuf = NULL;
- return PTR_ERR(mem->dmabuf);
+ return ret;
}
}
@@ -662,6 +663,7 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
dma_buf_put(mem->dmabuf);
*bo = gem_to_amdgpu_bo(gobj);
+ (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
(*bo)->parent = amdgpu_bo_ref(mem->bo);
return 0;
@@ -1410,7 +1412,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
- alloc_flags = 0;
+ alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
if (!offset || !*offset)
return -EINVAL;
user_addr = untagged_addr(*offset);
@@ -1666,7 +1668,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* the next restore worker
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
- bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
ret = vm_validate_pt_pd_bos(avm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 18bd1b49ced6..96b7bb13a2dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1768,6 +1768,15 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
NULL);
+static struct attribute *amdgpu_vbios_version_attrs[] = {
+ &dev_attr_vbios_version.attr,
+ NULL
+};
+
+const struct attribute_group amdgpu_vbios_version_attr_group = {
+ .attrs = amdgpu_vbios_version_attrs
+};
+
/**
* amdgpu_atombios_fini - free the driver info and callbacks for atombios
*
@@ -1787,7 +1796,6 @@ void amdgpu_atombios_fini(struct amdgpu_device *adev)
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
adev->mode_info.atom_card_info = NULL;
- device_remove_file(adev->dev, &dev_attr_vbios_version);
}
/**
@@ -1804,7 +1812,6 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
{
struct card_info *atom_card_info =
kzalloc(sizeof(struct card_info), GFP_KERNEL);
- int ret;
if (!atom_card_info)
return -ENOMEM;
@@ -1836,12 +1843,6 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
amdgpu_atombios_allocate_fb_scratch(adev);
}
- ret = device_create_file(adev->dev, &dev_attr_vbios_version);
- if (ret) {
- DRM_ERROR("Failed to create device file for VBIOS version\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 90136f9dedd6..f6a8f0c5a52f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -396,10 +396,10 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
spin_unlock(&adev->mm_stats.lock);
}
-static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
- struct amdgpu_bo *bo)
+static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_cs_parser *p = param;
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
@@ -451,21 +451,6 @@ retry:
return r;
}
-static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
-{
- struct amdgpu_cs_parser *p = param;
- int r;
-
- r = amdgpu_cs_bo_validate(p, bo);
- if (r)
- return r;
-
- if (bo->shadow)
- r = amdgpu_cs_bo_validate(p, bo->shadow);
-
- return r;
-}
-
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
@@ -493,7 +478,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
lobj->user_pages);
}
- r = amdgpu_cs_validate(p, bo);
+ r = amdgpu_cs_bo_validate(p, bo);
if (r)
return r;
@@ -593,7 +578,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bytes_moved_vis = 0;
r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
- amdgpu_cs_validate, p);
+ amdgpu_cs_bo_validate, p);
if (r) {
DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
goto error_validate;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index fc83445fbc40..e7a010b7ca1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -331,13 +331,15 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
return 0;
}
+#define AMDGPU_RAS_COUNTE_DELAY_MS 3000
+
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
- struct amdgpu_fpriv *fpriv, uint32_t id,
- union drm_amdgpu_ctx_out *out)
+ struct amdgpu_fpriv *fpriv, uint32_t id,
+ union drm_amdgpu_ctx_out *out)
{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
- unsigned long ras_counter;
if (!fpriv)
return -EINVAL;
@@ -362,19 +364,28 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
- /*query ue count*/
- ras_counter = amdgpu_ras_query_error_count(adev, false);
- /*ras counter is monotonic increasing*/
- if (ras_counter != ctx->ras_counter_ue) {
- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
- ctx->ras_counter_ue = ras_counter;
- }
+ if (adev->ras_enabled && con) {
+ /* Return the cached values in O(1),
+ * and schedule delayed work to cache
+		 * new values.
+ */
+ int ce_count, ue_count;
+
+ ce_count = atomic_read(&con->ras_ce_count);
+ ue_count = atomic_read(&con->ras_ue_count);
+
+ if (ce_count != ctx->ras_counter_ce) {
+ ctx->ras_counter_ce = ce_count;
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+ }
+
+ if (ue_count != ctx->ras_counter_ue) {
+ ctx->ras_counter_ue = ue_count;
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+ }
- /*query ce count*/
- ras_counter = amdgpu_ras_query_error_count(adev, true);
- if (ras_counter != ctx->ras_counter_ce) {
- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
- ctx->ras_counter_ce = ras_counter;
+ schedule_delayed_work(&con->ras_counte_delay_work,
+ msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
}
mutex_unlock(&mgr->lock);
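
The ioctl path above now only reads cached atomics; the expensive query moves into the delayed work it schedules. A minimal sketch of what such a worker could look like, assuming the ras_counte_delay_work member and atomic counters introduced elsewhere in this series:

    static void amdgpu_ras_counte_refresh(struct work_struct *work)
    {
        struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
                                              ras_counte_delay_work.work);

        /* The heavy query runs here, off the ioctl fast path. */
        atomic_set(&con->ras_ce_count,
                   amdgpu_ras_query_error_count(con->adev, true));
        atomic_set(&con->ras_ue_count,
                   amdgpu_ras_query_error_count(con->adev, false));
    }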
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index bcaf271b39bf..a9bbb0034e1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -990,7 +990,7 @@ err:
}
/**
- * amdgpu_debugfs_regs_gfxoff_write - Enable/disable GFXOFF
+ * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
*
* @f: open file handle
* @buf: User buffer to write data from
@@ -1041,7 +1041,7 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
/**
- * amdgpu_debugfs_regs_gfxoff_status - read gfxoff status
+ * amdgpu_debugfs_gfxoff_read - read gfxoff status
*
* @f: open file handle
* @buf: User buffer to store read data in
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b1eb005fe83e..736b8c121979 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -71,6 +71,8 @@
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_drv.h>
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -263,6 +265,21 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
return amdgpu_asic_supports_baco(adev);
}
+/**
+ * amdgpu_device_supports_smart_shift - Is the device dGPU with
+ * smart shift support
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device is a dGPU with Smart Shift support,
+ * otherwise returns false.
+ */
+bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+{
+ return (amdgpu_device_supports_boco(dev) &&
+ amdgpu_acpi_is_power_shift_control_supported());
+}
+
/*
* VRAM access helper functions
*/
@@ -282,7 +299,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
unsigned long flags;
uint32_t hi = ~0;
uint64_t last;
+ int idx;
+ if (!drm_dev_enter(&adev->ddev, &idx))
+ return;
#ifdef CONFIG_64BIT
last = min(pos + size, adev->gmc.visible_vram_size);
@@ -301,7 +321,7 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
}
if (count == size)
- return;
+ goto exit;
pos += count;
buf += count / 4;
@@ -324,6 +344,11 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
*buf++ = RREG32_NO_KIQ(mmMM_DATA);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+
+#ifdef CONFIG_64BIT
+exit:
+#endif
+ drm_dev_exit(idx);
}
/*
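
The drm_dev_enter()/drm_dev_exit() pair added above is the standard guard against touching hardware after hot-unplug: enter fails once the device is gone, and the critical section keeps the unplug from completing mid-access. The bare pattern, with ddev a hypothetical struct drm_device pointer:

    int idx;

    if (!drm_dev_enter(ddev, &idx))
        return;     /* device was unplugged, skip all HW access */

    /* ... MMIO / VRAM access ... */

    drm_dev_exit(idx);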
@@ -333,7 +358,7 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
- if (adev->in_pci_err_recovery)
+ if (adev->no_hw_access)
return true;
#ifdef CONFIG_LOCKDEP
@@ -491,7 +516,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
adev->gfx.rlc.funcs &&
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
- return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
+ return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
@@ -2574,34 +2599,26 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
return 0;
}
-/**
- * amdgpu_device_ip_fini - run fini for hardware IPs
- *
- * @adev: amdgpu_device pointer
- *
- * Main teardown pass for hardware IPs. The list of all the hardware
- * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
- * are run. hw_fini tears down the hardware associated with each IP
- * and sw_fini tears down any software state associated with each IP.
- * Returns 0 on success, negative error code on failure.
- */
-static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
{
int i, r;
- if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
- amdgpu_virt_release_ras_err_handler_data(adev);
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].version->funcs->early_fini)
+ continue;
- amdgpu_ras_pre_fini(adev);
+ r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
+ if (r) {
+ DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
- if (adev->gmc.xgmi.num_physical_nodes > 1)
- amdgpu_xgmi_remove_device(adev);
+ amdgpu_amdkfd_suspend(adev, false);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- amdgpu_amdkfd_device_fini(adev);
-
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -2632,6 +2649,33 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_fini - run fini for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main teardown pass for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
+ * are run. hw_fini tears down the hardware associated with each IP
+ * and sw_fini tears down any software state associated with each IP.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
+ amdgpu_virt_release_ras_err_handler_data(adev);
+
+ amdgpu_ras_pre_fini(adev);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
+ amdgpu_amdkfd_device_fini_sw(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.sw)
@@ -3122,7 +3166,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+ if (amdgpu_sriov_vf(adev) ||
+ adev->enable_virtual_display ||
+ (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
return amdgpu_device_asic_has_dc_support(adev->asic_type);
@@ -3251,7 +3297,6 @@ static const struct attribute *amdgpu_dev_attributes[] = {
NULL
};
-
/**
* amdgpu_device_init - initialize the driver
*
@@ -3653,6 +3698,27 @@ failed_unmap:
return r;
}
+static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
+{
+ /* Clear all CPU mappings pointing to this device */
+ unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
+
+ /* Unmap all mapped bars - Doorbell, registers and VRAM */
+ amdgpu_device_doorbell_fini(adev);
+
+ iounmap(adev->rmmio);
+ adev->rmmio = NULL;
+ if (adev->mman.aper_base_kaddr)
+ iounmap(adev->mman.aper_base_kaddr);
+ adev->mman.aper_base_kaddr = NULL;
+
+ /* Memory manager related */
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
+}
+
/**
* amdgpu_device_fini - tear down the driver
*
@@ -3661,15 +3727,13 @@ failed_unmap:
* Tear down the driver info (all asics).
* Called at driver shutdown.
*/
-void amdgpu_device_fini(struct amdgpu_device *adev)
+void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
adev->shutdown = true;
- kfree(adev->pci_state);
-
/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
* */
@@ -3686,11 +3750,29 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
else
drm_atomic_helper_shutdown(adev_to_drm(adev));
}
- amdgpu_fence_driver_fini(adev);
+ amdgpu_fence_driver_fini_hw(adev);
+
if (adev->pm_sysfs_en)
amdgpu_pm_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
+
amdgpu_fbdev_fini(adev);
+
+ amdgpu_irq_fini_hw(adev);
+
+ amdgpu_device_ip_fini_early(adev);
+
+ amdgpu_gart_dummy_page_fini(adev);
+
+ amdgpu_device_unmap_mmio(adev);
+}
+
+void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+{
amdgpu_device_ip_fini(adev);
+ amdgpu_fence_driver_fini_sw(adev);
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
@@ -3712,18 +3794,14 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
}
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, NULL, NULL, NULL);
- iounmap(adev->rmmio);
- adev->rmmio = NULL;
- amdgpu_device_doorbell_fini(adev);
-
- if (adev->ucode_sysfs_en)
- amdgpu_ucode_sysfs_fini(adev);
- sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
if (adev->mman.discovery_bin)
amdgpu_discovery_fini(adev);
+
+ kfree(adev->pci_state);
+
}
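The teardown above is deliberately split: amdgpu_device_fini_hw() runs at PCI remove time to quiesce and unmap the hardware, while amdgpu_device_fini_sw() is deferred until the last userspace reference drops, via the drm_driver.release callback added later in this patch. A rough sketch of the resulting remove path; the glue around the real drm_dev_unplug()/pci_get_drvdata() calls is illustrative:

static void example_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* Mark the device gone; drm_dev_enter() starts failing. */
	drm_dev_unplug(dev);

	/* Tear down the hardware side immediately... */
	amdgpu_device_fini_hw(drm_to_adev(dev));

	/* ...the software side (amdgpu_device_fini_sw) follows later,
	 * from drm_driver.release, once every open file is closed. */
}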
@@ -3748,6 +3826,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
return 0;
adev->in_suspend = true;
+
+ if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
+ DRM_WARN("smart shift update failed\n");
+
drm_kms_helper_poll_disable(dev);
if (fbcon)
@@ -3857,6 +3939,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
#endif
adev->in_suspend = false;
+ if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
+ DRM_WARN("smart shift update failed\n");
+
return 0;
}
@@ -4042,9 +4127,9 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
/* No need to recover an evicted BO */
- if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
- shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
- shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+ if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+ shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
continue;
r = amdgpu_bo_restore_shadow(shadow, &next);
@@ -4633,7 +4718,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
return 0;
}
-void amdgpu_device_recheck_guilty_jobs(
+static void amdgpu_device_recheck_guilty_jobs(
struct amdgpu_device *adev, struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context)
{
@@ -4936,6 +5021,8 @@ skip_hw_reset:
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
+ if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
+ DRM_WARN("smart shift update failed\n");
}
}
@@ -5291,9 +5378,9 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
- adev->in_pci_err_recovery = true;
+ adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- adev->in_pci_err_recovery = false;
+ adev->no_hw_access = false;
if (r)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8a1fb8b6606e..ac7b37dfff5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
- &work->shared_count,
- &work->shared);
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+ &work->shared_count, &work->shared);
if (unlikely(r != 0)) {
DRM_ERROR("failed to get fences for buffer\n");
goto unpin;
@@ -1077,12 +1076,9 @@ int amdgpu_display_gem_fb_verify_and_init(
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
- struct drm_format_name_buf format_name;
drm_dbg_kms(dev,
- "unsupported pixel format %s / modifier 0x%llx\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name),
- mode_cmd->modifier[0]);
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
ret = -EINVAL;
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index baa980a477d9..c3053b83b80c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -42,52 +42,6 @@
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>
-/**
- * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
- * @obj: GEM BO
- * @vma: Virtual memory area
- *
- * Sets up a userspace mapping of the BO's memory in the given
- * virtual memory area.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- unsigned asize = amdgpu_bo_size(bo);
- int ret;
-
- if (!vma->vm_file)
- return -ENODEV;
-
- if (adev == NULL)
- return -ENODEV;
-
- /* Check for valid size. */
- if (asize < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
- (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
- return -EPERM;
- }
- vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
-
- /* prime mmap does not need to check access, so allow here */
- ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
- if (ret)
- return ret;
-
- ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
- drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
-
- return ret;
-}
-
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
@@ -95,10 +49,10 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
unsigned int count;
int r;
- if (!dma_resv_get_list(obj)) /* no shared fences to convert */
+ if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
return 0;
- r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+ r = dma_resv_get_fences(obj, NULL, &count, &fences);
if (r)
return r;
@@ -272,12 +226,12 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
if (r)
return ERR_PTR(r);
- } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+ } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
AMDGPU_GEM_DOMAIN_GTT)) {
return ERR_PTR(-EBUSY);
}
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
sgt = drm_prime_pages_to_sg(obj->dev,
bo->tbo.ttm->pages,
@@ -291,8 +245,9 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
- r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
- bo->tbo.base.size, attach->dev, dir, &sgt);
+ r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+ bo->tbo.base.size, attach->dev,
+ dir, &sgt);
if (r)
return ERR_PTR(r);
break;
@@ -482,7 +437,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct amdgpu_vm_bo_base *bo_base;
int r;
- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index 39b5b9616fd8..3e93b9b407a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -31,8 +31,6 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
-int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
extern const struct dma_buf_ops amdgpu_dmabuf_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1ed9748b9bc7..809aa7641d7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -95,9 +95,10 @@
* - 3.39.0 - DMABUF implicit sync does a full pipeline sync
* - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
* - 3.41.0 - Add video codec query
+ * - 3.42.0 - Add 16bpc fixed point display support
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 41
+#define KMS_DRIVER_MINOR 42
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -1313,14 +1314,16 @@ amdgpu_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
-#ifdef MODULE
- if (THIS_MODULE->state != MODULE_STATE_GOING)
-#endif
- DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
amdgpu_driver_unload_kms(dev);
+
+ /*
+ * Flush any in-flight DMA operations from the device.
+ * Clear the Bus Master Enable bit and then wait on the PCIe Device
+ * Status register's Transactions Pending bit.
+ */
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ pci_wait_for_pending_transaction(pdev);
}
static void
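Ordering matters in the remove hunk above: pci_disable_device() clears Bus Master Enable so the device can issue no new DMA, and pci_wait_for_pending_transaction() then polls the PCIe Device Status register until Transactions Pending clears. Both are existing PCI core helpers; a small sketch that also checks the (often ignored) wait result:

#include <linux/pci.h>

static void example_quiesce_dma(struct pci_dev *pdev)
{
	pci_disable_device(pdev);	/* clears Bus Master Enable */
	if (!pci_wait_for_pending_transaction(pdev))
		dev_warn(&pdev->dev, "transactions still pending after wait\n");
}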
@@ -1555,6 +1558,10 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
if (!adev->runpm)
return -EINVAL;
+ /* Avoid register access if the device is physically gone */
+ if (!pci_device_is_present(adev->pdev))
+ adev->no_hw_access = true;
+
if (amdgpu_device_supports_px(drm_dev)) {
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1689,7 +1696,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
.flush = amdgpu_flush,
.release = drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
- .mmap = amdgpu_mmap,
+ .mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#ifdef CONFIG_COMPAT
@@ -1751,11 +1758,12 @@ static const struct drm_driver amdgpu_kms_driver = {
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
.fops = &amdgpu_driver_kms_fops,
+ .release = &amdgpu_driver_release_kms,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_mmap = amdgpu_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -1772,6 +1780,18 @@ static struct pci_error_handlers amdgpu_pci_err_handler = {
.resume = amdgpu_pci_resume,
};
+extern const struct attribute_group amdgpu_vram_mgr_attr_group;
+extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
+extern const struct attribute_group amdgpu_vbios_version_attr_group;
+
+static const struct attribute_group *amdgpu_sysfs_groups[] = {
+ &amdgpu_vram_mgr_attr_group,
+ &amdgpu_gtt_mgr_attr_group,
+ &amdgpu_vbios_version_attr_group,
+ NULL,
+};
+
+
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -1780,6 +1800,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.shutdown = amdgpu_pci_shutdown,
.driver.pm = &amdgpu_pm_ops,
.err_handler = &amdgpu_pci_err_handler,
+ .dev_groups = amdgpu_sysfs_groups,
};
static int __init amdgpu_init(void)
@@ -1801,6 +1822,7 @@ static int __init amdgpu_init(void)
DRM_INFO("amdgpu kernel modesetting enabled.\n");
amdgpu_register_atpx_handler();
+ amdgpu_acpi_detect();
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 30772608eac6..72d9b92b1754 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -36,6 +36,7 @@
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -524,10 +525,9 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev)
*
* Tear down the fence driver for all possible rings (all asics).
*/
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
{
- unsigned i, j;
- int r;
+ int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -536,16 +536,33 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
continue;
if (!ring->no_scheduler)
drm_sched_fini(&ring->sched);
- r = amdgpu_fence_wait_empty(ring);
- if (r) {
- /* no need to trigger GPU reset as we are unloading */
+ /* You can't wait for HW to signal if it's gone */
+ if (!drm_dev_is_unplugged(&adev->ddev))
+ r = amdgpu_fence_wait_empty(ring);
+ else
+ r = -ENODEV;
+ /* no need to trigger GPU reset as we are unloading */
+ if (r)
amdgpu_fence_driver_force_completion(ring);
- }
+
if (ring->fence_drv.irq_src)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
del_timer_sync(&ring->fence_drv.fallback_timer);
+ }
+}
+
+void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->fence_drv.initialized)
+ continue;
+
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
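The fence driver split follows the same hw/sw rule: amdgpu_fence_driver_fini_hw() only waits for fences while the hardware can still signal them, forcing completion otherwise, while amdgpu_fence_driver_fini_sw() merely frees memory. A condensed sketch of the per-ring rule using the driver's own helpers; the wrapper function itself is hypothetical:

static int example_ring_drain(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	int r;

	if (!drm_dev_is_unplugged(&adev->ddev))
		r = amdgpu_fence_wait_empty(ring);	/* HW can still signal */
	else
		r = -ENODEV;				/* it cannot; give up */

	/* No GPU reset here - we are unloading anyway. */
	if (r)
		amdgpu_fence_driver_force_completion(ring);
	return r;
}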
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 8f4a8f8d8146..39b6c6bfab45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
unsigned char buff[34];
- int addrptr = 0, size = 0;
+ int addrptr, size;
+ int len;
if (!is_fru_eeprom_supported(adev))
return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.smu_i2c.algo) {
DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
- return 0;
+ return -ENODEV;
}
/* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
- return size;
+ return -EINVAL;
}
/* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product name, ret:%d", size);
- return size;
+ return -EINVAL;
}
+ len = size;
/* Product name should only be 32 characters. Any more,
* and something could be wrong. Cap it at 32 to be safe
*/
- if (size > 32) {
+ if (len >= sizeof(adev->product_name)) {
DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
- size = 32;
+ len = sizeof(adev->product_name) - 1;
}
/* Start at 2 due to buff using fields 0 and 1 for the address */
- memcpy(adev->product_name, &buff[2], size);
- adev->product_name[size] = '\0';
+ memcpy(adev->product_name, &buff[2], len);
+ adev->product_name[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product number, ret:%d", size);
- return size;
+ return -EINVAL;
}
+ len = size;
/* Product number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
- if (size > 16) {
+ if (len >= sizeof(adev->product_number)) {
DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
- size = 16;
+ len = sizeof(adev->product_number) - 1;
}
- memcpy(adev->product_number, &buff[2], size);
- adev->product_number[size] = '\0';
+ memcpy(adev->product_number, &buff[2], len);
+ adev->product_number[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product version, ret:%d", size);
- return size;
+ return -EINVAL;
}
addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
if (size < 1) {
DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
- return size;
+ return -EINVAL;
}
+ len = size;
/* Serial number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
- if (size > 16) {
+ if (len >= sizeof(adev->serial)) {
DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
- size = 16;
+ len = sizeof(adev->serial) - 1;
}
- memcpy(adev->serial, &buff[2], size);
- adev->serial[size] = '\0';
+ memcpy(adev->serial, &buff[2], len);
+ adev->serial[len] = '\0';
return 0;
}
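All of the FRU string copies above now cap the copy length against the destination buffer (via sizeof) rather than trusting the size reported by the EEPROM, which prevents an overflow when the FRU data is malformed. The pattern, distilled into a hypothetical helper (callers have already rejected size < 1):

#include <linux/string.h>

static void example_copy_fru_field(char *dst, size_t dst_size,
				   const unsigned char *src, int size)
{
	size_t len = size;

	if (len >= dst_size)		/* leave room for the terminator */
		len = dst_size - 1;

	memcpy(dst, src, len);
	dst[len] = '\0';
}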
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5562b5c90c03..34243e122d11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -92,7 +92,7 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
*
* Frees the dummy page used by the driver (all asics).
*/
-static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
+void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
if (!adev->dummy_page_addr)
return;
@@ -312,8 +312,6 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint64_t flags)
{
- int r, i;
-
if (!adev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
@@ -322,16 +320,26 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
if (!adev->gart.ptr)
return 0;
- r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
- adev->gart.ptr);
- if (r)
- return r;
+ return amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+ adev->gart.ptr);
+}
+
+/**
+ * amdgpu_gart_invalidate_tlb - invalidate gart TLB
+ *
+ * @adev: amdgpu device driver pointer
+ *
+ * Invalidate gart TLB which can be used as a way to flush gart changes
+ *
+ */
+void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
+{
+ int i;
mb();
amdgpu_asic_flush_hdp(adev, NULL);
for (i = 0; i < adev->num_vmhubs; i++)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
- return 0;
}
/**
@@ -365,15 +373,3 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
return 0;
}
-
-/**
- * amdgpu_gart_fini - tear down the driver info for managing the gart
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the gart driver info and free the dummy page (all asics).
- */
-void amdgpu_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_dummy_page_fini(adev);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index a25fe97b0196..f53f6a7b9b60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -57,7 +57,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
-void amdgpu_gart_fini(struct amdgpu_device *adev);
+void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages);
int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
@@ -66,5 +66,5 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint64_t flags);
-
+void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 18974bd081f0..1c3e3b608332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -32,6 +32,7 @@
#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include "amdgpu.h"
@@ -41,6 +42,46 @@
static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+ struct drm_device *ddev = bo->base.dev;
+ vm_fault_t ret;
+ int idx;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ if (drm_dev_enter(ddev, &idx)) {
+ ret = amdgpu_bo_fault_reserve_notify(bo);
+ if (ret) {
+ drm_dev_exit(idx);
+ goto unlock;
+ }
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+
+ drm_dev_exit(idx);
+ } else {
+ ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+ }
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+static const struct vm_operations_struct amdgpu_gem_vm_ops = {
+ .fault = amdgpu_gem_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
@@ -185,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (!amdgpu_vm_ready(vm))
goto out_unlock;
- fence = dma_resv_get_excl(bo->tbo.base.resv);
+ fence = dma_resv_excl_fence(bo->tbo.base.resv);
if (fence) {
amdgpu_bo_fence(bo, fence, true);
fence = NULL;
@@ -205,6 +246,18 @@ out_unlock:
ttm_eu_backoff_reservation(&ticket, &list);
}
+static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+ return -EPERM;
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ return -EPERM;
+
+ return drm_gem_ttm_mmap(obj, vma);
+}
+
static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
.free = amdgpu_gem_object_free,
.open = amdgpu_gem_object_open,
@@ -212,6 +265,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
.export = amdgpu_gem_prime_export,
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
+ .mmap = amdgpu_gem_object_mmap,
+ .vm_ops = &amdgpu_gem_vm_ops,
};
/*
@@ -471,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
- timeout);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
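With .mmap switched to drm_gem_mmap in the file ops (see the amdgpu_drv.c hunk earlier), the DRM core resolves the GEM object and invokes drm_gem_object_funcs.mmap, so per-BO mapping policy lives next to the object, as amdgpu_gem_object_mmap() shows above. A hedged sketch of such a callback for a hypothetical driver; only the policy predicate is made up:

#include <drm/drm_gem.h>
#include <drm/drm_gem_ttm_helper.h>

/* Hypothetical stand-in for the driver-specific checks (userptr,
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS) performed in the patch above. */
static bool example_obj_is_cpu_inaccessible(struct drm_gem_object *obj)
{
	return false;
}

static int example_gem_mmap(struct drm_gem_object *obj,
			    struct vm_area_struct *vma)
{
	if (example_obj_is_cpu_inaccessible(obj))
		return -EPERM;

	/* Defer the actual mapping setup to the shared TTM helper. */
	return drm_gem_ttm_mmap(obj, vma);
}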
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index a129ecc73869..85b8bfb8857b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -31,6 +31,8 @@
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
+#include <drm/drm_drv.h>
+
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
*
@@ -99,7 +101,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
*addr = bo->tbo.ttm->dma_address[0];
break;
@@ -110,7 +112,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
*addr = 0;
break;
}
- *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
+ *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}
@@ -151,6 +153,10 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
+ int idx;
+
+ if (!drm_dev_enter(&adev->ddev, &idx))
+ return 0;
/*
* The following is for PTE only. GART does not have PDEs.
@@ -158,6 +164,9 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
value = addr & 0x0000FFFFFFFFF000ULL;
value |= flags;
writeq(value, ptr + (gpu_page_idx * 8));
+
+ drm_dev_exit(idx);
+
return 0;
}
@@ -528,7 +537,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
}
/**
- * amdgpu_tmz_set -- check and set if a device supports TMZ
+ * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
* @adev: amdgpu_device pointer
*
* Check and set if the device @adev supports Trusted Memory
@@ -574,7 +583,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
}
/**
- * amdgpu_noretry_set -- set per asic noretry defaults
+ * amdgpu_gmc_noretry_set -- set per asic noretry defaults
* @adev: amdgpu_device pointer
*
* Set a per asic default for the no-retry parameter.
@@ -629,13 +638,18 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = RREG32(reg);
+ tmp = (hub_type == AMDGPU_GFXHUB_0) ?
+ RREG32_SOC15_IP(GC, reg) :
+ RREG32_SOC15_IP(MMHUB, reg);
+
if (enable)
tmp |= hub->vm_cntx_cntl_vm_fault;
else
tmp &= ~hub->vm_cntx_cntl_vm_fault;
- WREG32(reg, tmp);
+ (hub_type == AMDGPU_GFXHUB_0) ?
+ WREG32_SOC15_IP(GC, reg, tmp) :
+ WREG32_SOC15_IP(MMHUB, reg, tmp);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index c026972ca9a1..ec96e0b26b11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -22,18 +22,26 @@
* Authors: Christian König
*/
+#include <drm/ttm/ttm_range_manager.h>
+
#include "amdgpu.h"
+struct amdgpu_gtt_node {
+ struct ttm_buffer_object *tbo;
+ struct ttm_range_mgr_node base;
+};
+
static inline struct amdgpu_gtt_mgr *
to_gtt_mgr(struct ttm_resource_manager *man)
{
return container_of(man, struct amdgpu_gtt_mgr, manager);
}
-struct amdgpu_gtt_node {
- struct drm_mm_node node;
- struct ttm_buffer_object *tbo;
-};
+static inline struct amdgpu_gtt_node *
+to_amdgpu_gtt_node(struct ttm_resource *res)
+{
+ return container_of(res, struct amdgpu_gtt_node, base.base);
+}
/**
* DOC: mem_info_gtt_total
@@ -80,16 +88,28 @@ static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
amdgpu_mem_info_gtt_used_show, NULL);
+static struct attribute *amdgpu_gtt_mgr_attributes[] = {
+ &dev_attr_mem_info_gtt_total.attr,
+ &dev_attr_mem_info_gtt_used.attr,
+ NULL
+};
+
+const struct attribute_group amdgpu_gtt_mgr_attr_group = {
+ .attrs = amdgpu_gtt_mgr_attributes
+};
+
/**
* amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
*
- * @mem: the mem object to check
+ * @res: the mem object to check
*
* Check if a mem object already has address space allocated.
*/
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
{
- return mem->mm_node != NULL;
+ struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+
+ return drm_mm_node_allocated(&node->base.mm_nodes[0]);
}
/**
@@ -105,54 +125,57 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+ uint32_t num_pages = PFN_UP(tbo->base.size);
struct amdgpu_gtt_node *node;
int r;
spin_lock(&mgr->lock);
- if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
- atomic64_read(&mgr->available) < mem->num_pages) {
+ if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+ atomic64_read(&mgr->available) < num_pages) {
spin_unlock(&mgr->lock);
return -ENOSPC;
}
- atomic64_sub(mem->num_pages, &mgr->available);
+ atomic64_sub(num_pages, &mgr->available);
spin_unlock(&mgr->lock);
- if (!place->lpfn) {
- mem->mm_node = NULL;
- mem->start = AMDGPU_BO_INVALID_OFFSET;
- return 0;
- }
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
if (!node) {
r = -ENOMEM;
goto err_out;
}
node->tbo = tbo;
+ ttm_resource_init(tbo, place, &node->base.base);
- spin_lock(&mgr->lock);
- r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
- tbo->page_alignment, 0, place->fpfn,
- place->lpfn, DRM_MM_INSERT_BEST);
- spin_unlock(&mgr->lock);
-
- if (unlikely(r))
- goto err_free;
-
- mem->mm_node = node;
- mem->start = node->node.start;
+ if (place->lpfn) {
+ spin_lock(&mgr->lock);
+ r = drm_mm_insert_node_in_range(&mgr->mm,
+ &node->base.mm_nodes[0],
+ num_pages, tbo->page_alignment,
+ 0, place->fpfn, place->lpfn,
+ DRM_MM_INSERT_BEST);
+ spin_unlock(&mgr->lock);
+ if (unlikely(r))
+ goto err_free;
+
+ node->base.base.start = node->base.mm_nodes[0].start;
+ } else {
+ node->base.mm_nodes[0].start = 0;
+ node->base.mm_nodes[0].size = node->base.base.num_pages;
+ node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+ }
+ *res = &node->base.base;
return 0;
err_free:
kfree(node);
err_out:
- atomic64_add(mem->num_pages, &mgr->available);
+ atomic64_add(num_pages, &mgr->available);
return r;
}
@@ -166,19 +189,18 @@ err_out:
* Free the allocated GTT again.
*/
static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
- struct amdgpu_gtt_node *node = mem->mm_node;
- if (node) {
- spin_lock(&mgr->lock);
- drm_mm_remove_node(&node->node);
- spin_unlock(&mgr->lock);
- kfree(node);
- }
+ spin_lock(&mgr->lock);
+ if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
+ drm_mm_remove_node(&node->base.mm_nodes[0]);
+ spin_unlock(&mgr->lock);
+ atomic64_add(res->num_pages, &mgr->available);
- atomic64_add(mem->num_pages, &mgr->available);
+ kfree(node);
}
/**
@@ -206,19 +228,23 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+ struct amdgpu_device *adev;
struct amdgpu_gtt_node *node;
struct drm_mm_node *mm_node;
int r = 0;
+ adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
spin_lock(&mgr->lock);
drm_mm_for_each_node(mm_node, &mgr->mm) {
- node = container_of(mm_node, struct amdgpu_gtt_node, node);
+ node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
r = amdgpu_ttm_recover_gart(node->tbo);
if (r)
break;
}
spin_unlock(&mgr->lock);
+ amdgpu_gart_invalidate_tlb(adev);
+
return r;
}
@@ -263,7 +289,6 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr;
struct ttm_resource_manager *man = &mgr->manager;
uint64_t start, size;
- int ret;
man->use_tt = true;
man->func = &amdgpu_gtt_mgr_func;
@@ -276,17 +301,6 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
spin_lock_init(&mgr->lock);
atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT);
- ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_gtt_total\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_used);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_gtt_used\n");
- return ret;
- }
-
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
ttm_resource_manager_set_used(man, true);
return 0;
@@ -316,9 +330,6 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev)
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
- device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total);
- device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used);
-
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL);
}
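The GTT manager rework replaces the old ttm_resource.mm_node pointer with an embedded struct ttm_range_mgr_node, so the resource TTM hands around is the node's own base and the wrapper is recovered with container_of(). The two halves of that pattern, condensed (the example_* names are illustrative):

#include <linux/slab.h>
#include <drm/ttm/ttm_range_manager.h>

struct example_gtt_node {
	struct ttm_buffer_object *tbo;
	struct ttm_range_mgr_node base;	/* ends in mm_nodes[] */
};

static struct example_gtt_node *
example_node_from_res(struct ttm_resource *res)
{
	/* res points at base.base, so walk back to the wrapper. */
	return container_of(res, struct example_gtt_node, base.base);
}

static struct example_gtt_node *example_node_alloc(void)
{
	struct example_gtt_node *node;

	/* struct_size() accounts for exactly one trailing drm_mm_node. */
	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
	return node;
}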
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 2e6789a7dc46..77baf9b48d67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -130,7 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
- bool skip_preamble, need_ctx_switch;
+ bool need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -227,7 +227,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (need_ctx_switch)
status |= AMDGPU_HAVE_CTX_SWITCH;
- skip_preamble = ring->current_ctx == fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
status |= job->preamble_status;
status |= job->preemption_status;
@@ -245,14 +244,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
- /* drop preamble IBs if we don't have a context switch */
- if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- skip_preamble &&
- !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
- !amdgpu_mcbp &&
- !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
- continue;
-
if (job && ring->funcs->emit_frame_cntl) {
if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
amdgpu_ring_emit_frame_cntl(ring, false, secure);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index b4971e90b98c..b7fb72bff2c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
unsigned count;
int r;
- r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
+ r = dma_resv_get_fences(resv, NULL, &count, &fences);
if (r)
goto fallback;
@@ -156,8 +156,7 @@ fallback:
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout_rcu(resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
@@ -183,7 +182,7 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_grab_idle - grab idle VMID
+ * amdgpu_vmid_grab_idle - grab idle VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
@@ -256,7 +255,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_grab_reserved - try to assign reserved VMID
+ * amdgpu_vmid_grab_reserved - try to assign reserved VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
@@ -325,7 +324,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_grab_used - try to reuse a VMID
+ * amdgpu_vmid_grab_used - try to reuse a VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
@@ -397,7 +396,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_grab_id - allocate the next free VMID
+ * amdgpu_vmid_grab - allocate the next free VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index a36e191cf086..f3d62e196901 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -115,9 +115,11 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
*/
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
+
+ if (!ih->ring)
+ return;
+
if (ih->use_bus_addr) {
- if (!ih->ring)
- return;
/* add 8 bytes for the rptr/wptr shadows and
* add them to the end of the ring allocation.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 90f50561b43a..32ce0e679dc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -49,6 +49,7 @@
#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
@@ -348,6 +349,25 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
return 0;
}
+
+void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
+{
+ if (adev->irq.installed) {
+ drm_irq_uninstall(&adev->ddev);
+ adev->irq.installed = false;
+ if (adev->irq.msi_enabled)
+ pci_free_irq_vectors(adev->pdev);
+
+ if (!amdgpu_device_has_dc_support(adev))
+ flush_work(&adev->hotplug_work);
+ }
+
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+}
+
/**
* amdgpu_irq_fini - shut down interrupt handling
*
@@ -357,19 +377,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
* functionality, shuts down vblank, hotplug and reset interrupt handling,
* turns off interrupts from all sources (all ASICs).
*/
-void amdgpu_irq_fini(struct amdgpu_device *adev)
+void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
unsigned i, j;
- if (adev->irq.installed) {
- drm_irq_uninstall(adev_to_drm(adev));
- adev->irq.installed = false;
- if (adev->irq.msi_enabled)
- pci_free_irq_vectors(adev->pdev);
- if (!amdgpu_device_has_dc_support(adev))
- flush_work(&adev->hotplug_work);
- }
-
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index cf6116648322..78ad4784cc74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -103,7 +103,8 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev);
irqreturn_t amdgpu_irq_handler(int irq, void *arg);
int amdgpu_irq_init(struct amdgpu_device *adev);
-void amdgpu_irq_fini(struct amdgpu_device *adev);
+void amdgpu_irq_fini_sw(struct amdgpu_device *adev);
+void amdgpu_irq_fini_hw(struct amdgpu_device *adev);
int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
struct amdgpu_irq_src *source);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 759b34799221..d33e6d97cc89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -25,6 +25,8 @@
#include <linux/wait.h>
#include <linux/sched.h>
+#include <drm/drm_drv.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -34,6 +36,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
struct amdgpu_job *job = to_amdgpu_job(s_job);
struct amdgpu_task_info ti;
struct amdgpu_device *adev = ring->adev;
+ int idx;
+
+ if (!drm_dev_enter(&adev->ddev, &idx)) {
+ DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
+ __func__, s_job->sched->name);
+
+ /* Effectively the job is aborted as the device is gone */
+ return DRM_GPU_SCHED_STAT_ENODEV;
+ }
memset(&ti, 0, sizeof(struct amdgpu_task_info));
@@ -41,7 +52,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
s_job->sched->name);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ goto exit;
}
amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
@@ -53,13 +64,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
- return DRM_GPU_SCHED_STAT_NOMINAL;
} else {
drm_sched_suspend_timeout(&ring->sched);
if (amdgpu_sriov_vf(adev))
adev->virt.tdr_debug = true;
- return DRM_GPU_SCHED_STAT_NOMINAL;
}
+
+exit:
+ drm_dev_exit(idx);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
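Returning DRM_GPU_SCHED_STAT_ENODEV from the timeout handler is how the scheduler learns the device has vanished: the job is treated as aborted rather than put through recovery. The skeleton of that decision, reduced to the drm_dev_enter() gate (the recovery body is elided):

#include <drm/drm_drv.h>
#include <drm/gpu_scheduler.h>

static enum drm_gpu_sched_stat
example_job_timedout(struct drm_sched_job *s_job, struct drm_device *ddev)
{
	int idx;

	if (!drm_dev_enter(ddev, &idx))
		return DRM_GPU_SCHED_STAT_ENODEV;	/* device unplugged */

	/* ... soft recovery / full GPU reset would go here ... */

	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}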
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8d12e474745a..425596c2f6f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -28,6 +28,7 @@
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"
@@ -91,8 +92,11 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
+ if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
+ DRM_WARN("smart shift update failed\n");
+
amdgpu_acpi_fini(adev);
- amdgpu_device_fini(adev);
+ amdgpu_device_fini_hw(adev);
}
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
@@ -214,6 +218,9 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
pm_runtime_put_autosuspend(dev->dev);
}
+ if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
+ DRM_WARN("smart shift update failed\n");
+
out:
if (r) {
/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -861,6 +868,21 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
min((size_t)size, (size_t)(bios_size - bios_offset)))
? -EFAULT : 0;
}
+ case AMDGPU_INFO_VBIOS_INFO: {
+ struct drm_amdgpu_info_vbios vbios_info = {};
+ struct atom_context *atom_context;
+
+ atom_context = adev->mode_info.atom_context;
+ memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
+ memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
+ vbios_info.version = atom_context->version;
+ memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
+ sizeof(atom_context->vbios_ver_str));
+ memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
+
+ return copy_to_user(out, &vbios_info,
+ min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->vbios_info.type);
@@ -1220,6 +1242,15 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
pm_runtime_put_autosuspend(dev->dev);
}
+
+void amdgpu_driver_release_kms(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ amdgpu_device_fini_sw(adev);
+ pci_set_drvdata(adev->pdev, NULL);
+}
+
/*
* VBlank related functions.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 2741c28ff1b5..d6c54c7f7679 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
mmu_interval_set_seq(mni, cur_seq);
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
mutex_unlock(&adev->notifier_lock);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8714d50c5b20..c83f116edde2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -71,7 +71,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
}
amdgpu_bo_unref(&bo->parent);
- if (bo->tbo.type == ttm_bo_type_device) {
+ if (bo->tbo.type != ttm_bo_type_kernel) {
ubo = to_amdgpu_bo_user(bo);
kfree(ubo->metadata);
}
@@ -133,7 +133,9 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].mem_type = TTM_PL_TT;
+ places[c].mem_type =
+ abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
+ AMDGPU_PL_PREEMPT : TTM_PL_TT;
places[c].flags = 0;
c++;
}
@@ -362,14 +364,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
if (cpu_addr)
amdgpu_bo_kunmap(*bo_ptr);
- ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+ ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
}
r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
- &(*bo_ptr)->tbo.mem, &ctx);
+ &(*bo_ptr)->tbo.resource, &ctx);
if (r)
goto error;
@@ -573,15 +575,15 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+ bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ bo->tbo.resource->mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -612,35 +614,6 @@ fail_unreserve:
return r;
}
-int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
- unsigned long size,
- struct amdgpu_bo *bo)
-{
- struct amdgpu_bo_param bp;
- int r;
-
- if (bo->shadow)
- return 0;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.domain = AMDGPU_GEM_DOMAIN_GTT;
- bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- bp.type = ttm_bo_type_kernel;
- bp.resv = bo->tbo.base.resv;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
- r = amdgpu_bo_create(adev, &bp, &bo->shadow);
- if (!r) {
- bo->shadow->parent = amdgpu_bo_ref(bo);
- mutex_lock(&adev->shadow_list_lock);
- list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
- mutex_unlock(&adev->shadow_list_lock);
- }
-
- return r;
-}
-
/**
* amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
* @adev: amdgpu device object
@@ -668,6 +641,38 @@ int amdgpu_bo_create_user(struct amdgpu_device *adev,
*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
return r;
}
+
+/**
+ * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @vmbo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used by GPUVM.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+
+int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_vm **vmbo_ptr)
+{
+ struct amdgpu_bo *bo_ptr;
+ int r;
+
+ /* bo_ptr_size will be determined by the caller and it depends on
+ * the number of amdgpu_vm_pt entries.
+ */
+ BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
+ r = amdgpu_bo_create(adev, bp, &bo_ptr);
+ if (r)
+ return r;
+
+ *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
+ return r;
+}
+
/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
@@ -703,6 +708,22 @@ retry:
}
/**
+ * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
+ *
+ * @bo: BO that will be inserted into the shadow list
+ *
+ * Insert a BO into the shadow list.
+ */
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ mutex_lock(&adev->shadow_list_lock);
+ list_add_tail(&bo->shadow_list, &adev->shadow_list);
+ mutex_unlock(&adev->shadow_list_lock);
+}
+
+/**
* amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
*
* @shadow: &amdgpu_bo shadow to be restored
@@ -756,12 +777,12 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
if (r)
return r;
@@ -884,8 +905,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
if (bo->tbo.pin_count) {
- uint32_t mem_type = bo->tbo.mem.mem_type;
- uint32_t mem_flags = bo->tbo.mem.placement;
+ uint32_t mem_type = bo->tbo.resource->mem_type;
+ uint32_t mem_flags = bo->tbo.resource->placement;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
@@ -935,7 +956,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
ttm_bo_pin(&bo->tbo);
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -987,11 +1008,11 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->tbo.base.import_attach)
dma_buf_unpin(bo->tbo.base.import_attach);
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
&adev->visible_pin_size);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
}
@@ -1072,10 +1093,6 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
- if (!adev->gmc.xgmi.connected_to_cpu) {
- arch_phys_wc_del(adev->gmc.vram_mtrr);
- arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
- }
}
/**
@@ -1195,6 +1212,9 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
ubo = to_amdgpu_bo_user(bo);
+ if (metadata_size)
+ *metadata_size = ubo->metadata_size;
+
if (buffer) {
if (buffer_size < ubo->metadata_size)
return -EINVAL;
@@ -1203,8 +1223,6 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
memcpy(buffer, ubo->metadata, ubo->metadata_size);
}
- if (metadata_size)
- *metadata_size = ubo->metadata_size;
if (flags)
*flags = ubo->metadata_flags;
@@ -1227,7 +1245,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
@@ -1238,7 +1256,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
- bo->mem.mem_type != TTM_PL_SYSTEM)
+ bo->resource->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
/* remember the eviction */
@@ -1258,7 +1276,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
{
unsigned int domain;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
*vram_mem += amdgpu_bo_size(bo);
@@ -1300,7 +1318,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (bo->base.resv == &bo->base._resv)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
- if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+ if (bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
@@ -1337,10 +1355,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
return 0;
@@ -1363,9 +1381,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
- if (bo->mem.mem_type == TTM_PL_VRAM &&
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
(offset + bo->base.size) > adev->gmc.visible_vram_size)
return VM_FAULT_SIGBUS;
@@ -1450,11 +1468,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
*/
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
!bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
- WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1472,8 +1490,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint64_t offset;
- offset = (bo->tbo.mem.start << PAGE_SHIFT) +
- amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+ amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
return amdgpu_gmc_sign_extend(offset);
}
@@ -1526,7 +1544,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
unsigned int pin_count;
u64 size;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index a44779d3e0a7..126df03a7066 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -44,6 +44,7 @@
#define AMDGPU_AMDKFD_CREATE_SVM_BO (1ULL << 62)
#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
+#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
struct amdgpu_bo_param {
unsigned long size;
@@ -103,9 +104,6 @@ struct amdgpu_bo {
struct amdgpu_vm_bo_base *vm_bo;
/* Constant after initialization */
struct amdgpu_bo *parent;
- struct amdgpu_bo *shadow;
-
-
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_interval_notifier notifier;
@@ -125,6 +123,12 @@ struct amdgpu_bo_user {
};
+struct amdgpu_bo_vm {
+ struct amdgpu_bo bo;
+ struct amdgpu_bo *shadow;
+ struct amdgpu_vm_pt entries[];
+};
+
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
@@ -219,10 +223,10 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;
- if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;
- amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
while (cursor.remaining) {
if (cursor.start < adev->gmc.visible_vram_size)
return true;
@@ -252,6 +256,22 @@ static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}
+/**
+ * amdgpu_bo_shadowed - check if the BO is shadowed
+ *
+ * @bo: BO to be tested.
+ *
+ * Returns:
+ * The shadow BO if @bo is shadowed, NULL otherwise.
+ */
+static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
+{
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ return to_amdgpu_bo_vm(bo)->shadow;
+
+ return NULL;
+}
+
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
@@ -272,11 +292,11 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
int amdgpu_bo_create_user(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo_user **ubo_ptr);
+int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
-int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
- unsigned long size,
- struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
@@ -312,6 +332,7 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
uint64_t *gtt_mem, uint64_t *cpu_mem);
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
new file mode 100644
index 000000000000..f6aff7ce5160
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2016-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König, Felix Kuehling
+ */
+
+#include "amdgpu.h"
+
+static inline struct amdgpu_preempt_mgr *
+to_preempt_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct amdgpu_preempt_mgr, manager);
+}
+
+/**
+ * DOC: mem_info_preempt_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total
+ * amount of used preemptible memory.
+ * The file mem_info_preempt_used is used for this, and returns the
+ * current used size of the preemptible block, in bytes.
+ */
+static ssize_t mem_info_preempt_used_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT);
+ return sysfs_emit(buf, "%llu\n", amdgpu_preempt_mgr_usage(man));
+}
+
+static DEVICE_ATTR_RO(mem_info_preempt_used);
+
+/**
+ * amdgpu_preempt_mgr_new - allocate a new node
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @res: the resulting resource object
+ *
+ * Dummy allocator: just count the space used, without allocating any
+ * backing resource or enforcing a limit.
+ */
+static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
+
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(tbo, place, *res);
+ (*res)->start = AMDGPU_BO_INVALID_OFFSET;
+
+ atomic64_add((*res)->num_pages, &mgr->used);
+ return 0;
+}
+
+/**
+ * amdgpu_preempt_mgr_del - free a resource
+ *
+ * @man: TTM memory type manager
+ * @res: TTM resource object
+ *
+ * Free the allocated resource and update the usage counter.
+ */
+static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
+
+ atomic64_sub(res->num_pages, &mgr->used);
+ kfree(res);
+}
+
+/**
+ * amdgpu_preempt_mgr_usage - return usage of PREEMPT domain
+ *
+ * @man: TTM memory type manager
+ *
+ * Return how many bytes are currently used in the PREEMPT domain.
+ */
+uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man)
+{
+ struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
+ s64 result = atomic64_read(&mgr->used);
+
+ return (result > 0 ? result : 0) * PAGE_SIZE;
+}
+
+/**
+ * amdgpu_preempt_mgr_debug - dump the preempt manager state
+ *
+ * @man: TTM memory type manager
+ * @printer: DRM printer to use
+ *
+ * Dump the usage counter via the given DRM printer.
+ */
+static void amdgpu_preempt_mgr_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
+
+ drm_printf(printer, "man size:%llu pages, preempt used:%lld pages\n",
+ man->size, (u64)atomic64_read(&mgr->used));
+}
+
+static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
+ .alloc = amdgpu_preempt_mgr_new,
+ .free = amdgpu_preempt_mgr_del,
+ .debug = amdgpu_preempt_mgr_debug
+};
+
+/**
+ * amdgpu_preempt_mgr_init - init the PREEMPT manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the PREEMPT manager and register it with TTM.
+ */
+int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ int ret;
+
+ man->use_tt = true;
+ man->func = &amdgpu_preempt_mgr_func;
+
+ ttm_resource_manager_init(man, (1 << 30));
+
+ atomic64_set(&mgr->used, 0);
+
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_preempt_used\n");
+ return ret;
+ }
+
+ ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT,
+ &mgr->manager);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+}
+
+/**
+ * amdgpu_preempt_mgr_fini - free and destroy the PREEMPT manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Destroy and free the PREEMPT manager; bails out early if resources
+ * are still allocated inside it.
+ */
+void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ int ret;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+ if (ret)
+ return;
+
+ device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
+
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL);
+}
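
As a rough illustration of the sysfs interface documented in the DOC block
above, userspace could poll the counter as sketched below; the card index
and exact path are assumptions, not taken from this patch:

#include <stdio.h>

int main(void)
{
	unsigned long long used;
	FILE *f = fopen("/sys/class/drm/card0/device/mem_info_preempt_used", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &used) == 1)
		printf("preemptible memory used: %llu bytes\n", used);
	fclose(f);
	return 0;
}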
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 55378c6b9722..3ff76cbaec8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
@@ -231,7 +234,7 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
int i;
struct amdgpu_device *adev = psp->adev;
- if (psp->adev->in_pci_err_recovery)
+ if (psp->adev->no_hw_access)
return 0;
for (i = 0; i < adev->usec_timeout; i++) {
@@ -255,12 +258,15 @@ psp_cmd_submit_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
int ret;
- int index;
+ int index, idx;
int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
- if (psp->adev->in_pci_err_recovery)
+ if (psp->adev->no_hw_access)
+ return 0;
+
+ if (!drm_dev_enter(&psp->adev->ddev, &idx))
return 0;
mutex_lock(&psp->mutex);
@@ -273,8 +279,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
if (ret) {
atomic_dec(&psp->fence_value);
- mutex_unlock(&psp->mutex);
- return ret;
+ goto exit;
}
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
@@ -314,8 +319,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
if (!timeout) {
- mutex_unlock(&psp->mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
}
@@ -323,8 +328,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
}
- mutex_unlock(&psp->mutex);
+exit:
+ mutex_unlock(&psp->mutex);
+ drm_dev_exit(idx);
return ret;
}
@@ -368,8 +375,7 @@ static int psp_load_toc(struct psp_context *psp,
if (!cmd)
return -ENOMEM;
/* Copy toc to psp firmware private buffer */
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);
+ psp_copy_fw(psp, psp->toc_start_addr, psp->toc_bin_size);
psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);
@@ -604,8 +610,7 @@ static int psp_asd_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
+ psp_copy_fw(psp, psp->asd_start_addr, psp->asd_ucode_size);
psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
psp->asd_ucode_size);
@@ -760,8 +765,7 @@ static int psp_xgmi_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+ psp_copy_fw(psp, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
@@ -1017,8 +1021,7 @@ static int psp_ras_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
+ psp_copy_fw(psp, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
@@ -1284,9 +1287,8 @@ static int psp_hdcp_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
- psp->ta_hdcp_ucode_size);
+ psp_copy_fw(psp, psp->ta_hdcp_start_addr,
+ psp->ta_hdcp_ucode_size);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
@@ -1436,8 +1438,7 @@ static int psp_dtm_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
+ psp_copy_fw(psp, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
@@ -1582,8 +1583,7 @@ static int psp_rap_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
+ psp_copy_fw(psp, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
psp_prep_ta_load_cmd_buf(cmd,
psp->fw_pri_mc_addr,
@@ -3020,7 +3020,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
void *cpu_addr;
dma_addr_t dma_addr;
- int ret;
+ int ret, idx;
char fw_name[100];
const struct firmware *usbc_pd_fw;
@@ -3029,6 +3029,9 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
return -EBUSY;
}
+ if (!drm_dev_enter(ddev, &idx))
+ return -ENODEV;
+
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
if (ret)
@@ -3060,16 +3063,29 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
rel_buf:
dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
release_firmware(usbc_pd_fw);
-
fail:
if (ret) {
DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
- return ret;
+ count = ret;
}
+ drm_dev_exit(idx);
return count;
}
+void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
+{
+ int idx;
+
+ if (!drm_dev_enter(&psp->adev->ddev, &idx))
+ return;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, start_addr, bin_size);
+
+ drm_dev_exit(idx);
+}
+
static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
psp_usbc_pd_fw_sysfs_read,
psp_usbc_pd_fw_sysfs_write);
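
The psp_copy_fw() helper added above is one instance of the
drm_dev_enter()/drm_dev_exit() pattern this series applies throughout the
driver: hardware accesses are wrapped so they are skipped once the device
has been unplugged. A minimal sketch of the pattern (example_hw_access is
a hypothetical function, not part of this patch):

#include <drm/drm_drv.h>

static void example_hw_access(struct drm_device *ddev)
{
	int idx;

	/* drm_dev_enter() returns false once the device was unplugged;
	 * the critical section is then skipped instead of touching dead
	 * hardware. */
	if (!drm_dev_enter(ddev, &idx))
		return;

	/* ... MMIO writes or firmware buffer copies go here ... */

	drm_dev_exit(idx);
}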
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 46a5328e00e0..e5023f1de7fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -76,6 +76,7 @@ struct psp_ring
uint64_t ring_mem_mc_addr;
void *ring_mem_handle;
uint32_t ring_size;
+ uint32_t ring_wptr;
};
/* More registers may be supported */
@@ -423,4 +424,6 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
int psp_load_fw_list(struct psp_context *psp,
struct amdgpu_firmware_info **ucode_list, int ucode_count);
+void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index e3a4c3a7635a..9dfc1ebb41a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
+#include <linux/pm_runtime.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
@@ -1043,29 +1044,36 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
}
/* get the total error counts on all IPs */
-unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
- bool is_ce)
+void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
- struct ras_err_data data = {0, 0};
+ unsigned long ce, ue;
if (!adev->ras_enabled || !con)
- return 0;
+ return;
+ ce = 0;
+ ue = 0;
list_for_each_entry(obj, &con->head, node) {
struct ras_query_if info = {
.head = obj->head,
};
if (amdgpu_ras_query_error_status(adev, &info))
- return 0;
+ return;
- data.ce_count += info.ce_count;
- data.ue_count += info.ue_count;
+ ce += info.ce_count;
+ ue += info.ue_count;
}
- return is_ce ? data.ce_count : data.ue_count;
+ if (ce_count)
+ *ce_count = ce;
+
+ if (ue_count)
+ *ue_count = ue;
}
/* query/inject/cure end */
@@ -2109,6 +2117,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
adev->ras_hw_enabled & amdgpu_ras_mask;
}
+static void amdgpu_ras_counte_dw(struct work_struct *work)
+{
+ struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
+ ras_counte_delay_work.work);
+ struct amdgpu_device *adev = con->adev;
+ struct drm_device *dev = &adev->ddev;
+ unsigned long ce_count, ue_count;
+ int res;
+
+ res = pm_runtime_get_sync(dev->dev);
+ if (res < 0)
+ goto Out;
+
+ /* Cache new values.
+ */
+ amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+
+ pm_runtime_mark_last_busy(dev->dev);
+Out:
+ pm_runtime_put_autosuspend(dev->dev);
+}
+
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -2123,6 +2155,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (!con)
return -ENOMEM;
+ con->adev = adev;
+ INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
+ atomic_set(&con->ras_ce_count, 0);
+ atomic_set(&con->ras_ue_count, 0);
+
con->objs = (struct ras_manager *)(con + 1);
amdgpu_ras_set_context(adev, con);
@@ -2226,6 +2263,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
struct ras_fs_if *fs_info,
struct ras_ih_if *ih_info)
{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ unsigned long ue_count, ce_count;
int r;
/* disable RAS feature per IP block if it is not supported */
@@ -2266,6 +2305,12 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
if (r)
goto sysfs;
+ /* These are the cached values at init time. */
+ amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+
return 0;
cleanup:
amdgpu_ras_sysfs_remove(adev, ras_block);
@@ -2362,6 +2407,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
if (!adev->ras_enabled || !con)
return 0;
+
/* Need to disable ras on all IPs here before ip [hw/sw]fini */
amdgpu_ras_disable_all_features(adev, 0);
amdgpu_ras_recovery_fini(adev);
@@ -2383,6 +2429,8 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
if (con->features)
amdgpu_ras_disable_all_features(adev, 1);
+ cancel_delayed_work_sync(&con->ras_counte_delay_work);
+
amdgpu_ras_set_context(adev, NULL);
kfree(con);
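
A hedged usage sketch of the reworked amdgpu_ras_query_error_count()
interface (example_log_counts is hypothetical): either out pointer may be
NULL, and since the function can return early without writing anything,
callers should pre-initialize the counters:

static void example_log_counts(struct amdgpu_device *adev)
{
	unsigned long ce_count = 0, ue_count = 0;

	/* Fills both counters on success; leaves them untouched when RAS
	 * is disabled or a per-IP status query fails. */
	amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);

	DRM_INFO("RAS: %lu correctable, %lu uncorrectable\n",
		 ce_count, ue_count);
}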
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index bfa40c8ecc94..256cea5d34f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -340,6 +340,11 @@ struct amdgpu_ras {
/* disable ras error count harvest in recovery */
bool disable_ras_err_cnt_harvest;
+
+ /* RAS count errors delayed work */
+ struct delayed_work ras_counte_delay_work;
+ atomic_t ras_ue_count;
+ atomic_t ras_ce_count;
};
struct ras_fs_data {
@@ -485,8 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
-unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
- bool is_ce);
+void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count);
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 40f2adf305bc..59e0fefb15aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -28,6 +28,7 @@
#include <drm/drm_mm.h>
#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
/* state back for walking over vram_mgr and gtt_mgr allocations */
struct amdgpu_res_cursor {
@@ -53,7 +54,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
{
struct drm_mm_node *node;
- if (!res || !res->mm_node) {
+ if (!res) {
cur->start = start;
cur->size = size;
cur->remaining = size;
@@ -63,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
- node = res->mm_node;
+ node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
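
With res->mm_node gone, the cursor is now seeded from the mm_nodes array
embedded in the range-manager node; iteration itself is unchanged. A
minimal sketch of the walk, modeled on amdgpu_bo_in_cpu_visible_vram()
earlier in this diff (example_any_visible is hypothetical):

static bool example_any_visible(struct ttm_resource *res, u64 size,
				u64 visible_limit)
{
	struct amdgpu_res_cursor cursor;

	/* Walk the possibly scattered backing range piece by piece. */
	amdgpu_res_first(res, 0, size, &cursor);
	while (cursor.remaining) {
		if (cursor.start < visible_limit)
			return true;
		amdgpu_res_next(&cursor, cursor.size);
	}
	return false;
}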
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index ca1622835296..e7d3d0dbdd96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -107,7 +107,8 @@ struct amdgpu_fence_driver {
};
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 4fc2ce8ce8ab..7a4775ab6804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -127,8 +127,8 @@ struct amdgpu_rlc_funcs {
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
- void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag);
- u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 flag);
+ void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
+ u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 4e558632a5d2..1b2ceccaf5b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -210,10 +210,10 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -EINVAL;
/* always sync to the exclusive fence */
- f = dma_resv_get_excl(resv);
+ f = dma_resv_excl_fence(resv);
r = amdgpu_sync_fence(sync, f);
- flist = dma_resv_get_list(resv);
+ flist = dma_resv_shared_list(resv);
if (!flist || r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 792d20261846..0527772fe1b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -127,8 +127,8 @@ TRACE_EVENT(amdgpu_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.mem.num_pages;
- __entry->type = bo->tbo.mem.mem_type;
+ __entry->pages = bo->tbo.resource->num_pages;
+ __entry->type = bo->tbo.resource->mem_type;
__entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
__entry->visible = bo->flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e039f5b8bc87..5d0c57b9917f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -45,6 +45,7 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
#include <drm/amdgpu_drm.h>
@@ -125,7 +126,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
rcu_read_unlock();
return;
}
- switch (bo->mem.mem_type) {
+
+ switch (bo->resource->mem_type) {
case AMDGPU_PL_GDS:
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
@@ -158,6 +160,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
break;
case TTM_PL_TT:
+ case AMDGPU_PL_PREEMPT:
default:
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
break;
@@ -166,25 +169,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_verify_access - Verify access for a mmap call
- *
- * @bo: The buffer object to map
- * @filp: The file pointer from the process performing the mmap
- *
- * This is called by ttm_bo_mmap() to verify whether a process
- * has the right to mmap a BO to their process space.
- */
-static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-
- if (amdgpu_ttm_tt_get_usermm(bo->ttm))
- return -EPERM;
- return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
- filp->private_data);
-}
-
-/**
* amdgpu_ttm_map_buffer - Map memory into the GART windows
* @bo: buffer object to map
* @mem: memory object to map
@@ -217,6 +201,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+ BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
/* Map only what can't be accessed directly */
if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
@@ -477,10 +462,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
int r;
- if (new_mem->mem_type == TTM_PL_TT) {
+ if (new_mem->mem_type == TTM_PL_TT ||
+ new_mem->mem_type == AMDGPU_PL_PREEMPT) {
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
if (r)
return r;
@@ -498,18 +484,20 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
goto out;
}
if (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_TT) {
+ (new_mem->mem_type == TTM_PL_TT ||
+ new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
ttm_bo_move_null(bo, new_mem);
goto out;
}
- if (old_mem->mem_type == TTM_PL_TT &&
+ if ((old_mem->mem_type == TTM_PL_TT ||
+ old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = ttm_bo_wait_ctx(bo, ctx);
if (r)
return r;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
@@ -587,6 +575,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
/* system memory */
return 0;
case TTM_PL_TT:
+ case AMDGPU_PL_PREEMPT:
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -618,7 +607,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_res_cursor cursor;
- amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+ amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+ &cursor);
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -967,49 +957,50 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- struct ttm_resource tmp;
struct ttm_placement placement;
struct ttm_place placements;
+ struct ttm_resource *tmp;
uint64_t addr, flags;
int r;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+ if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
return 0;
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
- bo->mem.start = addr >> PAGE_SHIFT;
- } else {
+ bo->resource->start = addr >> PAGE_SHIFT;
+ return 0;
+ }
- /* allocate GART space */
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- placements.mem_type = TTM_PL_TT;
- placements.flags = bo->mem.placement;
-
- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
- if (unlikely(r))
- return r;
+ /* allocate GART space */
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = bo->resource->placement;
- /* compute PTE flags for this buffer object */
- flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ if (unlikely(r))
+ return r;
- /* Bind pages */
- gtt->offset = (u64)tmp.start << PAGE_SHIFT;
- r = amdgpu_ttm_gart_bind(adev, bo, flags);
- if (unlikely(r)) {
- ttm_resource_free(bo, &tmp);
- return r;
- }
+ /* compute PTE flags for this buffer object */
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
- ttm_resource_free(bo, &bo->mem);
- bo->mem = tmp;
+ /* Bind pages */
+ gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_resource_free(bo, &tmp);
+ return r;
}
+ amdgpu_gart_invalidate_tlb(adev);
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, tmp);
+
return 0;
}
@@ -1028,7 +1019,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
if (!tbo->ttm)
return 0;
- flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+ flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
r = amdgpu_ttm_gart_bind(adev, tbo, flags);
return r;
@@ -1292,7 +1283,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
- if (mem && mem->mem_type == TTM_PL_TT) {
+ if (mem && (mem->mem_type == TTM_PL_TT ||
+ mem->mem_type == AMDGPU_PL_PREEMPT)) {
flags |= AMDGPU_PTE_SYSTEM;
if (ttm->caching == ttm_cached)
@@ -1341,12 +1333,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
- unsigned long num_pages = bo->mem.num_pages;
+ unsigned long num_pages = bo->resource->num_pages;
struct amdgpu_res_cursor cursor;
struct dma_resv_list *flist;
struct dma_fence *f;
int i;
+ /* Swapout? */
+ if (bo->resource->mem_type == TTM_PL_SYSTEM)
+ return true;
+
if (bo->type == ttm_bo_type_kernel &&
!amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
return false;
@@ -1355,7 +1351,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- flist = dma_resv_get_list(bo->base.resv);
+ flist = dma_resv_shared_list(bo->base.resv);
if (flist) {
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
@@ -1365,7 +1361,16 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
}
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
+ case AMDGPU_PL_PREEMPT:
+ /* Preemptible BOs don't own system resources managed by the
+ * driver (pages, VRAM, GART space). They point to resources
+ * owned by someone else (e.g. pageable memory in user mode
+ * or a DMABuf). They are used in a preemptible context so we
+ * can guarantee no deadlocks and good QoS in case of MMU
+ * notifiers or DMABuf move notifiers from the resource owner.
+ */
+ return false;
case TTM_PL_TT:
if (amdgpu_bo_is_amdgpu_bo(bo) &&
amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
@@ -1374,7 +1379,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
/* Check each drm MM node individually */
- amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+ amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
&cursor);
while (cursor.remaining) {
if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
@@ -1416,10 +1421,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
uint32_t value = 0;
int ret = 0;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return -EIO;
- amdgpu_res_first(&bo->mem, offset, len, &cursor);
+ amdgpu_res_first(bo->resource, offset, len, &cursor);
while (cursor.remaining) {
uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
uint64_t bytes = 4 - (cursor.start & 3);
@@ -1475,7 +1480,6 @@ static struct ttm_device_funcs amdgpu_bo_driver = {
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
- .verify_access = &amdgpu_verify_access,
.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
.release_notify = &amdgpu_bo_release_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
@@ -1747,6 +1751,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
(unsigned)(gtt_size / (1024 * 1024)));
+ /* Initialize preemptible memory pool */
+ r = amdgpu_preempt_mgr_init(adev);
+ if (r) {
+ DRM_ERROR("Failed initializing PREEMPT heap.\n");
+ return r;
+ }
+
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
@@ -1785,12 +1796,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
- if (adev->mman.aper_base_kaddr)
- iounmap(adev->mman.aper_base_kaddr);
- adev->mman.aper_base_kaddr = NULL;
-
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
+ amdgpu_preempt_mgr_fini(adev);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
@@ -1847,50 +1855,6 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
adev->mman.buffer_funcs_enabled = enable;
}
-static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
- vm_fault_t ret;
-
- ret = ttm_bo_vm_reserve(bo, vmf);
- if (ret)
- return ret;
-
- ret = amdgpu_bo_fault_reserve_notify(bo);
- if (ret)
- goto unlock;
-
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT, 1);
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
-
-unlock:
- dma_resv_unlock(bo->base.resv);
- return ret;
-}
-
-static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
- .fault = amdgpu_ttm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
-};
-
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
- int r;
-
- r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
- if (unlikely(r != 0))
- return r;
-
- vma->vm_ops = &amdgpu_ttm_vm_ops;
- return 0;
-}
-
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
@@ -1985,16 +1949,21 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return -EINVAL;
}
- if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
+ DRM_ERROR("Trying to clear preemptible memory.\n");
+ return -EINVAL;
+ }
+
+ if (bo->tbo.resource->mem_type == TTM_PL_TT) {
r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r)
return r;
}
- num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
+ num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
num_loops = 0;
- amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
while (cursor.remaining) {
num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
amdgpu_res_next(&cursor, cursor.size);
@@ -2019,12 +1988,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
}
- amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
while (cursor.remaining) {
uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
uint64_t dst_addr = cursor.start;
- dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ dst_addr += amdgpu_ttm_domain_start(adev,
+ bo->tbo.resource->mem_type);
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
cur_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index eb84a69c4b74..74a7021592c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -31,6 +31,7 @@
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
+#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -54,6 +55,11 @@ struct amdgpu_gtt_mgr {
atomic64_t available;
};
+struct amdgpu_preempt_mgr {
+ struct ttm_resource_manager manager;
+ atomic64_t used;
+};
+
struct amdgpu_mman {
struct ttm_device bdev;
bool initialized;
@@ -70,6 +76,7 @@ struct amdgpu_mman {
struct amdgpu_vram_mgr vram_mgr;
struct amdgpu_gtt_mgr gtt_mgr;
+ struct amdgpu_preempt_mgr preempt_mgr;
uint64_t stolen_vga_size;
struct amdgpu_bo *stolen_vga_memory;
@@ -97,6 +104,8 @@ struct amdgpu_copy_mem {
int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
+int amdgpu_preempt_mgr_init(struct amdgpu_device *adev);
+void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
@@ -104,6 +113,8 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
+uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
+
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct ttm_resource *mem,
@@ -142,7 +153,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
struct dma_resv *resv,
struct dma_fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c6dbc0801604..0f576f294d8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <drm/drm.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
@@ -375,7 +376,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
- int i, j;
+ int i, j, idx;
bool in_ras_intr = amdgpu_ras_intr_triggered();
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -403,11 +404,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
- /* re-write 0 since err_event_athub will corrupt VCPU buffer */
- if (in_ras_intr)
- memset(adev->uvd.inst[j].saved_bo, 0, size);
- else
- memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ /* re-write 0 since err_event_athub will corrupt VCPU buffer */
+ if (in_ras_intr)
+ memset(adev->uvd.inst[j].saved_bo, 0, size);
+ else
+ memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+
+ drm_dev_exit(idx);
+ }
}
if (in_ras_intr)
@@ -420,7 +425,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
- int i;
+ int i, idx;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
if (adev->uvd.harvest_config & (1 << i))
@@ -432,7 +437,10 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr = adev->uvd.inst[i].cpu_addr;
if (adev->uvd.inst[i].saved_bo != NULL) {
- memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
+ drm_dev_exit(idx);
+ }
kvfree(adev->uvd.inst[i].saved_bo);
adev->uvd.inst[i].saved_bo = NULL;
} else {
@@ -442,8 +450,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ drm_dev_exit(idx);
+ }
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
@@ -829,9 +840,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
default:
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
- return -EINVAL;
}
- BUG();
+
return -EINVAL;
}
@@ -1115,9 +1125,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false,
- msecs_to_jiffies(10));
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ msecs_to_jiffies(10));
if (r == 0)
r = -ETIMEDOUT;
if (r < 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index ea6a62f67e38..1ae7f824adc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <drm/drm.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
@@ -87,7 +88,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
/**
- * amdgpu_vce_init - allocate memory, load vce firmware
+ * amdgpu_vce_sw_init - allocate memory, load vce firmware
*
* @adev: amdgpu_device pointer
* @size: size for the new BO
@@ -204,7 +205,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
}
/**
- * amdgpu_vce_fini - free memory
+ * amdgpu_vce_sw_fini - free memory
*
* @adev: amdgpu_device pointer
*
@@ -293,7 +294,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
void *cpu_addr;
const struct common_firmware_header *hdr;
unsigned offset;
- int r;
+ int r, idx;
if (adev->vce.vcpu_bo == NULL)
return -EINVAL;
@@ -313,8 +314,12 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
- adev->vce.fw->size - offset);
+
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+ adev->vce.fw->size - offset);
+ drm_dev_exit(idx);
+ }
amdgpu_bo_kunmap(adev->vce.vcpu_bo);
@@ -574,7 +579,7 @@ err:
}
/**
- * amdgpu_vce_cs_validate_bo - make sure not to cross 4GB boundary
+ * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
*
* @p: parser context
* @ib_idx: indirect buffer to use
@@ -715,7 +720,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
}
/**
- * amdgpu_vce_cs_parse - parse and validate the command stream
+ * amdgpu_vce_ring_parse_cs - parse and validate the command stream
*
* @p: parser context
* @ib_idx: indirect buffer to use
@@ -951,7 +956,7 @@ out:
}
/**
- * amdgpu_vce_cs_parse_vm - parse the command stream in VM mode
+ * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
*
* @p: parser context
* @ib_idx: indirect buffer to use
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index d101cb9697a2..b812938c9322 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
@@ -283,7 +284,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
- int i;
+ int i, idx;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -300,7 +301,10 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
if (!adev->vcn.inst[i].saved_bo)
return -ENOMEM;
- memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+ drm_dev_exit(idx);
+ }
}
return 0;
}
@@ -309,7 +313,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
- int i;
+ int i, idx;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -321,7 +325,10 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
ptr = adev->vcn.inst[i].cpu_addr;
if (adev->vcn.inst[i].saved_bo != NULL) {
- memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ drm_dev_exit(idx);
+ }
kvfree(adev->vcn.inst[i].saved_bo);
adev->vcn.inst[i].saved_bo = NULL;
} else {
@@ -331,8 +338,11 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ drm_dev_exit(idx);
+ }
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 55991f393481..777e8922ecf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -32,6 +32,7 @@
#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
@@ -341,7 +342,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
amdgpu_vm_bo_idle(base);
if (bo->preferred_domains &
- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+ amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
return;
/*
@@ -652,15 +653,16 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
spin_lock(&adev->mman.bdev.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
+ struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
if (!bo->parent)
continue;
- ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+ ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
&vm->lru_bulk_move);
- if (bo->shadow)
- ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
- &bo->shadow->tbo.mem,
+ if (shadow)
+ ttm_bo_move_to_lru_tail(&shadow->tbo,
+ shadow->tbo.resource,
&vm->lru_bulk_move);
}
spin_unlock(&adev->mman.bdev.lru_lock);
@@ -692,15 +694,21 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
+ struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
r = validate(param, bo);
if (r)
return r;
+ if (shadow) {
+ r = validate(param, shadow);
+ if (r)
+ return r;
+ }
if (bo->tbo.type != ttm_bo_type_kernel) {
amdgpu_vm_bo_moved(bo_base);
} else {
- vm->update_funcs->map_table(bo);
+ vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
}
@@ -732,7 +740,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
*
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
- * @bo: BO to clear
+ * @vmbo: BO to clear
* @immediate: use an immediate update
*
* Root PD needs to be reserved when calling this.
@@ -742,13 +750,14 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_bo *bo,
+ struct amdgpu_bo_vm *vmbo,
bool immediate)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
struct amdgpu_vm_update_params params;
- struct amdgpu_bo *ancestor = bo;
+ struct amdgpu_bo *ancestor = &vmbo->bo;
+ struct amdgpu_bo *bo = &vmbo->bo;
unsigned entries, ats_entries;
uint64_t addr;
int r;
@@ -788,14 +797,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
return r;
- if (bo->shadow) {
- r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
- &ctx);
+ if (vmbo->shadow) {
+ struct amdgpu_bo *shadow = vmbo->shadow;
+
+ r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
if (r)
return r;
}
- r = vm->update_funcs->map_table(bo);
+ r = vm->update_funcs->map_table(vmbo);
if (r)
return r;
@@ -819,7 +829,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
}
- r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
+ r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
value, flags);
if (r)
return r;
@@ -842,7 +852,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
}
}
- r = vm->update_funcs->update(&params, bo, addr, 0, entries,
+ r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
value, flags);
if (r)
return r;
@@ -858,14 +868,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
* @vm: requesting vm
* @level: the page table level
* @immediate: use an immediate update
- * @bo: pointer to the buffer object pointer
+ * @vmbo: pointer to the buffer object pointer
*/
static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
int level, bool immediate,
- struct amdgpu_bo **bo)
+ struct amdgpu_bo_vm **vmbo)
{
struct amdgpu_bo_param bp;
+ struct amdgpu_bo *bo;
+ struct dma_resv *resv;
+ unsigned int num_entries;
int r;
memset(&bp, 0, sizeof(bp));
@@ -876,7 +889,14 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ if (level < AMDGPU_VM_PTB)
+ num_entries = amdgpu_vm_num_entries(adev, level);
+ else
+ num_entries = 0;
+
+ bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
+
if (vm->use_cpu_for_update)
bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -885,26 +905,41 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
if (vm->root.base.bo)
bp.resv = vm->root.base.bo->tbo.base.resv;
- r = amdgpu_bo_create(adev, &bp, bo);
+ r = amdgpu_bo_create_vm(adev, &bp, vmbo);
if (r)
return r;
- if (vm->is_compute_context && (adev->flags & AMD_IS_APU))
+ bo = &(*vmbo)->bo;
+ if (vm->is_compute_context && (adev->flags & AMD_IS_APU)) {
+ (*vmbo)->shadow = NULL;
return 0;
+ }
if (!bp.resv)
- WARN_ON(dma_resv_lock((*bo)->tbo.base.resv,
+ WARN_ON(dma_resv_lock(bo->tbo.base.resv,
NULL));
- r = amdgpu_bo_create_shadow(adev, bp.size, *bo);
+ resv = bp.resv;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = amdgpu_vm_bo_size(adev, level);
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = bo->tbo.base.resv;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
- if (!bp.resv)
- dma_resv_unlock((*bo)->tbo.base.resv);
+ r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
+
+ if (!resv)
+ dma_resv_unlock(bo->tbo.base.resv);
if (r) {
- amdgpu_bo_unref(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
+ (*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
+ amdgpu_bo_add_to_shadow_list((*vmbo)->shadow);
+
return 0;
}
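
amdgpu_vm_pt_create() above sizes the allocation with struct_size() so the
page table entries land in the flexible array at the end of struct
amdgpu_bo_vm. A generic sketch of that idiom, with hypothetical example
names:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_flex {
	unsigned int count;
	u64 entries[];		/* flexible array member, as in amdgpu_bo_vm */
};

static struct example_flex *example_alloc(unsigned int num_entries)
{
	struct example_flex *obj;

	/* struct_size() computes sizeof(*obj) +
	 * num_entries * sizeof(obj->entries[0]) with overflow checking. */
	obj = kzalloc(struct_size(obj, entries, num_entries), GFP_KERNEL);
	if (obj)
		obj->count = num_entries;
	return obj;
}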
@@ -928,22 +963,18 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
bool immediate)
{
struct amdgpu_vm_pt *entry = cursor->entry;
- struct amdgpu_bo *pt;
+ struct amdgpu_bo *pt_bo;
+ struct amdgpu_bo_vm *pt;
int r;
- if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
- unsigned num_entries;
-
- num_entries = amdgpu_vm_num_entries(adev, cursor->level);
- entry->entries = kvmalloc_array(num_entries,
- sizeof(*entry->entries),
- GFP_KERNEL | __GFP_ZERO);
- if (!entry->entries)
- return -ENOMEM;
- }
-
- if (entry->base.bo)
+ if (entry->base.bo) {
+ if (cursor->level < AMDGPU_VM_PTB)
+ entry->entries =
+ to_amdgpu_bo_vm(entry->base.bo)->entries;
+ else
+ entry->entries = NULL;
return 0;
+ }
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
if (r)
@@ -952,8 +983,13 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
/* Keep a reference to the root directory to avoid
* freeing them up in the wrong order.
*/
- pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
- amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+ pt_bo = &pt->bo;
+ pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
+ amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
+ if (cursor->level < AMDGPU_VM_PTB)
+ entry->entries = pt->entries;
+ else
+ entry->entries = NULL;
r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
if (r)
@@ -963,7 +999,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
error_free_pt:
amdgpu_bo_unref(&pt->shadow);
- amdgpu_bo_unref(&pt);
+ amdgpu_bo_unref(&pt_bo);
return r;
}
@@ -974,13 +1010,15 @@ error_free_pt:
*/
static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
{
+ struct amdgpu_bo *shadow;
+
if (entry->base.bo) {
+ shadow = amdgpu_bo_shadowed(entry->base.bo);
entry->base.bo->vm_bo = NULL;
list_del(&entry->base.vm_status);
- amdgpu_bo_unref(&entry->base.bo->shadow);
+ amdgpu_bo_unref(&shadow);
amdgpu_bo_unref(&entry->base.bo);
}
- kvfree(entry->entries);
entry->entries = NULL;
}
@@ -1279,7 +1317,8 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
level += params->adev->vm_manager.root_level;
amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
pde = (entry - parent->entries) * 8;
- return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
+ return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
+ 1, 0, flags);
}
/**
@@ -1359,9 +1398,9 @@ error:
* Make sure to set the right flags for the PTEs at the desired level.
*/
static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
- struct amdgpu_bo *bo, unsigned level,
+ struct amdgpu_bo_vm *pt, unsigned int level,
uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
+ unsigned int count, uint32_t incr,
uint64_t flags)
{
@@ -1377,7 +1416,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
flags |= AMDGPU_PTE_EXECUTABLE;
}
- params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
+ params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
flags);
}
@@ -1557,9 +1596,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
nptes, dst, incr, upd_flags,
vm->task_info.pid,
vm->immediate.fence_context);
- amdgpu_vm_update_flags(params, pt, cursor.level,
- pe_start, dst, nptes, incr,
- upd_flags);
+ amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
+ cursor.level, pe_start, dst,
+ nptes, incr, upd_flags);
pe_start += nptes * 8;
dst += nptes * incr;
@@ -1582,9 +1621,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* completely covered by the range and so potentially still in use.
*/
while (cursor.pfn < frag_start) {
- amdgpu_vm_free_pts(adev, params->vm, &cursor);
+ /* Make sure previous mapping is freed */
+ if (cursor.entry->base.bo) {
+ params->table_freed = true;
+ amdgpu_vm_free_pts(adev, params->vm, &cursor);
+ }
amdgpu_vm_pt_next(adev, &cursor);
- params->table_freed = true;
}
} else if (frag >= shift) {
@@ -1633,7 +1675,10 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_vm_update_params params;
struct amdgpu_res_cursor cursor;
enum amdgpu_sync_mode sync_mode;
- int r;
+ int r, idx;
+
+ if (!drm_dev_enter(&adev->ddev, &idx))
+ return -ENODEV;
memset(&params, 0, sizeof(params));
params.adev = adev;
@@ -1728,6 +1773,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
error_unlock:
amdgpu_vm_eviction_unlock(vm);
+ drm_dev_exit(idx);
return r;
}
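
The drm_dev_enter()/drm_dev_exit() pair added to amdgpu_vm_bo_update_mapping() guards the mapping update against concurrent device unplug: the critical section only runs while the DRM device is still registered, and -ENODEV is returned once it is gone. The general shape of the pattern (the function name here is a placeholder):

static int example_hw_touching_path(struct amdgpu_device *adev)
{
	int r = 0, idx;

	/* Fails once the device has been unplugged. */
	if (!drm_dev_enter(&adev->ddev, &idx))
		return -ENODEV;

	/* ... touch hardware or GPU mappings safely here ... */

	drm_dev_exit(idx);	/* must pair with every successful enter */
	return r;
}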
@@ -1813,11 +1859,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
bo = gem_to_amdgpu_bo(gobj);
}
- mem = &bo->tbo.mem;
- if (mem->mem_type == TTM_PL_TT)
+ mem = bo->tbo.resource;
+ if (mem->mem_type == TTM_PL_TT ||
+ mem->mem_type == AMDGPU_PL_PREEMPT)
pages_addr = bo->tbo.ttm->dma_address;
}
@@ -1876,7 +1923,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
* next command submission.
*/
if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
- uint32_t mem_type = bo->tbo.mem.mem_type;
+ uint32_t mem_type = bo->tbo.resource->mem_type;
if (!(bo->preferred_domains &
amdgpu_mem_type_to_domain(mem_type)))
@@ -2017,13 +2064,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
unsigned i, shared_count;
int r;
- r = dma_resv_get_fences_rcu(resv, &excl,
- &shared_count, &shared);
+ r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
if (r) {
/* Not enough memory to grab the fence list, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout_rcu(resv, true, false,
+ dma_resv_wait_timeout(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
return;
}
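
This hunk is part of a tree-wide rename that drops the _rcu suffix from the dma_resv helpers; the semantics stay the same. Usage mirroring the code above, with the argument meanings spelled out:

struct dma_fence *excl, **shared;
unsigned int shared_count;
int r;

r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
if (r) {
	/* No memory for the fence list: block on all fences instead.
	 * Arguments: wait_all = true, interruptible = false. */
	dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
}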
@@ -2635,7 +2681,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return true;
/* Don't evict VM page tables while they are busy */
- if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+ if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
return false;
/* Try to block ongoing updates */
@@ -2668,7 +2714,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base *bo_base;
/* shadow bo doesn't have bo base, its validation needs its parent */
- if (bo->parent && bo->parent->shadow == bo)
+ if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
bo = bo->parent;
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
@@ -2815,8 +2861,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
- true, true, timeout);
+ timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+ true, timeout);
if (timeout <= 0)
return timeout;
@@ -2837,7 +2883,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
{
- struct amdgpu_bo *root;
+ struct amdgpu_bo *root_bo;
+ struct amdgpu_bo_vm *root;
int r, i;
vm->va = RB_ROOT_CACHED;
@@ -2891,16 +2938,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
false, &root);
if (r)
goto error_free_delayed;
-
- r = amdgpu_bo_reserve(root, true);
+ root_bo = &root->bo;
+ r = amdgpu_bo_reserve(root_bo, true);
if (r)
goto error_free_root;
- r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+ r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
if (r)
goto error_unreserve;
- amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
+ amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo);
r = amdgpu_vm_clear_bo(adev, vm, root, false);
if (r)
@@ -2929,8 +2976,8 @@ error_unreserve:
amdgpu_bo_unreserve(vm->root.base.bo);
error_free_root:
- amdgpu_bo_unref(&vm->root.base.bo->shadow);
- amdgpu_bo_unref(&vm->root.base.bo);
+ amdgpu_bo_unref(&root->shadow);
+ amdgpu_bo_unref(&root_bo);
vm->root.base.bo = NULL;
error_free_delayed:
@@ -3028,7 +3075,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
if (pte_support_ats != vm->pte_support_ats) {
vm->pte_support_ats = pte_support_ats;
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
+ r = amdgpu_vm_clear_bo(adev, vm,
+ to_amdgpu_bo_vm(vm->root.base.bo),
+ false);
if (r)
goto free_idr;
}
@@ -3072,7 +3121,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
/* Free the shadow bo for compute VM */
- amdgpu_bo_unref(&vm->root.base.bo->shadow);
+ amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow);
if (pasid)
vm->pasid = pasid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 39d60e3c4e32..bee439dd673a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -39,6 +39,7 @@
struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
+struct amdgpu_bo_vm;
/*
* GPUVM handling
@@ -239,11 +240,11 @@ struct amdgpu_vm_update_params {
};
struct amdgpu_vm_update_funcs {
- int (*map_table)(struct amdgpu_bo *bo);
+ int (*map_table)(struct amdgpu_bo_vm *bo);
int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode);
int (*update)(struct amdgpu_vm_update_params *p,
- struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
+ struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
int (*commit)(struct amdgpu_vm_update_params *p,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index ac45d9c7a4e9..03a44be50dd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -29,9 +29,9 @@
*
* @table: newly allocated or validated PD/PT
*/
-static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
{
- return amdgpu_bo_kmap(table, NULL);
+ return amdgpu_bo_kmap(&table->bo, NULL);
}
/**
@@ -58,7 +58,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
* amdgpu_vm_cpu_update - helper to update page tables via CPU
*
* @p: see amdgpu_vm_update_params definition
- * @bo: PD/PT to update
+ * @vmbo: PD/PT to update
* @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -68,7 +68,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
* Write count number of PT/PD entries directly.
*/
static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
- struct amdgpu_bo *bo, uint64_t pe,
+ struct amdgpu_bo_vm *vmbo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags)
{
@@ -76,13 +76,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
uint64_t value;
int r;
- if (bo->tbo.moving) {
- r = dma_fence_wait(bo->tbo.moving, true);
+ if (vmbo->bo.tbo.moving) {
+ r = dma_fence_wait(vmbo->bo.tbo.moving, true);
if (r)
return r;
}
- pe += (unsigned long)amdgpu_bo_kptr(bo);
+ pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index a83a646759c5..422958152c2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -33,11 +33,11 @@
*
* @table: newly allocated or validated PD/PT
*/
-static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
{
int r;
- r = amdgpu_ttm_alloc_gart(&table->tbo);
+ r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
if (r)
return r;
@@ -186,7 +186,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
* amdgpu_vm_sdma_update - execute VM update
*
* @p: see amdgpu_vm_update_params definition
- * @bo: PD/PT to update
+ * @vmbo: PD/PT to update
* @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -197,10 +197,11 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
* the IB.
*/
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
- struct amdgpu_bo *bo, uint64_t pe,
+ struct amdgpu_bo_vm *vmbo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags)
{
+ struct amdgpu_bo *bo = &vmbo->bo;
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
unsigned int i, ndw, nptes;
@@ -238,8 +239,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (!p->pages_addr) {
/* set page commands needed */
- if (bo->shadow)
- amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
+ if (vmbo->shadow)
+ amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
count, incr, flags);
amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
incr, flags);
@@ -248,7 +249,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
/* copy commands needed */
ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
- (bo->shadow ? 2 : 1);
+ (vmbo->shadow ? 2 : 1);
/* for padding */
ndw -= 7;
@@ -263,8 +264,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
pte[i] |= flags;
}
- if (bo->shadow)
- amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
+ if (vmbo->shadow)
+ amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
pe += nptes * 8;
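
The recurring '* 8' strides in these update paths come from each PDE/PTE being 8 bytes: after writing nptes entries the byte offset pe advances by nptes * 8, and a PDE index is converted to a byte offset the same way (pde = (entry - parent->entries) * 8 further up). For example, writing 512 PTEs advances pe by 4096 bytes, one full 4 KiB page of table memory.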
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 0f897a343073..436ec246a7da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,6 +23,8 @@
*/
#include <linux/dma-mapping.h>
+#include <drm/ttm/ttm_range_manager.h>
+
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
@@ -172,7 +174,7 @@ static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
amdgpu_mem_info_vram_vendor, NULL);
-static const struct attribute *amdgpu_vram_mgr_attributes[] = {
+static struct attribute *amdgpu_vram_mgr_attributes[] = {
&dev_attr_mem_info_vram_total.attr,
&dev_attr_mem_info_vis_vram_total.attr,
&dev_attr_mem_info_vram_used.attr,
@@ -181,6 +183,10 @@ static const struct attribute *amdgpu_vram_mgr_attributes[] = {
NULL
};
+const struct attribute_group amdgpu_vram_mgr_attr_group = {
+ .attrs = amdgpu_vram_mgr_attributes
+};
+
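
Dropping the const from the attribute array lets it be wrapped in an attribute_group, so the VRAM sysfs files can be registered and removed as a unit rather than via the sysfs_create_files()/sysfs_remove_files() pair deleted further down. A hedged sketch of the registration this enables; the actual registration site is not part of this diff and the function names are placeholders:

/* Sketch: register the whole group at init, remove it at fini. */
static int example_register_vram_sysfs(struct amdgpu_device *adev)
{
	return sysfs_create_group(&adev->dev->kobj,
				  &amdgpu_vram_mgr_attr_group);
}

static void example_remove_vram_sysfs(struct amdgpu_device *adev)
{
	sysfs_remove_group(&adev->dev->kobj, &amdgpu_vram_mgr_attr_group);
}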
/**
* amdgpu_vram_mgr_vis_size - Calculate visible node size
*
@@ -213,19 +219,20 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_resource *mem = &bo->tbo.mem;
- struct drm_mm_node *nodes = mem->mm_node;
- unsigned pages = mem->num_pages;
+ struct ttm_resource *res = bo->tbo.resource;
+ unsigned pages = res->num_pages;
+ struct drm_mm_node *mm;
u64 usage;
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
return amdgpu_bo_size(bo);
- if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return 0;
- for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
- usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+ mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+ for (usage = 0; pages; pages -= mm->size, mm++)
+ usage += amdgpu_vram_mgr_vis_size(adev, mm);
return usage;
}
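
The per-resource drm_mm node array moves out of mem->mm_node and into a TTM-provided wrapper, which the container_of() above recovers from the ttm_resource. The layout assumed by these hunks, matching the to_ttm_range_mgr_node() helper used in amdgpu_vram_mgr_del() below:

/* Sketch of the range-manager node these hunks allocate and walk. */
struct ttm_range_mgr_node {
	struct ttm_resource	base;		/* generic TTM resource header */
	struct drm_mm_node	mm_nodes[];	/* one node per VRAM chunk */
};

static inline struct ttm_range_mgr_node *
to_ttm_range_mgr_node(struct ttm_resource *res)
{
	return container_of(res, struct ttm_range_mgr_node, base);
}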
@@ -361,15 +368,15 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
uint64_t vis_usage = 0, mem_bytes, max_bytes;
+ struct ttm_range_mgr_node *node;
struct drm_mm *mm = &mgr->mm;
enum drm_mm_insert_mode mode;
- struct drm_mm_node *nodes;
unsigned i;
int r;
@@ -382,10 +389,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
/* bail out quickly if there's likely not enough VRAM for this BO */
- mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+ mem_bytes = tbo->base.size;
if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
- atomic64_sub(mem_bytes, &mgr->usage);
- return -ENOSPC;
+ r = -ENOSPC;
+ goto error_sub;
}
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -400,22 +407,23 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
#endif
pages_per_node = max_t(uint32_t, pages_per_node,
tbo->page_alignment);
- num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+ num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
}
- nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
- GFP_KERNEL | __GFP_ZERO);
- if (!nodes) {
- atomic64_sub(mem_bytes, &mgr->usage);
- return -ENOMEM;
+ node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!node) {
+ r = -ENOMEM;
+ goto error_sub;
}
+ ttm_resource_init(tbo, place, &node->base);
+
mode = DRM_MM_INSERT_BEST;
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
- mem->start = 0;
- pages_left = mem->num_pages;
+ pages_left = node->base.num_pages;
/* Limit maximum size to 2GB due to SG table limitations */
pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
@@ -428,8 +436,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (pages >= pages_per_node)
alignment = pages_per_node;
- r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, alignment,
- 0, place->fpfn, lpfn, mode);
+ r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
+ alignment, 0, place->fpfn,
+ lpfn, mode);
if (unlikely(r)) {
if (pages > pages_per_node) {
if (is_power_of_2(pages))
@@ -438,11 +447,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
pages = rounddown_pow_of_two(pages);
continue;
}
- goto error;
+ goto error_free;
}
- vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
- amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
+ amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
pages_left -= pages;
++i;
@@ -452,19 +461,20 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
spin_unlock(&mgr->lock);
if (i == 1)
- mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+ node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
atomic64_add(vis_usage, &mgr->vis_usage);
- mem->mm_node = nodes;
+ *res = &node->base;
return 0;
-error:
+error_free:
while (i--)
- drm_mm_remove_node(&nodes[i]);
+ drm_mm_remove_node(&node->mm_nodes[i]);
spin_unlock(&mgr->lock);
- atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
+ kvfree(node);
- kvfree(nodes);
+error_sub:
+ atomic64_sub(mem_bytes, &mgr->usage);
return r;
}
@@ -477,24 +487,22 @@ error:
* Free the allocated VRAM again.
*/
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- struct drm_mm_node *nodes = mem->mm_node;
uint64_t usage = 0, vis_usage = 0;
- unsigned pages = mem->num_pages;
-
- if (!mem->mm_node)
- return;
+ unsigned i, pages;
spin_lock(&mgr->lock);
- while (pages) {
- pages -= nodes->size;
- drm_mm_remove_node(nodes);
- usage += nodes->size << PAGE_SHIFT;
- vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
- ++nodes;
+ for (i = 0, pages = res->num_pages; pages;
+ pages -= node->mm_nodes[i].size, ++i) {
+ struct drm_mm_node *mm = &node->mm_nodes[i];
+
+ drm_mm_remove_node(mm);
+ usage += mm->size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
}
amdgpu_vram_mgr_do_reserve(man);
spin_unlock(&mgr->lock);
@@ -502,8 +510,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
atomic64_sub(usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage);
- kvfree(mem->mm_node);
- mem->mm_node = NULL;
+ kvfree(node);
}
/**
@@ -520,7 +527,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
* Allocate and fill a sg table from a VRAM allocation.
*/
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- struct ttm_resource *mem,
+ struct ttm_resource *res,
u64 offset, u64 length,
struct device *dev,
enum dma_data_direction dir,
@@ -536,7 +543,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
return -ENOMEM;
/* Determine the number of DRM_MM nodes to export */
- amdgpu_res_first(mem, offset, length, &cursor);
+ amdgpu_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
amdgpu_res_next(&cursor, cursor.size);
@@ -556,7 +563,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
* and the number of bytes from it. Access the following
* DRM_MM node(s) if more of the buffer needs to be exported
*/
- amdgpu_res_first(mem, offset, length, &cursor);
+ amdgpu_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
phys_addr_t phys = cursor.start + adev->gmc.aper_base;
size_t size = cursor.size;
@@ -684,7 +691,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
struct ttm_resource_manager *man = &mgr->manager;
- int ret;
ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);
@@ -695,11 +701,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
- /* Add the two VRAM-related sysfs files */
- ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
- if (ret)
- DRM_ERROR("Failed to register sysfs\n");
-
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
return 0;
@@ -737,8 +738,6 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
- sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
-
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 3dcb8b32f48b..6fa2229b7229 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -31,6 +31,7 @@
#define ATOM_DEBUG
+#include "atomfirmware.h"
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
@@ -1299,12 +1300,168 @@ static void atom_index_iio(struct atom_context *ctx, int base)
}
}
+static void atom_get_vbios_name(struct atom_context *ctx)
+{
+ unsigned char *p_rom;
+ unsigned char str_num;
+ unsigned short off_to_vbios_str;
+ unsigned char *c_ptr;
+ int name_size;
+ int i;
+
+ const char *na = "--N/A--";
+ char *back;
+
+ p_rom = ctx->bios;
+
+ str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
+ if (str_num != 0) {
+ off_to_vbios_str =
+ *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
+
+ c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
+ } else {
+ /* no string table, so we don't know where to find the name */
+ memcpy(ctx->name, na, 7);
+ ctx->name[7] = 0;
+ return;
+ }
+
+ /*
+ * skip the atombios strings, usually 4
+ * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
+ */
+ for (i = 0; i < str_num; i++) {
+ while (*c_ptr != 0)
+ c_ptr++;
+ c_ptr++;
+ }
+
+ /* skip the following 2 chars: 0x0D 0x0A */
+ c_ptr += 2;
+
+ name_size = strnlen(c_ptr, STRLEN_LONG - 1);
+ memcpy(ctx->name, c_ptr, name_size);
+ back = ctx->name + name_size;
+ while ((*--back) == ' ')
+ ;
+ *(back + 1) = '\0';
+}
+
+static void atom_get_vbios_date(struct atom_context *ctx)
+{
+ unsigned char *p_rom;
+ unsigned char *date_in_rom;
+
+ p_rom = ctx->bios;
+
+ date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;
+
+ ctx->date[0] = '2';
+ ctx->date[1] = '0';
+ ctx->date[2] = date_in_rom[6];
+ ctx->date[3] = date_in_rom[7];
+ ctx->date[4] = '/';
+ ctx->date[5] = date_in_rom[0];
+ ctx->date[6] = date_in_rom[1];
+ ctx->date[7] = '/';
+ ctx->date[8] = date_in_rom[3];
+ ctx->date[9] = date_in_rom[4];
+ ctx->date[10] = ' ';
+ ctx->date[11] = date_in_rom[9];
+ ctx->date[12] = date_in_rom[10];
+ ctx->date[13] = date_in_rom[11];
+ ctx->date[14] = date_in_rom[12];
+ ctx->date[15] = date_in_rom[13];
+ ctx->date[16] = '\0';
+}
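+
The ROM stores the build date as "MM/DD/YY" in bytes 0-7 with "HH:MM" at bytes 9-13; the assignments above rearrange it into "20YY/MM/DD HH:MM". For example, a ROM date field of "06/25/21 14:30" comes out as "2021/06/25 14:30" in ctx->date.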
+
+static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
+ int end, int maxlen)
+{
+ unsigned long str_off;
+ unsigned char *p_rom;
+ unsigned short str_len;
+
+ str_off = 0;
+ str_len = strnlen(str, maxlen);
+ p_rom = ctx->bios;
+
+ for (; start <= end; ++start) {
+ for (str_off = 0; str_off < str_len; ++str_off) {
+ if (str[str_off] != *(p_rom + start + str_off))
+ break;
+ }
+
+ if (str_off == str_len || str[str_off] == 0)
+ return p_rom + start;
+ }
+ return NULL;
+}
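+
atom_find_str_in_rom() is a plain linear scan: at each candidate offset it compares bytes until a mismatch, until str_len bytes match, or until the pattern's terminating NUL. Typical use, exactly as the helpers below call it:

/* Search ROM offsets 3..1024 for the part-number anchor, comparing at
 * most 64 bytes of the pattern; returns NULL when it is absent. */
unsigned char *anchor = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);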
+
+static void atom_get_vbios_pn(struct atom_context *ctx)
+{
+ unsigned char *p_rom;
+ unsigned short off_to_vbios_str;
+ unsigned char *vbios_str;
+ int count;
+
+ off_to_vbios_str = 0;
+ p_rom = ctx->bios;
+
+ if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
+ off_to_vbios_str =
+ *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
+
+ vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
+ } else {
+ vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
+ }
+
+ if (*vbios_str == 0) {
+ vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
+ /* skip past the matched anchor; stays NULL when it was not found */
+ if (vbios_str != NULL)
+ vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
+ }
+ if (vbios_str != NULL && *vbios_str == 0)
+ vbios_str++;
+
+ if (vbios_str != NULL) {
+ count = 0;
+ while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
+ vbios_str[count] <= 'z') {
+ ctx->vbios_pn[count] = vbios_str[count];
+ count++;
+ }
+
+ ctx->vbios_pn[count] = 0;
+ }
+}
+
+static void atom_get_vbios_version(struct atom_context *ctx)
+{
+ unsigned char *vbios_ver;
+
+ /* find anchor ATOMBIOSBK-AMD */
+ vbios_ver = atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, 3, 1024, 64);
+ if (vbios_ver != NULL) {
+ /* skip "ATOMBIOSBK-AMD VER" (18 chars) to reach the version digits */
+ vbios_ver += 18;
+ memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
+ } else {
+ ctx->vbios_ver_str[0] = '\0';
+ }
+}
+
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
int base;
struct atom_context *ctx =
kzalloc(sizeof(struct atom_context), GFP_KERNEL);
char *str;
+ struct _ATOM_ROM_HEADER *atom_rom_header;
+ struct _ATOM_MASTER_DATA_TABLE *master_table;
+ struct _ATOM_FIRMWARE_INFO *atom_fw_info;
u16 idx;
if (!ctx)
@@ -1353,6 +1510,21 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
}
+ atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
+ if (atom_rom_header->usMasterDataTableOffset != 0) {
+ master_table = (struct _ATOM_MASTER_DATA_TABLE *)
+ CSTR(atom_rom_header->usMasterDataTableOffset);
+ if (master_table->ListOfDataTables.FirmwareInfo != 0) {
+ atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
+ CSTR(master_table->ListOfDataTables.FirmwareInfo);
+ ctx->version = atom_fw_info->ulFirmwareRevision;
+ }
+ }
+
+ atom_get_vbios_name(ctx);
+ atom_get_vbios_pn(ctx);
+ atom_get_vbios_date(ctx);
+ atom_get_vbios_version(ctx);
return ctx;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index d279759cab47..0c1839824520 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -112,6 +112,10 @@ struct drm_device;
#define ATOM_IO_SYSIO 2
#define ATOM_IO_IIO 0x80
+#define STRLEN_NORMAL 32
+#define STRLEN_LONG 64
+#define STRLEN_VERYLONG 254
+
struct card_info {
struct drm_device *dev;
void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
@@ -140,6 +144,12 @@ struct atom_context {
uint32_t *scratch;
int scratch_size_bytes;
char vbios_version[20];
+
+ uint8_t name[STRLEN_LONG];
+ uint8_t vbios_pn[STRLEN_LONG];
+ uint32_t version;
+ uint8_t vbios_ver_str[STRLEN_NORMAL];
+ uint8_t date[STRLEN_NORMAL];
};
extern int amdgpu_atom_debug;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index d3745711d55f..df385ffc9768 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -309,8 +309,7 @@ static int cik_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c4bb8eed246d..c8ebd108548d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -720,7 +720,7 @@ err0:
}
/**
- * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -746,7 +746,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
}
/**
- * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ * cik_sdma_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -775,7 +775,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
}
/**
- * cik_sdma_vm_set_pages - update the page tables using sDMA
+ * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -804,7 +804,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
}
/**
- * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
*
* @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 307c01301c87..b8c47e0cf37a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -301,8 +301,7 @@ static int cz_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index dbcb09cf83e6..c7803dc2b2d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -456,7 +456,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
}
/**
- * cik_get_number_of_dram_channels - get the number of dram channels
+ * si_get_number_of_dram_channels - get the number of dram channels
*
* @adev: amdgpu_device pointer
*
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index fc12e3c3e9ca..102f31526204 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -47,7 +47,7 @@
#include "gfx_v10_0.h"
#include "nbio_v2_3.h"
-/**
+/*
* Navi10 has two graphic rings to share each graphic pipe.
* 1. Primary ring
* 2. Async ring
@@ -1432,38 +1432,36 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
};
-static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
-{
- /* always programed by rlcg, only for gc */
- if (offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI) ||
- offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO) ||
- offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH) ||
- offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL) ||
- offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX) ||
- offset == SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)) {
- if (!amdgpu_sriov_reg_indirect_gc(adev))
- *flag = GFX_RLCG_GC_WRITE_OLD;
- else
- *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+static bool gfx_v10_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
+ int write, u32 *rlcg_flag)
+{
+ switch (hwip) {
+ case GC_HWIP:
+ if (amdgpu_sriov_reg_indirect_gc(adev)) {
+ *rlcg_flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
- return true;
- }
+ return true;
+ /* only in the new version are AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
+ } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
+ *rlcg_flag = GFX_RLCG_GC_WRITE_OLD;
- /* currently support gc read/write, mmhub write */
- if (offset >= SOC15_REG_OFFSET(GC, 0, mmSDMA0_DEC_START) &&
- offset <= SOC15_REG_OFFSET(GC, 0, mmRLC_GTS_OFFSET_MSB)) {
- if (amdgpu_sriov_reg_indirect_gc(adev))
- *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
- else
- return false;
- } else {
- if (amdgpu_sriov_reg_indirect_mmhub(adev))
- *flag = GFX_RLCG_MMHUB_WRITE;
- else
- return false;
+ return true;
+ }
+
+ break;
+ case MMHUB_HWIP:
+ if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
+ (acc_flags & AMDGPU_REGS_RLC) && write) {
+ *rlcg_flag = GFX_RLCG_MMHUB_WRITE;
+ return true;
+ }
+
+ break;
+ default:
+ DRM_DEBUG("Not program register by RLCG\n");
}
- return true;
+ return false;
}
static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
@@ -1523,36 +1521,34 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
return ret;
}
-static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 flag)
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
{
- uint32_t rlcg_flag;
+ u32 rlcg_flag;
- if (amdgpu_sriov_fullaccess(adev) &&
- gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 1)) {
+ if (!amdgpu_sriov_runtime(adev) &&
+ gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
-
return;
}
- if (flag & AMDGPU_REGS_NO_KIQ)
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
WREG32_NO_KIQ(offset, value);
else
WREG32(offset, value);
}
-static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 flag)
+static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
{
- uint32_t rlcg_flag;
+ u32 rlcg_flag;
- if (amdgpu_sriov_fullaccess(adev) &&
- gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 0))
+ if (!amdgpu_sriov_runtime(adev) &&
+ gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
- if (flag & AMDGPU_REGS_NO_KIQ)
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
return RREG32_NO_KIQ(offset);
else
return RREG32(offset);
-
- return 0;
}
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
@@ -3935,7 +3931,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
- char wks[10];
+ char *wks = "";
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
@@ -3948,7 +3944,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- memset(wks, 0, sizeof(wks));
switch (adev->asic_type) {
case CHIP_NAVI10:
chip_name = "navi10";
@@ -3957,7 +3952,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
chip_name = "navi14";
if (!(adev->pdev->device == 0x7340 &&
adev->pdev->revision != 0x00))
- snprintf(wks, sizeof(wks), "_wks");
+ wks = "_wks";
break;
case CHIP_NAVI12:
chip_name = "navi12";
@@ -5233,10 +5228,10 @@ static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
uint32_t tmp;
/* enable Save Restore Machine */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
+ tmp = RREG32_SOC15(GC, 0, mmRLC_SRM_CNTL);
tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
+ WREG32_SOC15(GC, 0, mmRLC_SRM_CNTL, tmp);
}
static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
@@ -7396,7 +7391,7 @@ static int gfx_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
- if (!adev->in_pci_err_recovery) {
+ if (!adev->no_hw_access) {
#ifndef BRING_UP_DEBUG
if (amdgpu_async_gfx_ring) {
r = gfx_v10_0_kiq_disable_kgq(adev);
@@ -7941,12 +7936,12 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
-
+ /* raw offset for the NO_KIQ path; not converted to *_SOC15 */
reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
else
- data = RREG32(reg);
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
@@ -8688,16 +8683,16 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(cp_int_cntl_reg);
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE, 0);
- WREG32(cp_int_cntl_reg, cp_int_cntl);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(cp_int_cntl_reg);
+ cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE, 1);
- WREG32(cp_int_cntl_reg, cp_int_cntl);
+ WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
break;
default:
break;
@@ -8741,16 +8736,16 @@ static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 0);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
+ WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 1);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
+ WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
default:
break;
@@ -8946,20 +8941,20 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
GENERIC2_INT_ENABLE, 0);
WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
- tmp = RREG32(target);
+ tmp = RREG32_SOC15_IP(GC, target);
tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
GENERIC2_INT_ENABLE, 0);
- WREG32(target, tmp);
+ WREG32_SOC15_IP(GC, target, tmp);
} else {
tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
GENERIC2_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
- tmp = RREG32(target);
+ tmp = RREG32_SOC15_IP(GC, target);
tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
GENERIC2_INT_ENABLE, 1);
- WREG32(target, tmp);
+ WREG32_SOC15_IP(GC, target, tmp);
}
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index c35fdd2ef2d4..685212c3ddae 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2116,7 +2116,7 @@ error_free_scratch:
}
/**
- * gfx_v7_0_ring_emit_hdp - emit an hdp flush on the cp
+ * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
*
* @ring: amdgpu_ring structure holding ring information
*
@@ -2242,7 +2242,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* IB stuff
*/
/**
- * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
+ * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring
*
* @ring: amdgpu_ring structure holding ring information
* @job: job to retrieve vmid from
@@ -3196,7 +3196,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
}
/**
- * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
+ * gfx_v7_0_ring_emit_pipeline_sync - cik vm flush using the CP
*
* @ring: the ring to emit the commands to
*
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index feaa5e4a5538..fe5908f708cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -734,7 +734,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
-static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
static void *scratch_reg0;
static void *scratch_reg1;
@@ -787,15 +787,16 @@ static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32
}
-static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset,
+ u32 v, u32 acc_flags, u32 hwip)
{
if (amdgpu_sriov_fullaccess(adev)) {
- gfx_v9_0_rlcg_rw(adev, offset, v, flag);
+ gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);
return;
}
- if (flag & AMDGPU_REGS_NO_KIQ)
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
WREG32_NO_KIQ(offset, v);
else
WREG32(offset, v);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index dbad9ef002d5..c0352dcc89be 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -1641,8 +1641,8 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
return 0;
}
-int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status)
+static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0, ded_count = 0;
@@ -1676,13 +1676,14 @@ static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
uint32_t i, j;
uint32_t value;
- value = REG_SET_FIELD(0, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1);
-
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
+ value = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_2_ea_err_status_regs));
+ value = REG_SET_FIELD(value, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1);
WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), value);
}
}
@@ -1690,7 +1691,7 @@ static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
mutex_unlock(&adev->grbm_idx_mutex);
}
-void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
+static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
@@ -1699,7 +1700,7 @@ void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL);
}
-int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
{
struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
int ret;
@@ -1734,6 +1735,7 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_2_ea_err_status_regs));
+
if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
@@ -1741,7 +1743,9 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
j, reg_value);
}
/* clear after read */
- WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10);
+ reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS,
+ CLEAR_ERROR_STATUS, 0x1);
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), reg_value);
}
}
@@ -1772,7 +1776,7 @@ static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
}
}
-void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
+static void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
@@ -1782,7 +1786,7 @@ void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
gfx_v9_4_2_query_sq_timeout_status(adev);
}
-void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
+static void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
@@ -1792,7 +1796,7 @@ void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
gfx_v9_4_2_reset_sq_timeout_status(adev);
}
-void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
+static void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
{
uint32_t i;
uint32_t data;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index f02dc904e4cf..db154bf083e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -229,6 +229,10 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
/* Use register 17 for GART */
const unsigned eng = 17;
unsigned int i;
+ unsigned char hub_ip = 0;
+
+ hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
+ GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
/*
@@ -242,8 +246,9 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
if (use_semaphore) {
for (i = 0; i < adev->usec_timeout; i++) {
/* a read return value of 1 means semaphore acquire */
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng);
+ tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+ hub->eng_distance * eng, hub_ip);
+
if (tmp & 0x1)
break;
udelay(1);
@@ -253,7 +258,9 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+ WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
+ hub->eng_distance * eng,
+ inv_req, hub_ip);
/*
* Issue a dummy read to wait for the ACK register to be cleared
@@ -261,12 +268,14 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
*/
if ((vmhub == AMDGPU_GFXHUB_0) &&
(adev->asic_type < CHIP_SIENNA_CICHLID))
- RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng);
+ RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
+ hub->eng_distance * eng, hub_ip);
/* Wait for ACK with a delay. */
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
- hub->eng_distance * eng);
+ tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
+ hub->eng_distance * eng, hub_ip);
+
tmp &= 1 << vmid;
if (tmp)
break;
@@ -280,8 +289,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
* add semaphore release after invalidation;
* writing 0 releases the semaphore
*/
- WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
- hub->eng_distance * eng, 0);
+ WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+ hub->eng_distance * eng, 0, hub_ip);
spin_unlock(&adev->gmc.invalidate_lock);
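
Only the accessors changed in this hunk; the invalidation handshake itself is untouched. In outline (sem/req/ack abbreviate the vm_inv_eng0_* addresses computed with hub->eng_distance * eng above; error handling elided):

for (i = 0; i < adev->usec_timeout; i++) {	/* 1. acquire semaphore */
	if (RREG32_RLC_NO_KIQ(sem, hub_ip) & 0x1)
		break;
	udelay(1);
}
WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);	/* 2. request invalidation */
for (i = 0; i < adev->usec_timeout; i++) {	/* 3. wait for the vmid's ack bit */
	if (RREG32_RLC_NO_KIQ(ack, hub_ip) & (1 << vmid))
		break;
	udelay(1);
}
WREG32_RLC_NO_KIQ(sem, 0, hub_ip);		/* 4. release semaphore */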
@@ -947,7 +956,7 @@ static int gmc_v10_0_sw_init(void *handle)
}
/**
- * gmc_v8_0_gart_fini - vm fini callback
+ * gmc_v10_0_gart_fini - vm fini callback
*
* @adev: amdgpu_device pointer
*
@@ -956,7 +965,6 @@ static int gmc_v10_0_sw_init(void *handle)
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
}
static int gmc_v10_0_sw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 405d6ad09022..0e81e03e9b49 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -898,7 +898,6 @@ static int gmc_v6_0_sw_fini(void *handle)
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 210ada2289ec..0a50fdaced7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -516,7 +516,7 @@ static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
}
/**
- * gmc_v8_0_set_fault_enable_default - update VM fault handling
+ * gmc_v7_0_set_fault_enable_default - update VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
@@ -1085,7 +1085,6 @@ static int gmc_v7_0_sw_fini(void *handle)
kfree(adev->gmc.vm_fault_info);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index e4f27b3f28fb..492ebed2915b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1201,7 +1201,6 @@ static int gmc_v8_0_sw_fini(void *handle)
kfree(adev->gmc.vm_fault_info);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index fb3c7588dde3..68e6b6b8989a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1615,7 +1615,6 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_unref(&adev->gmc.pdb0_bo);
amdgpu_bo_fini(adev);
- amdgpu_gart_fini(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index cc957471f31e..ddfe4eaeea05 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -300,8 +300,7 @@ static int iceland_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index de5abceced0d..85967a5570cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
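
Cancelling the idle worker before the power-state check closes a race: without it, delayed idle work could fire after hw_fini and touch already-gated JPEG registers. The same one-line cancel_delayed_work_sync(&adev->vcn.idle_work) fix is applied to jpeg_v2_5 and jpeg_v3_0 in the hunks below.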
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 938ef4ce5b76..46096ad7f0d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle)
static int jpeg_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
- ring = &adev->jpeg.inst[i].ring_dec;
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 94be35357f7d..bd77794315bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle)
static int jpeg_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
- ring = &adev->jpeg.inst->ring_dec;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 655c3d5f3d35..f7e93bbc4e15 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -29,6 +29,7 @@
#include "mmhub/mmhub_2_0_0_default.h"
#include "navi10_enum.h"
+#include "gc/gc_10_1_0_offset.h"
#include "soc15_common.h"
#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid 0x064d
@@ -192,11 +193,11 @@ static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
lower_32_bits(page_table_base));
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
hub->ctx_addr_distance * vmid,
upper_32_bits(page_table_base));
}
@@ -207,14 +208,14 @@ static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
(u32)(adev->gmc.gart_start >> 44));
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
(u32)(adev->gmc.gart_end >> 12));
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
(u32)(adev->gmc.gart_end >> 44));
}
@@ -223,12 +224,12 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
- /* Program the AGP BAR */
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
-
if (!amdgpu_sriov_vf(adev)) {
+ /* Program the AGP BAR */
+ WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
+ WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
@@ -335,7 +336,7 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
+ WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}
static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
@@ -397,16 +398,16 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
!adev->gmc.noretry);
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
i * hub->ctx_addr_distance, 0);
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
i * hub->ctx_addr_distance, 0);
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
i * hub->ctx_addr_distance,
lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
i * hub->ctx_addr_distance,
upper_32_bits(adev->vm_manager.max_pfn - 1));
}
@@ -418,9 +419,9 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
unsigned i;
for (i = 0; i < 18; ++i) {
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
i * hub->eng_addr_distance, 0xffffffff);
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
i * hub->eng_addr_distance, 0x1f);
}
}
@@ -449,7 +450,7 @@ static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
/* Disable all tables */
for (i = 0; i < AMDGPU_NUM_VMID; i++)
- WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
+ WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
/* Setup TLB control */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 47c8dd9d1c78..c4ef822bbe8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -436,7 +436,7 @@ static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
}
/**
- * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
+ * mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 17ed61a34fbf..baa2fd119e6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -586,11 +586,7 @@ static int navi10_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_fini_sw(adev);
return 0;
}
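
The per-ring teardown calls removed from the IH sw_fini callbacks in this series collapse into a single amdgpu_irq_fini_sw(). Its body is not part of this diff; a sketch assuming it simply centralizes what each IP block used to do by hand (the example_ prefix marks it as illustrative):

/* Sketch: software-side IRQ teardown shared by the converted IH blocks. */
static void example_irq_fini_sw(struct amdgpu_device *adev)
{
	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
}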
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index ec5f835f60f2..27ba0408a2aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -466,7 +466,7 @@ void nv_grbm_select(struct amdgpu_device *adev,
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
@@ -849,8 +849,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_NAVI12:
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ if (!amdgpu_sriov_vf(adev)) {
+ amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ }
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 1f2e7e35c91e..fc400d95b83b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -23,6 +23,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
@@ -282,10 +283,8 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
if (ret)
return ret;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-
/* Copy PSP KDB binary to memory */
- memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
+ psp_copy_fw(psp, psp->kdb_start_addr, psp->kdb_bin_size);
/* Provide the PSP KDB to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -315,10 +314,8 @@ static int psp_v11_0_bootloader_load_spl(struct psp_context *psp)
if (ret)
return ret;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-
/* Copy PSP SPL binary to memory */
- memcpy(psp->fw_pri_buf, psp->spl_start_addr, psp->spl_bin_size);
+ psp_copy_fw(psp, psp->spl_start_addr, psp->spl_bin_size);
/* Provide the PSP SPL to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -348,10 +345,8 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
if (ret)
return ret;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-
/* Copy PSP System Driver binary to memory */
- memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
+ psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size);
/* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -384,10 +379,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
if (ret)
return ret;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-
/* Copy Secure OS binary to PSP memory */
- memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
+ psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size);
/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
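
All four bootloader-load paths in this file (and the psp_v12_0/psp_v3_1 hunks below) replace their open-coded memset()/memcpy() pair with psp_copy_fw(), whose body is not in this diff. Given the removed code and the new <drm/drm_drv.h> include, a plausible sketch of the helper — the drm_dev_enter() guard and the exact signature are assumptions:

	static void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr,
				uint32_t bin_size)
	{
		int idx;

		/* assumed hotplug guard, in line with the rest of this series */
		if (!drm_dev_enter(&psp->adev->ddev, &idx))
			return;

		memset(psp->fw_pri_buf, 0, PSP_1_MEG);
		memcpy(psp->fw_pri_buf, start_addr, bin_size);

		drm_dev_exit(idx);
	}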
@@ -621,7 +614,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
uint32_t p2c_header[4];
uint32_t sz;
void *buf;
- int ret;
+ int ret, idx;
if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
DRM_DEBUG("Memory training is not supported.\n");
@@ -694,17 +687,24 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
return -ENOMEM;
}
- memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
- ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
- if (ret) {
- DRM_ERROR("Send long training msg failed.\n");
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
+ ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
+ if (ret) {
+ DRM_ERROR("Send long training msg failed.\n");
+ vfree(buf);
+ drm_dev_exit(idx);
+ return ret;
+ }
+
+ memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
+ adev->hdp.funcs->flush_hdp(adev, NULL);
vfree(buf);
- return ret;
+ drm_dev_exit(idx);
+ } else {
+ vfree(buf);
+ return -ENODEV;
}
-
- memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
- vfree(buf);
}
if (ops & PSP_MEM_TRAIN_SAVE) {
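
The reworked training path brackets every aperture access with drm_dev_enter()/drm_dev_exit(), the DRM hotplug guard: drm_dev_enter() returns true and takes an SRCU read-side reference while the device is still registered, and the idx token must reach drm_dev_exit() on every path — note the error leg above does exactly that before returning. The skeleton of the pattern:

	int idx;

	if (!drm_dev_enter(&adev->ddev, &idx))
		return -ENODEV;		/* device already unplugged */

	/* ... safe to touch BARs / message the PSP here ... */

	drm_dev_exit(idx);		/* drop the SRCU reference */

vce_v4_0 and vcn_v3_0 later in this diff adopt the same guard around their firmware-buffer copies.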
@@ -733,7 +733,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
- data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ data = psp->km_ring.ring_wptr;
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -747,6 +747,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ psp->km_ring.ring_wptr = value;
} else
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index c4828bd3264b..618e5b6b85d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -138,10 +138,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
if (ret)
return ret;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-
/* Copy PSP System Driver binary to memory */
- memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
+ psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size);
/* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -179,10 +177,8 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
if (ret)
return ret;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-
/* Copy Secure OS binary to PSP memory */
- memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
+ psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size);
/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index f2e725f72d2f..ce7377d2368f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -102,10 +102,8 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
if (ret)
return ret;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-
/* Copy PSP System Driver binary to memory */
- memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
+ psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size);
/* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -143,10 +141,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
if (ret)
return ret;
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-
/* Copy Secure OS binary to PSP memory */
- memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
+ psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size);
/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -379,7 +375,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
- data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ data = psp->km_ring.ring_wptr;
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
return data;
@@ -394,6 +390,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
/* send interrupt to PSP for SRIOV ring write pointer update */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_CONSUME_CMD);
+ psp->km_ring.ring_wptr = value;
} else
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
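
psp_v11_0 and psp_v3_1 receive the same SR-IOV write-pointer change: ring_set_wptr() now caches the value in psp->km_ring.ring_wptr after ringing the doorbell, and ring_get_wptr() returns that cached copy instead of reading mmMP0_SMN_C2PMSG_102 back. Condensed for reference (illustrative only, same names as the hunks above):

	/* set: still ring the doorbell, but remember what was written */
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
	psp->km_ring.ring_wptr = value;

	/* get: trust the cached value under SR-IOV */
	data = amdgpu_sriov_vf(adev) ? psp->km_ring.ring_wptr
				     : RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);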
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 9f0dda040ec8..4509bd4cce2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -271,7 +271,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
- * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index d197185f7789..ae5464e2535a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -754,7 +754,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
- * sdma_v4_0_page_ring_set_wptr - commit the write pointer
+ * sdma_v4_0_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
@@ -820,7 +820,7 @@ static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
- * sdma_v4_0_ring_set_wptr - commit the write pointer
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 75d7310f8439..6117ba8a4c3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -328,9 +328,9 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
- wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
+ wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
wptr = wptr << 32;
- wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
+ wptr |= RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
}
@@ -371,9 +371,9 @@ static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
ring->me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
upper_32_bits(ring->wptr << 2));
}
}
@@ -440,20 +440,19 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
*/
static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
- uint32_t gcr_cntl =
- SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
- SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
- SDMA_GCR_GLI_INV(1);
+ uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
+ SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
+ SDMA_GCR_GLI_INV(1);
/* flush entire cache L0/L1/L2, this can be optimized by performance requirement */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
- SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
+ SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
- SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
+ SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
- SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
+ SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
}
/**
@@ -549,12 +548,12 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
- rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
}
@@ -571,7 +570,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v5_0_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
@@ -615,11 +614,11 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
if (enable && amdgpu_sdma_phase_quantum) {
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
phase_quantum);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
if (!amdgpu_sriov_vf(adev))
@@ -686,58 +685,63 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
/* setup the wptr shadow polling */
wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
lower_32_bits(wptr_gpu_addr));
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i,
+ wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
F32_POLL_ENABLE, 1);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
wptr_poll_cntl);
/* set the wb address whether it's enabled or not */
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
+ ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
+ ring->gpu_addr >> 40);
ring->wptr = 0;
/* before programming wptr to a smaller value, need to set minor_ptr_update first */
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
+ lower_32_bits(ring->wptr) << 2);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
+ upper_32_bits(ring->wptr) << 2);
}
- doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
+ doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
+ mmSDMA0_GFX_DOORBELL_OFFSET));
if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
@@ -746,8 +750,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
} else {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
}
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
+ doorbell_offset);
adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring->doorbell_index, 20);
@@ -756,7 +761,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
sdma_v5_0_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr is programmed */
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
if (!amdgpu_sriov_vf(adev)) {
/* set utc l1 enable flag always to 1 */
@@ -790,15 +795,15 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
ring->sched.ready = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index ecb82c39b106..98059bce692f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -147,9 +147,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
- if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
- return 0;
-
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -187,6 +184,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
(void *)&adev->sdma.instance[0],
sizeof(struct amdgpu_sdma_instance));
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
+ return 0;
+
DRM_DEBUG("psp_load == '%s'\n",
adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
@@ -517,7 +517,7 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index cb703e307238..195b45bcb8ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -305,7 +305,7 @@ err0:
}
/**
- * cik_dma_vm_copy_pte - update PTEs by copying them from the GART
+ * si_dma_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -402,7 +402,7 @@ static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
}
/**
- * si_dma_pad_ib - pad the IB to the required number of dw
+ * si_dma_ring_pad_ib - pad the IB to the required number of dw
*
* @ring: amdgpu_ring pointer
* @ib: indirect buffer to fill with padding
@@ -415,7 +415,7 @@ static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
}
/**
- * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
+ * si_dma_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 51880f6ef634..9a24f17a5750 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -175,8 +175,7 @@ static int si_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_fini_sw(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
index 3c47c94846d6..39b7c206770f 100644
--- a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
@@ -106,7 +106,7 @@ static u32 smuio_v13_0_get_socket_id(struct amdgpu_device *adev)
}
/**
- * smuio_v13_0_supports_host_gpu_xgmi - detect xgmi interface between cpu and gpu/s.
+ * smuio_v13_0_is_host_gpu_xgmi_supported - detect xgmi interface between cpu and gpu/s.
*
* @adev: amdgpu device pointer
*
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 75008cc5f189..de85577c9cfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -633,7 +633,9 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
if (entry->and_mask == 0xffffffff) {
tmp = entry->or_mask;
} else {
- tmp = RREG32(reg);
+ tmp = (entry->hwip == GC_HWIP) ?
+ RREG32_SOC15_IP(GC, reg) : RREG32(reg);
+
tmp &= ~(entry->and_mask);
tmp |= (entry->or_mask & entry->and_mask);
}
@@ -644,7 +646,8 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
WREG32_RLC(reg, tmp);
else
- WREG32(reg, tmp);
+ (entry->hwip == GC_HWIP) ?
+ WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 14bd794bbea6..c781808e4dc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -27,28 +27,51 @@
/* Register Access Macros */
#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
+ ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->rlcg_wreg) ? \
+ adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, flag, hwip) : \
+ WREG32(reg, value))
+
+#define __RREG32_SOC15_RLC__(reg, flag, hwip) \
+ ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->rlcg_rreg) ? \
+ adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, flag, hwip) : \
+ RREG32(reg))
+
#define WREG32_FIELD15(ip, idx, reg, field, val) \
- WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
- (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
- & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
+ (__RREG32_SOC15_RLC__( \
+ adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
+ 0, ip##_HWIP) & \
+ ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
+ 0, ip##_HWIP)
#define RREG32_SOC15(ip, inst, reg) \
- RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+ 0, ip##_HWIP)
+
+#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP)
#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
- RREG32_NO_KIQ(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+ AMDGPU_REGS_NO_KIQ, ip##_HWIP)
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
- RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
+ __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, 0, ip##_HWIP)
#define WREG32_SOC15(ip, inst, reg, value) \
- WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
+ value, 0, ip##_HWIP)
+
+#define WREG32_SOC15_IP(ip, reg, value) \
+ __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP)
#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
- WREG32_NO_KIQ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+ value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
- WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \
+ value, 0, ip##_HWIP)
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \
({ int ret = 0; \
@@ -77,12 +100,7 @@
})
#define WREG32_RLC(reg, value) \
- do { \
- if (adev->gfx.rlc.funcs->rlcg_wreg) \
- adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, 0); \
- else \
- WREG32(reg, value); \
- } while (0)
+ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP)
#define WREG32_RLC_EX(prefix, reg, value) \
do { \
@@ -108,24 +126,19 @@
} \
} while (0)
+/* shadow the registers in the callback function */
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
- WREG32_RLC((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP)
+/* for GC only */
#define RREG32_RLC(reg) \
- (adev->gfx.rlc.funcs->rlcg_rreg ? \
- adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, 0) : RREG32(reg))
-
-#define WREG32_RLC_NO_KIQ(reg, value) \
- do { \
- if (adev->gfx.rlc.funcs->rlcg_wreg) \
- adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, AMDGPU_REGS_NO_KIQ); \
- else \
- WREG32_NO_KIQ(reg, value); \
- } while (0)
+ __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP)
+
+#define WREG32_RLC_NO_KIQ(reg, value, hwip) \
+ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
-#define RREG32_RLC_NO_KIQ(reg) \
- (adev->gfx.rlc.funcs->rlcg_rreg ? \
- adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, AMDGPU_REGS_NO_KIQ) : RREG32_NO_KIQ(reg))
+#define RREG32_RLC_NO_KIQ(reg, hwip) \
+ __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
#define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
do { \
@@ -146,12 +159,12 @@
} while (0)
#define RREG32_SOC15_RLC(ip, inst, reg) \
- RREG32_RLC(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP)
#define WREG32_SOC15_RLC(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\
- WREG32_RLC(target_reg, value); \
+ __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \
} while (0)
#define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
@@ -161,14 +174,16 @@
} while (0)
#define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \
- WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
- (RREG32_RLC(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
- & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
+ (__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
+ AMDGPU_REGS_RLC, ip##_HWIP) & \
+ ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
+ AMDGPU_REGS_RLC, ip##_HWIP)
#define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
- WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP)
#define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
- RREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset))
+ __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP)
#endif
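
The header rework funnels every SOC15 accessor through the two new building blocks, __RREG32_SOC15_RLC__ and __WREG32_SOC15_RLC__, which detour to the RLCG callbacks when running as an SR-IOV VF. Expanded by hand, a plain write such as WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, v) — the call introduced in the nv.c hunk above — now behaves roughly like this (illustrative expansion, not patch content):

	uint32_t reg = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX]
		       + mmGRBM_GFX_CNTL;

	if (amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->rlcg_wreg)
		/* RLCG-mediated write; flag 0, hwip GC_HWIP */
		adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, GC_HWIP);
	else
		WREG32(reg, v);		/* bare-metal path, unchanged */

The flag argument combines AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC so one macro pair covers the previous WREG32/WREG32_NO_KIQ/WREG32_RLC variants, and the new hwip argument lets the callback distinguish GC from MMHUB register ranges — which is why WREG32_RLC_NO_KIQ and RREG32_RLC_NO_KIQ grow an extra hwip parameter here.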
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 249fcbee7871..b08905d1c00f 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -312,8 +312,7 @@ static int tonga_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 284447d7a579..6c0e91495365 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -340,7 +340,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev)
/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
- /* disable interupt */
+ /* disable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
#ifdef __BIG_ENDIAN
@@ -405,7 +405,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev)
return r;
}
- /* enable interupt */
+ /* enable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 2bab9c77952f..cf3803f8f075 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 8e238dea7bef..90910d19db12 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -25,6 +25,7 @@
*/
#include <linux/firmware.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
@@ -555,16 +556,19 @@ static int vce_v4_0_hw_fini(void *handle)
static int vce_v4_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
+ int r, idx;
if (adev->vce.vcpu_bo == NULL)
return 0;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
- void *ptr = adev->vce.cpu_addr;
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
+ void *ptr = adev->vce.cpu_addr;
- memcpy_fromio(adev->vce.saved_bo, ptr, size);
+ memcpy_fromio(adev->vce.saved_bo, ptr, size);
+ }
+ drm_dev_exit(idx);
}
r = vce_v4_0_hw_fini(adev);
@@ -577,16 +581,20 @@ static int vce_v4_0_suspend(void *handle)
static int vce_v4_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
+ int r, idx;
if (adev->vce.vcpu_bo == NULL)
return -EINVAL;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
- void *ptr = adev->vce.cpu_addr;
- memcpy_toio(ptr, adev->vce.saved_bo, size);
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
+ void *ptr = adev->vce.cpu_addr;
+
+ memcpy_toio(ptr, adev->vce.saved_bo, size);
+ drm_dev_exit(idx);
+ }
} else {
r = amdgpu_vce_resume(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 0c1beefa3e49..284bb42d6c86 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
- RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
return 0;
}
@@ -765,7 +769,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
}
/**
- * vcn_v1_0_start - start VCN block
+ * vcn_v1_0_start_spg_mode - start VCN block
*
* @adev: amdgpu_device pointer
*
@@ -1101,7 +1105,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
}
/**
- * vcn_v1_0_stop - stop VCN block
+ * vcn_v1_0_stop_spg_mode - stop VCN block
*
* @adev: amdgpu_device pointer
*
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 116b9643d5ba..8af567c546db 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 948813d7caa0..888b17d84691 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
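
The VCN generations (v1.0, v2.0, v2.5, and v3.0 below) gain the same ordering fix in hw_fini: cancel the idle worker before sampling the power-gating state, so the delayed work cannot race in and re-enable the block after hw_fini has decided to gate it. Schematically, following the vcn_v1_0 hunk above:

	cancel_delayed_work_sync(&adev->vcn.idle_work);	/* no racing idle handler */
	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);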
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 946335d0f19c..4c36fc5c9738 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -34,6 +34,8 @@
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+#include <drm/drm_drv.h>
+
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10
@@ -276,16 +278,20 @@ static int vcn_v3_0_sw_init(void *handle)
static int vcn_v3_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i, r;
+ int i, r, idx;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_fw_shared *fw_shared;
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
- fw_shared->present_flag_0 = 0;
- fw_shared->sw_ring.is_enabled = false;
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ fw_shared->sw_ring.is_enabled = false;
+ }
+
+ drm_dev_exit(idx);
}
if (amdgpu_sriov_vf(adev))
@@ -380,15 +386,14 @@ done:
static int vcn_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
int i;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- ring = &adev->vcn.inst[i].ring_dec;
-
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 2f17c8a57015..a9ca6988009e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -514,11 +514,7 @@ static int vega10_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_fini_sw(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 8a122b413bf5..f51dfc38ac65 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -565,11 +565,7 @@ static int vega20_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
- amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_irq_fini_sw(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 80015e866498..dd1b1a464c28 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -891,7 +891,6 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
- kgd2kfd_suspend(kfd, false);
svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
device_queue_manager_uninit(kfd->dqm);
kfd_interrupt_exit(kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index fd8f544f0de2..2660f03e63a7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -29,6 +29,7 @@
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
+#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
@@ -205,34 +206,6 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
return r;
}
-static uint64_t
-svm_migrate_node_physical_addr(struct amdgpu_device *adev,
- struct drm_mm_node **mm_node, uint64_t *offset)
-{
- struct drm_mm_node *node = *mm_node;
- uint64_t pos = *offset;
-
- if (node->start == AMDGPU_BO_INVALID_OFFSET) {
- pr_debug("drm node is not validated\n");
- return 0;
- }
-
- pr_debug("vram node start 0x%llx npages 0x%llx\n", node->start,
- node->size);
-
- if (pos >= node->size) {
- do {
- pos -= node->size;
- node++;
- } while (pos >= node->size);
-
- *mm_node = node;
- *offset = pos;
- }
-
- return (node->start + pos) << PAGE_SHIFT;
-}
-
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
@@ -297,11 +270,9 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
{
uint64_t npages = migrate->cpages;
struct device *dev = adev->dev;
- struct drm_mm_node *node;
+ struct amdgpu_res_cursor cursor;
dma_addr_t *src;
uint64_t *dst;
- uint64_t vram_addr;
- uint64_t offset;
uint64_t i, j;
int r;
@@ -317,19 +288,12 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
goto out;
}
- node = prange->ttm_res->mm_node;
- offset = prange->offset;
- vram_addr = svm_migrate_node_physical_addr(adev, &node, &offset);
- if (!vram_addr) {
- WARN_ONCE(1, "vram node address is 0\n");
- r = -ENOMEM;
- goto out;
- }
-
+ amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+ npages << PAGE_SHIFT, &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
- dst[i] = vram_addr + (j << PAGE_SHIFT);
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
svm_migrate_get_vram_page(prange, migrate->dst[i]);
@@ -354,18 +318,10 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
mfence);
if (r)
goto out_free_vram_pages;
- offset += j;
- vram_addr = (node->start + offset) << PAGE_SHIFT;
+ amdgpu_res_next(&cursor, j << PAGE_SHIFT);
j = 0;
} else {
- offset++;
- vram_addr += PAGE_SIZE;
- }
- if (offset >= node->size) {
- node++;
- pr_debug("next node size 0x%llx\n", node->size);
- vram_addr = node->start << PAGE_SHIFT;
- offset = 0;
+ amdgpu_res_next(&cursor, PAGE_SIZE);
}
continue;
}
@@ -373,19 +329,15 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
src[i] >> PAGE_SHIFT, page_to_pfn(spage));
- if (j + offset >= node->size - 1 && i < npages - 1) {
+ if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
r = svm_migrate_copy_memory_gart(adev, src + i - j,
dst + i - j, j + 1,
FROM_RAM_TO_VRAM,
mfence);
if (r)
goto out_free_vram_pages;
-
- node++;
- pr_debug("next node size 0x%llx\n", node->size);
- vram_addr = node->start << PAGE_SHIFT;
- offset = 0;
- j = 0;
+ amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
+			j = 0;
} else {
j++;
}
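
svm_migrate_copy_to_vram() drops its hand-rolled drm_mm_node walk (including the removed svm_migrate_node_physical_addr() helper) in favor of the amdgpu_res_cursor iterator. A minimal usage sketch of the cursor API as used in the hunks above — field names per amdgpu_res_cursor.h; consume_page() is a hypothetical placeholder:

	struct amdgpu_res_cursor cursor;

	/* position the cursor 'offset' pages into the VRAM resource */
	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);

	while (cursor.remaining) {
		/* cursor.start: physical VRAM offset of the current
		 * contiguous chunk; cursor.size: its length in bytes */
		consume_page(cursor.start);
		amdgpu_res_next(&cursor, PAGE_SIZE);	/* steps across chunks */
	}

The cursor transparently advances across drm_mm_node boundaries, which is exactly the bookkeeping the removed offset/vram_addr/node arithmetic used to do by hand.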
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 5b6c5669c03d..2f8d352e0069 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -110,8 +110,6 @@ static void kfd_sdma_activity_worker(struct work_struct *work)
workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
sdma_activity_work);
- if (!workarea)
- return;
pdd = workarea->pdd;
if (!pdd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f2901e7a993e..0d57ef9e73eb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -409,7 +409,7 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange->start, prange->last);
- prange->ttm_res = &prange->svm_bo->bo->tbo.mem;
+ prange->ttm_res = prange->svm_bo->bo->tbo.resource;
return true;
}
@@ -515,7 +515,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
svm_bo->bo = bo;
prange->svm_bo = svm_bo;
- prange->ttm_res = &bo->tbo.mem;
+ prange->ttm_res = bo->tbo.resource;
prange->offset = 0;
spin_lock(&svm_bo->list_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 515b6d7d9128..ae519f929c57 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -28,6 +28,7 @@
#include "dm_services_types.h"
#include "dc.h"
+#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
@@ -314,10 +315,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
- if (otg_inst == -1) {
- WARN_ON(1);
+ if (WARN_ON(otg_inst == -1))
return adev->mode_info.crtcs[0];
- }
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
@@ -396,8 +395,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
e = amdgpu_crtc->event;
amdgpu_crtc->event = NULL;
- if (!e)
- WARN_ON(1);
+ WARN_ON(!e);
vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
@@ -600,14 +598,14 @@ static void dm_crtc_high_irq(void *interrupt_params)
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
* dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
* DCN generation ASICs
- * @interrupt params - interrupt parameters
+ * @interrupt_params: interrupt parameters
*
* Used to set crc window/read out crc value at vertical line 0 position
*/
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -981,7 +979,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
}
- adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ if (!adev->dm.dc->ctx->dmub_srv)
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
DRM_ERROR("Couldn't allocate DC DMUB server!\n");
return -ENOMEM;
@@ -1291,6 +1290,15 @@ error:
return -EINVAL;
}
+static int amdgpu_dm_early_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_audio_fini(adev);
+
+ return 0;
+}
+
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
int i;
@@ -1299,8 +1307,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
}
- amdgpu_dm_audio_fini(adev);
-
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
@@ -1328,10 +1334,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
}
#endif
- if (adev->dm.dc->ctx->dmub_srv) {
- dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
- adev->dm.dc->ctx->dmub_srv = NULL;
- }
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify);
@@ -1708,7 +1711,6 @@ static int dm_late_init(void *handle)
unsigned int linear_lut[16];
int i;
struct dmcu *dmcu = NULL;
- bool ret = true;
dmcu = adev->dm.dc->res_pool->dmcu;
@@ -1725,18 +1727,23 @@ static int dm_late_init(void *handle)
* 0xFFFF x 0.01 = 0x28F
*/
params.min_abm_backlight = 0x28F;
-
/* In the case where abm is implemented on dmcub,
- * dmcu object will be null.
- * ABM 2.4 and up are implemented on dmcub.
- */
- if (dmcu)
- ret = dmcu_load_iram(dmcu, params);
- else if (adev->dm.dc->ctx->dmub_srv)
- ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
+ * dmcu object will be null.
+ * ABM 2.4 and up are implemented on dmcub.
+ */
+ if (dmcu) {
+ if (!dmcu_load_iram(dmcu, params))
+ return -EINVAL;
+ } else if (adev->dm.dc->ctx->dmub_srv) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
- if (!ret)
- return -EINVAL;
+ get_edp_links(adev->dm.dc, edp_links, &edp_num);
+ for (i = 0; i < edp_num; i++) {
+ if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
+ return -EINVAL;
+ }
+ }
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
@@ -2001,7 +2008,6 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev);
-
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -2341,6 +2347,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.late_init = dm_late_init,
.sw_init = dm_sw_init,
.sw_fini = dm_sw_fini,
+ .early_fini = amdgpu_dm_early_fini,
.hw_init = dm_hw_init,
.hw_fini = dm_hw_fini,
.suspend = dm_suspend,
@@ -2739,6 +2746,7 @@ static void handle_hpd_rx_irq(void *param)
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
union hpd_irq_data hpd_irq_data;
+	bool lock_flag = false;
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@@ -2768,15 +2776,28 @@ static void handle_hpd_rx_irq(void *param)
}
}
- if (!amdgpu_in_reset(adev)) {
+ /*
+ * TODO: We need the lock to avoid touching DC state while it's being
+ * modified during automated compliance testing, or when link loss
+ * happens. While this should be split into subhandlers and proper
+ * interfaces to avoid having to conditionally lock like this in the
+ * outer layer, we need this workaround temporarily to allow MST
+ * lightup in some scenarios to avoid timeout.
+ */
+ if (!amdgpu_in_reset(adev) &&
+ (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
+ hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
mutex_lock(&adev->dm.dc_lock);
+ lock_flag = 1;
+ }
+
#ifdef CONFIG_DRM_AMD_DC_HDCP
result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
+ if (!amdgpu_in_reset(adev) && lock_flag)
mutex_unlock(&adev->dm.dc_lock);
- }
out:
if (result && !is_mst_root_connector) {
@@ -3399,7 +3420,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
if (dm->backlight_caps.caps_valid)
return;
- amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
+ amdgpu_acpi_get_backlight_caps(&caps);
if (caps.caps_valid) {
dm->backlight_caps.caps_valid = true;
if (caps.aux_support)
@@ -3491,7 +3512,7 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc) {
- DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
+ DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
break;
}
}
@@ -3499,7 +3520,7 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
for (i = 0; i < dm->num_of_edps; i++) {
rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
if (!rc) {
- DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
+ DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
break;
}
}
@@ -3868,7 +3889,6 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
- drm_mode_config_cleanup(dm->ddev);
drm_atomic_private_obj_fini(&dm->atomic_obj);
return;
}
@@ -4946,6 +4966,14 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
case DRM_FORMAT_ABGR16161616F:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
break;
+ case DRM_FORMAT_XRGB16161616:
+ case DRM_FORMAT_ARGB16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
+ break;
+ case DRM_FORMAT_XBGR16161616:
+ case DRM_FORMAT_ABGR16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
+ break;
default:
DRM_ERROR(
"Unsupported screen format %p4cc\n",
@@ -5522,6 +5550,63 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ stream->timing.flags.DSC = 0;
+
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ dsc_caps);
+ }
+}
+
+static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ struct drm_connector *drm_connector = &aconnector->base;
+ uint32_t link_bandwidth_kbps;
+
+ link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ dc_link_get_link_cap(aconnector->dc_link));
+ /* Set DSC policy according to dsc_clock_en */
+ dc_dsc_policy_set_enable_dsc_when_not_needed(
+ aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
+
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+ 0,
+ link_bandwidth_kbps,
+ &stream->timing,
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ }
+ }
+
+ /* Overwrite the stream flag if DSC is enabled through debugfs */
+ if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
+ stream->timing.flags.DSC = 1;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
+ stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
+ stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
+ stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
+}
+#endif
+
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
bool use_probed_modes)
@@ -5618,12 +5703,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct drm_display_mode saved_mode;
struct drm_display_mode *freesync_mode = NULL;
bool native_mode_found = false;
- bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+ bool recalculate_timing = false;
+ bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
int mode_refresh;
int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
- uint32_t link_bandwidth_kbps;
#endif
struct dc_sink *sink = NULL;
@@ -5681,7 +5766,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
- recalculate_timing |= amdgpu_freesync_vid_mode &&
+ recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5689,11 +5774,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
mode = *freesync_mode;
} else {
decide_crtc_timing_for_drm_display_mode(
- &mode, preferred_mode,
- dm_state ? (dm_state->scaling != RMX_OFF) : false);
- }
+ &mode, preferred_mode, scale);
- preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ }
}
if (recalculate_timing)
@@ -5705,7 +5789,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* If scaling is enabled and refresh rate didn't change
* we copy the vic and polarities of the old timings
*/
- if (!recalculate_timing || mode_refresh != preferred_refresh)
+ if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, NULL,
requested_bpc);
@@ -5714,45 +5798,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream, &mode, &aconnector->base, con_state, old_stream,
requested_bpc);
- stream->timing.flags.DSC = 0;
-
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
- dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
- &dsc_caps);
- link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
- dc_link_get_link_cap(aconnector->dc_link));
-
- if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
- /* Set DSC policy according to dsc_clock_en */
- dc_dsc_policy_set_enable_dsc_when_not_needed(
- aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
-
- if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
- &dsc_caps,
- aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
- link_bandwidth_kbps,
- &stream->timing,
- &stream->timing.dsc_cfg))
- stream->timing.flags.DSC = 1;
- /* Overwrite the stream flag if DSC is enabled through debugfs */
- if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
- stream->timing.flags.DSC = 1;
-
- if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
- stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
-
- if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
- stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
-
- if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
- stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
- }
+ /* SST DSC determination policy */
+ update_dsc_caps(aconnector, sink, stream, &dsc_caps);
+ if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
+ apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
#endif
- }
update_stream_scaling_settings(&mode, dm_state, stream);
@@ -6551,9 +6602,8 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
dm_update_crtc_active_planes(crtc, crtc_state);
- if (unlikely(!dm_crtc_state->stream &&
- modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
- WARN_ON(1);
+ if (WARN_ON(unlikely(!dm_crtc_state->stream &&
+ modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
return ret;
}
@@ -7052,6 +7102,10 @@ static const uint32_t rgb_formats[] = {
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_XBGR16161616,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_ABGR16161616,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGB565,
@@ -8393,9 +8447,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
- false,
- msecs_to_jiffies(5000));
+ r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+ msecs_to_jiffies(5000));
if (unlikely(r <= 0))
DRM_ERROR("Waiting for fences timed out!");
@@ -8960,7 +9013,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
status = dc_stream_get_status(dm_new_crtc_state->stream);
- WARN_ON(!status);
+
+ if (WARN_ON(!status))
+ continue;
+
WARN_ON(!status->plane_count);
/*
@@ -9967,7 +10023,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
if (cursor_scale_w != primary_scale_w ||
cursor_scale_h != primary_scale_h) {
- DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+ drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
return -EINVAL;
}
@@ -10003,11 +10059,11 @@ static int validate_overlay(struct drm_atomic_state *state)
{
int i;
struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct drm_plane_state *primary_state, *overlay_state = NULL;
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
/* Check if primary plane is contained inside overlay */
- for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
if (drm_atomic_plane_disabling(plane->state, new_plane_state))
return 0;
@@ -10034,6 +10090,14 @@ static int validate_overlay(struct drm_atomic_state *state)
if (!primary_state->crtc)
return 0;
+ /* check if cursor plane is enabled */
+ cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+ if (IS_ERR(cursor_state))
+ return PTR_ERR(cursor_state);
+
+ if (drm_atomic_plane_disabling(plane->state, cursor_state))
+ return 0;
+
/* Perform the bounds check to ensure the overlay plane covers the primary */
if (primary_state->crtc_x < overlay_state->crtc_x ||
primary_state->crtc_y < overlay_state->crtc_y ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index e6b2eec9fb59..5568d4e518e6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -160,6 +160,8 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
struct dc_sink *dc_sink = aconnector->dc_sink;
struct drm_dp_mst_port *port = aconnector->port;
u8 dsc_caps[16] = { 0 };
+ u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2
+ u8 *dsc_branch_dec_caps = NULL;
aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
#if defined(CONFIG_HP_HOOK_WORKAROUND)
@@ -182,9 +184,13 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
return false;
+ if (drm_dp_dpcd_read(aconnector->dsc_aux,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
+ dsc_branch_dec_caps = dsc_branch_dec_caps_raw;
+
if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
- dsc_caps, NULL,
- &dc_sink->dsc_caps.dsc_dec_caps))
+ dsc_caps, dsc_branch_dec_caps,
+ &dc_sink->dsc_caps.dsc_dec_caps))
return false;
return true;
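drm_dp_dpcd_read() returns the number of bytes transferred, which is why the hunk compares against 3: the branch decoder caps are only handed to dc_dsc_parse_dsc_dpcd() when all three bytes (DPCD 0xA0-0xA2) were actually read. Condensed sketch of the added flow:

	u8 raw[3] = { 0 };
	u8 *branch_caps = NULL;	/* NULL means no branch decoder caps available */

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
			     raw, sizeof(raw)) == sizeof(raw))
		branch_caps = raw;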
@@ -461,8 +467,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
&aconnector->dm_dp_aux.aux,
16,
4,
- (u8)max_link_enc_cap.lane_count,
- (u8)max_link_enc_cap.link_rate,
+ max_link_enc_cap.lane_count,
+ drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate),
aconnector->connector_id);
drm_connector_attach_dp_subconnector_property(&aconnector->base);
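The second hunk fixes the units handed to the MST topology manager: max_link_enc_cap.link_rate is a DPCD bandwidth code, and drm_dp_bw_code_to_link_rate() converts it to a link rate in kHz (code * 27000). Illustrative values:

	drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);	/* 0x06 -> 162000 kHz */
	drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);	/* 0x0a -> 270000 kHz */
	drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);	/* 0x14 -> 540000 kHz */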
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index c67d21a5ee52..9b8ea6e9a2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -979,7 +979,7 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
struct spread_spectrum_info *info);
/**
- * get_ss_info_from_table
+ * get_ss_info_from_tbl
Get spread spectrum information from the ASIC_InternalSS_Info Ver 2.1 or
SS_Info table from the VBIOS
There cannot be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
@@ -1548,7 +1548,7 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
uint32_t id);
/**
- * BiosParserObject::GetNumberofSpreadSpectrumEntry
+ * bios_parser_get_ss_entry_number
Get the number of SpreadSpectrum entries from the ASIC_InternalSS_Info table in
the VBIOS that match the SSid (to be converted from the signal)
*
@@ -1725,7 +1725,7 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
return 0;
}
/**
- * get_ss_entry_number_from_internal_ss_info_table_V3_1
+ * get_ss_entry_number_from_internal_ss_info_tbl_V3_1
Get the number of SpreadSpectrum entries from the ASIC_InternalSS_Info table of
the VBIOS that match the given id
*
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index 5b77251e0590..e317a3615147 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -114,7 +114,7 @@ bool dal_cmd_table_helper_controller_id_to_atom(
}
/**
- * translate_transmitter_bp_to_atom - Translate the Transmitter to the
+ * dal_cmd_table_helper_transmitter_bp_to_atom - Translate the Transmitter to the
* corresponding ATOM BIOS value
* @t: transmitter
* returns: output digitalTransmitter
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 00706b072b5f..6d2fb112ad9f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -129,7 +129,7 @@ bool dal_cmd_table_helper_controller_id_to_atom2(
}
/**
- * translate_transmitter_bp_to_atom2 - Translate the Transmitter to the
+ * dal_cmd_table_helper_transmitter_bp_to_atom2 - Translate the Transmitter to the
* corresponding ATOM BIOS value
* @t: transmitter
* returns: digitalTransmitter
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 1244fcb0f446..ff5bb152ef49 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -2863,6 +2863,7 @@ static void populate_initial_data(
data->bytes_per_pixel[num_displays + 4] = 4;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
data->bytes_per_pixel[num_displays + 4] = 8;
break;
@@ -2966,6 +2967,7 @@ static void populate_initial_data(
data->bytes_per_pixel[num_displays + 4] = 4;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
data->bytes_per_pixel[num_displays + 4] = 8;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index d4df4da5b81a..0e18df1283b6 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -236,6 +236,7 @@ static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format for
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
return dcn_bw_rgb_sub_32;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
return dcn_bw_rgb_sub_64;
@@ -375,6 +376,7 @@ static void pipe_ctx_to_e2e_pipe_params (
input->src.viewport_height_c = input->src.viewport_height / 2;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
input->src.source_format = dm_444_64;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index ef157b83bacd..f03889b3654b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -303,7 +303,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
struct dc_crtc_timing_adjust *adjust)
{
- int i = 0;
+ int i;
bool ret = false;
stream->adjust.v_total_max = adjust->v_total_max;
@@ -331,7 +331,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
{
/* TODO: Support multiple streams */
const struct dc_stream_state *stream = streams[0];
- int i = 0;
+ int i;
bool ret = false;
struct crtc_position position;
@@ -538,7 +538,7 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
enum dc_dynamic_expansion option)
{
/* OPP FMT dyn expansion updates*/
- int i = 0;
+ int i;
struct pipe_ctx *pipe_ctx;
for (i = 0; i < MAX_PIPES; i++) {
@@ -596,7 +596,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
- int i = 0;
+ int i;
bool ret = false;
struct pipe_ctx *pipes;
@@ -613,7 +613,7 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
- int i = 0;
+ int i;
bool ret = false;
struct pipe_ctx *pipes;
@@ -639,8 +639,7 @@ void dc_stream_set_static_screen_params(struct dc *dc,
int num_streams,
const struct dc_static_screen_params *params)
{
- int i = 0;
- int j = 0;
+ int i, j;
struct pipe_ctx *pipes_affected[MAX_PIPES];
int num_pipes_affected = 0;
@@ -895,7 +894,7 @@ static void disable_all_writeback_pipes_for_stream(
static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
struct dc_stream_state *stream, bool lock)
{
- int i = 0;
+ int i;
/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
if (dc->hwss.interdependent_update_lock)
@@ -1155,7 +1154,7 @@ static void enable_timing_multisync(
struct dc *dc,
struct dc_state *ctx)
{
- int i = 0, multisync_count = 0;
+ int i, multisync_count = 0;
int pipe_count = dc->res_pool->pipe_count;
struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
@@ -3335,18 +3334,10 @@ void dc_hardware_release(struct dc *dc)
#endif
/**
- *****************************************************************************
- * Function: dc_enable_dmub_notifications
+ * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled
+ * @dc: dc structure
*
- * @brief
- * Returns whether dmub notification can be enabled
- *
- * @param
- * [in] dc: dc structure
- *
- * @return
- * True to enable dmub notifications, False otherwise
- *****************************************************************************
+ * Returns: True to enable dmub notifications, False otherwise
*/
bool dc_enable_dmub_notifications(struct dc *dc)
{
@@ -3355,21 +3346,13 @@ bool dc_enable_dmub_notifications(struct dc *dc)
}
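The dc.c hunks here and below replace banner-style headers with kernel-doc. For reference, the canonical kernel-doc shape the file converges on (a generic example, not from this file):

	/**
	 * function_name - Short one-line description
	 * @arg: what the argument is for
	 *
	 * Returns: what the caller gets back
	 */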
/**
- *****************************************************************************
- * Function: dc_process_dmub_aux_transfer_async
- *
- * @brief
- * Submits aux command to dmub via inbox message
- * Sets port index appropriately for legacy DDC
- *
- * @param
- * [in] dc: dc structure
- * [in] link_index: link index
- * [in] payload: aux payload
+ * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
+ * Sets port index appropriately for legacy DDC
+ * @dc: dc structure
+ * @link_index: link index
+ * @payload: aux payload
*
- * @return
- * True if successful, False if failure
- *****************************************************************************
+ * Returns: True if successful, False otherwise
*/
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
uint32_t link_index,
@@ -3428,16 +3411,8 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
}
/**
- *****************************************************************************
- * Function: dc_disable_accelerated_mode
- *
- * @brief
- * disable accelerated mode
- *
- * @param
- * [in] dc: dc structure
- *
- *****************************************************************************
+ * dc_disable_accelerated_mode - disable accelerated mode
+ * @dc: dc structure
*/
void dc_disable_accelerated_mode(struct dc *dc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 9e08410bfdfd..5a70f55e075c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -25,8 +25,6 @@ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
link->ctx->logger
#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
-#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
-
/* maximum pre emphasis level allowed for each voltage swing level*/
static const enum dc_pre_emphasis
voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3,
@@ -39,14 +37,6 @@ enum {
POST_LT_ADJ_REQ_TIMEOUT = 200
};
-enum {
- LINK_TRAINING_MAX_RETRY_COUNT = 5,
- /* to avoid infinite loop where-in the receiver
- * switches between different VS
- */
- LINK_TRAINING_MAX_CR_RETRY = 100
-};
-
static bool decide_fallback_link_setting(
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
@@ -97,7 +87,7 @@ static uint32_t get_eq_training_aux_rd_interval(
return wait_in_micro_secs;
}
-static void wait_for_training_aux_rd_interval(
+void dp_wait_for_training_aux_rd_interval(
struct dc_link *link,
uint32_t wait_in_micro_secs)
{
@@ -108,7 +98,7 @@ static void wait_for_training_aux_rd_interval(
wait_in_micro_secs);
}
-static enum dpcd_training_patterns
+enum dpcd_training_patterns
dc_dp_training_pattern_to_dpcd_training_pattern(
struct dc_link *link,
enum dc_dp_training_pattern pattern)
@@ -206,11 +196,12 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
return DP_TRAINING_PATTERN_SEQUENCE_2;
}
-static void dpcd_set_link_settings(
+enum dc_status dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
uint8_t rate;
+ enum dc_status status;
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
@@ -225,15 +216,16 @@ static void dpcd_set_link_settings(
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
- if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
}
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
- core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, 1);
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
@@ -249,12 +241,12 @@ static void dpcd_set_link_settings(
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
}
- core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
- core_link_write_dpcd(link, DP_LINK_RATE_SET,
+ status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
&lt_settings->link_settings.link_rate_set, 1);
} else {
rate = (uint8_t) (lt_settings->link_settings.link_rate);
- core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
if (rate) {
@@ -278,9 +270,11 @@ static void dpcd_set_link_settings(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
}
+
+ return status;
}
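Note that each successive core_link_write_dpcd() assignment overwrites status, so dpcd_set_link_settings() effectively reports only the outcome of the last write. A stricter variant would bail out on the first failure (a hypothetical sketch, not what the patch does):

	status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
				      &downspread.raw, sizeof(downspread));
	if (status != DC_OK)
		return status;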
-static uint8_t dc_dp_initialize_scrambling_data_symbols(
+uint8_t dc_dp_initialize_scrambling_data_symbols(
struct dc_link *link,
enum dc_dp_training_pattern pattern)
{
@@ -429,7 +423,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
link->cur_lane_setting = lt_settings->lane_settings[0];
}
-static bool is_cr_done(enum dc_lane_count ln_count,
+bool dp_is_cr_done(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status)
{
uint32_t lane;
@@ -468,7 +462,7 @@ static inline bool is_interlane_aligned(union lane_align_status_updated align_st
return align_status.bits.INTERLANE_ALIGN_DONE == 1;
}
-static void update_drive_settings(
+void dp_update_drive_settings(
struct link_training_settings *dest,
struct link_training_settings src)
{
@@ -612,7 +606,7 @@ static void find_max_drive_settings(
}
-static void get_lane_status_and_drive_settings(
+enum dc_status dp_get_lane_status_and_drive_settings(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
union lane_status *ln_status,
@@ -627,6 +621,7 @@ static void get_lane_status_and_drive_settings(
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
struct link_training_settings request_settings = { {0} };
uint32_t lane;
+ enum dc_status status;
memset(req_settings, '\0', sizeof(struct link_training_settings));
@@ -637,7 +632,7 @@ static void get_lane_status_and_drive_settings(
lane_adjust_offset = 3;
}
- core_link_read_dpcd(
+ status = core_link_read_dpcd(
link,
lane01_status_address,
(uint8_t *)(dpcd_buf),
@@ -725,9 +720,10 @@ static void get_lane_status_and_drive_settings(
* read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
*/
+ return status;
}
-static void dpcd_set_lane_settings(
+enum dc_status dpcd_set_lane_settings(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
uint32_t offset)
@@ -735,6 +731,7 @@ static void dpcd_set_lane_settings(
union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
uint32_t lane;
unsigned int lane0_set_address;
+ enum dc_status status;
lane0_set_address = DP_TRAINING_LANE0_SET;
@@ -762,7 +759,7 @@ static void dpcd_set_lane_settings(
PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
}
- core_link_write_dpcd(link,
+ status = core_link_write_dpcd(link,
lane0_set_address,
(uint8_t *)(dpcd_lane),
link_training_setting->link_settings.lane_count);
@@ -808,9 +805,10 @@ static void dpcd_set_lane_settings(
}
link->cur_lane_setting = link_training_setting->lane_settings[0];
+ return status;
}
-static bool is_max_vs_reached(
+bool dp_is_max_vs_reached(
const struct link_training_settings *lt_settings)
{
uint32_t lane;
@@ -852,19 +850,19 @@ static bool perform_post_lt_adj_req_sequence(
union lane_align_status_updated
dpcd_lane_status_updated;
- get_lane_status_and_drive_settings(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- &req_settings,
- DPRX);
+ dp_get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings,
+ DPRX);
if (dpcd_lane_status_updated.bits.
POST_LT_ADJ_REQ_IN_PROGRESS == 0)
return true;
- if (!is_cr_done(lane_count, dpcd_lane_status))
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status))
return false;
if (!is_ch_eq_done(lane_count, dpcd_lane_status) ||
@@ -887,7 +885,7 @@ static bool perform_post_lt_adj_req_sequence(
}
if (req_drv_setting_changed) {
- update_drive_settings(
+ dp_update_drive_settings(
lt_settings, req_settings);
dc_link_dp_set_drive_settings(link,
@@ -939,7 +937,7 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte
return aux_rd_interval_us;
}
-static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
+enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status)
{
enum link_training_result result = LINK_TRAINING_SUCCESS;
@@ -1003,14 +1001,14 @@ static enum link_training_result perform_channel_equalization_sequence(
translate_training_aux_read_interval(
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
- wait_for_training_aux_rd_interval(
+ dp_wait_for_training_aux_rd_interval(
link,
wait_time_microsec);
/* 4. Read lane status and requested
* drive settings as set by the sink*/
- get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_drive_settings(
link,
lt_settings,
dpcd_lane_status,
@@ -1019,7 +1017,7 @@ static enum link_training_result perform_channel_equalization_sequence(
offset);
/* 5. check CR done*/
- if (!is_cr_done(lane_count, dpcd_lane_status))
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status))
return LINK_TRAINING_EQ_FAIL_CR;
/* 6. check CHEQ done*/
@@ -1029,13 +1027,12 @@ static enum link_training_result perform_channel_equalization_sequence(
return LINK_TRAINING_SUCCESS;
/* 7. update VS/PE/PC2 in lt_settings*/
- update_drive_settings(lt_settings, req_settings);
+ dp_update_drive_settings(lt_settings, req_settings);
}
return LINK_TRAINING_EQ_FAIL_EQ;
}
-#define TRAINING_AUX_RD_INTERVAL 100 //us
static void start_clock_recovery_pattern_early(struct dc_link *link,
struct link_training_settings *lt_settings,
@@ -1106,14 +1103,14 @@ static enum link_training_result perform_clock_recovery_sequence(
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
- wait_for_training_aux_rd_interval(
+ dp_wait_for_training_aux_rd_interval(
link,
wait_time_microsec);
/* 4. Read lane status and requested drive
* settings as set by the sink
*/
- get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_drive_settings(
link,
lt_settings,
dpcd_lane_status,
@@ -1122,11 +1119,11 @@ static enum link_training_result perform_clock_recovery_sequence(
offset);
/* 5. check CR done*/
- if (is_cr_done(lane_count, dpcd_lane_status))
+ if (dp_is_cr_done(lane_count, dpcd_lane_status))
return LINK_TRAINING_SUCCESS;
/* 6. max VS reached*/
- if (is_max_vs_reached(lt_settings))
+ if (dp_is_max_vs_reached(lt_settings))
break;
/* 7. same lane settings*/
@@ -1141,7 +1138,7 @@ static enum link_training_result perform_clock_recovery_sequence(
retries_cr = 0;
/* 8. update VS/PE/PC2 in lt_settings*/
- update_drive_settings(lt_settings, req_settings);
+ dp_update_drive_settings(lt_settings, req_settings);
retry_count++;
}
@@ -1154,7 +1151,7 @@ static enum link_training_result perform_clock_recovery_sequence(
}
- return get_cr_failure(lane_count, dpcd_lane_status);
+ return dp_get_cr_failure(lane_count, dpcd_lane_status);
}
static inline enum link_training_result dp_transition_to_video_idle(
@@ -1173,8 +1170,16 @@ static inline enum link_training_result dp_transition_to_video_idle(
* TPS4 must be used instead of POST_LT_ADJ_REQ.
*/
if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
- lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4)
+ lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) {
+ /* delay 5ms after Main Link output idle pattern and then check
+ * DPCD 0202h.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
+ msleep(5);
+ status = dp_check_link_loss_status(link, lt_settings);
+ }
return status;
+ }
if (status == LINK_TRAINING_SUCCESS &&
perform_post_lt_adj_req_sequence(link, lt_settings) == false)
@@ -1327,9 +1332,14 @@ static inline void decide_8b_10b_training_settings(
lt_settings->enhanced_framing = *overrides->enhanced_framing;
else
lt_settings->enhanced_framing = 1;
+
+ if (link->preferred_training_settings.fec_enable != NULL)
+ lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
+ else
+ lt_settings->should_set_fec_ready = true;
}
-static void decide_training_settings(
+void dp_decide_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_settings,
const struct dc_link_training_overrides *overrides,
@@ -1365,18 +1375,18 @@ uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
-static void configure_lttpr_mode_transparent(struct dc_link *link)
+enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
{
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
- core_link_write_dpcd(link,
+ return core_link_write_dpcd(link,
DP_PHY_REPEATER_MODE,
(uint8_t *)&repeater_mode,
sizeof(repeater_mode));
}
-static void configure_lttpr_mode_non_transparent(
+enum dc_status configure_lttpr_mode_non_transparent(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
@@ -1431,6 +1441,8 @@ static void configure_lttpr_mode_non_transparent(
}
}
}
+
+ return result;
}
static void repeater_training_done(struct dc_link *link, uint32_t offset)
@@ -1564,7 +1576,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
{
struct link_training_settings lt_settings;
- decide_training_settings(
+ dp_decide_training_settings(
link,
link_setting,
&link->preferred_training_settings,
@@ -1579,7 +1591,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
dp_set_hw_lane_settings(link, &lt_settings, DPRX);
/* wait receiver to lock-on*/
- wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
+ dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
/* 2. Perform_channel_equalization_sequence. */
@@ -1590,7 +1602,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
dp_set_hw_lane_settings(link, &lt_settings, DPRX);
/* wait receiver to lock-on. */
- wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
+ dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
/* 3. Perform_link_training_int. */
@@ -1602,42 +1614,61 @@ bool dc_link_dp_perform_link_training_skip_aux(
return true;
}
-enum link_training_result dc_link_dp_perform_link_training(
- struct dc_link *link,
- const struct dc_link_settings *link_setting,
- bool skip_video_pattern)
+enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings)
{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- struct link_training_settings lt_settings;
+ enum dc_status status = DC_OK;
- bool fec_enable;
- uint8_t repeater_cnt;
- uint8_t repeater_id;
+ if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ status = configure_lttpr_mode_transparent(link);
+
+ else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ status = configure_lttpr_mode_non_transparent(link, lt_settings);
+
+ return status;
+}
+
+static void dpcd_exit_training_mode(struct dc_link *link)
+{
+
+ /* clear training pattern set */
+ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+}
+
+enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum dp_link_encoding encoding =
+ dp_get_link_encoding_format(
+ &lt_settings->link_settings);
+ enum dc_status status;
- decide_training_settings(
+ status = core_link_write_dpcd(
link,
- link_setting,
- &link->preferred_training_settings,
- &lt_settings);
+ DP_MAIN_LINK_CHANNEL_CODING_SET,
+ (uint8_t *) &encoding,
+ 1);
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n",
+ __func__,
+ DP_MAIN_LINK_CHANNEL_CODING_SET,
+ encoding);
- /* Configure lttpr mode */
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- configure_lttpr_mode_non_transparent(link, &lt_settings);
- else if (link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
- configure_lttpr_mode_transparent(link);
+ return status;
+}
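dpcd_configure_channel_coding() writes the encoding value straight into DP_MAIN_LINK_CHANNEL_CODING_SET (DPCD 0x108). This relies on the assumption that the dp_link_encoding enum mirrors the DPCD encoding (1 for 8b/10b, 2 for 128b/132b); only the 8b/10b value is exercised by this patch.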
- if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+static enum link_training_result dp_perform_8b_10b_link_training(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
- /* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, &lt_settings);
+ uint8_t repeater_cnt;
+ uint8_t repeater_id;
- if (link->preferred_training_settings.fec_enable != NULL)
- fec_enable = *link->preferred_training_settings.fec_enable;
- else
- fec_enable = true;
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, lt_settings, DPRX);
- dp_set_fec_ready(link, fec_enable);
+ /* 1. set link rate, lane count and spread. */
+ dpcd_set_link_settings(link, lt_settings);
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
@@ -1648,13 +1679,13 @@ enum link_training_result dc_link_dp_perform_link_training(
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
repeater_id--) {
- status = perform_clock_recovery_sequence(link, &lt_settings, repeater_id);
+ status = perform_clock_recovery_sequence(link, lt_settings, repeater_id);
if (status != LINK_TRAINING_SUCCESS)
break;
status = perform_channel_equalization_sequence(link,
- &lt_settings,
+ lt_settings,
repeater_id);
if (status != LINK_TRAINING_SUCCESS)
@@ -1665,36 +1696,62 @@ enum link_training_result dc_link_dp_perform_link_training(
}
if (status == LINK_TRAINING_SUCCESS) {
- status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
+ status = perform_clock_recovery_sequence(link, lt_settings, DPRX);
if (status == LINK_TRAINING_SUCCESS) {
status = perform_channel_equalization_sequence(link,
- &lt_settings,
+ lt_settings,
DPRX);
}
}
- /* 3. set training not in progress*/
- dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
- if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
+ return status;
+}
+
+enum link_training_result dc_link_dp_perform_link_training(
+ struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ bool skip_video_pattern)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ struct link_training_settings lt_settings;
+ enum dp_link_encoding encoding =
+ dp_get_link_encoding_format(link_settings);
+
+ /* decide training settings */
+ dp_decide_training_settings(
+ link,
+ link_settings,
+ &link->preferred_training_settings,
+ &lt_settings);
+
+ /* reset previous training states */
+ dpcd_exit_training_mode(link);
+
+ /* configure link prior to entering training mode */
+ dpcd_configure_lttpr_mode(link, &lt_settings);
+ dp_set_fec_ready(link, lt_settings.should_set_fec_ready);
+ dpcd_configure_channel_coding(link, &lt_settings);
+
+ /* enter training mode:
+ * Per the DP spec, from this point on the DPTX device shall not issue
+ * non-LT AUX transactions while in training mode.
+ */
+ if (encoding == DP_8b_10b_ENCODING)
+ status = dp_perform_8b_10b_link_training(link, &lt_settings);
+ else
+ ASSERT(0);
+
+ /* exit training mode and switch to video idle */
+ dpcd_exit_training_mode(link);
+ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
status = dp_transition_to_video_idle(link,
&lt_settings,
status);
- }
-
- /* delay 5ms after Main Link output idle pattern and then check
- * DPCD 0202h.
- */
- if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
- msleep(5);
- status = dp_check_link_loss_status(link, &lt_settings);
- }
- /* 6. print status message*/
+ /* dump debug data */
print_status_message(link, &lt_settings, status);
-
if (status != LINK_TRAINING_SUCCESS)
link->ctx->dc->debug_data.ltFailCount++;
-
return status;
}
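Condensed, the refactored entry point now follows this sequence (a paraphrase of the code above, not a drop-in replacement):

	dp_decide_training_settings(link, link_settings,
				    &link->preferred_training_settings, &lt_settings);
	dpcd_exit_training_mode(link);			/* reset previous training state */
	dpcd_configure_lttpr_mode(link, &lt_settings);	/* transparent vs non-transparent */
	dp_set_fec_ready(link, lt_settings.should_set_fec_ready);
	dpcd_configure_channel_coding(link, &lt_settings);
	status = dp_perform_8b_10b_link_training(link, &lt_settings); /* only 8b/10b so far */
	dpcd_exit_training_mode(link);			/* leave training mode, video idle */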
@@ -1899,7 +1956,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
bool fec_enable = false;
- decide_training_settings(
+ dp_decide_training_settings(
link,
link_settings,
lt_overrides,
@@ -2070,7 +2127,7 @@ enum dc_status read_hpd_rx_irq_data(
return retval;
}
-static bool hpd_rx_irq_check_link_loss_status(
+bool hpd_rx_irq_check_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data)
{
@@ -4606,50 +4663,74 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
return DP_PANEL_MODE_DEFAULT;
}
-void dp_set_fec_ready(struct dc_link *link, bool ready)
+enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
{
/* FEC has to be "set ready" before the link training.
* The policy is to always train with FEC
* if the sink supports it and leave it enabled on link.
* If FEC is not supported, disable it.
*/
- struct link_encoder *link_enc = link->link_enc;
+ struct link_encoder *link_enc = NULL;
+ enum dc_status status = DC_OK;
uint8_t fec_config = 0;
+ /* Access link encoder based on whether it is statically
+ * or dynamically assigned to a link.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
+
if (!dc_link_should_enable_fec(link))
- return;
+ return status;
if (link_enc->funcs->fec_set_ready &&
link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
if (ready) {
fec_config = 1;
- if (core_link_write_dpcd(link,
+ status = core_link_write_dpcd(link,
DP_FEC_CONFIGURATION,
&fec_config,
- sizeof(fec_config)) == DC_OK) {
+ sizeof(fec_config));
+ if (status == DC_OK) {
link_enc->funcs->fec_set_ready(link_enc, true);
link->fec_state = dc_link_fec_ready;
} else {
- link->link_enc->funcs->fec_set_ready(link->link_enc, false);
+ link_enc->funcs->fec_set_ready(link_enc, false);
link->fec_state = dc_link_fec_not_ready;
dm_error("dpcd write failed to set fec_ready");
}
} else if (link->fec_state == dc_link_fec_ready) {
fec_config = 0;
- core_link_write_dpcd(link,
+ status = core_link_write_dpcd(link,
DP_FEC_CONFIGURATION,
&fec_config,
sizeof(fec_config));
- link->link_enc->funcs->fec_set_ready(
- link->link_enc, false);
+ link_enc->funcs->fec_set_ready(link_enc, false);
link->fec_state = dc_link_fec_not_ready;
}
}
+
+ return status;
}
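Both FEC helpers now resolve the link encoder the same way; the shared pattern, as it appears above, is:

	struct link_encoder *link_enc;

	if (link->is_dig_mapping_flexible &&
	    link->dc->res_pool->funcs->link_encs_assign)
		/* DIG dynamically assigned: look it up in the current state */
		link_enc = link_enc_cfg_get_link_enc_used_by_link(
				link->dc->current_state, link);
	else
		/* DIG statically mapped to the link */
		link_enc = link->link_enc;
	ASSERT(link_enc);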
void dp_set_fec_enable(struct dc_link *link, bool enable)
{
- struct link_encoder *link_enc = link->link_enc;
+ struct link_encoder *link_enc = NULL;
+
+ /* Access link encoder based on whether it is statically
+ * or dynamically assigned to a link.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(
+ link->dc->current_state, link);
+ else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
if (!dc_link_should_enable_fec(link))
return;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8a158a192ec0..cd864cc83539 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -611,6 +611,7 @@ static enum pixel_format convert_pixel_format_to_dalsurface(
dal_pixel_format = PIXEL_FORMAT_420BPP10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
default:
dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
break;
@@ -701,124 +702,23 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli
}
}
-static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+/*
+ * This is a preliminary vp size calculation to allow us to check taps support.
+ * The result is completely overridden afterwards.
+ */
+static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
{
- const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- const struct dc_stream_state *stream = pipe_ctx->stream;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
- struct rect surf_src = plane_state->src_rect;
- struct rect clip, dest;
- int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
- || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- int split_count = 0;
- int split_idx = 0;
- bool orthogonal_rotation, flip_y_start, flip_x_start;
-
- calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
- if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
- stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- split_count = 0;
- split_idx = 0;
- }
-
- /* The actual clip is an intersection between stream
- * source and surface clip
- */
- dest = plane_state->dst_rect;
- clip.x = stream->src.x > plane_state->clip_rect.x ?
- stream->src.x : plane_state->clip_rect.x;
-
- clip.width = stream->src.x + stream->src.width <
- plane_state->clip_rect.x + plane_state->clip_rect.width ?
- stream->src.x + stream->src.width - clip.x :
- plane_state->clip_rect.x + plane_state->clip_rect.width - clip.x ;
-
- clip.y = stream->src.y > plane_state->clip_rect.y ?
- stream->src.y : plane_state->clip_rect.y;
-
- clip.height = stream->src.y + stream->src.height <
- plane_state->clip_rect.y + plane_state->clip_rect.height ?
- stream->src.y + stream->src.height - clip.y :
- plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
-
- /*
- * Need to calculate how scan origin is shifted in vp space
- * to correctly rotate clip and dst
- */
- get_vp_scan_direction(
- plane_state->rotation,
- plane_state->horizontal_mirror,
- &orthogonal_rotation,
- &flip_y_start,
- &flip_x_start);
-
- if (orthogonal_rotation) {
- swap(clip.x, clip.y);
- swap(clip.width, clip.height);
- swap(dest.x, dest.y);
- swap(dest.width, dest.height);
- }
- if (flip_x_start) {
- clip.x = dest.x + dest.width - clip.x - clip.width;
- dest.x = 0;
- }
- if (flip_y_start) {
- clip.y = dest.y + dest.height - clip.y - clip.height;
- dest.y = 0;
- }
-
- /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
- * num_pixels = clip.num_pix * scl_ratio
- */
- data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width;
- data->viewport.width = clip.width * surf_src.width / dest.width;
-
- data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height;
- data->viewport.height = clip.height * surf_src.height / dest.height;
-
- /* Handle split */
- if (split_count) {
- /* extra pixels in the division remainder need to go to pipes after
- * the extra pixel index minus one(epimo) defined here as:
- */
- int epimo = 0;
-
- if (orthogonal_rotation) {
- if (flip_y_start)
- split_idx = split_count - split_idx;
-
- epimo = split_count - data->viewport.height % (split_count + 1);
-
- data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx;
- if (split_idx > epimo)
- data->viewport.y += split_idx - epimo - 1;
- data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0);
- } else {
- if (flip_x_start)
- split_idx = split_count - split_idx;
-
- epimo = split_count - data->viewport.width % (split_count + 1);
-
- data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx;
- if (split_idx > epimo)
- data->viewport.x += split_idx - epimo - 1;
- data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
- }
+ data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width));
+ data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height));
+ data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width));
+ data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height));
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+ swap(data->viewport.width, data->viewport.height);
+ swap(data->viewport_c.width, data->viewport_c.height);
}
-
- /* Round down, compensate in init */
- data->viewport_c.x = data->viewport.x / vpc_div;
- data->viewport_c.y = data->viewport.y / vpc_div;
- data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
- data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
-
- /* Round up, assume original video size always even dimensions */
- data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
- data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
-
- data->viewport_unadjusted = data->viewport;
- data->viewport_c_unadjusted = data->viewport_c;
}
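With illustrative numbers: downscaling a 3840-pixel-wide source into a 1920-pixel-wide recout gives a horizontal ratio of 2.0, so the preliminary viewport width is ceil(2.0 * 1920) = 3840; a 90/270 degree rotation then swaps the width and height results before taps support is checked.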
static void calculate_recout(struct pipe_ctx *pipe_ctx)
@@ -827,26 +727,21 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
const struct dc_stream_state *stream = pipe_ctx->stream;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect surf_clip = plane_state->clip_rect;
- bool pri_split_tb = pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state &&
- stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
- bool sec_split_tb = pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state &&
- stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
- int split_count = 0;
- int split_idx = 0;
+ bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+ int split_count, split_idx;
calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+ if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
+ split_idx = 0;
/*
* Only the leftmost ODM pipe should be offset by a nonzero distance
*/
- if (!pipe_ctx->prev_odm_pipe) {
+ if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
data->recout.x = stream->dst.x;
if (stream->src.x < surf_clip.x)
data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
/ stream->src.width;
-
} else
data->recout.x = 0;
@@ -867,26 +762,31 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
- /* Handle h & v split, handle rotation using viewport */
- if (sec_split_tb) {
- data->recout.y += data->recout.height / 2;
- /* Floor primary pipe, ceil 2ndary pipe */
- data->recout.height = (data->recout.height + 1) / 2;
- } else if (pri_split_tb)
+ /* Handle h & v split */
+ if (split_tb) {
+ ASSERT(data->recout.height % 2 == 0);
data->recout.height /= 2;
- else if (split_count) {
- /* extra pixels in the division remainder need to go to pipes after
- * the extra pixel index minus one(epimo) defined here as:
- */
- int epimo = split_count - data->recout.width % (split_count + 1);
-
- /*no recout offset due to odm */
+ } else if (split_count) {
if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
+ /* extra pixels in the division remainder need to go to pipes after
+ * the extra pixel index minus one (epimo), defined here as:
+ */
+ int epimo = split_count - data->recout.width % (split_count + 1);
+
data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
if (split_idx > epimo)
data->recout.x += split_idx - epimo - 1;
+ ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0);
+ data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+ } else {
+ /* odm */
+ if (split_idx == split_count) {
+ /* rightmost pipe is the remainder recout */
+ data->recout.width -= data->h_active * split_count - data->recout.x;
+ data->recout.x = 0;
+ } else
+ data->recout.width = data->h_active - data->recout.x;
}
- data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
}
}
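To make the epimo arithmetic concrete (illustrative numbers): splitting a 1000-pixel-wide recout across three pipes gives epimo = 2 - 1000 % 3 = 1, so pipes 0 and 1 (split_idx <= epimo) each get 333 pixels while pipe 2 (split_idx > epimo) gets 334, with recout.x advancing to 333 and 666; the three widths sum back to exactly 1000.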
@@ -940,9 +840,15 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
}
-static inline void adjust_vp_and_init_for_seamless_clip(
+
+/*
+ * We calculate vp offset, size and inits here based entirely on scaling
+ * ratios and recout for pixel-perfect pipe combine.
+ */
+static void calculate_init_and_vp(
bool flip_scan_dir,
- int recout_skip,
+ int recout_offset_within_recout_full,
+ int recout_size,
int src_size,
int taps,
struct fixed31_32 ratio,
@@ -950,91 +856,87 @@ static inline void adjust_vp_and_init_for_seamless_clip(
int *vp_offset,
int *vp_size)
{
- if (!flip_scan_dir) {
- /* Adjust for viewport end clip-off */
- if ((*vp_offset + *vp_size) < src_size) {
- int vp_clip = src_size - *vp_size - *vp_offset;
- int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
-
- int_part = int_part > 0 ? int_part : 0;
- *vp_size += int_part < vp_clip ? int_part : vp_clip;
- }
-
- /* Adjust for non-0 viewport offset */
- if (*vp_offset) {
- int int_part;
-
- *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
- int_part = dc_fixpt_floor(*init) - *vp_offset;
- if (int_part < taps) {
- int int_adj = *vp_offset >= (taps - int_part) ?
- (taps - int_part) : *vp_offset;
- *vp_offset -= int_adj;
- *vp_size += int_adj;
- int_part += int_adj;
- } else if (int_part > taps) {
- *vp_offset += int_part - taps;
- *vp_size -= int_part - taps;
- int_part = taps;
- }
- init->value &= 0xffffffff;
- *init = dc_fixpt_add_int(*init, int_part);
- }
- } else {
- /* Adjust for non-0 viewport offset */
- if (*vp_offset) {
- int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
-
- int_part = int_part > 0 ? int_part : 0;
- *vp_size += int_part < *vp_offset ? int_part : *vp_offset;
- *vp_offset -= int_part < *vp_offset ? int_part : *vp_offset;
- }
+ struct fixed31_32 temp;
+ int int_part;
- /* Adjust for viewport end clip-off */
- if ((*vp_offset + *vp_size) < src_size) {
- int int_part;
- int end_offset = src_size - *vp_offset - *vp_size;
-
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, takning into account scan direction
- */
- int_part = dc_fixpt_floor(*init) - end_offset;
- if (int_part < taps) {
- int int_adj = end_offset >= (taps - int_part) ?
- (taps - int_part) : end_offset;
- *vp_size += int_adj;
- int_part += int_adj;
- } else if (int_part > taps) {
- *vp_size += int_part - taps;
- int_part = taps;
- }
- init->value &= 0xffffffff;
- *init = dc_fixpt_add_int(*init, int_part);
- }
+ /*
+ * The first tap starts sampling pixel number <init_int_part>, corresponding to recout
+ * pixel 1. The next recout pixel samples the int part of <init + scaling ratio> and so on.
+ * All following calculations are based on this logic.
+ *
+ * Init calculated according to formula:
+ * init = (scaling_ratio + number_of_taps + 1) / 2
+ * init_bot = init + scaling_ratio
+ * to get pixel perfect combine add the fraction from calculating vp offset
+ */
+ temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full);
+ *vp_offset = dc_fixpt_floor(temp);
+ temp.value &= 0xffffffff;
+ *init = dc_fixpt_truncate(dc_fixpt_add(dc_fixpt_div_int(
+ dc_fixpt_add_int(ratio, taps + 1), 2), temp), 19);
+ /*
+ * If viewport has non 0 offset and there are more taps than covered by init then
+ * we should decrease the offset and increase init so we are never sampling
+ * outside of viewport.
+ */
+ int_part = dc_fixpt_floor(*init);
+ if (int_part < taps) {
+ int_part = taps - int_part;
+ if (int_part > *vp_offset)
+ int_part = *vp_offset;
+ *vp_offset -= int_part;
+ *init = dc_fixpt_add_int(*init, int_part);
}
+ /*
+ * If the taps sample outside of the viewport at the end of recout and there are more
+ * pixels available in the surface, we should increase the viewport size; regardless,
+ * set vp to only what is actually used.
+ */
+ temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1));
+ *vp_size = dc_fixpt_floor(temp);
+ if (*vp_size + *vp_offset > src_size)
+ *vp_size = src_size - *vp_offset;
+
+ /* We did all the math assuming we scan in the same direction the display does;
+ * however, mirror/rotation changes how the vp scans vs how it is offset. If the scan
+ * direction is flipped we simply calculate the offset from the other side of the plane.
+ * Note that outside of viewport all scaling hardware works in recout space.
+ */
+ if (flip_scan_dir)
+ *vp_offset = src_size - *vp_offset - *vp_size;
}
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
+static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
- struct pipe_ctx *odm_pipe = pipe_ctx;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
- struct rect src = pipe_ctx->plane_state->src_rect;
- int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
+ struct rect src = plane_state->src_rect;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
- || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- int odm_idx = 0;
+ calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
/*
- * Need to calculate the scan direction for viewport to make adjustments
+ * recout full is what the recout would have been if we didn't clip
+ * the source plane at all. We only care about the left (ro_lb) and top (ro_tb)
+ * offsets of recout within recout full because those are the directions
+ * we scan from and therefore the only ones that affect inits.
+ */
+ recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ * stream->dst.width / stream->src.width;
+ recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ * stream->dst.height / stream->src.height;
+ if (pipe_ctx->prev_odm_pipe && split_idx)
+ ro_lb = data->h_active * split_idx - recout_full_x;
+ else
+ ro_lb = data->recout.x - recout_full_x;
+ ro_tb = data->recout.y - recout_full_y;
+ ASSERT(ro_lb >= 0 && ro_tb >= 0);
+
+ /*
+ * Work in recout rotation since that requires less transformations
*/
get_vp_scan_direction(
plane_state->rotation,
@@ -1043,145 +945,62 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
&flip_vert_scan_dir,
&flip_horz_scan_dir);
- /* Calculate src rect rotation adjusted to recout space */
- surf_size_h = src.x + src.width;
- surf_size_v = src.y + src.height;
- if (flip_horz_scan_dir)
- src.x = 0;
- if (flip_vert_scan_dir)
- src.y = 0;
if (orthogonal_rotation) {
- swap(src.x, src.y);
swap(src.width, src.height);
+ swap(flip_vert_scan_dir, flip_horz_scan_dir);
}
- /*modified recout_skip_h calculation due to odm having no recout offset*/
- while (odm_pipe->prev_odm_pipe) {
- odm_idx++;
- odm_pipe = odm_pipe->prev_odm_pipe;
- }
- /*odm_pipe is the leftmost pipe in the ODM group*/
- recout_skip_h = odm_idx * data->recout.width;
-
- /* Recout matching initial vp offset = recout_offset - (stream dst offset +
- * ((surf dst offset - stream src offset) * 1/ stream scaling ratio)
- * - (surf surf_src offset * 1/ full scl ratio))
- */
- recout_skip_h += odm_pipe->plane_res.scl_data.recout.x
- - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
- * stream->dst.width / stream->src.width -
- src.x * plane_state->dst_rect.width / src.width
- * stream->dst.width / stream->src.width);
-
-
- recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
- * stream->dst.height / stream->src.height -
- src.y * plane_state->dst_rect.height / src.height
- * stream->dst.height / stream->src.height);
- if (orthogonal_rotation)
- swap(recout_skip_h, recout_skip_v);
- /*
- * Init calculated according to formula:
- * init = (scaling_ratio + number_of_taps + 1) / 2
- * init_bot = init + scaling_ratio
- * init_c = init + truncated_vp_c_offset(from calculate viewport)
- */
- data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
-
- data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
-
- data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
-
- data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
-
- /*
- * Taps, inits and scaling ratios are in recout space need to rotate
- * to viewport rotation before adjustment
- */
- adjust_vp_and_init_for_seamless_clip(
+ calculate_init_and_vp(
flip_horz_scan_dir,
- recout_skip_h,
- surf_size_h,
- orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps,
- orthogonal_rotation ? data->ratios.vert : data->ratios.horz,
- orthogonal_rotation ? &data->inits.v : &data->inits.h,
+ ro_lb,
+ data->recout.width,
+ src.width,
+ data->taps.h_taps,
+ data->ratios.horz,
+ &data->inits.h,
&data->viewport.x,
&data->viewport.width);
- adjust_vp_and_init_for_seamless_clip(
+ calculate_init_and_vp(
flip_horz_scan_dir,
- recout_skip_h,
- surf_size_h / vpc_div,
- orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c,
- orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c,
- orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c,
+ ro_lb,
+ data->recout.width,
+ src.width / vpc_div,
+ data->taps.h_taps_c,
+ data->ratios.horz_c,
+ &data->inits.h_c,
&data->viewport_c.x,
&data->viewport_c.width);
- adjust_vp_and_init_for_seamless_clip(
+ calculate_init_and_vp(
flip_vert_scan_dir,
- recout_skip_v,
- surf_size_v,
- orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps,
- orthogonal_rotation ? data->ratios.horz : data->ratios.vert,
- orthogonal_rotation ? &data->inits.h : &data->inits.v,
+ ro_tb,
+ data->recout.height,
+ src.height,
+ data->taps.v_taps,
+ data->ratios.vert,
+ &data->inits.v,
&data->viewport.y,
&data->viewport.height);
- adjust_vp_and_init_for_seamless_clip(
+ calculate_init_and_vp(
flip_vert_scan_dir,
- recout_skip_v,
- surf_size_v / vpc_div,
- orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c,
- orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c,
- orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c,
+ ro_tb,
+ data->recout.height,
+ src.height / vpc_div,
+ data->taps.v_taps_c,
+ data->ratios.vert_c,
+ &data->inits.v_c,
&data->viewport_c.y,
&data->viewport_c.height);
-
- /* Interlaced inits based on final vert inits */
- data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
- data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
-
-}
-
-/*
- * When handling 270 rotation in mixed SLS mode, we have
- * stream->timing.h_border_left that is non zero. If we are doing
- * pipe-splitting, this h_border_left value gets added to recout.x and when it
- * calls calculate_inits_and_adj_vp() and
- * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a
- * pipe to be incorrect.
- *
- * To fix this, instead of using stream->timing.h_border_left, we can use
- * stream->dst.x to represent the border instead. So we will set h_border_left
- * to 0 and shift the appropriate amount in stream->dst.x. We will then
- * perform all calculations in resource_build_scaling_params() based on this
- * and then restore the h_border_left and stream->dst.x to their original
- * values.
- *
- * shift_border_left_to_dst() will shift the amount of h_border_left to
- * stream->dst.x and set h_border_left to 0. restore_border_left_from_dst()
- * will restore h_border_left and stream->dst.x back to their original values
- * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
- * original h_border_left value in its calculation.
- */
-static int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
-{
- int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
-
- if (store_h_border_left) {
- pipe_ctx->stream->timing.h_border_left = 0;
- pipe_ctx->stream->dst.x += store_h_border_left;
+ if (orthogonal_rotation) {
+ swap(data->viewport.x, data->viewport.y);
+ swap(data->viewport.width, data->viewport.height);
+ swap(data->viewport_c.x, data->viewport_c.y);
+ swap(data->viewport_c.width, data->viewport_c.height);
}
- return store_h_border_left;
-}
-
-static void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
- int store_h_border_left)
-{
- pipe_ctx->stream->dst.x -= store_h_border_left;
- pipe_ctx->stream->timing.h_border_left = store_h_border_left;
+ data->viewport.x += src.x;
+ data->viewport.y += src.y;
+ ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
+ data->viewport_c.x += src.x / vpc_div;
+ data->viewport_c.y += src.y / vpc_div;
}
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
@@ -1189,48 +1008,45 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
bool res = false;
- int store_h_border_left = shift_border_left_to_dst(pipe_ctx);
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
- /* Important: scaling ratio calculation requires pixel format,
- * lb depth calculation requires recout and taps require scaling ratios.
- * Inits require viewport, taps, ratios and recout of split pipe
- */
+
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
- calculate_scaling_ratios(pipe_ctx);
-
- calculate_viewport(pipe_ctx);
-
- if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
- pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) {
- if (store_h_border_left) {
- restore_border_left_from_dst(pipe_ctx,
- store_h_border_left);
- }
- return false;
- }
-
- calculate_recout(pipe_ctx);
-
- /**
- * Setting line buffer pixel depth to 24bpp yields banding
- * on certain displays, such as the Sharp 4k
+ /* Timing borders are part of vactive that we are also supposed to skip, in
+ * addition to any stream dst offset. Since DM logic assumes dst is in
+ * addressable space, we need to add the left and top borders to the dst
+ * offsets temporarily.
+ * TODO: fix in DM; stream dst is supposed to be in vactive
*/
- pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
- pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
-
- pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
- pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
+ pipe_ctx->stream->dst.x += timing->h_border_left;
+ pipe_ctx->stream->dst.y += timing->v_border_top;
+ /* Calculate H and V active size */
pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
- store_h_border_left + timing->h_border_right;
+ timing->h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
timing->v_border_top + timing->v_border_bottom;
if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
- /* Taps calculations */
+ /* depends on h_active */
+ calculate_recout(pipe_ctx);
+ /* depends on pixel format */
+ calculate_scaling_ratios(pipe_ctx);
+ /* depends on scaling ratios and recout, does not calculate offset yet */
+ calculate_viewport_size(pipe_ctx);
+
+ /*
+ * LB calculations depend on vp size, h/v_active and scaling ratios
+ * Setting line buffer pixel depth to 24bpp yields banding
+ * on certain displays, such as the Sharp 4k. 36bpp is needed
+ * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and
+ * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc
+ * precision on at least DCN display engines.
+ */
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
+
if (pipe_ctx->plane_res.xfm != NULL)
res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
@@ -1257,9 +1073,31 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
&plane_state->scaling_quality);
}
+ /*
+ * Depends on recout, scaling ratios, h_active and taps
+ * May need to re-check lb size after this in some obscure scenario
+ */
if (res)
- /* May need to re-check lb size after this in some obscure scenario */
- calculate_inits_and_adj_vp(pipe_ctx);
+ calculate_inits_and_viewports(pipe_ctx);
+
+ /*
+ * Handle side-by-side and top-bottom 3D recout offsets after vp calculation,
+ * since 3D is special and needs to calculate vp as if there were no recout offset.
+ * This may break with rotation; good thing we aren't mixing hw rotation and 3D.
+ */
+ if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == plane_state) {
+ ASSERT(plane_state->rotation == ROTATION_ANGLE_0 ||
+ (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_TOP_AND_BOTTOM &&
+ pipe_ctx->stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE));
+ if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+ pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+ else if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
+ pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
+ }
+
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
+ pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+ res = false;
DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n"
"src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n",
@@ -1288,8 +1126,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
plane_state->clip_rect.x,
plane_state->clip_rect.y);
- if (store_h_border_left)
- restore_border_left_from_dst(pipe_ctx, store_h_border_left);
+ pipe_ctx->stream->dst.x -= timing->h_border_left;
+ pipe_ctx->stream->dst.y -= timing->v_border_top;
return res;
}
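The reordering above encodes the data dependencies directly in the call order. Condensed, under the assumption that the inline comments are accurate:

    /* resource_build_scaling_params(), dependency-ordered (sketch):
     *   h/v_active                       <- timing, borders, ODM split count
     *   calculate_recout()               <- h_active
     *   calculate_scaling_ratios()       <- pixel format
     *   calculate_viewport_size()        <- ratios, recout (no offset yet)
     *   lb_params                        <- vp size, h/v_active, ratios
     *   ...get_optimal_number_of_taps()  <- scl_data, scaling quality
     *   calculate_inits_and_viewports()  <- recout, ratios, h_active, taps
     */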
@@ -3046,6 +2884,7 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
#endif
return 32;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
return 64;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7da5e7a2e88d..c0fbcbd4cbfc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;
-#define DC_VER "3.2.136"
+#define DC_VER "3.2.137"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -1069,8 +1069,6 @@ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
*/
bool dc_commit_state(struct dc *dc, struct dc_state *context);
-void dc_power_down_on_boot(struct dc *dc);
-
struct dc_state *dc_create_state(struct dc *dc);
struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index c5dc3a947020..4b2854d1d981 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -110,6 +110,15 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
}
+void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+ union dmub_inbox0_data_register data)
+{
+ struct dmub_srv *dmub = dmub_srv->dmub;
+ if (dmub->hw_funcs.send_inbox0_cmd)
+ dmub->hw_funcs.send_inbox0_cmd(dmub, data);
+ // TODO: Add wait command -- poll register for ACK
+}
+
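Unlike the ring-buffer path (dc_dmub_srv_cmd_queue()/dc_dmub_srv_cmd_execute()), inbox0 is a single data-register write, so the call is fire-and-forget until the TODO'd ACK polling lands. The union's only user in this series is the HW lock manager; a sketch, assuming a dc_dmub_srv pointer is in scope:

    union dmub_inbox0_data_register data = { 0 };

    data.inbox0_cmd_lock_hw = hw_lock_cmd; /* see dmub_hw_lock_mgr_inbox0_cmd() below */
    dc_dmub_srv_send_inbox0_cmd(dc_dmub_srv, data);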
bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd)
{
struct dmub_srv *dmub;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 338f776990db..f615e3a76d01 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -66,4 +66,6 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu
void dc_dmub_trace_event_control(struct dc *dc, bool enable);
+void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
+
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 04957a9efab2..52355fe6994c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -182,6 +182,8 @@ enum surface_pixel_format {
SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS,
/*64 bpp */
SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616,
+ /*swapped*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616,
/*float*/
SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F,
/*swaped & float*/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index c871923e7db0..83845d006c54 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -216,6 +216,23 @@ static inline void get_edp_links(const struct dc *dc,
}
}
+static inline bool dc_get_edp_link_panel_inst(const struct dc *dc,
+ const struct dc_link *link,
+ unsigned int *inst_out)
+{
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ return false;
+ get_edp_links(dc, edp_links, &edp_num);
+ if ((edp_num > 1) && (link->link_index > edp_links[0]->link_index))
+ *inst_out = 1;
+ else
+ *inst_out = 0;
+ return true;
+}
+
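A short usage sketch for the new helper, with 'dc' and 'link' assumed to be in scope:

    unsigned int panel_inst = 0;

    if (dc_get_edp_link_panel_inst(dc, link, &panel_inst)) {
        /* panel_inst is 0 for the first eDP, or 1 for a second eDP
         * whose link_index sorts after the first */
    }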
/* Set backlight level of an embedded panel (eDP, LVDS).
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
* and 16 bit fractional, where 1.0 is max backlight value.
@@ -316,7 +333,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
enum link_training_result dc_link_dp_perform_link_training(
struct dc_link *link,
- const struct dc_link_settings *link_setting,
+ const struct dc_link_settings *link_settings,
bool skip_video_pattern);
bool dc_link_dp_sync_lt_begin(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 13dae7238a58..0ab1a33dae84 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -179,6 +179,9 @@ struct dc_stream_state {
bool use_vsc_sdp_for_colorimetry;
bool ignore_msa_timing_param;
+
+ bool freesync_on_desktop;
+
bool converter_disable_audio;
uint8_t qs_bit;
uint8_t qy_bit;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 535da8db70b6..8016e22114ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -271,11 +271,6 @@ struct dc_edid_caps {
struct dc_panel_patch panel_patch;
};
-struct view {
- uint32_t width;
- uint32_t height;
-};
-
struct dc_mode_flags {
/* note: part of refresh rate flag*/
uint32_t INTERLACE :1;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 79a6f261a0da..4cdd4dacb761 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -566,6 +566,7 @@ static void program_grph_pixel_format(
* should problem swap endian*/
format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 ||
format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 ||
format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
/* ABGR formats */
red_xbar = 2;
@@ -606,6 +607,7 @@ static void program_grph_pixel_format(
fallthrough;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
grph_depth = 3;
grph_format = 0;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
index 23db5c72f07e..08a4c8d029d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -181,7 +181,6 @@ struct dce_mem_input_registers {
SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
- SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
@@ -207,7 +206,6 @@ struct dce_mem_input_registers {
SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
- SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 151dc7bf6d23..d9fd4ec60588 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -794,7 +794,7 @@ static void program_bit_depth_reduction(
enum dcp_out_trunc_round_mode trunc_mode;
bool spatial_dither_enable;
- ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
+ ASSERT(depth <= COLOR_DEPTH_121212); /* Invalid clamp bit depth */
spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED;
/* Default to 12 bit truncation without rounding */
@@ -854,7 +854,7 @@ static void dce60_program_bit_depth_reduction(
enum dcp_out_trunc_round_mode trunc_mode;
bool spatial_dither_enable;
- ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
+ ASSERT(depth <= COLOR_DEPTH_121212); /* Invalid clamp bit depth */
spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED;
/* Default to 12 bit truncation without rounding */
@@ -1647,7 +1647,8 @@ void dce_transform_construct(
xfm_dce->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
- LB_PIXEL_DEPTH_30BPP;
+ LB_PIXEL_DEPTH_30BPP |
+ LB_PIXEL_DEPTH_36BPP;
xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
@@ -1675,7 +1676,8 @@ void dce60_transform_construct(
xfm_dce->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
- LB_PIXEL_DEPTH_30BPP;
+ LB_PIXEL_DEPTH_30BPP |
+ LB_PIXEL_DEPTH_36BPP;
xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 6939ca2e8212..54a1408c8015 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -172,16 +172,12 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
static bool dmub_abm_init_config(struct abm *abm,
const char *src,
- unsigned int bytes)
+ unsigned int bytes,
+ unsigned int inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = abm->ctx;
- uint32_t edp_id_count = dc->dc_edp_id_count;
- int i;
- uint8_t panel_mask = 0;
-
- for (i = 0; i < edp_id_count; i++)
- panel_mask |= 0x01 << i;
+ uint8_t panel_mask = 0x01 << inst;
// TODO: Optimize by only reading back final 4 bytes
dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
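The effect of the new inst parameter: the old loop ORed together a mask covering every detected eDP, while the new code targets exactly one panel:

    /* inst == 0  ->  panel_mask == 0x01 (first eDP only)
     * inst == 1  ->  panel_mask == 0x02 (second eDP only)
     * old code   ->  panel_mask == 0x03 when two eDPs were detected
     */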
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index c97ee5abc0ef..9baf8ca0a920 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -52,6 +52,14 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
dc_dmub_srv_wait_idle(dmub_srv);
}
+void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+ union dmub_inbox0_cmd_lock_hw hw_lock_cmd)
+{
+ union dmub_inbox0_data_register data = { 0 };
+ data.inbox0_cmd_lock_hw = hw_lock_cmd;
+ dc_dmub_srv_send_inbox0_cmd(dmub_srv, data);
+}
+
bool should_use_dmub_lock(struct dc_link *link)
{
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
index bc5906347493..5a72b168fb4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -34,6 +34,9 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_hw_lock_flags *hw_locks,
struct dmub_hw_lock_inst_flags *inst_flags);
+void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+ union dmub_inbox0_cmd_lock_hw hw_lock_cmd);
+
bool should_use_dmub_lock(struct dc_link *link);
#endif /*_DMUB_HW_LOCK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
index 295596d1f47f..faad8555ddbb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
@@ -27,19 +27,10 @@
#include "dmub/inc/dmub_cmd.h"
/**
- *****************************************************************************
- * Function: dmub_enable_outbox_notification
- *
- * @brief
- * Sends inbox cmd to dmub to enable outbox1 messages with interrupt.
- * Dmub sends outbox1 message and triggers outbox1 interrupt.
- *
- * @param
- * [in] dc: dc structure
- *
- * @return
- * None
- *****************************************************************************
+ * dmub_enable_outbox_notification - Sends an inbox command to DMUB to enable
+ * outbox1 messages with interrupt; DMUB then sends
+ * an outbox1 message and triggers the outbox1 interrupt.
+ * @dc: dc structure
*/
void dmub_enable_outbox_notification(struct dc *dc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5ddeee96bf23..e73198738fd8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -63,6 +63,9 @@
#include "atomfirmware.h"
+#include "dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+
#define GAMMA_HW_POINTS_NUM 256
/*
@@ -264,6 +267,7 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
prescale_params->scale = 0x2008;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
prescale_params->scale = 0x2000;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 8bbb499067f7..db7557a1c613 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -393,6 +393,7 @@ static void program_pixel_format(
grph_format = 1;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
grph_depth = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
index 29438c6050db..45bca0db5e5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -708,7 +708,8 @@ bool dce110_transform_v_construct(
xfm_dce->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
- LB_PIXEL_DEPTH_30BPP;
+ LB_PIXEL_DEPTH_30BPP |
+ LB_PIXEL_DEPTH_36BPP;
xfm_dce->prescaler_on = true;
xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 7f8456b9988b..91fdfcd8a14e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -257,7 +257,8 @@ static void dpp1_setup_format_flags(enum surface_pixel_format input_format,\
if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F ||
input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F)
*fmt = PIXEL_FORMAT_FLOAT;
- else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616)
+ else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ||
+ input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616)
*fmt = PIXEL_FORMAT_FIXED16;
else
*fmt = PIXEL_FORMAT_FIXED;
@@ -368,7 +369,8 @@ void dpp1_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+ pixel_format = 26; /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
pixel_format = 24;
@@ -566,7 +568,8 @@ void dpp1_construct(
dpp->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
- LB_PIXEL_DEPTH_30BPP;
+ LB_PIXEL_DEPTH_30BPP |
+ LB_PIXEL_DEPTH_36BPP;
dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 0bd8de4c73a9..673b93f4fea5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -631,8 +631,10 @@ static void dpp1_dscl_set_manual_ratio_init(
SCL_V_INIT_INT, init_int);
if (REG(SCL_VERT_FILTER_INIT_BOT)) {
- init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5;
- init_int = dc_fixpt_floor(data->inits.v_bot);
+ struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
+
+ init_frac = dc_fixpt_u0d19(bot) << 5;
+ init_int = dc_fixpt_floor(bot);
REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
SCL_V_INIT_FRAC_BOT, init_frac,
SCL_V_INIT_INT_BOT, init_int);
@@ -645,8 +647,10 @@ static void dpp1_dscl_set_manual_ratio_init(
SCL_V_INIT_INT_C, init_int);
if (REG(SCL_VERT_FILTER_INIT_BOT_C)) {
- init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5;
- init_int = dc_fixpt_floor(data->inits.v_c_bot);
+ struct fixed31_32 bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
+
+ init_frac = dc_fixpt_u0d19(bot) << 5;
+ init_int = dc_fixpt_floor(bot);
REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
SCL_V_INIT_FRAC_BOT_C, init_frac,
SCL_V_INIT_INT_BOT_C, init_int);
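With v_bot/v_c_bot dropped from struct scl_inits (see the transform.h hunk below), the bottom-field init is now derived locally: one full vertical ratio step past the top-field init. In DC's fixed 31.32 helpers, with the << 5 presumably aligning the 19 extracted fraction bits to the register's fraction field:

    struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
    uint32_t init_frac = dc_fixpt_u0d19(bot) << 5;
    uint32_t init_int  = dc_fixpt_floor(bot);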
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 6f42d10dd772..f4f423d0b8c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -785,6 +785,7 @@ static bool hubbub1_dcc_support_pixel_format(
*bytes_per_element = 4;
return true;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
*bytes_per_element = 8;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index e39e8a2f715d..04303fe9c659 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -245,6 +245,7 @@ void hubp1_program_pixel_format(
if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
red_bar = 2;
blue_bar = 3;
@@ -277,8 +278,9 @@ void hubp1_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
+ SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 81803463ca9b..ef37d3abaa56 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2502,25 +2502,9 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
-void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
-{
- struct dce_hwseq *hws = dc->hwseq;
- struct mpc *mpc = dc->res_pool->mpc;
-
- if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
- hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, color);
- else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
- hws->funcs.get_surface_visual_confirm_color(pipe_ctx, color);
- else
- color_space_to_black_color(
- dc, pipe_ctx->stream->output_color_space, color);
-
- if (mpc->funcs->set_bg_color)
- mpc->funcs->set_bg_color(mpc, color, mpcc_id);
-}
-
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg = {{0}};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
@@ -2529,6 +2513,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+ hws->funcs.get_hdr_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
+ hws->funcs.get_surface_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else {
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space,
+ &blnd_cfg.black_color);
+ }
+
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
@@ -2560,8 +2556,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
*/
mpcc_id = hubp->inst;
- dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
-
/* If there is no full update, don't need to touch MPC tree*/
if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
@@ -2599,7 +2593,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
- pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
/* scaler configuration */
pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 478180b96d8d..c9bdffe5989b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -206,10 +206,4 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc);
void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits);
-void dcn10_update_visual_confirm_color(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct tg_color *color,
- int mpcc_id);
-
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 4ff3ebc25438..680ca53455a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -82,7 +82,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
- .update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index da74269feb75..b096011acb49 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -64,8 +64,6 @@ void mpc1_set_bg_color(struct mpc *mpc,
MPCC_BG_G_Y, bg_g_y);
REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0,
MPCC_BG_B_CB, bg_b_cb);
-
- bottommost_mpcc->blnd_cfg.black_color = *bg_color;
}
static void mpc1_update_blending(
@@ -248,8 +246,6 @@ struct mpcc *mpc1_insert_plane(
}
}
- mpc->funcs->set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
-
/* update the blending configuration */
mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
@@ -499,7 +495,6 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.set_output_csc = NULL,
.set_output_gamma = NULL,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
- .set_bg_color = mpc1_set_bg_color,
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 4af96cc5d9d6..a9e420c7d75a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -166,7 +166,8 @@ static void dpp2_cnv_setup (
select = DCN2_ICSC_SELECT_ICSC_A;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+ pixel_format = 26; /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
pixel_format = 24;
@@ -431,7 +432,8 @@ bool dpp2_construct(
dpp->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
- LB_PIXEL_DEPTH_30BPP;
+ LB_PIXEL_DEPTH_30BPP |
+ LB_PIXEL_DEPTH_36BPP;
dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 6d03d98fca22..91a9305d42e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -158,6 +158,7 @@ bool hubbub2_dcc_support_pixel_format(
*bytes_per_element = 4;
return true;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
*bytes_per_element = 8;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index a1318c31bcfa..7e54058715aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -431,6 +431,7 @@ void hubp2_program_pixel_format(
if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
|| format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
red_bar = 2;
blue_bar = 3;
@@ -463,8 +464,9 @@ void hubp2_program_pixel_format(
SURFACE_PIXEL_FORMAT, 10);
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
REG_UPDATE(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, 22);
+ SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 25de15158801..14e322761645 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1473,7 +1473,7 @@ static void dcn20_update_dchubp_dpp(
plane_state->update_flags.bits.per_pixel_alpha_change ||
pipe_ctx->stream->update_flags.bits.scaling) {
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
- ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
/* scaler configuration */
pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
@@ -2267,25 +2267,9 @@ void dcn20_get_mpctree_visual_confirm_color(
*color = pipe_colors[top_pipe->pipe_idx];
}
-void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
-{
- struct dce_hwseq *hws = dc->hwseq;
- struct mpc *mpc = dc->res_pool->mpc;
-
- /* input to MPCC is always RGB, by default leave black_color at 0 */
- if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
- hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, color);
- else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
- hws->funcs.get_surface_visual_confirm_color(pipe_ctx, color);
- else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
- dcn20_get_mpctree_visual_confirm_color(pipe_ctx, color);
-
- if (mpc->funcs->set_bg_color)
- mpc->funcs->set_bg_color(mpc, color, mpcc_id);
-}
-
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct mpcc_blnd_cfg blnd_cfg = { {0} };
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
@@ -2294,6 +2278,15 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+ // input to MPCC is always RGB, by default leave black_color at 0
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+ hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
+ hws->funcs.get_surface_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
+ dcn20_get_mpctree_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
+ }
+
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
@@ -2327,8 +2320,6 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
*/
mpcc_id = hubp->inst;
- dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
-
/* If there is no full update, don't need to touch MPC tree*/
if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
!pipe_ctx->update_flags.bits.mpcc) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 6bba191cd33e..c69f766a40ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -146,10 +146,5 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset);
-void dcn20_update_visual_confirm_color(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct tg_color *color,
- int mpcc_id);
-
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 2f59f10e5f09..b5bb613eed4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -96,7 +96,6 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
#endif
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 947eb0df3f12..6a99fdd55e8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -67,6 +67,7 @@ void mpc2_update_blending(
REG_SET(MPCC_BOT_GAIN_INSIDE[mpcc_id], 0, MPCC_BOT_GAIN_INSIDE, blnd_cfg->bottom_inside_gain);
REG_SET(MPCC_BOT_GAIN_OUTSIDE[mpcc_id], 0, MPCC_BOT_GAIN_OUTSIDE, blnd_cfg->bottom_outside_gain);
+ mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
mpcc->blnd_cfg = *blnd_cfg;
}
@@ -556,7 +557,6 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.set_output_gamma = mpc2_set_output_gamma,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
- .set_bg_color = mpc1_set_bg_color,
};
void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 6a56a03cfba3..0d06307456a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2289,12 +2289,14 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
- pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
- pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c_unadjusted.y;
- pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport_unadjusted.width;
- pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c_unadjusted.width;
- pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport_unadjusted.height;
- pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c_unadjusted.height;
+ pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
+ pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
+ pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
+ pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
+ pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
+ pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+ pipes[pipe_cnt].pipe.src.viewport_width_max = pln->src_rect.width;
+ pipes[pipe_cnt].pipe.src.viewport_height_max = pln->src_rect.height;
pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
@@ -2363,6 +2365,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
@@ -3236,7 +3239,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- if (voltage_supported && dummy_pstate_supported) {
+ if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
goto restore_dml_state;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 523e25f7e410..4f20a85ff396 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -100,7 +100,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 434d3c46cad4..2140b75540cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -245,7 +245,8 @@ static void dpp3_cnv_setup (
select = INPUT_CSC_SELECT_ICSC;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- pixel_format = 22;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+ pixel_format = 26; /* ARGB16161616_UNORM */
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
pixel_format = 24;
@@ -1442,7 +1443,8 @@ bool dpp3_construct(
dpp->lb_pixel_depth_supported =
LB_PIXEL_DEPTH_18BPP |
LB_PIXEL_DEPTH_24BPP |
- LB_PIXEL_DEPTH_30BPP;
+ LB_PIXEL_DEPTH_30BPP |
+ LB_PIXEL_DEPTH_36BPP;
dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index a978d848d370..bf7fa98b39eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -99,7 +99,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index a82319f4d081..950c9bfd53de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1431,7 +1431,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
- .set_bg_color = mpc1_set_bg_color,
+
};
void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 181f2175ac95..70b053d9ba40 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -101,7 +101,6 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index fb41140e8381..4440d08743aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -245,6 +245,8 @@ struct pp_smu_funcs_nv {
#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8
#define PP_SMU_NUM_FCLK_DPM_LEVELS 4
#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
+#define PP_SMU_NUM_DCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_VCLK_DPM_LEVELS 8
struct dpm_clock {
uint32_t Freq; // In MHz
@@ -258,6 +260,8 @@ struct dpm_clocks {
struct dpm_clock SocClocks[PP_SMU_NUM_SOCCLK_DPM_LEVELS];
struct dpm_clock FClocks[PP_SMU_NUM_FCLK_DPM_LEVELS];
struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS];
+ struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS];
+ struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS];
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 2ece3690bfa3..a0f0c54c863b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -253,6 +253,8 @@ struct _vcs_dpi_display_pipe_source_params_st {
unsigned int viewport_y_c;
unsigned int viewport_width_c;
unsigned int viewport_height_c;
+ unsigned int viewport_width_max;
+ unsigned int viewport_height_max;
unsigned int data_pitch;
unsigned int data_pitch_c;
unsigned int meta_pitch;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index d764d784e279..a4a1b96967c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -630,6 +630,19 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
}
}
}
+ if (src->viewport_width_max) {
+ int hdiv_c = src->source_format >= dm_420_8 && src->source_format <= dm_422_10 ? 2 : 1;
+ int vdiv_c = src->source_format >= dm_420_8 && src->source_format <= dm_420_12 ? 2 : 1;
+
+ if (mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] > src->viewport_width_max)
+ mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_max;
+ if (mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] > src->viewport_height_max)
+ mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_max;
+ if (mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] > src->viewport_width_max / hdiv_c)
+ mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_max / hdiv_c;
+ if (mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] > src->viewport_height_max / vdiv_c)
+ mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_max / vdiv_c;
+ }
if (pipes[k].pipe.src.immediate_flip) {
mode_lib->vba.ImmediateFlipSupport = true;
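The clamp keeps DML's modeled viewport within the plane's source rect, with the chroma maxima scaled by the 4:2:x divisors derived above. For example (sketch):

    /* dm_420_8 plane, src_rect 1920x1080 (hdiv_c = vdiv_c = 2):
     *   luma   viewport <= 1920 x 1080
     *   chroma viewport <=  960 x  540
     * dm_444_* formats leave both divisors at 1. */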
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index 92280cc05e2d..dae8e489c8cf 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -53,8 +53,8 @@
*/
struct gpio_service *dal_gpio_service_create(
- enum dce_version dce_version_major,
- enum dce_version dce_version_minor,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment,
struct dc_context *ctx)
{
struct gpio_service *service;
@@ -67,14 +67,14 @@ struct gpio_service *dal_gpio_service_create(
return NULL;
}
- if (!dal_hw_translate_init(&service->translate, dce_version_major,
- dce_version_minor)) {
+ if (!dal_hw_translate_init(&service->translate, dce_version,
+ dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
- if (!dal_hw_factory_init(&service->factory, dce_version_major,
- dce_version_minor)) {
+ if (!dal_hw_factory_init(&service->factory, dce_version,
+ dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index ffc3f2c63db8..bbb054f58fe2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -30,11 +30,21 @@
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
+#define TRAINING_AUX_RD_INTERVAL 100 //us
struct dc_link;
struct dc_stream_state;
struct dc_link_settings;
+enum {
+ LINK_TRAINING_MAX_RETRY_COUNT = 5,
+ /* to avoid an infinite loop wherein the receiver
+ * switches between different VS levels
+ */
+ LINK_TRAINING_MAX_CR_RETRY = 100
+};
+
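These bounds cap the training loops in dc_link_dp.c. A sketch of a clock-recovery loop bounded by the new constant, where write_drive_settings() and cr_done() are hypothetical stand-ins for the real sequence:

    uint32_t retries = 0;

    while (retries++ < LINK_TRAINING_MAX_CR_RETRY) {
        write_drive_settings(link, lt_settings);
        dp_wait_for_training_aux_rd_interval(link, TRAINING_AUX_RD_INTERVAL);
        if (cr_done(link, lt_settings))
            break;
    }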
bool dp_verify_link_cap(
struct dc_link *link,
struct dc_link_settings *known_limit_link_setting,
@@ -68,6 +78,10 @@ bool perform_link_training_with_retries(
enum signal_type signal,
bool do_fallback);
+bool hpd_rx_irq_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+
bool is_mst_supported(struct dc_link *link);
bool detect_dp_sink_caps(struct dc_link *link);
@@ -88,8 +102,51 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
void dpcd_set_source_specific_data(struct dc_link *link);
+/* Write DPCD link configuration data. */
+enum dc_status dpcd_set_link_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings);
+/* Write DPCD drive settings. */
+enum dc_status dpcd_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ uint32_t offset);
+/* Read training status and adjustment requests from DPCD. */
+enum dc_status dp_get_lane_status_and_drive_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ union lane_status *ln_status,
+ union lane_align_status_updated *ln_status_updated,
+ struct link_training_settings *req_settings,
+ uint32_t offset);
+
+void dp_wait_for_training_aux_rd_interval(
+ struct dc_link *link,
+ uint32_t wait_in_micro_secs);
+
+bool dp_is_cr_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+
+enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+
+bool dp_is_max_vs_reached(
+ const struct link_training_settings *lt_settings);
-void dp_set_fec_ready(struct dc_link *link, bool ready);
+void dp_update_drive_settings(
+ struct link_training_settings *dest,
+ struct link_training_settings src);
+
+enum dpcd_training_patterns
+ dc_dp_training_pattern_to_dpcd_training_pattern(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern);
+
+uint8_t dc_dp_initialize_scrambling_data_symbols(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern);
+
+enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
@@ -97,6 +154,13 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
+/* Initialize output parameter lt_settings. */
+void dp_decide_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ const struct dc_link_training_overrides *overrides,
+ struct link_training_settings *lt_settings);
+
/* Convert PHY repeater count read from DPCD uint8_t. */
uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count);
@@ -105,5 +169,9 @@ enum link_training_result dp_check_link_loss_status(
struct dc_link *link,
const struct link_training_settings *link_training_setting);
+enum dc_status dpcd_configure_lttpr_mode(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index e8ce8c85adf1..142753644377 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -52,7 +52,8 @@ struct abm_funcs {
unsigned int (*get_target_backlight)(struct abm *abm);
bool (*init_abm_config)(struct abm *abm,
const char *src,
- unsigned int bytes);
+ unsigned int bytes,
+ unsigned int inst);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 640bb432bd6a..75c77ad9cbfe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -363,9 +363,6 @@ struct mpc_funcs {
struct mpc *mpc,
int opp_id);
- void (*set_bg_color)(struct mpc *mpc,
- struct tg_color *bg_color,
- int mpcc_id);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index 2947d1b15512..2a0db2b03047 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -162,9 +162,7 @@ struct scl_inits {
struct fixed31_32 h;
struct fixed31_32 h_c;
struct fixed31_32 v;
- struct fixed31_32 v_bot;
struct fixed31_32 v_c;
- struct fixed31_32 v_c_bot;
};
struct scaler_data {
@@ -173,8 +171,6 @@ struct scaler_data {
struct scaling_taps taps;
struct rect viewport;
struct rect viewport_c;
- struct rect viewport_unadjusted;
- struct rect viewport_c_unadjusted;
struct rect recout;
struct scaling_ratios ratios;
struct scl_inits inits;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 43284d410687..1d5853c95448 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -235,10 +235,6 @@ struct hw_sequencer_funcs {
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width, int height, int offset);
- void (*update_visual_confirm_color)(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct tg_color *color,
- int mpcc_id);
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 79ff68c13902..ed58abc5b3f9 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -324,6 +324,7 @@ struct dmub_srv_hw_funcs {
uint32_t (*get_gpint_response)(struct dmub_srv *dmub);
+ void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
uint32_t (*get_current_time)(struct dmub_srv *dmub);
};
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index e6f3bfab33d3..70766d534c9c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -35,20 +35,13 @@
*/
/**
- *****************************************************************************
- * Function: dmub_srv_stat_get_notification
+ * dmub_srv_stat_get_notification - Retrieves a dmub outbox notification and
+ * sets up the dmub notification structure with
+ * message information; also sets a pending bit
+ * if the queue has more notifications
+ * @dmub: dmub srv structure
+ * @notify: dmub notification structure to be filled up
*
- * @brief
- * Retrieves a dmub outbox notification, set up dmub notification
- * structure with message information. Also a pending bit if queue
- * is having more notifications
- *
- * @param [in] dmub: dmub srv structure
- * @param [out] pnotify: dmub notification structure to be filled up
- *
- * @return
- * dmub_status
- *****************************************************************************
+ * Returns: dmub_status
*/
enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
struct dmub_notification *notify)
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
index 9c55d247227e..7e3240e73c1f 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -42,8 +42,8 @@ void dal_gpio_destroy(
struct gpio **ptr);
struct gpio_service *dal_gpio_service_create(
- enum dce_version dce_version_major,
- enum dce_version dce_version_minor,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment,
struct dc_context *ctx);
struct gpio *dal_gpio_service_create_irq(
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 7a30ca01e7d4..32f5274ed34e 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -85,6 +85,7 @@ struct link_training_settings {
enum dc_voltage_swing *voltage_swing;
enum dc_pre_emphasis *pre_emphasis;
enum dc_post_cursor2 *post_cursor2;
+ bool should_set_fec_ready;
uint16_t cr_pattern_time;
uint16_t eq_pattern_time;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 3f4f44b44e6a..b99aa232bd8b 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -516,7 +516,8 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
}
static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
- struct dc_info_packet *infopacket)
+ struct dc_info_packet *infopacket,
+ bool freesync_on_desktop)
{
/* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
infopacket->sb[1] = 0x1A;
@@ -542,10 +543,16 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
vrr->state != VRR_STATE_UNSUPPORTED)
infopacket->sb[6] |= 0x02;
- /* PB6 = [Bit 2 = FreeSync Active] */
- if (vrr->state != VRR_STATE_DISABLED &&
+ if (freesync_on_desktop) {
+ /* PB6 = [Bit 2 = FreeSync Active] */
+ if (vrr->state != VRR_STATE_DISABLED &&
vrr->state != VRR_STATE_UNSUPPORTED)
- infopacket->sb[6] |= 0x04;
+ infopacket->sb[6] |= 0x04;
+ } else {
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ vrr->state == VRR_STATE_ACTIVE_FIXED)
+ infopacket->sb[6] |= 0x04;
+ }
// For v1 & 2 infoframes program nominal if non-fs mode, otherwise full range
/* PB7 = FreeSync Minimum refresh rate (Hz) */
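The policy split for sb[6] bit 2 ("FreeSync Active"), summarized:

    /* freesync_on_desktop: set unless state is DISABLED or UNSUPPORTED
     * otherwise:           set only for ACTIVE_VARIABLE / ACTIVE_FIXED */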
@@ -824,13 +831,14 @@ static void build_vrr_infopacket_checksum(unsigned int *payload_size,
static void build_vrr_infopacket_v1(enum signal_type signal,
const struct mod_vrr_params *vrr,
- struct dc_info_packet *infopacket)
+ struct dc_info_packet *infopacket,
+ bool freesync_on_desktop)
{
/* SPD info packet for FreeSync */
unsigned int payload_size = 0;
build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
- build_vrr_infopacket_data_v1(vrr, infopacket);
+ build_vrr_infopacket_data_v1(vrr, infopacket, freesync_on_desktop);
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
@@ -839,12 +847,13 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
static void build_vrr_infopacket_v2(enum signal_type signal,
const struct mod_vrr_params *vrr,
enum color_transfer_func app_tf,
- struct dc_info_packet *infopacket)
+ struct dc_info_packet *infopacket,
+ bool freesync_on_desktop)
{
unsigned int payload_size = 0;
build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
- build_vrr_infopacket_data_v1(vrr, infopacket);
+ build_vrr_infopacket_data_v1(vrr, infopacket, freesync_on_desktop);
build_vrr_infopacket_fs2_data(app_tf, infopacket);
@@ -953,12 +962,12 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
#endif
break;
case PACKET_TYPE_FS_V2:
- build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
+ build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
break;
case PACKET_TYPE_VRR:
case PACKET_TYPE_FS_V1:
default:
- build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
+ build_vrr_infopacket_v1(stream->signal, vrr, infopacket, stream->freesync_on_desktop);
}
if (true == pack_sdp_v1_3 &&
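The hunk above changes when the FreeSync Active bit (PB6 bit 2) is reported: with freesync_on_desktop the old behaviour is kept (active whenever VRR is supported and not disabled), otherwise the bit is only set while VRR is actually engaged. A minimal sketch of that policy, using the state names from the hunk (the helper itself is illustrative, not from this patch):

static bool fs_active_bit(int state, bool freesync_on_desktop)
{
        if (freesync_on_desktop)
                /* legacy: advertise active unless VRR is off entirely */
                return state != VRR_STATE_DISABLED &&
                       state != VRR_STATE_UNSUPPORTED;
        /* strict: advertise active only while VRR is engaged */
        return state == VRR_STATE_ACTIVE_VARIABLE ||
               state == VRR_STATE_ACTIVE_FIXED;
}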
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 43e6f8b17e79..de872e7958b0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -29,8 +29,10 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
{
uint64_t n = 0;
uint8_t count = 0;
+ u8 bksv[sizeof(n)] = { };
- memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t));
+ memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
+ n = *(uint64_t *)bksv;
while (n) {
count++;
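The staging buffer above fixes an out-of-bounds read: the old code memcpy()'d sizeof(uint64_t) bytes out of the 5-byte bksv array. For reference, a self-contained sketch of the same check (an HDCP 1.x BKSV is valid when exactly 20 of its 40 bits are set); the helper name is illustrative:

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/types.h>

static bool bksv_is_valid(const u8 bksv[5])
{
        u64 n = 0;

        memcpy(&n, bksv, 5);            /* copy only the 5 valid bytes */
        return hweight64(n) == 20;      /* count the set bits */
}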
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 26f96c05e0ec..06910d2fd57a 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -371,19 +371,6 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
return status;
}
-enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
- enum mod_hdcp_encryption_status *encryption_status)
-{
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-
- if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS)
- return MOD_HDCP_STATUS_FAILURE;
-
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
-
- return MOD_HDCP_STATUS_SUCCESS;
-}
-
enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 6270ecbd2438..5e7331be1c0d 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -660,7 +660,8 @@ static void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram
}
bool dmub_init_abm_config(struct resource_pool *res_pool,
- struct dmcu_iram_parameters params)
+ struct dmcu_iram_parameters params,
+ unsigned int inst)
{
struct iram_table_v_2_2 ram_table;
struct abm_config_table config;
@@ -669,7 +670,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
uint32_t i, j = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (res_pool->abm == NULL && res_pool->multiple_abms[0] == NULL)
+ if (res_pool->abm == NULL && res_pool->multiple_abms[inst] == NULL)
return false;
#else
if (res_pool->abm == NULL)
@@ -728,13 +729,13 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
config.min_abm_backlight = ram_table.min_abm_backlight;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (res_pool->multiple_abms[0])
- result = res_pool->multiple_abms[0]->funcs->init_abm_config(
- res_pool->multiple_abms[0], (char *)(&config), sizeof(struct abm_config_table));
- else
+ if (res_pool->multiple_abms[inst]) {
+ result = res_pool->multiple_abms[inst]->funcs->init_abm_config(
+ res_pool->multiple_abms[inst], (char *)(&config), sizeof(struct abm_config_table), inst);
+ } else
#endif
result = res_pool->abm->funcs->init_abm_config(
- res_pool->abm, (char *)(&config), sizeof(struct abm_config_table));
+ res_pool->abm, (char *)(&config), sizeof(struct abm_config_table), 0);
return result;
}
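With the new inst parameter, callers can program each ABM instance separately instead of always hitting instance 0, while DMCU-only pools still use the single res_pool->abm. A hypothetical usage sketch (the NUM_ABM_INSTANCES bound is illustrative, not defined by this patch):

        unsigned int inst;

        for (inst = 0; inst < NUM_ABM_INSTANCES; inst++)
                if (!dmub_init_abm_config(res_pool, params, inst))
                        break;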
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 6f2eecce6baa..2a9f8e2d8080 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -49,6 +49,7 @@ struct dmcu_iram_parameters {
bool dmcu_load_iram(struct dmcu *dmcu,
struct dmcu_iram_parameters params);
bool dmub_init_abm_config(struct resource_pool *res_pool,
- struct dmcu_iram_parameters params);
+ struct dmcu_iram_parameters params,
+ unsigned int inst);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/aldebaran_ip_offset.h b/drivers/gpu/drm/amd/include/aldebaran_ip_offset.h
index 644ffec2b0ce..cdd426b41c20 100644
--- a/drivers/gpu/drm/amd/include/aldebaran_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/aldebaran_ip_offset.h
@@ -30,7 +30,7 @@ struct IP_BASE_INSTANCE {
struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0x02408C00, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
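Note that __maybe_unused is applied to the struct type rather than to individual variables: for GCC, the unused attribute on a type marks every variable of that type as possibly unused, which silences -Wunused-const-variable for the many static const IP_BASE tables this header defines. A minimal illustration (names are made up):

struct example {
        int x;
} __maybe_unused;

static const struct example table = { 1 };      /* no warning if unreferenced */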
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index c72cbfe8f684..2d089d30518f 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -103,6 +103,13 @@ struct atcs_pref_req_output {
u8 ret_val; /* return value */
} __packed;
+struct atcs_pwr_shift_input {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 dgpu_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+ u8 dev_acpi_state; /* D0 = 0, D3 hot = 3 */
+ u8 drv_state; /* 0 = operational, 1 = not operational */
+} __packed;
+
/* AMD hw uses four ACPI control methods:
* 1. ATIF
* ARG0: (ACPI_INTEGER) function code
@@ -418,6 +425,7 @@ struct atcs_pref_req_output {
# define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED (1 << 1)
# define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED (1 << 2)
# define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED (1 << 3)
+# define ATCS_SET_POWER_SHIFT_CONTROL_SUPPORTED (1 << 7)
#define ATCS_FUNCTION_GET_EXTERNAL_STATE 0x1
/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
* ARG1: none
@@ -472,4 +480,14 @@ struct atcs_pref_req_output {
* BYTE - number of active lanes
*/
+#define ATCS_FUNCTION_POWER_SHIFT_CONTROL 0x8
+/* ARG0: ATCS_FUNCTION_POWER_SHIFT_CONTROL
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * BYTE - Device ACPI state
+ * BYTE - Driver state
+ * OUTPUT: none
+ */
+
#endif
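The new atcs_pwr_shift_input mirrors the ARG1 layout documented just above (WORD size, WORD dGPU id, BYTE ACPI state, BYTE driver state). A hypothetical sketch of filling it from a PCI bus/device/function triple:

static void atcs_fill_pwr_shift(struct atcs_pwr_shift_input *in, u8 bus,
                                u8 dev, u8 fn, bool d3hot, bool operational)
{
        in->size = sizeof(*in);                 /* includes the size field */
        in->dgpu_id = (bus << 8) | (dev << 3) | fn; /* 15-8 bus, 7-3 dev, 2-0 fn */
        in->dev_acpi_state = d3hot ? 3 : 0;     /* D0 = 0, D3 hot = 3 */
        in->drv_state = operational ? 0 : 1;    /* 0 = operational */
}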
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 9ab706cd07ff..332b0df53e52 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -246,6 +246,7 @@ enum amd_dpm_forced_level;
* @late_init: sets up late driver/hw state (post hw_init) - Optional
* @sw_init: sets up driver state, does not configure hw
* @sw_fini: tears down driver state, does not configure hw
+ * @early_fini: tears down driver state before the device is detached from the driver
* @hw_init: sets up the hw state
* @hw_fini: tears down the hw state
* @late_fini: final cleanup
@@ -274,6 +275,7 @@ struct amd_ip_funcs {
int (*late_init)(void *handle);
int (*sw_init)(void *handle);
int (*sw_fini)(void *handle);
+ int (*early_fini)(void *handle);
int (*hw_init)(void *handle);
int (*hw_fini)(void *handle);
void (*late_fini)(void *handle);
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 47eb84598b96..6a505d1b82a5 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -5178,11 +5178,11 @@ typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_SVID2
-// 14:7 � PSI0_VID
-// 6 � PSI0_EN
-// 5 � PSI1
-// 4:2 � load line slope trim.
-// 1:0 � offset trim,
+// 14:7 - PSI0_VID
+// 6 - PSI0_EN
+// 5 - PSI1
+// 4:2 - load line slope trim.
+// 1:0 - offset trim,
USHORT usLoadLine_PSI;
// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
UCHAR ucSVDGpioId; //0~31 indicate GPIO0~31
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 2aa6d27ed68f..28deecc2f990 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -197,6 +197,9 @@ enum atom_dp_vs_preemph_def{
DP_VS_LEVEL0_PREEMPH_LEVEL3 = 0x18,
};
+#define BIOS_ATOM_PREFIX "ATOMBIOS"
+#define BIOS_VERSION_PREFIX "ATOMBIOSBK-AMD"
+#define BIOS_STRING_LENGTH 43
/*
enum atom_string_def{
@@ -209,12 +212,14 @@ atom_bios_string = "ATOM"
#pragma pack(1) /* BIOS data must use byte alignment */
enum atombios_image_offset{
-OFFSET_TO_ATOM_ROM_HEADER_POINTER =0x00000048,
-OFFSET_TO_ATOM_ROM_IMAGE_SIZE =0x00000002,
-OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE =0x94,
-MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE =20, /*including the terminator 0x0!*/
-OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS =0x2f,
-OFFSET_TO_GET_ATOMBIOS_STRING_START =0x6e,
+ OFFSET_TO_ATOM_ROM_HEADER_POINTER = 0x00000048,
+ OFFSET_TO_ATOM_ROM_IMAGE_SIZE = 0x00000002,
+ OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE = 0x94,
+ MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE = 20, /*including the terminator 0x0!*/
+ OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS = 0x2f,
+ OFFSET_TO_GET_ATOMBIOS_STRING_START = 0x6e,
+ OFFSET_TO_VBIOS_PART_NUMBER = 0x80,
+ OFFSET_TO_VBIOS_DATE = 0x50,
};
/****************************************************************************
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e2d13131a432..b1cd52a9d684 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -536,6 +536,75 @@ struct gpu_metrics_v1_2 {
uint64_t firmware_timestamp;
};
+struct gpu_metrics_v1_3 {
+ struct metrics_table_header common_header;
+
+ /* Temperature */
+ uint16_t temperature_edge;
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrgfx;
+ uint16_t temperature_vrsoc;
+ uint16_t temperature_vrmem;
+
+ /* Utilization */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+ uint16_t average_mm_activity; // UVD or VCN
+
+ /* Power/Energy */
+ uint16_t average_socket_power;
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Average clocks */
+ uint16_t average_gfxclk_frequency;
+ uint16_t average_socclk_frequency;
+ uint16_t average_uclk_frequency;
+ uint16_t average_vclk0_frequency;
+ uint16_t average_dclk0_frequency;
+ uint16_t average_vclk1_frequency;
+ uint16_t average_dclk1_frequency;
+
+ /* Current clocks */
+ uint16_t current_gfxclk;
+ uint16_t current_socclk;
+ uint16_t current_uclk;
+ uint16_t current_vclk0;
+ uint16_t current_dclk0;
+ uint16_t current_vclk1;
+ uint16_t current_dclk1;
+
+ /* Throttle status */
+ uint32_t throttle_status;
+
+ /* Fans */
+ uint16_t current_fan_speed;
+
+ /* Link width/speed */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed; // in 0.1 GT/s
+
+ uint16_t padding;
+
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ uint16_t temperature_hbm[NUM_HBM_INSTANCES];
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Voltage (mV) */
+ uint16_t voltage_soc;
+ uint16_t voltage_gfx;
+ uint16_t voltage_mem;
+
+ uint16_t padding1;
+};
+
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
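Consumers of the metrics table should validate the common header before casting to a particular revision. A sketch, assuming the usual metrics_table_header layout (a size field plus format/content revision bytes); the helper is illustrative:

static struct gpu_metrics_v1_3 *metrics_as_v1_3(void *table)
{
        struct metrics_table_header *hdr = table;

        if (hdr->format_revision != 1 || hdr->content_revision != 3)
                return NULL;                    /* not a v1.3 table */
        return table;
}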
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 13da377888d2..f48132bc089d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1942,7 +1942,7 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
BUG_ON(!attr);
- attr_update = attr->attr_update ? attr_update : default_attr_update;
+ attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
ret = attr_update(adev, attr, mask, &attr_states);
if (ret) {
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
index 7a6d049e65e3..61c87c39be80 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
@@ -1176,7 +1176,7 @@ typedef struct {
uint16_t LedGpio; //Generic GPIO flag used to control the radeon LEDs
uint16_t GfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages
- uint32_t SkuReserved[16];
+ uint32_t SkuReserved[63];
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index 1687709507b3..6119a36b2cba 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -51,12 +51,6 @@
#define CTF_OFFSET_HOTSPOT 5
#define CTF_OFFSET_MEM 5
-static const struct smu_temperature_range smu13_thermal_policy[] =
-{
- {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
- { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
-};
-
struct smu_13_0_max_sustainable_clocks {
uint32_t display_clock;
uint32_t phy_clock;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
index 25b5831a15cd..981dc8c7112d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
@@ -82,7 +82,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
/* Skip for suspend/resume case */
if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
- && !amdgpu_passthrough(adev) && adev->in_suspend) {
+ && !amdgpu_passthrough(adev) && adev->in_suspend
+ && adev->asic_type != CHIP_RAVEN) {
pr_info("dpm has been enabled\n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index f5fe540cd536..8f71f6a4bb49 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -377,6 +377,27 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ int ret = -EINVAL;
+
+ if (adev->in_suspend) {
+ pr_info("restore the fine grain parameters\n");
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ smu10_data->gfx_actual_soft_min_freq,
+ NULL);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ smu10_data->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
index 0d38d4206848..6cfe148ed45b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
@@ -129,10 +129,10 @@ int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
}
/**
-* Reset Fan Speed Control to default mode.
-* @hwmgr: the address of the powerplay hardware manager.
-* Exception: Should always succeed.
-*/
+ * smu7_fan_ctrl_set_default_mode - Reset Fan Speed Control to default mode.
+ * @hwmgr: the address of the powerplay hardware manager.
+ * Exception: Should always succeed.
+ */
int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->fan_ctrl_is_in_default_mode) {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 31c61ac3bd5e..25979106fd25 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -544,7 +544,7 @@ static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
/**
- * Get Leakage VDDC based on leakage ID.
+ * vega10_get_evv_voltages - Get Leakage VDDC based on leakage ID.
*
* @hwmgr: the address of the powerplay hardware manager.
* return: always 0.
@@ -600,7 +600,7 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
}
/**
- * Change virtual leakage voltage to actual value.
+ * vega10_patch_with_vdd_leakage - Change virtual leakage voltage to actual value.
*
* @hwmgr: the address of the powerplay hardware manager.
* @voltage: pointer to changing voltage
@@ -626,7 +626,7 @@ static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
/**
- * Patch voltage lookup table by EVV leakages.
+ * vega10_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
*
* @hwmgr: the address of the powerplay hardware manager.
* @lookup_table: pointer to voltage lookup table
@@ -1003,7 +1003,7 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
}
/**
- * Remove repeated voltage values and create table with unique values.
+ * vega10_trim_voltage_table - Remove repeated voltage values and create table with unique values.
*
* @hwmgr: the address of the powerplay hardware manager.
* @vol_table: the pointer to changing voltage table
@@ -1152,7 +1152,7 @@ static void vega10_trim_voltage_table_to_fit_state_table(
}
/**
- * Create Voltage Tables.
+ * vega10_construct_voltage_tables - Create Voltage Tables.
*
* @hwmgr: the address of the powerplay hardware manager.
* return: always 0
@@ -1595,7 +1595,8 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
}
/**
- * Populates single SMC GFXSCLK structure using the provided engine clock
+ * vega10_populate_single_gfx_level - Populates single SMC GFXSCLK structure
+ * using the provided engine clock
*
* @hwmgr: the address of the hardware manager
* @gfx_clock: the GFX clock to use to populate the structure.
@@ -1660,7 +1661,8 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
}
/**
- * Populates single SMC SOCCLK structure using the provided clock.
+ * vega10_populate_single_soc_level - Populates single SMC SOCCLK structure
+ * using the provided clock.
*
* @hwmgr: the address of the hardware manager.
* @soc_clock: the SOC clock to use to populate the structure.
@@ -1710,7 +1712,8 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
}
/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ * vega10_populate_all_graphic_levels - Populates all SMC SCLK levels' structure
+ * based on the trimmed allowed dpm engine clock states
*
* @hwmgr: the address of the hardware manager
*/
@@ -1859,7 +1862,8 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
}
/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
+ * vega10_populate_all_memory_levels - Populates all SMC MCLK levels' structure
+ * based on the trimmed allowed dpm memory clock states.
*
* @hwmgr: the address of the hardware manager.
* return: PP_Result_OK on success.
@@ -2537,7 +2541,7 @@ static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
}
/**
- * Initializes the SMC table and uploads it
+ * vega10_init_smc_table - Initializes the SMC table and uploads it
*
* @hwmgr: the address of the powerplay hardware manager.
* return: always 0
@@ -2919,7 +2923,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
/**
- * Tell SMC to enabled the supported DPMs.
+ * vega10_start_dpm - Tell SMC to enable the supported DPMs.
*
* @hwmgr: the address of the powerplay hardware manager.
* @bitmap: bitmap for the features to enable.
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index 1a097e608808..29e0d1d4035a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -803,7 +803,7 @@ static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
#endif
/**
- * Initializes the SMC table and uploads it
+ * vega12_init_smc_table - Initializes the SMC table and uploads it
*
* @hwmgr: the address of the powerplay hardware manager.
* return: always 0
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
index 0dc16f25a463..ed3dff0b52d2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
@@ -159,7 +159,8 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
}
/**
- * Set the requested temperature range for high and low alert signals
+ * vega12_thermal_set_temperature_range - Set the requested temperature range
+ * for high and low alert signals
*
* @hwmgr: The address of the hardware manager.
* @range: Temperature range to be programmed for
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index d3177a534fdf..0791309586c5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -772,7 +772,7 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
/**
- * Initializes the SMC table and uploads it
+ * vega20_init_smc_table - Initializes the SMC table and uploads it
*
* @hwmgr: the address of the powerplay hardware manager.
* return: always 0
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 77693bf0840c..1735a96dd307 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -822,6 +822,52 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
now) ? "*" : ""));
break;
+ case SMU_VCLK:
+ ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now) ? "*" : ""));
+ break;
+
+ case SMU_DCLK:
+ ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+ ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now) ? "*" : ""));
+ break;
+
case SMU_PCIE:
gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ac13042672ea..78fe13183e8b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
@@ -1273,6 +1273,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_MCLK:
case SMU_UCLK:
case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
case SMU_DCEFCLK:
ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
if (ret)
@@ -2627,8 +2629,8 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_1 *gpu_metrics =
- (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetrics_legacy_t metrics;
int ret = 0;
@@ -2646,7 +2648,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
mutex_unlock(&smu->metrics_lock);
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2681,17 +2683,27 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+ if (metrics.CurrGfxVoltageOffset)
+ gpu_metrics->voltage_gfx =
+ (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+ if (metrics.CurrMemVidOffset)
+ gpu_metrics->voltage_mem =
+ (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+ if (metrics.CurrSocVoltageOffset)
+ gpu_metrics->voltage_soc =
+ (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v1_1);
+ return sizeof(struct gpu_metrics_v1_3);
}
static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_1 *gpu_metrics =
- (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetrics_t metrics;
int ret = 0;
@@ -2709,7 +2721,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
mutex_unlock(&smu->metrics_lock);
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2746,17 +2758,27 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+ if (metrics.CurrGfxVoltageOffset)
+ gpu_metrics->voltage_gfx =
+ (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+ if (metrics.CurrMemVidOffset)
+ gpu_metrics->voltage_mem =
+ (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+ if (metrics.CurrSocVoltageOffset)
+ gpu_metrics->voltage_soc =
+ (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v1_1);
+ return sizeof(struct gpu_metrics_v1_3);
}
static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_1 *gpu_metrics =
- (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetrics_NV12_legacy_t metrics;
int ret = 0;
@@ -2774,7 +2796,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
mutex_unlock(&smu->metrics_lock);
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2814,17 +2836,27 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+ if (metrics.CurrGfxVoltageOffset)
+ gpu_metrics->voltage_gfx =
+ (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+ if (metrics.CurrMemVidOffset)
+ gpu_metrics->voltage_mem =
+ (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+ if (metrics.CurrSocVoltageOffset)
+ gpu_metrics->voltage_soc =
+ (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v1_1);
+ return sizeof(struct gpu_metrics_v1_3);
}
static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_1 *gpu_metrics =
- (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetrics_NV12_t metrics;
int ret = 0;
@@ -2842,7 +2874,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
mutex_unlock(&smu->metrics_lock);
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2884,9 +2916,19 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+ if (metrics.CurrGfxVoltageOffset)
+ gpu_metrics->voltage_gfx =
+ (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+ if (metrics.CurrMemVidOffset)
+ gpu_metrics->voltage_mem =
+ (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+ if (metrics.CurrSocVoltageOffset)
+ gpu_metrics->voltage_soc =
+ (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
*table = (void *)gpu_metrics;
- return sizeof(struct gpu_metrics_v1_1);
+ return sizeof(struct gpu_metrics_v1_3);
}
static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
@@ -2925,6 +2967,8 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
struct amdgpu_device *adev = smu->adev;
uint32_t param = 0;
@@ -2932,6 +2976,13 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
if (adev->asic_type == CHIP_NAVI12)
return 0;
+ /*
+ * Skip the MGpuFanBoost setting for those ASICs
+ * which do not support it
+ */
+ if (!smc_pptable->MGpuFanBoostLimitRpm)
+ return 0;
+
/* Workaround for WS SKU */
if (adev->pdev->device == 0x7312 &&
adev->pdev->revision == 0)
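The voltage fields populated above decode an SVI2-style telemetry offset into millivolts as (155000 - 625 * offset) / 100, i.e. a 1550 mV base stepped down by 6.25 mV per count, kept in integer arithmetic (the interpretation of the encoding is inferred from the formula itself). A worked sketch:

static u16 voltage_offset_to_mv(u16 offset)
{
        /* 1550 mV base, 6.25 mV per step, in fixed point */
        return (155000 - 625 * (u32)offset) / 100;
}

/* e.g. offset 40: (155000 - 25000) / 100 = 1300 mV */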
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 0c40a54c46d7..75acdb80c499 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -987,6 +987,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
case SMU_MCLK:
case SMU_UCLK:
case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK:
+ case SMU_DCLK1:
case SMU_DCEFCLK:
ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
if (ret)
@@ -3690,6 +3694,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+
+ /*
+ * Skip the MGpuFanBoost setting for those ASICs
+ * which do not support it
+ */
+ if (!smc_pptable->MGpuFanBoostLimitRpm)
+ return 0;
+
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMGpuFanBoostLimitRpm,
0,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index f43b4c623685..1c399c4ab4dc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -109,6 +109,8 @@ static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(SOCCLK, CLOCK_SOCCLK),
CLK_MAP(UCLK, CLOCK_FCLK),
CLK_MAP(MCLK, CLOCK_FCLK),
+ CLK_MAP(VCLK, CLOCK_VCLK),
+ CLK_MAP(DCLK, CLOCK_DCLK),
};
static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -202,6 +204,17 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
return -EINVAL;
*freq = clk_table->FClocks[dpm_level].Freq;
break;
+ case SMU_VCLK:
+ if (dpm_level >= NUM_VCN_DPM_LEVELS)
+ return -EINVAL;
+ *freq = clk_table->VClocks[dpm_level].Freq;
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= NUM_VCN_DPM_LEVELS)
+ return -EINVAL;
+ *freq = clk_table->DClocks[dpm_level].Freq;
+ break;
+
default:
return -EINVAL;
}
@@ -532,6 +545,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
count = NUM_FCLK_DPM_LEVELS;
cur_value = metrics.ClockFrequency[CLOCK_FCLK];
break;
+ case SMU_VCLK:
+ count = NUM_VCN_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_VCLK];
+ break;
+ case SMU_DCLK:
+ count = NUM_VCN_DPM_LEVELS;
+ cur_value = metrics.ClockFrequency[CLOCK_DCLK];
+ break;
default:
break;
}
@@ -543,6 +564,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_MCLK:
case SMU_DCEFCLK:
case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
for (i = 0; i < count; i++) {
ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
if (ret)
@@ -730,6 +753,16 @@ static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks
clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
}
+ for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+ clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
+ clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+ clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
+ clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 7c191a5d6db9..7a1abb3d6a7a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -78,6 +78,12 @@
#define smnPCIE_ESM_CTRL 0x111003D0
+static const struct smu_temperature_range smu13_thermal_policy[] =
+{
+ {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
+ { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
+};
+
static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -816,6 +822,52 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
now) ? "*" : ""));
break;
+ case SMU_VCLK:
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+ ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ? "*" :
+ (aldebaran_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now) ? "*" : ""));
+ break;
+
+ case SMU_DCLK:
+ ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+ ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ (clocks.num_levels == 1) ? "*" :
+ (aldebaran_freqs_in_same_level(
+ clocks.data[i].clocks_in_khz / 1000,
+ now) ? "*" : ""));
+ break;
+
default:
break;
}
@@ -1316,10 +1368,13 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
static bool aldebaran_is_dpm_running(struct smu_context *smu)
{
- int ret = 0;
+ int ret;
uint32_t feature_mask[2];
unsigned long feature_enabled;
+
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return false;
feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
((uint64_t)feature_mask[1] << 32));
return !!(feature_enabled & SMC_DPM_FEATURE);
@@ -1810,10 +1865,8 @@ static int aldebaran_set_mp1_state(struct smu_context *smu,
case PP_MP1_STATE_UNLOAD:
return smu_cmn_set_mp1_state(smu, mp1_state);
default:
- return -EINVAL;
+ return 0;
}
-
- return 0;
}
static const struct pptable_funcs aldebaran_ppt_funcs = {
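aldebaran_is_dpm_running() now returns false when the enabled-feature mask cannot be read, rather than testing uninitialized stack data. The mask itself is two 32-bit words folded into one 64-bit value; a minimal sketch of that fold and test:

static bool feature_bit_set(u32 lo, u32 hi, u64 feature)
{
        u64 mask = (u64)lo | ((u64)hi << 32);

        return !!(mask & feature);
}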
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 0934e5b3aa17..0882a1dd6797 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -126,7 +126,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
- if (smu->adev->in_pci_err_recovery)
+ if (smu->adev->no_hw_access)
return 0;
index = smu_cmn_to_asic_specific_index(smu,
@@ -764,6 +764,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(1, 2):
structure_size = sizeof(struct gpu_metrics_v1_2);
break;
+ case METRICS_VERSION(1, 3):
+ structure_size = sizeof(struct gpu_metrics_v1_3);
+ break;
case METRICS_VERSION(2, 0):
structure_size = sizeof(struct gpu_metrics_v2_0);
break;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index d25e900f07ef..85b673613687 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -50,6 +50,19 @@ config DRM_CHRONTEL_CH7033
If in doubt, say "N".
+config DRM_CROS_EC_ANX7688
+ tristate "ChromeOS EC ANX7688 bridge"
+ depends on OF
+ depends on I2C_CROS_EC_TUNNEL || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ ChromeOS EC ANX7688 is an ultra-low power
+ 4K Ultra-HD (4096x2160p60) mobile HD transmitter
+ designed for ChromeOS devices. It converts HDMI
+ 2.0 to DisplayPort 1.3 Ultra-HD. It is connected
+ to the ChromeOS Embedded Controller.
+
config DRM_DISPLAY_CONNECTOR
tristate "Display connector support"
depends on OF
@@ -272,6 +285,16 @@ config DRM_TI_TFP410
help
Texas Instruments TFP410 DVI/HDMI Transmitter driver
+config DRM_TI_SN65DSI83
+ tristate "TI SN65DSI83 and SN65DSI84 DSI to LVDS bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ Texas Instruments SN65DSI83 and SN65DSI84 DSI to LVDS Bridge driver
+
config DRM_TI_SN65DSI86
tristate "TI SN65DSI86 DSI to eDP bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 965b54dccfe5..f2c73683cfcb 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
+obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
obj-$(CONFIG_DRM_TOSHIBA_TC358775) += tc358775.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index b4e349ca38fe..7519b7a0f29d 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -1005,33 +1006,6 @@ static void anx7625_power_on_init(struct anx7625_data *ctx)
}
}
-static void anx7625_chip_control(struct anx7625_data *ctx, int state)
-{
- struct device *dev = &ctx->client->dev;
-
- DRM_DEV_DEBUG_DRIVER(dev, "before set, power_state(%d).\n",
- atomic_read(&ctx->power_status));
-
- if (!ctx->pdata.low_power_mode)
- return;
-
- if (state) {
- atomic_inc(&ctx->power_status);
- if (atomic_read(&ctx->power_status) == 1)
- anx7625_power_on_init(ctx);
- } else {
- if (atomic_read(&ctx->power_status)) {
- atomic_dec(&ctx->power_status);
-
- if (atomic_read(&ctx->power_status) == 0)
- anx7625_power_standby(ctx);
- }
- }
-
- DRM_DEV_DEBUG_DRIVER(dev, "after set, power_state(%d).\n",
- atomic_read(&ctx->power_status));
-}
-
static void anx7625_init_gpio(struct anx7625_data *platform)
{
struct device *dev = &platform->client->dev;
@@ -1061,9 +1035,6 @@ static void anx7625_stop_dp_work(struct anx7625_data *ctx)
ctx->hpd_status = 0;
ctx->hpd_high_cnt = 0;
ctx->display_timing_valid = 0;
-
- if (ctx->pdata.low_power_mode == 0)
- anx7625_disable_pd_protocol(ctx);
}
static void anx7625_start_dp_work(struct anx7625_data *ctx)
@@ -1105,49 +1076,26 @@ static void anx7625_hpd_polling(struct anx7625_data *ctx)
int ret, val;
struct device *dev = &ctx->client->dev;
- if (atomic_read(&ctx->power_status) != 1) {
- DRM_DEV_DEBUG_DRIVER(dev, "No need to poling HPD status.\n");
- return;
- }
-
ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
ctx, val,
((val & HPD_STATUS) || (val < 0)),
5000,
5000 * 100);
if (ret) {
- DRM_DEV_ERROR(dev, "HPD polling timeout!\n");
- } else {
- DRM_DEV_DEBUG_DRIVER(dev, "HPD raise up.\n");
- anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
- INTR_ALERT_1, 0xFF);
- anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
- INTERFACE_CHANGE_INT, 0);
+ DRM_DEV_ERROR(dev, "no hpd.\n");
+ return;
}
- anx7625_start_dp_work(ctx);
-}
-
-static void anx7625_disconnect_check(struct anx7625_data *ctx)
-{
- if (atomic_read(&ctx->power_status) == 0)
- anx7625_stop_dp_work(ctx);
-}
-
-static void anx7625_low_power_mode_check(struct anx7625_data *ctx,
- int state)
-{
- struct device *dev = &ctx->client->dev;
+ DRM_DEV_DEBUG_DRIVER(dev, "system status: 0x%x. HPD raised.\n", val);
+ anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
+ INTR_ALERT_1, 0xFF);
+ anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ INTERFACE_CHANGE_INT, 0);
- DRM_DEV_DEBUG_DRIVER(dev, "low power mode check, state(%d).\n", state);
+ anx7625_start_dp_work(ctx);
- if (ctx->pdata.low_power_mode) {
- anx7625_chip_control(ctx, state);
- if (state)
- anx7625_hpd_polling(ctx);
- else
- anx7625_disconnect_check(ctx);
- }
+ if (!ctx->pdata.panel_bridge && ctx->bridge_attached)
+ drm_helper_hpd_irq_event(ctx->bridge.dev);
}
static void anx7625_remove_edid(struct anx7625_data *ctx)
@@ -1180,9 +1128,6 @@ static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
int intr_vector, status;
struct device *dev = &ctx->client->dev;
- DRM_DEV_DEBUG_DRIVER(dev, "power_status=%d\n",
- (u32)atomic_read(&ctx->power_status));
-
status = anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
INTR_ALERT_1, 0xFF);
if (status < 0) {
@@ -1228,22 +1173,25 @@ static void anx7625_work_func(struct work_struct *work)
struct anx7625_data, work);
mutex_lock(&ctx->lock);
+
+ if (pm_runtime_suspended(&ctx->client->dev))
+ goto unlock;
+
event = anx7625_hpd_change_detect(ctx);
- mutex_unlock(&ctx->lock);
if (event < 0)
- return;
+ goto unlock;
if (ctx->bridge_attached)
drm_helper_hpd_irq_event(ctx->bridge.dev);
+
+unlock:
+ mutex_unlock(&ctx->lock);
}
static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data)
{
struct anx7625_data *ctx = (struct anx7625_data *)data;
- if (atomic_read(&ctx->power_status) != 1)
- return IRQ_NONE;
-
queue_work(ctx->workqueue, &ctx->work);
return IRQ_HANDLED;
@@ -1305,9 +1253,9 @@ static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
return (struct edid *)edid;
}
- anx7625_low_power_mode_check(ctx, 1);
+ pm_runtime_get_sync(dev);
edid_num = sp_tx_edid_read(ctx, p_edid->edid_raw_data);
- anx7625_low_power_mode_check(ctx, 0);
+ pm_runtime_put_sync(dev);
if (edid_num < 1) {
DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num);
@@ -1611,10 +1559,7 @@ static void anx7625_bridge_enable(struct drm_bridge *bridge)
DRM_DEV_DEBUG_DRIVER(dev, "drm enable\n");
- anx7625_low_power_mode_check(ctx, 1);
-
- if (WARN_ON(!atomic_read(&ctx->power_status)))
- return;
+ pm_runtime_get_sync(dev);
anx7625_dp_start(ctx);
}
@@ -1624,14 +1569,11 @@ static void anx7625_bridge_disable(struct drm_bridge *bridge)
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = &ctx->client->dev;
- if (WARN_ON(!atomic_read(&ctx->power_status)))
- return;
-
DRM_DEV_DEBUG_DRIVER(dev, "drm disable\n");
anx7625_dp_stop(ctx);
- anx7625_low_power_mode_check(ctx, 0);
+ pm_runtime_put_sync(dev);
}
static enum drm_connector_status
@@ -1735,6 +1677,71 @@ static void anx7625_unregister_i2c_dummy_clients(struct anx7625_data *ctx)
i2c_unregister_device(ctx->i2c.tcpc_client);
}
+static int __maybe_unused anx7625_runtime_pm_suspend(struct device *dev)
+{
+ struct anx7625_data *ctx = dev_get_drvdata(dev);
+
+ mutex_lock(&ctx->lock);
+
+ anx7625_stop_dp_work(ctx);
+ anx7625_power_standby(ctx);
+
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+}
+
+static int __maybe_unused anx7625_runtime_pm_resume(struct device *dev)
+{
+ struct anx7625_data *ctx = dev_get_drvdata(dev);
+
+ mutex_lock(&ctx->lock);
+
+ anx7625_power_on_init(ctx);
+ anx7625_hpd_polling(ctx);
+
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+}
+
+static int __maybe_unused anx7625_resume(struct device *dev)
+{
+ struct anx7625_data *ctx = dev_get_drvdata(dev);
+
+ if (!ctx->pdata.intp_irq)
+ return 0;
+
+ if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+ enable_irq(ctx->pdata.intp_irq);
+ anx7625_runtime_pm_resume(dev);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused anx7625_suspend(struct device *dev)
+{
+ struct anx7625_data *ctx = dev_get_drvdata(dev);
+
+ if (!ctx->pdata.intp_irq)
+ return 0;
+
+ if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+ anx7625_runtime_pm_suspend(dev);
+ disable_irq(ctx->pdata.intp_irq);
+ flush_workqueue(ctx->workqueue);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops anx7625_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(anx7625_suspend, anx7625_resume)
+ SET_RUNTIME_PM_OPS(anx7625_runtime_pm_suspend,
+ anx7625_runtime_pm_resume, NULL)
+};
+
static int anx7625_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1778,8 +1785,6 @@ static int anx7625_i2c_probe(struct i2c_client *client,
}
anx7625_init_gpio(platform);
- atomic_set(&platform->power_status, 0);
-
mutex_init(&platform->lock);
platform->pdata.intp_irq = client->irq;
@@ -1809,9 +1814,11 @@ static int anx7625_i2c_probe(struct i2c_client *client,
goto free_wq;
}
- if (platform->pdata.low_power_mode == 0) {
+ pm_runtime_enable(dev);
+
+ if (!platform->pdata.low_power_mode) {
anx7625_disable_pd_protocol(platform);
- atomic_set(&platform->power_status, 1);
+ pm_runtime_get_sync(dev);
}
/* Add work function */
@@ -1847,6 +1854,9 @@ static int anx7625_i2c_remove(struct i2c_client *client)
if (platform->pdata.intp_irq)
destroy_workqueue(platform->workqueue);
+ if (!platform->pdata.low_power_mode)
+ pm_runtime_put_sync_suspend(&client->dev);
+
anx7625_unregister_i2c_dummy_clients(platform);
kfree(platform);
@@ -1869,6 +1879,7 @@ static struct i2c_driver anx7625_driver = {
.driver = {
.name = "anx7625",
.of_match_table = anx_match_table,
+ .pm = &anx7625_pm_ops,
},
.probe = anx7625_i2c_probe,
.remove = anx7625_i2c_remove,
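The anx7625 conversion above replaces the hand-rolled power_status refcount with runtime PM: every path that needs the chip awake brackets its work with a get/put pair, and the actual power sequencing lives in the runtime callbacks. The resulting pattern, sketched:

        pm_runtime_get_sync(dev);  /* powers up via anx7625_runtime_pm_resume() if suspended */
        /* ... talk to the chip (EDID read, DP start, ...) ... */
        pm_runtime_put_sync(dev);  /* may power down via anx7625_runtime_pm_suspend() */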
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index e4a086b3a3d7..034c3840028f 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -369,7 +369,6 @@ struct anx7625_i2c_client {
struct anx7625_data {
struct anx7625_platform_data pdata;
- atomic_t power_status;
int hpd_status;
int hpd_high_cnt;
/* Lock for work queue */
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 0cd8f40fb690..5530fbf64f1e 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2140,7 +2140,7 @@ cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
if (!cdns_mhdp_state)
return NULL;
- __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
+ __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
return &cdns_mhdp_state->base;
}
@@ -2478,9 +2478,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
pm_runtime_disable(dev);
goto clk_disable;
}
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 76373e31df92..b31281f76117 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1028,7 +1028,7 @@ static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
struct mipi_dsi_packet packet;
int ret, i, tx_len, rx_len;
- ret = pm_runtime_get_sync(host->dev);
+ ret = pm_runtime_resume_and_get(host->dev);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
new file mode 100644
index 000000000000..0f6d907432e3
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CrOS EC ANX7688 HDMI->DP bridge driver
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_print.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+/* Register addresses */
+#define ANX7688_VENDOR_ID_REG 0x00
+#define ANX7688_DEVICE_ID_REG 0x02
+
+#define ANX7688_FW_VERSION_REG 0x80
+
+#define ANX7688_DP_BANDWIDTH_REG 0x85
+#define ANX7688_DP_LANE_COUNT_REG 0x86
+
+#define ANX7688_VENDOR_ID 0x1f29
+#define ANX7688_DEVICE_ID 0x7688
+
+/* First supported firmware version (0.85) */
+#define ANX7688_MINIMUM_FW_VERSION 0x0085
+
+static const struct regmap_config cros_ec_anx7688_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+struct cros_ec_anx7688 {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct drm_bridge bridge;
+ bool filter;
+};
+
+static inline struct cros_ec_anx7688 *
+bridge_to_cros_ec_anx7688(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct cros_ec_anx7688, bridge);
+}
+
+static bool cros_ec_anx7688_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct cros_ec_anx7688 *anx = bridge_to_cros_ec_anx7688(bridge);
+ int totalbw, requiredbw;
+ u8 dpbw, lanecount;
+ u8 regs[2];
+ int ret;
+
+ if (!anx->filter)
+ return true;
+
+ /* Read both regs 0x85 (bandwidth) and 0x86 (lane count). */
+ ret = regmap_bulk_read(anx->regmap, ANX7688_DP_BANDWIDTH_REG, regs, 2);
+ if (ret < 0) {
+ DRM_ERROR("Failed to read bandwidth/lane count\n");
+ return false;
+ }
+ dpbw = regs[0];
+ lanecount = regs[1];
+
+ /* Maximum 0x19 bandwidth (6.75 Gbps Turbo mode), 2 lanes */
+ if (dpbw > 0x19 || lanecount > 2) {
+ DRM_ERROR("Invalid bandwidth/lane count (%02x/%d)\n", dpbw,
+ lanecount);
+ return false;
+ }
+
+ /* Compute available bandwidth (kHz) */
+ totalbw = dpbw * lanecount * 270000 * 8 / 10;
+
+ /* Required bandwidth (8 bpc, kHz) */
+ requiredbw = mode->clock * 8 * 3;
+
+ DRM_DEBUG_KMS("DP bandwidth: %d kHz (%02x/%d); mode requires %d kHz\n",
+ totalbw, dpbw, lanecount, requiredbw);
+
+ if (totalbw == 0) {
+ DRM_ERROR("Bandwidth/lane count are 0, not rejecting modes\n");
+ return true;
+ }
+
+ return totalbw >= requiredbw;
+}
+
+static const struct drm_bridge_funcs cros_ec_anx7688_bridge_funcs = {
+ .mode_fixup = cros_ec_anx7688_bridge_mode_fixup,
+};
+
+static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct cros_ec_anx7688 *anx7688;
+ u16 vendor, device, fw_version;
+ u8 buffer[4];
+ int ret;
+
+ anx7688 = devm_kzalloc(dev, sizeof(*anx7688), GFP_KERNEL);
+ if (!anx7688)
+ return -ENOMEM;
+
+ anx7688->client = client;
+ i2c_set_clientdata(client, anx7688);
+
+ anx7688->regmap = devm_regmap_init_i2c(client, &cros_ec_anx7688_regmap_config);
+ if (IS_ERR(anx7688->regmap)) {
+ ret = PTR_ERR(anx7688->regmap);
+ dev_err(dev, "regmap i2c init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Read both vendor and device id (4 bytes). */
+ ret = regmap_bulk_read(anx7688->regmap, ANX7688_VENDOR_ID_REG,
+ buffer, 4);
+ if (ret) {
+ dev_err(dev, "Failed to read chip vendor/device id\n");
+ return ret;
+ }
+
+ vendor = (u16)buffer[1] << 8 | buffer[0];
+ device = (u16)buffer[3] << 8 | buffer[2];
+ if (vendor != ANX7688_VENDOR_ID || device != ANX7688_DEVICE_ID) {
+ dev_err(dev, "Invalid vendor/device id %04x/%04x\n",
+ vendor, device);
+ return -ENODEV;
+ }
+
+ ret = regmap_bulk_read(anx7688->regmap, ANX7688_FW_VERSION_REG,
+ buffer, 2);
+ if (ret) {
+ dev_err(dev, "Failed to read firmware version\n");
+ return ret;
+ }
+
+ fw_version = (u16)buffer[0] << 8 | buffer[1];
+ dev_info(dev, "ANX7688 firmware version 0x%04x\n", fw_version);
+
+ anx7688->bridge.of_node = dev->of_node;
+
+ /* FW version >= 0.85 supports bandwidth/lane count registers */
+ if (fw_version >= ANX7688_MINIMUM_FW_VERSION)
+ anx7688->filter = true;
+ else
+ /* Warn, but do not fail, for backwards compatibility */
+ DRM_WARN("Old ANX7688 FW version (0x%04x), not filtering\n",
+ fw_version);
+
+ anx7688->bridge.funcs = &cros_ec_anx7688_bridge_funcs;
+ drm_bridge_add(&anx7688->bridge);
+
+ return 0;
+}
+
+static int cros_ec_anx7688_bridge_remove(struct i2c_client *client)
+{
+ struct cros_ec_anx7688 *anx7688 = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&anx7688->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id cros_ec_anx7688_bridge_match_table[] = {
+ { .compatible = "google,cros-ec-anx7688" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cros_ec_anx7688_bridge_match_table);
+
+static struct i2c_driver cros_ec_anx7688_bridge_driver = {
+ .probe_new = cros_ec_anx7688_bridge_probe,
+ .remove = cros_ec_anx7688_bridge_remove,
+ .driver = {
+ .name = "cros-ec-anx7688-bridge",
+ .of_match_table = cros_ec_anx7688_bridge_match_table,
+ },
+};
+
+module_i2c_driver(cros_ec_anx7688_bridge_driver);
+
+MODULE_DESCRIPTION("ChromeOS EC ANX7688 HDMI->DP bridge driver");
+MODULE_AUTHOR("Nicolas Boichat <[email protected]>");
+MODULE_AUTHOR("Enric Balletbo i Serra <[email protected]>");
+MODULE_LICENSE("GPL");
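mode_fixup() in the new driver filters modes against the link budget reported by the EC: each bandwidth-code unit is 270000 kHz per lane, derated by 8/10 for 8b/10b coding, while a mode needs pixel clock x 24 (8 bpc, 3 components). Worked through for the maximum 0x19 (= 25) code on 2 lanes:

        total     = 25 * 2 * 270000 * 8 / 10 = 10800000 kHz
        1080p60   = 148500 * 24 =  3564000 kHz  -> accepted
        4K60 8bpc = 594000 * 24 = 14256000 kHz  -> rejected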
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index d8a60691fd32..7149ed40af83 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -898,14 +898,14 @@ static int it66121_probe(struct i2c_client *client,
return -ENXIO;
}
- ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
- if (!ep)
- return -EINVAL;
-
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+ if (!ep)
+ return -EINVAL;
+
ctx->dev = dev;
ctx->client = client;
@@ -943,7 +943,7 @@ static int it66121_probe(struct i2c_client *client,
ctx->regmap = devm_regmap_init_i2c(client, &it66121_regmap_config);
if (IS_ERR(ctx->regmap)) {
ite66121_power_off(ctx);
- return PTR_ERR(ctx);
+ return PTR_ERR(ctx->regmap);
}
regmap_read(ctx->regmap, IT66121_VENDOR_ID0_REG, &vendor_ids[0]);
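The second it66121 hunk fixes a classic slip: on regmap failure the code returned PTR_ERR(ctx), the valid context pointer, instead of PTR_ERR(ctx->regmap), so probe failed with a nonsense error code. The safe pattern is to derive the error from the pointer that actually failed:

        ctx->regmap = devm_regmap_init_i2c(client, &it66121_regmap_config);
        if (IS_ERR(ctx->regmap))
                return PTR_ERR(ctx->regmap);    /* error from the failing pointer */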
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 443f1b47e031..76c720b535fb 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -7,8 +7,6 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <drm/drm_probe_helper.h>
@@ -755,7 +753,6 @@ static struct i2c_driver lt8912_i2c_driver = {
.driver = {
.name = "lt8912",
.of_match_table = lt8912_dt_match,
- .owner = THIS_MODULE,
},
.probe = lt8912_probe,
.remove = lt8912_remove,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
new file mode 100644
index 000000000000..750f2172ef08
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI SN65DSI83,84,85 driver
+ *
+ * Currently supported:
+ * - SN65DSI83
+ * = 1x Single-link DSI ~ 1x Single-link LVDS
+ * - Supported
+ * - Single-link LVDS mode tested
+ * - SN65DSI84
+ * = 1x Single-link DSI ~ 2x Single-link or 1x Dual-link LVDS
+ * - Supported
+ * - Dual-link LVDS mode tested
+ * - 2x Single-link LVDS mode unsupported
+ * (should be easy to add by someone who has the HW)
+ * - SN65DSI85
+ * = 2x Single-link or 1x Dual-link DSI ~ 2x Single-link or 1x Dual-link LVDS
+ * - Unsupported
+ * (should be easy to add by someone who has the HW)
+ *
+ * Copyright (C) 2021 Marek Vasut <[email protected]>
+ *
+ * Based on previous work of:
+ * Valentin Raevsky <[email protected]>
+ * Philippe Schenker <[email protected]>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* ID registers */
+#define REG_ID(n) (0x00 + (n))
+/* Reset and clock registers */
+#define REG_RC_RESET 0x09
+#define REG_RC_RESET_SOFT_RESET BIT(0)
+#define REG_RC_LVDS_PLL 0x0a
+#define REG_RC_LVDS_PLL_PLL_EN_STAT BIT(7)
+#define REG_RC_LVDS_PLL_LVDS_CLK_RANGE(n) (((n) & 0x7) << 1)
+#define REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY BIT(0)
+#define REG_RC_DSI_CLK 0x0b
+#define REG_RC_DSI_CLK_DSI_CLK_DIVIDER(n) (((n) & 0x1f) << 3)
+#define REG_RC_DSI_CLK_REFCLK_MULTIPLIER(n) ((n) & 0x3)
+#define REG_RC_PLL_EN 0x0d
+#define REG_RC_PLL_EN_PLL_EN BIT(0)
+/* DSI registers */
+#define REG_DSI_LANE 0x10
+#define REG_DSI_LANE_LEFT_RIGHT_PIXELS BIT(7) /* DSI85-only */
+#define REG_DSI_LANE_DSI_CHANNEL_MODE_DUAL 0 /* DSI85-only */
+#define REG_DSI_LANE_DSI_CHANNEL_MODE_2SINGLE BIT(6) /* DSI85-only */
+#define REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE BIT(5)
+#define REG_DSI_LANE_CHA_DSI_LANES(n) (((n) & 0x3) << 3)
+#define REG_DSI_LANE_CHB_DSI_LANES(n) (((n) & 0x3) << 1)
+#define REG_DSI_LANE_SOT_ERR_TOL_DIS BIT(0)
+#define REG_DSI_EQ 0x11
+#define REG_DSI_EQ_CHA_DSI_DATA_EQ(n) (((n) & 0x3) << 6)
+#define REG_DSI_EQ_CHA_DSI_CLK_EQ(n) (((n) & 0x3) << 2)
+#define REG_DSI_CLK 0x12
+#define REG_DSI_CLK_CHA_DSI_CLK_RANGE(n) ((n) & 0xff)
+/* LVDS registers */
+#define REG_LVDS_FMT 0x18
+#define REG_LVDS_FMT_DE_NEG_POLARITY BIT(7)
+#define REG_LVDS_FMT_HS_NEG_POLARITY BIT(6)
+#define REG_LVDS_FMT_VS_NEG_POLARITY BIT(5)
+#define REG_LVDS_FMT_LVDS_LINK_CFG BIT(4) /* 0:AB 1:A-only */
+#define REG_LVDS_FMT_CHA_24BPP_MODE BIT(3)
+#define REG_LVDS_FMT_CHB_24BPP_MODE BIT(2)
+#define REG_LVDS_FMT_CHA_24BPP_FORMAT1 BIT(1)
+#define REG_LVDS_FMT_CHB_24BPP_FORMAT1 BIT(0)
+#define REG_LVDS_VCOM 0x19
+#define REG_LVDS_VCOM_CHA_LVDS_VOCM BIT(6)
+#define REG_LVDS_VCOM_CHB_LVDS_VOCM BIT(4)
+#define REG_LVDS_VCOM_CHA_LVDS_VOD_SWING(n) (((n) & 0x3) << 2)
+#define REG_LVDS_VCOM_CHB_LVDS_VOD_SWING(n) ((n) & 0x3)
+#define REG_LVDS_LANE 0x1a
+#define REG_LVDS_LANE_EVEN_ODD_SWAP BIT(6)
+#define REG_LVDS_LANE_CHA_REVERSE_LVDS BIT(5)
+#define REG_LVDS_LANE_CHB_REVERSE_LVDS BIT(4)
+#define REG_LVDS_LANE_CHA_LVDS_TERM BIT(1)
+#define REG_LVDS_LANE_CHB_LVDS_TERM BIT(0)
+#define REG_LVDS_CM 0x1b
+#define REG_LVDS_CM_CHA_LVDS_CM_ADJUST(n) (((n) & 0x3) << 4)
+#define REG_LVDS_CM_CHB_LVDS_CM_ADJUST(n) ((n) & 0x3)
+/* Video registers */
+#define REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW 0x20
+#define REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH 0x21
+#define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW 0x24
+#define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH 0x25
+#define REG_VID_CHA_SYNC_DELAY_LOW 0x28
+#define REG_VID_CHA_SYNC_DELAY_HIGH 0x29
+#define REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW 0x2c
+#define REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH 0x2d
+#define REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW 0x30
+#define REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH 0x31
+#define REG_VID_CHA_HORIZONTAL_BACK_PORCH 0x34
+#define REG_VID_CHA_VERTICAL_BACK_PORCH 0x36
+#define REG_VID_CHA_HORIZONTAL_FRONT_PORCH 0x38
+#define REG_VID_CHA_VERTICAL_FRONT_PORCH 0x3a
+#define REG_VID_CHA_TEST_PATTERN 0x3c
+/* IRQ registers */
+#define REG_IRQ_GLOBAL 0xe0
+#define REG_IRQ_GLOBAL_IRQ_EN BIT(0)
+#define REG_IRQ_EN 0xe1
+#define REG_IRQ_EN_CHA_SYNCH_ERR_EN BIT(7)
+#define REG_IRQ_EN_CHA_CRC_ERR_EN BIT(6)
+#define REG_IRQ_EN_CHA_UNC_ECC_ERR_EN BIT(5)
+#define REG_IRQ_EN_CHA_COR_ECC_ERR_EN BIT(4)
+#define REG_IRQ_EN_CHA_LLP_ERR_EN BIT(3)
+#define REG_IRQ_EN_CHA_SOT_BIT_ERR_EN BIT(2)
+#define REG_IRQ_EN_CHA_PLL_UNLOCK_EN BIT(0)
+#define REG_IRQ_STAT 0xe5
+#define REG_IRQ_STAT_CHA_SYNCH_ERR BIT(7)
+#define REG_IRQ_STAT_CHA_CRC_ERR BIT(6)
+#define REG_IRQ_STAT_CHA_UNC_ECC_ERR BIT(5)
+#define REG_IRQ_STAT_CHA_COR_ECC_ERR BIT(4)
+#define REG_IRQ_STAT_CHA_LLP_ERR BIT(3)
+#define REG_IRQ_STAT_CHA_SOT_BIT_ERR BIT(2)
+#define REG_IRQ_STAT_CHA_PLL_UNLOCK BIT(0)
+
+enum sn65dsi83_model {
+ MODEL_SN65DSI83,
+ MODEL_SN65DSI84,
+};
+
+struct sn65dsi83 {
+ struct drm_bridge bridge;
+ struct drm_display_mode mode;
+ struct device *dev;
+ struct regmap *regmap;
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ struct drm_bridge *panel_bridge;
+ struct gpio_desc *enable_gpio;
+ int dsi_lanes;
+ bool lvds_dual_link;
+ bool lvds_dual_link_even_odd_swap;
+ bool lvds_format_24bpp;
+ bool lvds_format_jeida;
+};
+
+static const struct regmap_range sn65dsi83_readable_ranges[] = {
+ regmap_reg_range(REG_ID(0), REG_ID(8)),
+ regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_DSI_CLK),
+ regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN),
+ regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK),
+ regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM),
+ regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+ REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+ REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH),
+ regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW,
+ REG_VID_CHA_SYNC_DELAY_HIGH),
+ regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+ REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+ REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+ REG_VID_CHA_HORIZONTAL_BACK_PORCH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH,
+ REG_VID_CHA_VERTICAL_BACK_PORCH),
+ regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+ REG_VID_CHA_HORIZONTAL_FRONT_PORCH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH,
+ REG_VID_CHA_VERTICAL_FRONT_PORCH),
+ regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN),
+ regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN),
+ regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_readable_table = {
+ .yes_ranges = sn65dsi83_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sn65dsi83_readable_ranges),
+};
+
+static const struct regmap_range sn65dsi83_writeable_ranges[] = {
+ regmap_reg_range(REG_RC_RESET, REG_RC_DSI_CLK),
+ regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN),
+ regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK),
+ regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM),
+ regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+ REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+ REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH),
+ regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW,
+ REG_VID_CHA_SYNC_DELAY_HIGH),
+ regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+ REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+ REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH),
+ regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+ REG_VID_CHA_HORIZONTAL_BACK_PORCH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH,
+ REG_VID_CHA_VERTICAL_BACK_PORCH),
+ regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+ REG_VID_CHA_HORIZONTAL_FRONT_PORCH),
+ regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH,
+ REG_VID_CHA_VERTICAL_FRONT_PORCH),
+ regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN),
+ regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN),
+ regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_writeable_table = {
+ .yes_ranges = sn65dsi83_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sn65dsi83_writeable_ranges),
+};
+
+static const struct regmap_range sn65dsi83_volatile_ranges[] = {
+ regmap_reg_range(REG_RC_RESET, REG_RC_RESET),
+ regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_LVDS_PLL),
+ regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT),
+};
+
+static const struct regmap_access_table sn65dsi83_volatile_table = {
+ .yes_ranges = sn65dsi83_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sn65dsi83_volatile_ranges),
+};
+
+static const struct regmap_config sn65dsi83_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &sn65dsi83_readable_table,
+ .wr_table = &sn65dsi83_writeable_table,
+ .volatile_table = &sn65dsi83_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = REG_IRQ_STAT,
+};
+
+static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sn65dsi83, bridge);
+}
+
+static int sn65dsi83_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ struct device *dev = ctx->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret = 0;
+
+ const struct mipi_dsi_device_info info = {
+ .type = "sn65dsi83",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ host = of_find_mipi_dsi_host_by_node(ctx->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ return dev_err_probe(dev, PTR_ERR(dsi),
+ "failed to create dsi device\n");
+ }
+
+ ctx->dsi = dsi;
+
+ dsi->lanes = ctx->dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ &ctx->bridge, flags);
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+ return ret;
+}
+
+static void sn65dsi83_pre_enable(struct drm_bridge *bridge)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+ /*
+ * Reset the chip, pull EN line low for t_reset=10ms,
+ * then high for t_en=1ms.
+ */
+ regcache_mark_dirty(ctx->regmap);
+ gpiod_set_value(ctx->enable_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value(ctx->enable_gpio, 1);
+ usleep_range(1000, 1100);
+}
+
+static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx)
+{
+ /*
+ * The encoding of the LVDS_CLK_RANGE is as follows:
+ * 000 - 25 MHz <= LVDS_CLK < 37.5 MHz
+ * 001 - 37.5 MHz <= LVDS_CLK < 62.5 MHz
+ * 010 - 62.5 MHz <= LVDS_CLK < 87.5 MHz
+ * 011 - 87.5 MHz <= LVDS_CLK < 112.5 MHz
+ * 100 - 112.5 MHz <= LVDS_CLK < 137.5 MHz
+ * 101 - 137.5 MHz <= LVDS_CLK <= 154 MHz
+ * which is a range of 12.5MHz..162.5MHz in 25MHz steps, except that
+ * the ends of the ranges are clamped to the supported range. Since
+ * sn65dsi83_mode_valid() already filters the valid modes and limits
+ * the clock to 25..154 MHz, the range calculation can be simplified
+ * as follows:
+ */
+ int mode_clock = ctx->mode.clock;
+
+ if (ctx->lvds_dual_link)
+ mode_clock /= 2;
+
+ return (mode_clock - 12500) / 25000;
+}
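For illustration, a worked instance of the simplified calculation above; the mode values are hypothetical and not taken from any particular panel:

    /* Hypothetical example: 1080p60 (148500 kHz pixel clock) on dual-link
     * LVDS, so each channel carries half the pixel clock.
     */
    int mode_clock = 148500 / 2;             /* 74250 kHz per channel */
    u8 range = (mode_clock - 12500) / 25000; /* = 2 -> 62.5..87.5 MHz bin */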
+
+static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx)
+{
+ /*
+ * The encoding of the CHA_DSI_CLK_RANGE is as follows:
+ * 0x00 through 0x07 - Reserved
+ * 0x08 - 40 <= DSI_CLK < 45 MHz
+ * 0x09 - 45 <= DSI_CLK < 50 MHz
+ * ...
+ * 0x63 - 495 <= DSI_CLK < 500 MHz
+ * 0x64 - 500 MHz
+ * 0x65 through 0xFF - Reserved
+ * which is DSI clock in 5 MHz steps, clamped to 40..500 MHz.
+ * The DSI clock is calculated as:
+ * DSI_CLK = mode clock * bpp / dsi_data_lanes / 2
+ * the 2 is there because the bus is DDR.
+ */
+ return DIV_ROUND_UP(clamp((unsigned int)ctx->mode.clock *
+ mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) /
+ ctx->dsi_lanes / 2, 40000U, 500000U), 5000U);
+}
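As a worked example of this formula (the mode and lane count here are hypothetical): a 148500 kHz pixel clock at 24 bpp over 4 DSI lanes gives DSI_CLK = 148500 * 24 / 4 / 2 = 445500 kHz, which is within the 40..500 MHz clamp, so the register value is DIV_ROUND_UP(445500, 5000) = 90 = 0x5a:

    unsigned int dsi_clk = 148500 * 24 / 4 / 2;        /* 445500 kHz */
    u8 range = DIV_ROUND_UP(clamp(dsi_clk, 40000U, 500000U), 5000U); /* 0x5a */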
+
+static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
+{
+ /* The divider is (DSI_CLK / LVDS_CLK) - 1, which really is: */
+ unsigned int dsi_div = mipi_dsi_pixel_format_to_bpp(ctx->dsi->format);
+
+ dsi_div /= ctx->dsi_lanes;
+
+ if (!ctx->lvds_dual_link)
+ dsi_div /= 2;
+
+ return dsi_div - 1;
+}
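A quick sanity check of the divider with hypothetical numbers, sketched below:

    /* Hypothetical: 24 bpp over 4 DSI lanes.
     * Single-link LVDS: 24 / 4 / 2 = 3 (DSI clock is 3x the LVDS clock),
     *                   so the register value is 3 - 1 = 2.
     * Dual-link LVDS:   24 / 4     = 6 (LVDS clock is halved per channel),
     *                   so the register value is 6 - 1 = 5.
     */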
+
+static void sn65dsi83_enable(struct drm_bridge *bridge)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ unsigned int pval;
+ u16 val;
+ int ret;
+
+ /* Clear reset, disable PLL */
+ regmap_write(ctx->regmap, REG_RC_RESET, 0x00);
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+
+ /* Reference clock derived from DSI link clock. */
+ regmap_write(ctx->regmap, REG_RC_LVDS_PLL,
+ REG_RC_LVDS_PLL_LVDS_CLK_RANGE(sn65dsi83_get_lvds_range(ctx)) |
+ REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY);
+ regmap_write(ctx->regmap, REG_DSI_CLK,
+ REG_DSI_CLK_CHA_DSI_CLK_RANGE(sn65dsi83_get_dsi_range(ctx)));
+ regmap_write(ctx->regmap, REG_RC_DSI_CLK,
+ REG_RC_DSI_CLK_DSI_CLK_DIVIDER(sn65dsi83_get_dsi_div(ctx)));
+
+ /* Set number of DSI lanes and LVDS link config. */
+ regmap_write(ctx->regmap, REG_DSI_LANE,
+ REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE |
+ REG_DSI_LANE_CHA_DSI_LANES(~(ctx->dsi_lanes - 1)) |
+ /* CHB is DSI85-only, set to default on DSI83/DSI84 */
+ REG_DSI_LANE_CHB_DSI_LANES(3));
+ /* No equalization. */
+ regmap_write(ctx->regmap, REG_DSI_EQ, 0x00);
+
+ /* Set up sync signal polarity. */
+ val = (ctx->mode.flags & DRM_MODE_FLAG_NHSYNC ?
+ REG_LVDS_FMT_HS_NEG_POLARITY : 0) |
+ (ctx->mode.flags & DRM_MODE_FLAG_NVSYNC ?
+ REG_LVDS_FMT_VS_NEG_POLARITY : 0);
+
+ /* Set up bits-per-pixel, 18bpp or 24bpp. */
+ if (ctx->lvds_format_24bpp) {
+ val |= REG_LVDS_FMT_CHA_24BPP_MODE;
+ if (ctx->lvds_dual_link)
+ val |= REG_LVDS_FMT_CHB_24BPP_MODE;
+ }
+
+ /* Set up LVDS format, JEIDA/Format 1 or SPWG/Format 2 */
+ if (ctx->lvds_format_jeida) {
+ val |= REG_LVDS_FMT_CHA_24BPP_FORMAT1;
+ if (ctx->lvds_dual_link)
+ val |= REG_LVDS_FMT_CHB_24BPP_FORMAT1;
+ }
+
+ /* Set up LVDS output config (DSI84,DSI85) */
+ if (!ctx->lvds_dual_link)
+ val |= REG_LVDS_FMT_LVDS_LINK_CFG;
+
+ regmap_write(ctx->regmap, REG_LVDS_FMT, val);
+ regmap_write(ctx->regmap, REG_LVDS_VCOM, 0x05);
+ regmap_write(ctx->regmap, REG_LVDS_LANE,
+ (ctx->lvds_dual_link_even_odd_swap ?
+ REG_LVDS_LANE_EVEN_ODD_SWAP : 0) |
+ REG_LVDS_LANE_CHA_LVDS_TERM |
+ REG_LVDS_LANE_CHB_LVDS_TERM);
+ regmap_write(ctx->regmap, REG_LVDS_CM, 0x00);
+
+ val = cpu_to_le16(ctx->mode.hdisplay);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW,
+ &val, 2);
+ val = cpu_to_le16(ctx->mode.vdisplay);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW,
+ &val, 2);
+ /* Use a sync delay of 32 + 1 pixel clocks to ensure proper operation */
+ val = cpu_to_le16(32 + 1);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &val, 2);
+ val = cpu_to_le16(ctx->mode.hsync_end - ctx->mode.hsync_start);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
+ &val, 2);
+ val = cpu_to_le16(ctx->mode.vsync_end - ctx->mode.vsync_start);
+ regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
+ &val, 2);
+ regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH,
+ ctx->mode.htotal - ctx->mode.hsync_end);
+ regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH,
+ ctx->mode.vtotal - ctx->mode.vsync_end);
+ regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
+ ctx->mode.hsync_start - ctx->mode.hdisplay);
+ regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH,
+ ctx->mode.vsync_start - ctx->mode.vdisplay);
+ regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00);
+
+ /* Enable PLL */
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, REG_RC_PLL_EN_PLL_EN);
+ usleep_range(3000, 4000);
+ ret = regmap_read_poll_timeout(ctx->regmap, REG_RC_LVDS_PLL, pval,
+ pval & REG_RC_LVDS_PLL_PLL_EN_STAT,
+ 1000, 100000);
+ if (ret) {
+ dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
+ /* On failure, disable PLL again and exit. */
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+ return;
+ }
+
+ /* Trigger reset after CSR register update. */
+ regmap_write(ctx->regmap, REG_RC_RESET, REG_RC_RESET_SOFT_RESET);
+
+ /* Clear all errors that got asserted during initialization. */
+ regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
+ regmap_write(ctx->regmap, REG_IRQ_STAT, pval);
+}
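For reference, the porch and sync-width writes above follow the standard DRM timing decomposition; a minimal sketch of the field relationships (these are the usual struct drm_display_mode fields, vertical timings are analogous):

    /* Standard DRM horizontal timing decomposition:
     *   sync width  = hsync_end - hsync_start
     *   back porch  = htotal - hsync_end
     *   front porch = hsync_start - hdisplay
     */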
+
+static void sn65dsi83_disable(struct drm_bridge *bridge)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+ /* Clear reset, disable PLL */
+ regmap_write(ctx->regmap, REG_RC_RESET, 0x00);
+ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+}
+
+static void sn65dsi83_post_disable(struct drm_bridge *bridge)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+ /* Put the chip in reset, pull EN line low. */
+ gpiod_set_value(ctx->enable_gpio, 0);
+}
+
+static enum drm_mode_status
+sn65dsi83_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ /* LVDS output clock range 25..154 MHz */
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void sn65dsi83_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adj)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+
+ ctx->mode = *adj;
+}
+
+static bool sn65dsi83_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ u32 input_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_device *ddev = encoder->dev;
+ struct drm_connector *connector;
+
+ /* The DSI format is always RGB888_1X24 */
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ switch (connector->display_info.bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ ctx->lvds_format_24bpp = false;
+ ctx->lvds_format_jeida = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ ctx->lvds_format_24bpp = true;
+ ctx->lvds_format_jeida = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ ctx->lvds_format_24bpp = true;
+ ctx->lvds_format_jeida = false;
+ break;
+ default:
+ /*
+ * Some bridge drivers still don't set the correct
+ * LVDS bus pixel format; use the SPWG24 default
+ * format until those are fixed.
+ */
+ ctx->lvds_format_24bpp = true;
+ ctx->lvds_format_jeida = false;
+ dev_warn(ctx->dev,
+ "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n",
+ connector->display_info.bus_formats[0]);
+ break;
+ }
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &input_bus_format, 1);
+ }
+
+ return true;
+}
+
+static const struct drm_bridge_funcs sn65dsi83_funcs = {
+ .attach = sn65dsi83_attach,
+ .pre_enable = sn65dsi83_pre_enable,
+ .enable = sn65dsi83_enable,
+ .disable = sn65dsi83_disable,
+ .post_disable = sn65dsi83_post_disable,
+ .mode_valid = sn65dsi83_mode_valid,
+ .mode_set = sn65dsi83_mode_set,
+ .mode_fixup = sn65dsi83_mode_fixup,
+};
+
+static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
+{
+ struct drm_bridge *panel_bridge;
+ struct device *dev = ctx->dev;
+ struct device_node *endpoint;
+ struct drm_panel *panel;
+ int ret;
+
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+ ctx->dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
+ ctx->host_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+
+ if (ctx->dsi_lanes < 1 || ctx->dsi_lanes > 4)
+ return -EINVAL;
+ if (!ctx->host_node)
+ return -ENODEV;
+
+ ctx->lvds_dual_link = false;
+ ctx->lvds_dual_link_even_odd_swap = false;
+ if (model != MODEL_SN65DSI83) {
+ struct device_node *port2, *port3;
+ int dual_link;
+
+ port2 = of_graph_get_port_by_id(dev->of_node, 2);
+ port3 = of_graph_get_port_by_id(dev->of_node, 3);
+ dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3);
+ of_node_put(port2);
+ of_node_put(port3);
+
+ if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) {
+ ctx->lvds_dual_link = true;
+ /* Odd pixels to LVDS Channel A, even pixels to B */
+ ctx->lvds_dual_link_even_odd_swap = false;
+ } else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
+ ctx->lvds_dual_link = true;
+ /* Even pixels to LVDS Channel A, odd pixels to B */
+ ctx->lvds_dual_link_even_odd_swap = true;
+ }
+ }
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
+ if (ret < 0)
+ return ret;
+ if (panel) {
+ panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+ }
+
+ ctx->panel_bridge = panel_bridge;
+
+ return 0;
+}
+
+static int sn65dsi83_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ enum sn65dsi83_model model;
+ struct sn65dsi83 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ if (dev->of_node) {
+ model = (enum sn65dsi83_model)(uintptr_t)
+ of_device_get_match_data(dev);
+ } else {
+ model = id->driver_data;
+ }
+
+ ctx->enable_gpio = devm_gpiod_get(ctx->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->enable_gpio))
+ return PTR_ERR(ctx->enable_gpio);
+
+ ret = sn65dsi83_parse_dt(ctx, model);
+ if (ret)
+ return ret;
+
+ ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
+
+ dev_set_drvdata(dev, ctx);
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &sn65dsi83_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ return 0;
+}
+
+static int sn65dsi83_remove(struct i2c_client *client)
+{
+ struct sn65dsi83 *ctx = i2c_get_clientdata(client);
+
+ mipi_dsi_detach(ctx->dsi);
+ mipi_dsi_device_unregister(ctx->dsi);
+ drm_bridge_remove(&ctx->bridge);
+ of_node_put(ctx->host_node);
+
+ return 0;
+}
+
+static struct i2c_device_id sn65dsi83_id[] = {
+ { "ti,sn65dsi83", MODEL_SN65DSI83 },
+ { "ti,sn65dsi84", MODEL_SN65DSI84 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, sn65dsi83_id);
+
+static const struct of_device_id sn65dsi83_match_table[] = {
+ { .compatible = "ti,sn65dsi83", .data = (void *)MODEL_SN65DSI83 },
+ { .compatible = "ti,sn65dsi84", .data = (void *)MODEL_SN65DSI84 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sn65dsi83_match_table);
+
+static struct i2c_driver sn65dsi83_driver = {
+ .probe = sn65dsi83_probe,
+ .remove = sn65dsi83_remove,
+ .id_table = sn65dsi83_id,
+ .driver = {
+ .name = "sn65dsi83",
+ .of_match_table = sn65dsi83_match_table,
+ },
+};
+module_i2c_driver(sn65dsi83_driver);
+
+MODULE_AUTHOR("Marek Vasut <[email protected]>");
+MODULE_DESCRIPTION("TI SN65DSI83 DSI to LVDS bridge driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index bb0a0e1c6341..45a2969afb2b 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1042,7 +1042,9 @@ exit:
pm_runtime_mark_last_busy(pdata->dev);
pm_runtime_put_autosuspend(pdata->dev);
- return ret ? ret : len;
+ if (ret)
+ return ret;
+ return len;
}
static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 4805726b34ac..ae8e4d76209c 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -685,6 +685,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev,
dmah->size,
dmah->vaddr,
dmah->busaddr);
+ kfree(dmah);
}
}
kfree(entry->seglist);
@@ -984,8 +985,16 @@ int drm_legacy_addbufs_pci(struct drm_device *dev,
while (entry->buf_count < count) {
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
- if (!dmah)
+ if (!dmah) {
+ /* Set count correctly so we free the proper amount. */
+ entry->buf_count = count;
+ entry->seg_count = count;
+ drm_cleanup_buf_error(dev, entry);
+ kfree(temp_pagelist);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
return -ENOMEM;
+ }
dmah->size = total;
dmah->vaddr = dma_alloc_coherent(dev->dev,
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 79a50ef1250f..546599f19a93 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -28,6 +28,7 @@
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
+#include <linux/dma-buf-map.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/mem_encrypt.h>
@@ -35,6 +36,9 @@
#include <drm/drm_cache.h>
+/* A small bounce buffer that fits on the stack. */
+#define MEMCPY_BOUNCE_SIZE 128
+
#if defined(CONFIG_X86)
#include <asm/smp.h>
@@ -209,3 +213,147 @@ bool drm_need_swiotlb(int dma_bits)
return max_iomem > ((u64)1 << dma_bits);
}
EXPORT_SYMBOL(drm_need_swiotlb);
+
+static void memcpy_fallback(struct dma_buf_map *dst,
+ const struct dma_buf_map *src,
+ unsigned long len)
+{
+ if (!dst->is_iomem && !src->is_iomem) {
+ memcpy(dst->vaddr, src->vaddr, len);
+ } else if (!src->is_iomem) {
+ dma_buf_map_memcpy_to(dst, src->vaddr, len);
+ } else if (!dst->is_iomem) {
+ memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
+ } else {
+ /*
+ * Bounce size is not performance tuned, but using a
+ * bounce buffer like this is significantly faster than
+ * resorting to ioreadxx() + iowritexx().
+ */
+ char bounce[MEMCPY_BOUNCE_SIZE];
+ void __iomem *_src = src->vaddr_iomem;
+ void __iomem *_dst = dst->vaddr_iomem;
+
+ while (len >= MEMCPY_BOUNCE_SIZE) {
+ memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
+ memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
+ _src += MEMCPY_BOUNCE_SIZE;
+ _dst += MEMCPY_BOUNCE_SIZE;
+ len -= MEMCPY_BOUNCE_SIZE;
+ }
+ if (len) {
+ memcpy_fromio(bounce, _src, len);
+ memcpy_toio(_dst, bounce, len);
+ }
+ }
+}
+
+#ifdef CONFIG_X86
+
+static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
+
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
+{
+ kernel_fpu_begin();
+
+ while (len >= 4) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movntdqa 16(%0), %%xmm1\n"
+ "movntdqa 32(%0), %%xmm2\n"
+ "movntdqa 48(%0), %%xmm3\n"
+ "movaps %%xmm0, (%1)\n"
+ "movaps %%xmm1, 16(%1)\n"
+ "movaps %%xmm2, 32(%1)\n"
+ "movaps %%xmm3, 48(%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 64;
+ dst += 64;
+ len -= 4;
+ }
+ while (len--) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movaps %%xmm0, (%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 16;
+ dst += 16;
+ }
+
+ kernel_fpu_end();
+}
+
+/*
+ * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
+ * non-temporal instructions where available. Note that all arguments
+ * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
+ * of 16; unaligned copies fall back to a regular memcpy().
+ */
+static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
+{
+ if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
+ memcpy(dst, src, len);
+ else if (likely(len))
+ __memcpy_ntdqa(dst, src, len >> 4);
+}
+
+/**
+ * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
+ * that may be WC.
+ * @dst: The destination pointer
+ * @src: The source pointer
+ * @len: The size of the area to transfer in bytes
+ *
+ * Tries an arch-optimized memcpy suited to reading out of a WC region and,
+ * if no such variant is available, falls back to a normal memcpy.
+ */
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+ const struct dma_buf_map *src,
+ unsigned long len)
+{
+ if (WARN_ON(in_interrupt())) {
+ memcpy_fallback(dst, src, len);
+ return;
+ }
+
+ if (static_branch_likely(&has_movntdqa)) {
+ __drm_memcpy_from_wc(dst->is_iomem ?
+ (void __force *)dst->vaddr_iomem :
+ dst->vaddr,
+ src->is_iomem ?
+ (void const __force *)src->vaddr_iomem :
+ src->vaddr,
+ len);
+ return;
+ }
+
+ memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
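A minimal usage sketch, assuming both buffers are kernel-virtual mappings; src_ptr, dst_ptr and size are placeholders, and an I/O-memory source would instead be set up with dma_buf_map_set_vaddr_iomem():

    struct dma_buf_map src = DMA_BUF_MAP_INIT_VADDR(src_ptr);
    struct dma_buf_map dst = DMA_BUF_MAP_INIT_VADDR(dst_ptr);

    drm_memcpy_from_wc(&dst, &src, size);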
+
+/*
+ * drm_memcpy_init_early - One time initialization of the WC memcpy code
+ */
+void drm_memcpy_init_early(void)
+{
+ /*
+ * Some hypervisors (e.g. KVM) don't support emulation of VEX-prefixed
+ * instructions, so don't enable movntdqa in hypervisor guests.
+ */
+ if (static_cpu_has(X86_FEATURE_XMM4_1) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ static_branch_enable(&has_movntdqa);
+}
+#else
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+ const struct dma_buf_map *src,
+ unsigned long len)
+{
+ WARN_ON(in_interrupt());
+
+ memcpy_fallback(dst, src, len);
+}
+EXPORT_SYMBOL(drm_memcpy_from_wc);
+
+void drm_memcpy_init_early(void)
+{
+}
+#endif /* CONFIG_X86 */
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 54604633e65c..32b7f8983b94 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3722,9 +3722,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
}
lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count);
- link_rate = min_t(int, mgr->dpcd[1], mgr->max_link_rate);
+ link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate);
mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr,
- drm_dp_bw_code_to_link_rate(link_rate),
+ link_rate,
lane_count);
if (mgr->pbn_div == 0) {
ret = -EINVAL;
@@ -5454,7 +5454,7 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
* @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
* @max_payloads: maximum number of payloads this GPU can source
* @max_lane_count: maximum number of lanes this GPU supports
- * @max_link_rate: maximum link rate this GPU supports, units as in DPCD
+ * @max_link_rate: maximum link rate per lane this GPU supports in kHz
* @conn_base_id: the connector object ID the MST device is connected to.
*
* Return 0 for success, or negative error code on failure
@@ -5462,7 +5462,7 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes, int max_payloads,
- u8 max_lane_count, u8 max_link_rate,
+ int max_lane_count, int max_link_rate,
int conn_base_id)
{
struct drm_dp_mst_topology_state *mst_state;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3d8d68a98b95..8804ec7d3215 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/srcu.h>
+#include <drm/drm_cache.h>
#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
@@ -1041,6 +1042,7 @@ static int __init drm_core_init(void)
drm_connector_ida_init();
idr_init(&drm_minors_idr);
+ drm_memcpy_init_early();
ret = drm_sysfs_init();
if (ret < 0) {
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index cb2349ad338d..69c57273b184 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -9,12 +9,14 @@
* Copyright (C) 2012 Red Hat
*/
+#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
/**
@@ -97,3 +99,47 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
return paddr;
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
+
+/**
+ * drm_fb_cma_sync_non_coherent - Sync GEM object to non-coherent backing
+ * memory
+ * @drm: DRM device
+ * @old_state: Old plane state
+ * @state: New plane state
+ *
+ * This function can be used by drivers that use damage clips and have
+ * CMA GEM objects backed by non-coherent memory. Calling this function
+ * in a plane's .atomic_update ensures that all the data in the backing
+ * memory has been written to RAM.
+ */
+void drm_fb_cma_sync_non_coherent(struct drm_device *drm,
+ struct drm_plane_state *old_state,
+ struct drm_plane_state *state)
+{
+ const struct drm_format_info *finfo = state->fb->format;
+ struct drm_atomic_helper_damage_iter iter;
+ const struct drm_gem_cma_object *cma_obj;
+ unsigned int offset, i;
+ struct drm_rect clip;
+ dma_addr_t daddr;
+ size_t nb_bytes;
+
+ for (i = 0; i < finfo->num_planes; i++) {
+ cma_obj = drm_fb_cma_get_gem_obj(state->fb, i);
+ if (!cma_obj->map_noncoherent)
+ continue;
+
+ daddr = drm_fb_cma_get_gem_addr(state->fb, state, i);
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ /* Ignore x1/x2 values, invalidate complete lines */
+ offset = clip.y1 * state->fb->pitches[i];
+
+ nb_bytes = (clip.y2 - clip.y1) * state->fb->pitches[i];
+ dma_sync_single_for_device(drm->dev, daddr + offset,
+ nb_bytes, DMA_TO_DEVICE);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_sync_non_coherent);
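A minimal sketch of the intended call site, assuming a hypothetical driver callback named foo_plane_atomic_update; everything except the drm_fb_cma_sync_non_coherent() call itself is illustrative:

    static void foo_plane_atomic_update(struct drm_plane *plane,
                                        struct drm_plane_state *old_state)
    {
            struct drm_plane_state *state = plane->state;

            /* Flush CPU writes to the non-coherent CMA buffer first... */
            drm_fb_cma_sync_non_coherent(plane->dev, old_state, state);

            /* ...then program the hardware with the new framebuffer. */
    }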
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f6baa2046124..d77a24507d30 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1737,7 +1737,7 @@ void drm_fb_helper_fill_info(struct fb_info *info,
sizes->fb_width, sizes->fb_height);
info->par = fb_helper;
- snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
+ snprintf(info->fix.id, sizeof(info->fix.id), "%s",
fb_helper->dev->driver->name);
}
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 03262472059c..eda832f9200d 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -30,11 +30,6 @@
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
-static char printable_char(int c)
-{
- return isascii(c) && isprint(c) ? c : '?';
-}
-
/**
* drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
* @bpp: bits per pixels
@@ -130,26 +125,6 @@ uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_driver_legacy_fb_format);
-/**
- * drm_get_format_name - fill a string with a drm fourcc format's name
- * @format: format to compute name of
- * @buf: caller-supplied buffer
- */
-const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf)
-{
- snprintf(buf->str, sizeof(buf->str),
- "%c%c%c%c %s-endian (0x%08x)",
- printable_char(format & 0xff),
- printable_char((format >> 8) & 0xff),
- printable_char((format >> 16) & 0xff),
- printable_char((format >> 24) & 0x7f),
- format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
- format);
-
- return buf->str;
-}
-EXPORT_SYMBOL(drm_get_format_name);
-
/*
* Internal function to query information for a given format. See
* drm_format_info() for the public API.
@@ -203,6 +178,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_AXBXGXRX106106106106, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9989425e9875..d62fb1a3c916 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -770,8 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
return -EINVAL;
}
- ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
- true, timeout);
+ ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
if (ret == 0)
ret = -ETIME;
else if (ret > 0)
@@ -1375,12 +1374,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
if (!write) {
struct dma_fence *fence =
- dma_resv_get_excl_rcu(obj->resv);
+ dma_resv_get_excl_unlocked(obj->resv);
return drm_gem_fence_array_add(fence_array, fence);
}
- ret = dma_resv_get_fences_rcu(obj->resv, NULL,
+ ret = dma_resv_get_fences(obj->resv, NULL,
&fence_count, &fences);
if (ret || !fence_count)
return ret;
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index a005c5a0ba46..a27135084ae5 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -147,7 +147,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
return 0;
obj = drm_gem_fb_get_obj(state->fb, 0);
- fence = dma_resv_get_excl_rcu(obj->resv);
+ fence = dma_resv_get_excl_unlocked(obj->resv);
drm_atomic_set_fence_for_plane(state, fence);
return 0;
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 7942cf05cd93..d53388199f34 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -46,6 +46,7 @@ static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
* __drm_gem_cma_create - Create a GEM CMA object without allocating memory
* @drm: DRM device
* @size: size of the object to allocate
+ * @private: true if used for internal purposes
*
* This function creates and initializes a GEM CMA object of the given size,
* but doesn't allocate any memory to back the object.
@@ -55,11 +56,11 @@ static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
* error code on failure.
*/
static struct drm_gem_cma_object *
-__drm_gem_cma_create(struct drm_device *drm, size_t size)
+__drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
{
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *gem_obj;
- int ret;
+ int ret = 0;
if (drm->driver->gem_create_object)
gem_obj = drm->driver->gem_create_object(drm, size);
@@ -73,7 +74,14 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size)
cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
- ret = drm_gem_object_init(drm, gem_obj, size);
+ if (private) {
+ drm_gem_private_object_init(drm, gem_obj, size);
+
+ /* Always use writecombine for dma-buf mappings */
+ cma_obj->map_noncoherent = false;
+ } else {
+ ret = drm_gem_object_init(drm, gem_obj, size);
+ }
if (ret)
goto error;
@@ -96,8 +104,7 @@ error:
* @size: size of the object to allocate
*
* This function creates a CMA GEM object and allocates a contiguous chunk of
- * memory as backing store. The backing memory has the writecombine attribute
- * set.
+ * memory as backing store.
*
* Returns:
* A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
@@ -111,12 +118,19 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
size = round_up(size, PAGE_SIZE);
- cma_obj = __drm_gem_cma_create(drm, size);
+ cma_obj = __drm_gem_cma_create(drm, size, false);
if (IS_ERR(cma_obj))
return cma_obj;
- cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
- GFP_KERNEL | __GFP_NOWARN);
+ if (cma_obj->map_noncoherent) {
+ cma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
+ &cma_obj->paddr,
+ DMA_TO_DEVICE,
+ GFP_KERNEL | __GFP_NOWARN);
+ } else {
+ cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ }
if (!cma_obj->vaddr) {
drm_dbg(drm, "failed to allocate buffer with size %zu\n",
size);
@@ -432,7 +446,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
return ERR_PTR(-EINVAL);
/* Create a CMA GEM buffer. */
- cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
+ cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size, true);
if (IS_ERR(cma_obj))
return ERR_CAST(cma_obj);
@@ -499,8 +513,16 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
cma_obj = to_drm_gem_cma_obj(obj);
- ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
- cma_obj->paddr, vma->vm_end - vma->vm_start);
+ if (cma_obj->map_noncoherent) {
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+ ret = dma_mmap_pages(cma_obj->base.dev->dev,
+ vma, vma->vm_end - vma->vm_start,
+ virt_to_page(cma_obj->vaddr));
+ } else {
+ ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
+ cma_obj->paddr, vma->vm_end - vma->vm_start);
+ }
if (ret)
drm_gem_vm_close(vma);
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 5ed2067cebb6..e2c68822e05c 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -149,8 +149,10 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
int ret, i;
info = drm_get_format_info(dev, mode_cmd);
- if (!info)
+ if (!info) {
+ drm_dbg_kms(dev, "Failed to get FB format info\n");
return -EINVAL;
+ }
for (i = 0; i < info->num_planes; i++) {
unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
@@ -169,6 +171,9 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
+ mode_cmd->offsets[i];
if (objs[i]->size < min_size) {
+ drm_dbg_kms(dev,
+ "GEM object size (%zu) smaller than minimum size (%u) for plane %d\n",
+ objs[i]->size, min_size, i);
drm_gem_object_put(objs[i]);
ret = -EINVAL;
goto err_gem_object_put;
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index b14bed8be771..ecf3d2a54a98 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -40,12 +40,12 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
drm_printf_indent(p, indent, "placement=");
- drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+ drm_print_bits(p, bo->resource->placement, plname, ARRAY_SIZE(plname));
drm_printf(p, "\n");
- if (bo->mem.bus.is_iomem)
+ if (bo->resource->bus.is_iomem)
drm_printf_indent(p, indent, "bus.offset=%lx\n",
- (unsigned long)bo->mem.bus.offset);
+ (unsigned long)bo->resource->bus.offset);
}
EXPORT_SYMBOL(drm_gem_ttm_print_info);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 797200315854..2a1229b8364e 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -17,6 +17,8 @@
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/ttm/ttm_range_manager.h>
+
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
/**
@@ -248,10 +250,11 @@ EXPORT_SYMBOL(drm_gem_vram_put);
static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
{
/* Keep TTM behavior for now, remove when drivers are audited */
- if (WARN_ON_ONCE(!gbo->bo.mem.mm_node))
+ if (WARN_ON_ONCE(!gbo->bo.resource ||
+ gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
return 0;
- return gbo->bo.mem.start;
+ return gbo->bo.resource->start;
}
/**
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1dcb5797a3bb..17f3548c8ed2 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -54,18 +54,10 @@ void drm_lastclose(struct drm_device *dev);
#ifdef CONFIG_PCI
/* drm_pci.c */
-int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
#else
-static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return -EINVAL;
-}
-
static inline int drm_pci_set_busid(struct drm_device *dev,
struct drm_master *master)
{
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b0856c139693..53d314103a37 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -577,7 +577,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid,
+ DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index da4f085fc09e..00fb433bcef1 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -14,7 +14,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
#define drm_for_each_lessee(lessee, lessor) \
list_for_each_entry((lessee), &(lessor)->lessees, lessee_list)
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 7080d2538421..c9206840c87f 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -235,9 +235,17 @@ void drm_master_legacy_init(struct drm_master *master);
static inline void drm_master_legacy_init(struct drm_master *master) {}
#endif
+/* drm_pci.c */
#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI)
+int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv);
void drm_legacy_pci_agp_destroy(struct drm_device *dev);
#else
+static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -EINVAL;
+}
+
static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {}
#endif
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 38c3cb72e7e6..39d35fc3a43b 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -72,7 +72,9 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
return 0;
}
-static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+#ifdef CONFIG_DRM_LEGACY
+
+static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -115,11 +117,9 @@ int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EOPNOTSUPP;
- return drm_pci_irq_by_busid(dev, p);
+ return drm_legacy_pci_irq_by_busid(dev, p);
}
-#ifdef CONFIG_DRM_LEGACY
-
void drm_legacy_pci_agp_destroy(struct drm_device *dev)
{
if (dev->agp) {
@@ -165,9 +165,6 @@ static int drm_legacy_get_pci_dev(struct pci_dev *pdev,
dev->hose = pdev->sysdata;
#endif
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- pci_set_drvdata(pdev, dev);
-
drm_legacy_pci_agp_init(dev);
ret = drm_dev_register(dev, ent->driver_data);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index db69f19ab5bc..b8fa6ed3dd73 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -390,14 +390,12 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!dma_resv_test_signaled_rcu(obj->resv,
- write))
+ if (!dma_resv_test_signaled(obj->resv, write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = dma_resv_wait_timeout_rcu(obj->resv,
- write, true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
}
@@ -461,7 +459,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off, etnaviv_obj->vaddr, obj->size);
rcu_read_lock();
- fobj = rcu_dereference(robj->fence);
+ fobj = dma_resv_shared_list(robj);
if (fobj) {
unsigned int i, shared_count = fobj->shared_count;
@@ -471,7 +469,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
}
}
- fence = rcu_dereference(robj->fence_excl);
+ fence = dma_resv_excl_fence(robj);
if (fence)
etnaviv_gem_describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index d05c35994579..d53856d7a747 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -189,13 +189,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = dma_resv_get_fences_rcu(robj, &bo->excl,
- &bo->nr_shared,
- &bo->shared);
+ ret = dma_resv_get_fences(robj, &bo->excl,
+ &bo->nr_shared,
+ &bo->shared);
if (ret)
return ret;
} else {
- bo->excl = dma_resv_get_excl_rcu(robj);
+ bo->excl = dma_resv_get_excl_unlocked(robj);
}
}
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index b9a4b7670a89..197b97341cad 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -815,10 +815,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(ctx->addr)) {
- dev_err(dev, "ioremap failed\n");
+ if (IS_ERR(ctx->addr))
return PTR_ERR(ctx->addr);
- }
ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
if (ret < 0)
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 431c5d32f9a4..9b5e6f94e558 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -344,7 +344,7 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
}
/**
- * shadow_protect_win() - disable updating values from shadow registers at vsync
+ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: display and enhancement controller context
* @win: window to protect registers for
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 44e402b7cdfb..2d2fe5ab26e7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1786,10 +1786,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->reg_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(dsi->reg_base)) {
- dev_err(dev, "failed to remap io region\n");
+ if (IS_ERR(dsi->reg_base))
return PTR_ERR(dsi->reg_base);
- }
dsi->phy = devm_phy_get(dev, "dsim");
if (IS_ERR(dsi->phy)) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 49a2e0c53918..ae576122873e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -723,7 +723,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
}
/**
- * shadow_protect_win() - disable updating values from shadow registers at vsync
+ * fimd_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: local driver data
* @win: window to protect registers for
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 4f2b7551b251..9ae868935357 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -88,7 +88,7 @@ void exynos_drm_ipp_unregister(struct device *dev,
}
/**
- * exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules
+ * exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
@@ -136,7 +136,7 @@ static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
}
/**
- * exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats
+ * exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 00e87c290796..9b565a057340 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -3,7 +3,7 @@
* DesignWare MIPI DSI Host Controller v1.02 driver
*
* Copyright (c) 2016 Linaro Limited.
- * Copyright (c) 2014-2016 Hisilicon Limited.
+ * Copyright (c) 2014-2016 HiSilicon Limited.
*
* Author:
* Xinliang Liu <[email protected]>
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h b/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h
index 19e81ff64fac..d79fc031e53d 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016 Linaro Limited.
- * Copyright (c) 2014-2016 Hisilicon Limited.
+ * Copyright (c) 2014-2016 HiSilicon Limited.
*/
#ifndef __DW_DSI_REG_H__
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
index e2ac09894a6d..be9e789c2d04 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016 Linaro Limited.
- * Copyright (c) 2014-2016 Hisilicon Limited.
+ * Copyright (c) 2014-2016 HiSilicon Limited.
*/
#ifndef __KIRIN_ADE_REG_H__
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 6dcf9ec05eec..1ab94620776f 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -3,7 +3,7 @@
* Hisilicon Hi6220 SoC ADE(Advanced Display Engine)'s crtc&plane driver
*
* Copyright (c) 2016 Linaro Limited.
- * Copyright (c) 2014-2016 Hisilicon Limited.
+ * Copyright (c) 2014-2016 HiSilicon Limited.
*
* Author:
* Xinliang Liu <[email protected]>
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 4349da3e2379..e590e19db657 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -3,7 +3,7 @@
* Hisilicon Kirin SoCs drm master driver
*
* Copyright (c) 2016 Linaro Limited.
- * Copyright (c) 2014-2016 Hisilicon Limited.
+ * Copyright (c) 2014-2016 HiSilicon Limited.
*
* Author:
* Xinliang Liu <[email protected]>
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
index 386d137f29e5..db0dc7bdf72c 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016 Linaro Limited.
- * Copyright (c) 2014-2016 Hisilicon Limited.
+ * Copyright (c) 2014-2016 HiSilicon Limited.
*/
#ifndef __KIRIN_DRM_DRV_H__
diff --git a/drivers/gpu/drm/hyperv/Makefile b/drivers/gpu/drm/hyperv/Makefile
new file mode 100644
index 000000000000..265f12f2c660
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+hyperv_drm-y := \
+ hyperv_drm_drv.o \
+ hyperv_drm_modeset.o \
+ hyperv_drm_proto.o
+
+obj-$(CONFIG_DRM_HYPERV) += hyperv_drm.o
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h
new file mode 100644
index 000000000000..886add4f9cd0
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 Microsoft
+ */
+
+#ifndef _HYPERV_DRM_H_
+#define _HYPERV_DRM_H_
+
+#define VMBUS_MAX_PACKET_SIZE 0x4000
+
+struct hyperv_drm_device {
+ /* drm */
+ struct drm_device dev;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector connector;
+
+ /* mode */
+ u32 screen_width_max;
+ u32 screen_height_max;
+ u32 preferred_width;
+ u32 preferred_height;
+ u32 screen_depth;
+
+ /* hw */
+ struct resource *mem;
+ void __iomem *vram;
+ unsigned long fb_base;
+ unsigned long fb_size;
+ struct completion wait;
+ u32 synthvid_version;
+ u32 mmio_megabytes;
+ bool dirt_needed;
+
+ u8 init_buf[VMBUS_MAX_PACKET_SIZE];
+ u8 recv_buf[VMBUS_MAX_PACKET_SIZE];
+
+ struct hv_device *hdev;
+};
+
+#define to_hv(_dev) container_of(_dev, struct hyperv_drm_device, dev)
+
+/* hyperv_drm_modeset */
+int hyperv_mode_config_init(struct hyperv_drm_device *hv);
+
+/* hyperv_drm_proto */
+int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp);
+int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
+ u32 w, u32 h, u32 pitch);
+int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect);
+int hyperv_connect_vsp(struct hv_device *hdev);
+
+#endif
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
new file mode 100644
index 000000000000..eb06c92c4bfd
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Microsoft
+ */
+
+#include <linux/efi.h>
+#include <linux/hyperv.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hyperv_drm.h"
+
+#define DRIVER_NAME "hyperv_drm"
+#define DRIVER_DESC "DRM driver for Hyper-V synthetic video device"
+#define DRIVER_DATE "2020"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
+
+DEFINE_DRM_GEM_FOPS(hv_fops);
+
+static struct drm_driver hyperv_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .fops = &hv_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+static int hyperv_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ return 0;
+}
+
+static void hyperv_pci_remove(struct pci_dev *pdev)
+{
+}
+
+static const struct pci_device_id hyperv_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_MICROSOFT,
+ .device = PCI_DEVICE_ID_HYPERV_VIDEO,
+ },
+ { /* end of list */ }
+};
+
+/*
+ * PCI stub to support Gen 1 VMs.
+ */
+static struct pci_driver hyperv_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = hyperv_pci_tbl,
+ .probe = hyperv_pci_probe,
+ .remove = hyperv_pci_remove,
+};
+
+static int hyperv_setup_gen1(struct hyperv_drm_device *hv)
+{
+ struct drm_device *dev = &hv->dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev) {
+ drm_err(dev, "Unable to find PCI Hyper-V video\n");
+ return -ENODEV;
+ }
+
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "hypervdrmfb");
+ if (ret) {
+ drm_err(dev, "Unable to remove boot fb\n");
+ goto error;
+ }
+
+ if (pci_request_region(pdev, 0, DRIVER_NAME) != 0)
+ drm_warn(dev, "Cannot request framebuffer, boot fb still active?\n");
+
+ if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) {
+ drm_err(dev, "Resource at bar 0 is not IORESOURCE_MEM\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ hv->fb_base = pci_resource_start(pdev, 0);
+ hv->fb_size = pci_resource_len(pdev, 0);
+ if (!hv->fb_base) {
+ drm_err(dev, "Resource not available\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ hv->fb_size = min(hv->fb_size,
+ (unsigned long)(hv->mmio_megabytes * 1024 * 1024));
+ hv->vram = devm_ioremap(&pdev->dev, hv->fb_base, hv->fb_size);
+ if (!hv->vram) {
+ drm_err(dev, "Failed to map vram\n");
+ ret = -ENOMEM;
+ }
+
+error:
+ pci_dev_put(pdev);
+ return ret;
+}
+
+static int hyperv_setup_gen2(struct hyperv_drm_device *hv,
+ struct hv_device *hdev)
+{
+ struct drm_device *dev = &hv->dev;
+ int ret;
+
+ drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
+ screen_info.lfb_size,
+ false,
+ "hypervdrmfb");
+
+ hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;
+
+ ret = vmbus_allocate_mmio(&hv->mem, hdev, 0, -1, hv->fb_size, 0x100000,
+ true);
+ if (ret) {
+ drm_err(dev, "Failed to allocate mmio\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Map the VRAM cacheable for performance. This is also required for an
+ * ARM64 Linux VM to display properly over VM Connect, since the host maps
+ * the VRAM cacheable as well.
+ */
+ hv->vram = ioremap_cache(hv->mem->start, hv->fb_size);
+ if (!hv->vram) {
+ drm_err(dev, "Failed to map vram\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ hv->fb_base = hv->mem->start;
+ return 0;
+
+error:
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
+ return ret;
+}
+
+static int hyperv_vmbus_probe(struct hv_device *hdev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct hyperv_drm_device *hv;
+ struct drm_device *dev;
+ int ret;
+
+ hv = devm_drm_dev_alloc(&hdev->device, &hyperv_driver,
+ struct hyperv_drm_device, dev);
+ if (IS_ERR(hv))
+ return PTR_ERR(hv);
+
+ dev = &hv->dev;
+ init_completion(&hv->wait);
+ hv_set_drvdata(hdev, hv);
+ hv->hdev = hdev;
+
+ ret = hyperv_connect_vsp(hdev);
+ if (ret) {
+ drm_err(dev, "Failed to connect to vmbus.\n");
+ goto err_hv_set_drv_data;
+ }
+
+ if (efi_enabled(EFI_BOOT))
+ ret = hyperv_setup_gen2(hv, hdev);
+ else
+ ret = hyperv_setup_gen1(hv);
+
+ if (ret)
+ goto err_vmbus_close;
+
+ /*
+ * This only needs to be done once during init and on resume. Failing to
+ * update the vram location is not fatal; the device will then only update
+ * the dirty area up to the preferred resolution.
+ */
+ ret = hyperv_update_vram_location(hdev, hv->fb_base);
+ if (ret)
+ drm_warn(dev, "Failed to update vram location.\n");
+
+ hv->dirt_needed = true;
+
+ ret = hyperv_mode_config_init(hv);
+ if (ret)
+ goto err_vmbus_close;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret) {
+ drm_err(dev, "Failed to register drm driver.\n");
+ goto err_vmbus_close;
+ }
+
+ drm_fbdev_generic_setup(dev, 0);
+
+ return 0;
+
+err_vmbus_close:
+ vmbus_close(hdev->channel);
+err_hv_set_drv_data:
+ hv_set_drvdata(hdev, NULL);
+ return ret;
+}
+
+static int hyperv_vmbus_remove(struct hv_device *hdev)
+{
+ struct drm_device *dev = hv_get_drvdata(hdev);
+ struct hyperv_drm_device *hv = to_hv(dev);
+
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
+ vmbus_close(hdev->channel);
+ hv_set_drvdata(hdev, NULL);
+
+ /* Gen1 VMs map the PCI BAR directly and never allocate vmbus mmio */
+ if (hv->mem)
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
+
+ return 0;
+}
+
+static int hyperv_vmbus_suspend(struct hv_device *hdev)
+{
+ struct drm_device *dev = hv_get_drvdata(hdev);
+ int ret;
+
+ ret = drm_mode_config_helper_suspend(dev);
+ if (ret)
+ return ret;
+
+ vmbus_close(hdev->channel);
+
+ return 0;
+}
+
+static int hyperv_vmbus_resume(struct hv_device *hdev)
+{
+ struct drm_device *dev = hv_get_drvdata(hdev);
+ struct hyperv_drm_device *hv = to_hv(dev);
+ int ret;
+
+ ret = hyperv_connect_vsp(hdev);
+ if (ret)
+ return ret;
+
+ ret = hyperv_update_vram_location(hdev, hv->fb_base);
+ if (ret)
+ return ret;
+
+ return drm_mode_config_helper_resume(dev);
+}
+
+static const struct hv_vmbus_device_id hyperv_vmbus_tbl[] = {
+ /* Synthetic Video Device GUID */
+ {HV_SYNTHVID_GUID},
+ {}
+};
+
+static struct hv_driver hyperv_hv_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = hyperv_vmbus_tbl,
+ .probe = hyperv_vmbus_probe,
+ .remove = hyperv_vmbus_remove,
+ .suspend = hyperv_vmbus_suspend,
+ .resume = hyperv_vmbus_resume,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init hyperv_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&hyperv_pci_driver);
+ if (ret != 0)
+ return ret;
+
+ ret = vmbus_driver_register(&hyperv_hv_driver);
+ if (ret != 0)
+ pci_unregister_driver(&hyperv_pci_driver);
+
+ return ret;
+}
+
+static void __exit hyperv_exit(void)
+{
+ vmbus_driver_unregister(&hyperv_hv_driver);
+ pci_unregister_driver(&hyperv_pci_driver);
+}
+
+module_init(hyperv_init);
+module_exit(hyperv_exit);
+
+MODULE_DEVICE_TABLE(pci, hyperv_pci_tbl);
+MODULE_DEVICE_TABLE(vmbus, hyperv_vmbus_tbl);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Deepak Rawat <[email protected]>");
+MODULE_DESCRIPTION("DRM driver for Hyper-V synthetic video device");
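
For orientation, the probe path above selects the memory setup by VM generation; a sketch of the existing logic, not new code:

    /*
     * Gen2 VMs boot via EFI and get their framebuffer from a VMBus-assigned
     * MMIO range; Gen1 VMs emulate a PCI device (1414:5353) whose BAR 0
     * carries the framebuffer, which is why the PCI stub driver exists.
     */
    if (efi_enabled(EFI_BOOT))
            ret = hyperv_setup_gen2(hv, hdev); /* vmbus_allocate_mmio() + ioremap_cache() */
    else
            ret = hyperv_setup_gen1(hv);       /* PCI BAR 0 + devm_ioremap() */
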
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
new file mode 100644
index 000000000000..02718e3e859e
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Microsoft
+ */
+
+#include <linux/hyperv.h>
+
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hyperv_drm.h"
+
+static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
+ const struct dma_buf_map *map,
+ struct drm_rect *rect)
+{
+ struct hyperv_drm_device *hv = to_hv(fb->dev);
+ void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ int idx;
+
+ if (!drm_dev_enter(&hv->dev, &idx))
+ return -ENODEV;
+
+ drm_fb_memcpy_dstclip(hv->vram, fb->pitches[0], vmap, fb, rect);
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map)
+{
+ struct drm_rect fullscreen = {
+ .x1 = 0,
+ .x2 = fb->width,
+ .y1 = 0,
+ .y2 = fb->height,
+ };
+ return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
+}
+
+static int hyperv_connector_get_modes(struct drm_connector *connector)
+{
+ struct hyperv_drm_device *hv = to_hv(connector->dev);
+ int count;
+
+ count = drm_add_modes_noedid(connector,
+ connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+ drm_set_preferred_mode(connector, hv->preferred_width,
+ hv->preferred_height);
+
+ return count;
+}
+
+static const struct drm_connector_helper_funcs hyperv_connector_helper_funcs = {
+ .get_modes = hyperv_connector_get_modes,
+};
+
+static const struct drm_connector_funcs hyperv_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static inline int hyperv_conn_init(struct hyperv_drm_device *hv)
+{
+ drm_connector_helper_add(&hv->connector, &hyperv_connector_helper_funcs);
+ return drm_connector_init(&hv->dev, &hv->connector,
+ &hyperv_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
+static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
+ struct drm_framebuffer *fb)
+{
+ u32 pitch = w * (hv->screen_depth / 8);
+
+ if (fb)
+ pitch = fb->pitches[0];
+
+ if (pitch * h > hv->fb_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+
+ hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
+ crtc_state->mode.hdisplay,
+ crtc_state->mode.vdisplay,
+ plane_state->fb->pitches[0]);
+ hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->map[0]);
+}
+
+static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_framebuffer *fb = plane_state->fb;
+
+ if (fb->format->format != DRM_FORMAT_XRGB8888)
+ return -EINVAL;
+
+ if (fb->pitches[0] * fb->height > hv->fb_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct drm_rect rect;
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
+ hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->map[0], &rect);
+ hyperv_update_dirt(hv->hdev, &rect);
+ }
+}
+
+static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
+ .enable = hyperv_pipe_enable,
+ .check = hyperv_pipe_check,
+ .update = hyperv_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+};
+
+static const uint32_t hyperv_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t hyperv_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
+{
+ int ret;
+
+ ret = drm_simple_display_pipe_init(&hv->dev,
+ &hv->pipe,
+ &hyperv_pipe_funcs,
+ hyperv_formats,
+ ARRAY_SIZE(hyperv_formats),
+ NULL,
+ &hv->connector);
+ if (ret)
+ return ret;
+
+ drm_plane_enable_fb_damage_clips(&hv->pipe.plane);
+
+ return 0;
+}
+
+static enum drm_mode_status
+hyperv_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ struct hyperv_drm_device *hv = to_hv(dev);
+
+ if (hyperv_check_size(hv, mode->hdisplay, mode->vdisplay, NULL))
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static const struct drm_mode_config_funcs hyperv_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = hyperv_mode_valid,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+int hyperv_mode_config_init(struct hyperv_drm_device *hv)
+{
+ struct drm_device *dev = &hv->dev;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret) {
+ drm_err(dev, "Failed to initialized mode setting.\n");
+ return ret;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = hv->screen_width_max;
+ dev->mode_config.max_height = hv->screen_height_max;
+
+ dev->mode_config.preferred_depth = hv->screen_depth;
+ dev->mode_config.prefer_shadow = 0;
+
+ dev->mode_config.funcs = &hyperv_mode_config_funcs;
+
+ ret = hyperv_conn_init(hv);
+ if (ret) {
+ drm_err(dev, "Failed to initialized connector.\n");
+ return ret;
+ }
+
+ ret = hyperv_pipe_init(hv);
+ if (ret) {
+ drm_err(dev, "Failed to initialized pipe.\n");
+ return ret;
+ }
+
+ drm_mode_config_reset(dev);
+
+ return 0;
+}
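
With XRGB8888 as the only supported format, the size checks in hyperv_check_size() and hyperv_pipe_check() reduce to plain arithmetic; a worked example, assuming the 32 bpp Win8+ depth and an 8 MiB framebuffer (SYNTHVID_FB_SIZE_WIN8, defined in hyperv_drm_proto.c):

    /*
     * 1920x1080 @ 32 bpp:
     *   pitch = 1920 * 4        = 7680 bytes
     *   size  = 7680 * 1080     = 8294400 bytes (~7.9 MiB)
     *   8294400 <= 8388608 (8 MiB)            -> MODE_OK
     * 2560x1440 @ 32 bpp:
     *   size  = 2560 * 4 * 1440 = 14745600 bytes (~14.1 MiB)
     *   too big for an 8 MiB framebuffer      -> MODE_BAD
     */
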
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
new file mode 100644
index 000000000000..6d4bdccfbd1a
--- /dev/null
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Microsoft
+ *
+ * Portions of this code are derived from hyperv_fb.c
+ */
+
+#include <linux/hyperv.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hyperv_drm.h"
+
+#define VMBUS_RING_BUFSIZE (256 * 1024)
+#define VMBUS_VSP_TIMEOUT (10 * HZ)
+
+#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
+#define SYNTHVID_VER_GET_MAJOR(ver) ((ver) & 0x0000ffff)
+#define SYNTHVID_VER_GET_MINOR(ver) (((ver) & 0xffff0000) >> 16)
+#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
+#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
+#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
+
+#define SYNTHVID_DEPTH_WIN7 16
+#define SYNTHVID_DEPTH_WIN8 32
+#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024)
+#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
+#define SYNTHVID_WIDTH_MAX_WIN7 1600
+#define SYNTHVID_HEIGHT_MAX_WIN7 1200
+
+enum pipe_msg_type {
+ PIPE_MSG_INVALID,
+ PIPE_MSG_DATA,
+ PIPE_MSG_MAX
+};
+
+enum synthvid_msg_type {
+ SYNTHVID_ERROR = 0,
+ SYNTHVID_VERSION_REQUEST = 1,
+ SYNTHVID_VERSION_RESPONSE = 2,
+ SYNTHVID_VRAM_LOCATION = 3,
+ SYNTHVID_VRAM_LOCATION_ACK = 4,
+ SYNTHVID_SITUATION_UPDATE = 5,
+ SYNTHVID_SITUATION_UPDATE_ACK = 6,
+ SYNTHVID_POINTER_POSITION = 7,
+ SYNTHVID_POINTER_SHAPE = 8,
+ SYNTHVID_FEATURE_CHANGE = 9,
+ SYNTHVID_DIRT = 10,
+ SYNTHVID_RESOLUTION_REQUEST = 13,
+ SYNTHVID_RESOLUTION_RESPONSE = 14,
+
+ SYNTHVID_MAX = 15
+};
+
+struct pipe_msg_hdr {
+ u32 type;
+ u32 size; /* size of message after this field */
+} __packed;
+
+struct hvd_screen_info {
+ u16 width;
+ u16 height;
+} __packed;
+
+struct synthvid_msg_hdr {
+ u32 type;
+ u32 size; /* size of this header + payload after this field */
+} __packed;
+
+struct synthvid_version_req {
+ u32 version;
+} __packed;
+
+struct synthvid_version_resp {
+ u32 version;
+ u8 is_accepted;
+ u8 max_video_outputs;
+} __packed;
+
+struct synthvid_vram_location {
+ u64 user_ctx;
+ u8 is_vram_gpa_specified;
+ u64 vram_gpa;
+} __packed;
+
+struct synthvid_vram_location_ack {
+ u64 user_ctx;
+} __packed;
+
+struct video_output_situation {
+ u8 active;
+ u32 vram_offset;
+ u8 depth_bits;
+ u32 width_pixels;
+ u32 height_pixels;
+ u32 pitch_bytes;
+} __packed;
+
+struct synthvid_situation_update {
+ u64 user_ctx;
+ u8 video_output_count;
+ struct video_output_situation video_output[1];
+} __packed;
+
+struct synthvid_situation_update_ack {
+ u64 user_ctx;
+} __packed;
+
+struct synthvid_pointer_position {
+ u8 is_visible;
+ u8 video_output;
+ s32 image_x;
+ s32 image_y;
+} __packed;
+
+#define SYNTHVID_CURSOR_MAX_X 96
+#define SYNTHVID_CURSOR_MAX_Y 96
+#define SYNTHVID_CURSOR_ARGB_PIXEL_SIZE 4
+#define SYNTHVID_CURSOR_MAX_SIZE (SYNTHVID_CURSOR_MAX_X * \
+ SYNTHVID_CURSOR_MAX_Y * SYNTHVID_CURSOR_ARGB_PIXEL_SIZE)
+#define SYNTHVID_CURSOR_COMPLETE (-1)
+
+struct synthvid_pointer_shape {
+ u8 part_idx;
+ u8 is_argb;
+ u32 width; /* SYNTHVID_CURSOR_MAX_X at most */
+ u32 height; /* SYNTHVID_CURSOR_MAX_Y at most */
+ u32 hot_x; /* hotspot relative to upper-left of pointer image */
+ u32 hot_y;
+ u8 data[4];
+} __packed;
+
+struct synthvid_feature_change {
+ u8 is_dirt_needed;
+ u8 is_ptr_pos_needed;
+ u8 is_ptr_shape_needed;
+ u8 is_situ_needed;
+} __packed;
+
+struct rect {
+ s32 x1, y1; /* top left corner */
+ s32 x2, y2; /* bottom right corner, exclusive */
+} __packed;
+
+struct synthvid_dirt {
+ u8 video_output;
+ u8 dirt_count;
+ struct rect rect[1];
+} __packed;
+
+#define SYNTHVID_EDID_BLOCK_SIZE 128
+#define SYNTHVID_MAX_RESOLUTION_COUNT 64
+
+struct synthvid_supported_resolution_req {
+ u8 maximum_resolution_count;
+} __packed;
+
+struct synthvid_supported_resolution_resp {
+ u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
+ u8 resolution_count;
+ u8 default_resolution_index;
+ u8 is_standard;
+ struct hvd_screen_info supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
+} __packed;
+
+struct synthvid_msg {
+ struct pipe_msg_hdr pipe_hdr;
+ struct synthvid_msg_hdr vid_hdr;
+ union {
+ struct synthvid_version_req ver_req;
+ struct synthvid_version_resp ver_resp;
+ struct synthvid_vram_location vram;
+ struct synthvid_vram_location_ack vram_ack;
+ struct synthvid_situation_update situ;
+ struct synthvid_situation_update_ack situ_ack;
+ struct synthvid_pointer_position ptr_pos;
+ struct synthvid_pointer_shape ptr_shape;
+ struct synthvid_feature_change feature_chg;
+ struct synthvid_dirt dirt;
+ struct synthvid_supported_resolution_req resolution_req;
+ struct synthvid_supported_resolution_resp resolution_resp;
+ };
+} __packed;
+
+static inline bool hyperv_version_ge(u32 ver1, u32 ver2)
+{
+ if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
+ (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
+ SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
+ return true;
+
+ return false;
+}
+
+static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg *msg)
+{
+ static atomic64_t request_id = ATOMIC64_INIT(0);
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ int ret;
+
+ msg->pipe_hdr.type = PIPE_MSG_DATA;
+ msg->pipe_hdr.size = msg->vid_hdr.size;
+
+ ret = vmbus_sendpacket(hdev->channel, msg,
+ msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
+ atomic64_inc_return(&request_id),
+ VM_PKT_DATA_INBAND, 0);
+
+ if (ret)
+ drm_err(&hv->dev, "Unable to send packet via vmbus\n");
+
+ return ret;
+}
+
+static int hyperv_negotiate_version(struct hv_device *hdev, u32 ver)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
+ struct drm_device *dev = &hv->dev;
+ unsigned long t;
+
+ memset(msg, 0, sizeof(struct synthvid_msg));
+ msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
+ msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_version_req);
+ msg->ver_req.version = ver;
+ hyperv_sendpacket(hdev, msg);
+
+ t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
+ if (!t) {
+ drm_err(dev, "Time out on waiting version response\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!msg->ver_resp.is_accepted) {
+ drm_err(dev, "Version request not accepted\n");
+ return -ENODEV;
+ }
+
+ hv->synthvid_version = ver;
+ drm_info(dev, "Synthvid Version major %d, minor %d\n",
+ SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));
+
+ return 0;
+}
+
+int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
+ struct drm_device *dev = &hv->dev;
+ unsigned long t;
+
+ memset(msg, 0, sizeof(struct synthvid_msg));
+ msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
+ msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_vram_location);
+ msg->vram.user_ctx = vram_pp;
+ msg->vram.vram_gpa = vram_pp;
+ msg->vram.is_vram_gpa_specified = 1;
+ hyperv_sendpacket(hdev, msg);
+
+ t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
+ if (!t) {
+ drm_err(dev, "Time out on waiting vram location ack\n");
+ return -ETIMEDOUT;
+ }
+ if (msg->vram_ack.user_ctx != vram_pp) {
+ drm_err(dev, "Unable to set VRAM location\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp,
+ u32 w, u32 h, u32 pitch)
+{
+ struct synthvid_msg msg;
+
+ memset(&msg, 0, sizeof(struct synthvid_msg));
+
+ msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE;
+ msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_situation_update);
+ msg.situ.user_ctx = 0;
+ msg.situ.video_output_count = 1;
+ msg.situ.video_output[0].active = active;
+ /* vram_offset should always be 0 */
+ msg.situ.video_output[0].vram_offset = 0;
+ msg.situ.video_output[0].depth_bits = bpp;
+ msg.situ.video_output[0].width_pixels = w;
+ msg.situ.video_output[0].height_pixels = h;
+ msg.situ.video_output[0].pitch_bytes = pitch;
+
+ hyperv_sendpacket(hdev, &msg);
+
+ return 0;
+}
+
+int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg msg;
+
+ if (!hv->dirt_needed)
+ return 0;
+
+ memset(&msg, 0, sizeof(struct synthvid_msg));
+
+ msg.vid_hdr.type = SYNTHVID_DIRT;
+ msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_dirt);
+ msg.dirt.video_output = 0;
+ msg.dirt.dirt_count = 1;
+ msg.dirt.rect[0].x1 = rect->x1;
+ msg.dirt.rect[0].y1 = rect->y1;
+ msg.dirt.rect[0].x2 = rect->x2;
+ msg.dirt.rect[0].y2 = rect->y2;
+
+ hyperv_sendpacket(hdev, &msg);
+
+ return 0;
+}
+
+static int hyperv_get_supported_resolution(struct hv_device *hdev)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *msg = (struct synthvid_msg *)hv->init_buf;
+ struct drm_device *dev = &hv->dev;
+ unsigned long t;
+ u8 index;
+ int i;
+
+ memset(msg, 0, sizeof(struct synthvid_msg));
+ msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
+ msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_supported_resolution_req);
+ msg->resolution_req.maximum_resolution_count =
+ SYNTHVID_MAX_RESOLUTION_COUNT;
+ hyperv_sendpacket(hdev, msg);
+
+ t = wait_for_completion_timeout(&hv->wait, VMBUS_VSP_TIMEOUT);
+ if (!t) {
+ drm_err(dev, "Time out on waiting resolution response\n");
+ return -ETIMEDOUT;
+ }
+
+ if (msg->resolution_resp.resolution_count == 0) {
+ drm_err(dev, "No supported resolutions\n");
+ return -ENODEV;
+ }
+
+ index = msg->resolution_resp.default_resolution_index;
+ if (index >= msg->resolution_resp.resolution_count) {
+ drm_err(dev, "Invalid resolution index: %d\n", index);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
+ hv->screen_width_max = max_t(u32, hv->screen_width_max,
+ msg->resolution_resp.supported_resolution[i].width);
+ hv->screen_height_max = max_t(u32, hv->screen_height_max,
+ msg->resolution_resp.supported_resolution[i].height);
+ }
+
+ hv->preferred_width =
+ msg->resolution_resp.supported_resolution[index].width;
+ hv->preferred_height =
+ msg->resolution_resp.supported_resolution[index].height;
+
+ return 0;
+}
+
+static void hyperv_receive_sub(struct hv_device *hdev)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *msg;
+
+ if (!hv)
+ return;
+
+ msg = (struct synthvid_msg *)hv->recv_buf;
+
+ /* Complete the wait event */
+ if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
+ msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
+ msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
+ memcpy(hv->init_buf, msg, VMBUS_MAX_PACKET_SIZE);
+ complete(&hv->wait);
+ return;
+ }
+
+ if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE)
+ hv->dirt_needed = msg->feature_chg.is_dirt_needed;
+}
+
+static void hyperv_receive(void *ctx)
+{
+ struct hv_device *hdev = ctx;
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct synthvid_msg *recv_buf;
+ u32 bytes_recvd;
+ u64 req_id;
+ int ret;
+
+ if (!hv)
+ return;
+
+ recv_buf = (struct synthvid_msg *)hv->recv_buf;
+
+ do {
+ ret = vmbus_recvpacket(hdev->channel, recv_buf,
+ VMBUS_MAX_PACKET_SIZE,
+ &bytes_recvd, &req_id);
+ if (bytes_recvd > 0 &&
+ recv_buf->pipe_hdr.type == PIPE_MSG_DATA)
+ hyperv_receive_sub(hdev);
+ } while (bytes_recvd > 0 && ret == 0);
+}
+
+int hyperv_connect_vsp(struct hv_device *hdev)
+{
+ struct hyperv_drm_device *hv = hv_get_drvdata(hdev);
+ struct drm_device *dev = &hv->dev;
+ int ret;
+
+ ret = vmbus_open(hdev->channel, VMBUS_RING_BUFSIZE, VMBUS_RING_BUFSIZE,
+ NULL, 0, hyperv_receive, hdev);
+ if (ret) {
+ drm_err(dev, "Unable to open vmbus channel\n");
+ return ret;
+ }
+
+ /* Negotiate the protocol version with host */
+ switch (vmbus_proto_version) {
+ case VERSION_WIN10:
+ case VERSION_WIN10_V5:
+ ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
+ if (!ret)
+ break;
+ fallthrough;
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
+ ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN8);
+ if (!ret)
+ break;
+ fallthrough;
+ case VERSION_WS2008:
+ case VERSION_WIN7:
+ ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN7);
+ break;
+ default:
+ ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10);
+ break;
+ }
+
+ if (ret) {
+ drm_err(dev, "Synthetic video device version not accepted %d\n", ret);
+ goto error;
+ }
+
+ if (hv->synthvid_version == SYNTHVID_VERSION_WIN7)
+ hv->screen_depth = SYNTHVID_DEPTH_WIN7;
+ else
+ hv->screen_depth = SYNTHVID_DEPTH_WIN8;
+
+ if (hyperv_version_ge(hv->synthvid_version, SYNTHVID_VERSION_WIN10)) {
+ ret = hyperv_get_supported_resolution(hdev);
+ if (ret)
+ drm_err(dev, "Failed to get supported resolution from host, use default\n");
+ } else {
+ hv->screen_width_max = SYNTHVID_WIDTH_MAX_WIN7;
+ hv->screen_height_max = SYNTHVID_HEIGHT_MAX_WIN7;
+ }
+
+ hv->mmio_megabytes = hdev->channel->offermsg.offer.mmio_megabytes;
+
+ return 0;
+
+error:
+ vmbus_close(hdev->channel);
+ return ret;
+}
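
The version words above pack the minor revision into the high half and the major into the low half; a self-contained userspace check of that packing, with the macros restated purely for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
    #define SYNTHVID_VER_GET_MAJOR(ver)    ((ver) & 0x0000ffff)
    #define SYNTHVID_VER_GET_MINOR(ver)    (((ver) & 0xffff0000) >> 16)

    int main(void)
    {
            uint32_t win10 = SYNTHVID_VERSION(3, 5); /* 0x00050003 */

            assert(win10 == 0x00050003);
            assert(SYNTHVID_VER_GET_MAJOR(win10) == 3);
            assert(SYNTHVID_VER_GET_MINOR(win10) == 5);
            return 0;
    }
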
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 69f57ca9c68d..93f4d059fc89 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -102,7 +102,6 @@ config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
depends on 64BIT
- depends on VFIO_MDEV=y || VFIO_MDEV=DRM_I915
default n
help
Choose this option if you want to enable Intel GVT-g graphics
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index c8e8422359b3..362bff9beb5c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -11136,7 +11136,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret < 0)
goto unpin_fb;
- fence = dma_resv_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_unlocked(obj->base.resv);
if (fence) {
add_rps_boost_after_vblank(new_plane_state->hw.crtc,
fence);
@@ -11763,11 +11763,20 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
+ struct drm_i915_private *i915;
obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
if (!obj)
return ERR_PTR(-ENOENT);
+ /* on discrete, framebuffer objects must be backed by LMEM */
+ i915 = to_i915(obj->base.dev);
+ if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ return ERR_PTR(-EREMOTE);
+ }
+
fb = intel_framebuffer_create(obj, &mode_cmd);
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 332d2f9fda5c..b170e272bdee 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -963,8 +963,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
intel_dp_create_fake_mst_encoders(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
&intel_dp->aux, 16, 3,
- (u8)dig_port->max_lanes,
- drm_dp_link_rate_to_bw_code(max_source_rate),
+ dig_port->max_lanes,
+ max_source_rate,
conn_base_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index ccd00e65a5fe..4af40229f5ec 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -41,6 +41,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include "gem/i915_gem_lmem.h"
+
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fbdev.h"
@@ -137,14 +139,22 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
size = PAGE_ALIGN(size);
- /* If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features. */
obj = ERR_PTR(-ENODEV);
- if (size * 2 < dev_priv->stolen_usable_size)
- obj = i915_gem_object_create_stolen(dev_priv, size);
- if (IS_ERR(obj))
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ if (HAS_LMEM(dev_priv)) {
+ obj = i915_gem_object_create_lmem(dev_priv, size,
+ I915_BO_ALLOC_CONTIGUOUS);
+ } else {
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (size * 2 < dev_priv->stolen_usable_size)
+ obj = i915_gem_object_create_stolen(dev_priv, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_shmem(dev_priv, size);
+ }
+
if (IS_ERR(obj)) {
drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
return PTR_ERR(obj);
@@ -178,6 +188,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
unsigned long flags = 0;
bool prealloc = false;
void __iomem *vaddr;
+ struct drm_i915_gem_object *obj;
int ret;
if (intel_fb &&
@@ -232,13 +243,27 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = ggtt->gmadr.start;
- info->apertures->ranges[0].size = ggtt->mappable_end;
+ obj = intel_fb_obj(&intel_fb->base);
+ if (i915_gem_object_is_lmem(obj)) {
+ struct intel_memory_region *mem = obj->mm.region;
+
+ info->apertures->ranges[0].base = mem->io_start;
+ info->apertures->ranges[0].size = mem->total;
+
+ /* Use fbdev's framebuffer from lmem for discrete */
+ info->fix.smem_start =
+ (unsigned long)(mem->io_start +
+ i915_gem_object_get_dma_address(obj, 0));
+ info->fix.smem_len = obj->base.size;
+ } else {
+ info->apertures->ranges[0].base = ggtt->gmadr.start;
+ info->apertures->ranges[0].size = ggtt->mappable_end;
- /* Our framebuffer is the entirety of fbdev's system memory */
- info->fix.smem_start =
- (unsigned long)(ggtt->gmadr.start + vma->node.start);
- info->fix.smem_len = vma->node.size;
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + vma->node.start);
+ info->fix.smem_len = vma->node.size;
+ }
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 8161d49e78ba..8e75debcce1a 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -211,7 +211,6 @@ static int frontbuffer_active(struct i915_active *ref)
return 0;
}
-__i915_active_call
static void frontbuffer_retire(struct i915_active *ref)
{
struct intel_frontbuffer *front =
@@ -266,7 +265,8 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
atomic_set(&front->bits, 0);
i915_active_init(&front->write,
frontbuffer_active,
- i915_active_may_sleep(frontbuffer_retire));
+ frontbuffer_retire,
+ I915_ACTIVE_RETIRE_SLEEPS);
spin_lock(&i915->fb_tracking.lock);
if (rcu_access_pointer(obj->frontbuffer)) {
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 46cba12be888..7e3f5c6ca484 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -384,8 +384,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
i830_overlay_clock_gating(dev_priv, true);
}
-__i915_active_call static void
-intel_overlay_last_flip_retire(struct i915_active *active)
+static void intel_overlay_last_flip_retire(struct i915_active *active)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
@@ -1402,7 +1401,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->saturation = 146;
i915_active_init(&overlay->last_flip,
- NULL, intel_overlay_last_flip_retire);
+ NULL, intel_overlay_last_flip_retire, 0);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret)
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
index 9e508e7d4629..7df91b7e4ca8 100644
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ b/drivers/gpu/drm/i915/dma_resv_utils.c
@@ -10,7 +10,7 @@
void dma_resv_prune(struct dma_resv *resv)
{
if (dma_resv_trylock(resv)) {
- if (dma_resv_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled(resv, true))
dma_resv_add_excl_fence(resv, NULL);
dma_resv_unlock(resv);
}
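
The dma_resv churn threaded through this series is a mechanical rename; the mapping, as far as these hunks show:

    /*
     * dma_resv_get_excl_rcu(resv)         ->  dma_resv_get_excl_unlocked(resv)
     * dma_resv_test_signaled_rcu(resv, a) ->  dma_resv_test_signaled(resv, a)
     * rcu_dereference(resv->fence_excl)   ->  dma_resv_excl_fence(resv)
     * rcu_dereference(resv->fence)        ->  dma_resv_shared_list(resv)
     */
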
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 25235ef630c1..6234e17259c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !dma_resv_test_signaled_rcu(obj->resv, true);
+ * !dma_resv_test_signaled(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
@@ -113,11 +113,10 @@ retry:
seq = raw_read_seqcount(&obj->base.resv->seq);
/* Translate the exclusive fence to the READ *and* WRITE engine */
- args->busy =
- busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+ args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
/* Translate shared fences to READ set of engines */
- list = rcu_dereference(obj->base.resv->fence);
+ list = dma_resv_shared_list(obj->base.resv);
if (list) {
unsigned int shared_count = list->shared_count, i;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index fd8ee52e17a4..188dee13e017 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1046,7 +1046,6 @@ struct context_barrier_task {
void *data;
};
-__i915_active_call
static void cb_retire(struct i915_active *base)
{
struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
@@ -1080,7 +1079,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (!cb)
return -ENOMEM;
- i915_active_init(&cb->base, NULL, cb_retire);
+ i915_active_init(&cb->base, NULL, cb_retire, 0);
err = i915_active_acquire(&cb->base);
if (err) {
kfree(cb);
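
The i915_active_init() change repeated across these hunks swaps annotation-based retire attributes for an explicit flags argument; schematically, based only on the call sites shown above:

    /* before: attributes encoded via __i915_active_call / i915_active_may_sleep() */
    i915_active_init(&front->write, frontbuffer_active,
                     i915_active_may_sleep(frontbuffer_retire));

    /* after: plain retire callback plus a flags word (0 if none apply) */
    i915_active_init(&front->write, frontbuffer_active,
                     frontbuffer_retire, I915_ACTIVE_RETIRE_SLEEPS);
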
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 45d60e3d98e3..548ddf39d853 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -4,44 +4,102 @@
*/
#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_user_extensions.h"
+
+static u32 object_max_page_size(struct drm_i915_gem_object *obj)
+{
+ u32 max_page_size = 0;
+ int i;
+
+ for (i = 0; i < obj->mm.n_placements; i++) {
+ struct intel_memory_region *mr = obj->mm.placements[i];
+
+ GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
+ max_page_size = max_t(u32, max_page_size, mr->min_page_size);
+ }
+
+ GEM_BUG_ON(!max_page_size);
+ return max_page_size;
+}
+
+static void object_set_placements(struct drm_i915_gem_object *obj,
+ struct intel_memory_region **placements,
+ unsigned int n_placements)
+{
+ GEM_BUG_ON(!n_placements);
+
+ /*
+ * For the common case of one memory region, skip storing an
+ * allocated array and just point at the region directly.
+ */
+ if (n_placements == 1) {
+ struct intel_memory_region *mr = placements[0];
+ struct drm_i915_private *i915 = mr->i915;
+
+ obj->mm.placements = &i915->mm.regions[mr->id];
+ obj->mm.n_placements = 1;
+ } else {
+ obj->mm.placements = placements;
+ obj->mm.n_placements = n_placements;
+ }
+}
+
+static int i915_gem_publish(struct drm_i915_gem_object *obj,
+ struct drm_file *file,
+ u64 *size_p,
+ u32 *handle_p)
+{
+ u64 size = obj->base.size;
+ int ret;
+
+ ret = drm_gem_handle_create(file, &obj->base, handle_p);
+ /* drop reference from allocate - handle holds it now */
+ i915_gem_object_put(obj);
+ if (ret)
+ return ret;
+
+ *size_p = size;
+ return 0;
+}
static int
-i915_gem_create(struct drm_file *file,
- struct intel_memory_region *mr,
- u64 *size_p,
- u32 *handle_p)
+i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
{
- struct drm_i915_gem_object *obj;
- u32 handle;
- u64 size;
+ struct intel_memory_region *mr = obj->mm.placements[0];
+ unsigned int flags;
int ret;
- GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
- size = round_up(*size_p, mr->min_page_size);
+ size = round_up(size, object_max_page_size(obj));
if (size == 0)
return -EINVAL;
/* For most of the ABI (e.g. mmap) we think in system pages */
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
- /* Allocate the new object */
- obj = i915_gem_object_create_region(mr, size, 0);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (i915_gem_object_size_2big(size))
+ return -E2BIG;
- GEM_BUG_ON(size != obj->base.size);
+ /*
+ * For now resort to CPU based clearing for device local-memory, in the
+ * near future this will use the blitter engine for accelerated, GPU
+ * based clearing.
+ */
+ flags = 0;
+ if (mr->type == INTEL_MEMORY_LOCAL)
+ flags = I915_BO_ALLOC_CPU_CLEAR;
- ret = drm_gem_handle_create(file, &obj->base, &handle);
- /* drop reference from allocate - handle holds it now */
- i915_gem_object_put(obj);
+ ret = mr->ops->init_object(mr, obj, size, flags);
if (ret)
return ret;
- *handle_p = handle;
- *size_p = size;
+ GEM_BUG_ON(size != obj->base.size);
+
+ trace_i915_gem_object_create(obj);
return 0;
}
@@ -50,9 +108,12 @@ i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mr;
enum intel_memory_type mem_type;
int cpp = DIV_ROUND_UP(args->bpp, 8);
u32 format;
+ int ret;
switch (cpp) {
case 1:
@@ -85,10 +146,22 @@ i915_gem_dumb_create(struct drm_file *file,
if (HAS_LMEM(to_i915(dev)))
mem_type = INTEL_MEMORY_LOCAL;
- return i915_gem_create(file,
- intel_memory_region_by_type(to_i915(dev),
- mem_type),
- &args->size, &args->handle);
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return -ENOMEM;
+
+ mr = intel_memory_region_by_type(to_i915(dev), mem_type);
+ object_set_placements(obj, &mr, 1);
+
+ ret = i915_gem_setup(obj, args->size);
+ if (ret)
+ goto object_free;
+
+ return i915_gem_publish(obj, file, &args->size, &args->handle);
+
+object_free:
+ i915_gem_object_free(obj);
+ return ret;
}
/**
@@ -103,11 +176,229 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_create *args = data;
+ struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mr;
+ int ret;
i915_gem_flush_free_objects(i915);
- return i915_gem_create(file,
- intel_memory_region_by_type(i915,
- INTEL_MEMORY_SYSTEM),
- &args->size, &args->handle);
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return -ENOMEM;
+
+ mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
+ object_set_placements(obj, &mr, 1);
+
+ ret = i915_gem_setup(obj, args->size);
+ if (ret)
+ goto object_free;
+
+ return i915_gem_publish(obj, file, &args->size, &args->handle);
+
+object_free:
+ i915_gem_object_free(obj);
+ return ret;
+}
+
+struct create_ext {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *vanilla_object;
+};
+
+static void repr_placements(char *buf, size_t size,
+ struct intel_memory_region **placements,
+ int n_placements)
+{
+ int i;
+
+ buf[0] = '\0';
+
+ for (i = 0; i < n_placements; i++) {
+ struct intel_memory_region *mr = placements[i];
+ int r;
+
+ r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }",
+ mr->name, mr->type, mr->instance);
+ if (r >= size)
+ return;
+
+ buf += r;
+ size -= r;
+ }
+}
+
+static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
+ struct create_ext *ext_data)
+{
+ struct drm_i915_private *i915 = ext_data->i915;
+ struct drm_i915_gem_memory_class_instance __user *uregions =
+ u64_to_user_ptr(args->regions);
+ struct drm_i915_gem_object *obj = ext_data->vanilla_object;
+ struct intel_memory_region **placements;
+ u32 mask;
+ int i, ret = 0;
+
+ if (args->pad) {
+ drm_dbg(&i915->drm, "pad should be zero\n");
+ ret = -EINVAL;
+ }
+
+ if (!args->num_regions) {
+ drm_dbg(&i915->drm, "num_regions is zero\n");
+ ret = -EINVAL;
+ }
+
+ if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
+ drm_dbg(&i915->drm, "num_regions is too large\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ placements = kmalloc_array(args->num_regions,
+ sizeof(struct intel_memory_region *),
+ GFP_KERNEL);
+ if (!placements)
+ return -ENOMEM;
+
+ mask = 0;
+ for (i = 0; i < args->num_regions; i++) {
+ struct drm_i915_gem_memory_class_instance region;
+ struct intel_memory_region *mr;
+
+ if (copy_from_user(&region, uregions, sizeof(region))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ mr = intel_memory_region_lookup(i915,
+ region.memory_class,
+ region.memory_instance);
+ if (!mr || mr->private) {
+ drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
+ region.memory_class, region.memory_instance, i);
+ ret = -EINVAL;
+ goto out_dump;
+ }
+
+ if (mask & BIT(mr->id)) {
+ drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
+ mr->name, region.memory_class,
+ region.memory_instance, i);
+ ret = -EINVAL;
+ goto out_dump;
+ }
+
+ placements[i] = mr;
+ mask |= BIT(mr->id);
+
+ ++uregions;
+ }
+
+ if (obj->mm.placements) {
+ ret = -EINVAL;
+ goto out_dump;
+ }
+
+ object_set_placements(obj, placements, args->num_regions);
+ if (args->num_regions == 1)
+ kfree(placements);
+
+ return 0;
+
+out_dump:
+ if (1) {
+ char buf[256];
+
+ if (obj->mm.placements) {
+ repr_placements(buf,
+ sizeof(buf),
+ obj->mm.placements,
+ obj->mm.n_placements);
+ drm_dbg(&i915->drm,
+ "Placements were already set in previous EXT. Existing placements: %s\n",
+ buf);
+ }
+
+ repr_placements(buf, sizeof(buf), placements, i);
+ drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
+ }
+
+out_free:
+ kfree(placements);
+ return ret;
+}
+
+static int ext_set_placements(struct i915_user_extension __user *base,
+ void *data)
+{
+ struct drm_i915_gem_create_ext_memory_regions ext;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM))
+ return -ENODEV;
+
+ if (copy_from_user(&ext, base, sizeof(ext)))
+ return -EFAULT;
+
+ return set_placements(&ext, data);
+}
+
+static const i915_user_extension_fn create_extensions[] = {
+ [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
+};
+
+/**
+ * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ */
+int
+i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_create_ext *args = data;
+ struct create_ext ext_data = { .i915 = i915 };
+ struct intel_memory_region **placements_ext;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (args->flags)
+ return -EINVAL;
+
+ i915_gem_flush_free_objects(i915);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return -ENOMEM;
+
+ ext_data.vanilla_object = obj;
+ ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ create_extensions,
+ ARRAY_SIZE(create_extensions),
+ &ext_data);
+ placements_ext = obj->mm.placements;
+ if (ret)
+ goto object_free;
+
+ if (!placements_ext) {
+ struct intel_memory_region *mr =
+ intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
+
+ object_set_placements(obj, &mr, 1);
+ }
+
+ ret = i915_gem_setup(obj, args->size);
+ if (ret)
+ goto object_free;
+
+ return i915_gem_publish(obj, file, &args->size, &args->handle);
+
+object_free:
+ if (obj->mm.n_placements > 1)
+ kfree(placements_ext);
+ i915_gem_object_free(obj);
+ return ret;
}
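
From userspace, the new ioctl is driven through an extension chain; a rough sketch, assuming the uAPI names this series adds to i915_drm.h (error handling omitted, fd plumbing hypothetical):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int create_lmem_bo(int fd, uint64_t size, uint32_t *handle)
    {
            struct drm_i915_gem_memory_class_instance region = {
                    .memory_class = I915_MEMORY_CLASS_DEVICE, /* device local-memory */
                    .memory_instance = 0,
            };
            struct drm_i915_gem_create_ext_memory_regions regions = {
                    .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
                    .num_regions = 1,
                    .regions = (uintptr_t)&region,
            };
            struct drm_i915_gem_create_ext create = {
                    .size = size,
                    .extensions = (uintptr_t)&regions,
            };
            int ret;

            ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
            if (ret == 0)
                    *handle = create.handle;
            return ret;
    }
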
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 297143511f99..66789111a24b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
if (DBG_FORCE_RELOC)
return false;
- return !dma_resv_test_signaled_rcu(vma->resv, true);
+ return !dma_resv_test_signaled(vma->resv, true);
}
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index 7fd22f3efbef..28d6526e32ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -14,6 +14,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index ce1c83c13d05..f44bdd08f7cb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -17,9 +17,27 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.release = i915_gem_object_release_memory_region,
};
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+ resource_size_t offset;
+
+ GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
- return obj->ops == &i915_gem_lmem_obj_ops;
+ struct intel_memory_region *mr = obj->mm.region;
+
+ return mr && (mr->type == INTEL_MEMORY_LOCAL ||
+ mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 036d53c01de9..fac6bc5a5ebb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,6 +14,11 @@ struct intel_memory_region;
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size);
+
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ea74cbca95be..28144410df86 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -249,6 +249,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (obj->ops->release)
obj->ops->release(obj);
+ if (obj->mm.n_placements > 1)
+ kfree(obj->mm.placements);
+
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
cond_resched();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 2ebd79537aea..7c0eb425cb3b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -500,7 +500,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
struct dma_fence *fence;
rcu_read_lock();
- fence = dma_resv_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_unlocked(obj->base.resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 8e485cb3343c..0727d0c76aa0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -172,11 +172,13 @@ struct drm_i915_gem_object {
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
+#define I915_BO_ALLOC_CPU_CLEAR BIT(3)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
- I915_BO_ALLOC_STRUCT_PAGE)
-#define I915_BO_READONLY BIT(3)
-#define I915_TILING_QUIRK_BIT 4 /* unknown swizzling; do not release! */
+ I915_BO_ALLOC_STRUCT_PAGE | \
+ I915_BO_ALLOC_CPU_CLEAR)
+#define I915_BO_READONLY BIT(4)
+#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
/*
* Is the object to be mapped as read-only to the GPU
@@ -220,6 +222,12 @@ struct drm_i915_gem_object {
atomic_t shrink_pin;
/**
+ * Priority list of potential placements for this object.
+ */
+ struct intel_memory_region **placements;
+ int n_placements;
+
+ /**
* Memory region for this object.
*/
struct intel_memory_region *region;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index aed8a37ccdc9..7361971c177d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_set_tiling_quirk(obj);
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
+ atomic_inc(&obj->mm.shrink_pin);
shrinkable = false;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 81dc2bf59bc3..51a05e62875d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -8,7 +8,6 @@
#include <linux/shmem_fs.h>
#include <linux/swap.h>
-#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 6a84fb6dde24..ce8fcfc54079 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -95,6 +95,28 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
sg_mark_end(sg);
i915_sg_trim(st);
+ /* Intended for kernel internal use only */
+ if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
+ struct scatterlist *sg;
+ unsigned long i;
+
+ for_each_sg(st->sgl, sg, st->nents, i) {
+ unsigned int length;
+ void __iomem *vaddr;
+ dma_addr_t daddr;
+
+ daddr = sg_dma_address(sg);
+ daddr -= mem->region.start;
+ length = sg_dma_len(sg);
+
+ vaddr = io_mapping_map_wc(&mem->iomap, daddr, length);
+ memset64((void __force *)vaddr, 0, length / sizeof(u64));
+ io_mapping_unmap(vaddr);
+ }
+
+ wmb();
+ }
+
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 4f9c8d3021ab..f4fb68e8955a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -38,15 +38,17 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
}
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
- unsigned long shrink)
+ unsigned long shrink, bool trylock_vm)
{
unsigned long flags;
flags = 0;
if (shrink & I915_SHRINK_ACTIVE)
- flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+ flags |= I915_GEM_OBJECT_UNBIND_ACTIVE;
if (!(shrink & I915_SHRINK_BOUND))
- flags = I915_GEM_OBJECT_UNBIND_TEST;
+ flags |= I915_GEM_OBJECT_UNBIND_TEST;
+ if (trylock_vm)
+ flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK;
if (i915_gem_object_unbind(obj, flags) == 0)
return true;
@@ -117,6 +119,9 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
unsigned long scanned = 0;
int err;
+ /* CHV + VTD workaround uses stop_machine(); need to trylock vm->mutex */
+ bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
+
trace_i915_gem_shrink(i915, target, shrink);
/*
@@ -204,7 +209,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
err = 0;
- if (unsafe_drop_pages(obj, shrink)) {
+ if (unsafe_drop_pages(obj, shrink, trylock_vm)) {
/* May arrive from get_pages on another bo */
if (!ww) {
if (!i915_gem_object_trylock(obj))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index b0597de206de..b5553fc3ac4d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
#include <drm/drm_mm.h>
#include <drm/i915_drm.h>
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
@@ -122,6 +123,14 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
}
/*
+ * With stolen lmem, we don't need to check if the address range
+ * overlaps with the non-stolen system memory range, since lmem is local
+ * to the gpu.
+ */
+ if (HAS_LMEM(i915))
+ return 0;
+
+ /*
* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
@@ -374,8 +383,9 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
}
-static int i915_gem_init_stolen(struct drm_i915_private *i915)
+static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
+ struct drm_i915_private *i915 = mem->i915;
struct intel_uncore *uncore = &i915->uncore;
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -396,10 +406,10 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
return 0;
}
- if (resource_size(&intel_graphics_stolen_res) == 0)
+ if (resource_size(&mem->region) == 0)
return 0;
- i915->dsm = intel_graphics_stolen_res;
+ i915->dsm = mem->region;
if (i915_adjust_stolen(i915, &i915->dsm))
return 0;
@@ -627,10 +637,17 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
{
static struct lock_class_key lock_class;
unsigned int cache_level;
+ unsigned int flags;
int err;
+ /*
+ * Stolen objects are always physically contiguous since we just
+ * allocate one big block underneath using the drm_mm range allocator.
+ */
+ flags = I915_BO_ALLOC_CONTIGUOUS;
+
drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
- i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
@@ -640,9 +657,11 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
if (WARN_ON(!i915_gem_object_trylock(obj)))
return -EBUSY;
+ i915_gem_object_init_memory_region(obj, mem);
+
err = i915_gem_object_pin_pages(obj);
- if (!err)
- i915_gem_object_init_memory_region(obj, mem);
+ if (err)
+ i915_gem_object_release_memory_region(obj);
i915_gem_object_unlock(obj);
return err;
@@ -667,7 +686,8 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
if (!stolen)
return -ENOMEM;
- ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
+ ret = i915_gem_stolen_insert_node(i915, stolen, size,
+ mem->min_page_size);
if (ret)
goto err_free;
@@ -688,39 +708,126 @@ struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size)
{
- return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN_SMEM],
- size, I915_BO_ALLOC_CONTIGUOUS);
+ return i915_gem_object_create_region(i915->mm.stolen_region, size, 0);
}
-static int init_stolen(struct intel_memory_region *mem)
+static int init_stolen_smem(struct intel_memory_region *mem)
{
- intel_memory_region_set_name(mem, "stolen");
-
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
- return i915_gem_init_stolen(mem->i915);
+ return i915_gem_init_stolen(mem);
}
-static void release_stolen(struct intel_memory_region *mem)
+static void release_stolen_smem(struct intel_memory_region *mem)
{
i915_gem_cleanup_stolen(mem->i915);
}
-static const struct intel_memory_region_ops i915_region_stolen_ops = {
- .init = init_stolen,
- .release = release_stolen,
+static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
+ .init = init_stolen_smem,
+ .release = release_stolen_smem,
.init_object = _i915_gem_object_stolen_init,
};
-struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
+static int init_stolen_lmem(struct intel_memory_region *mem)
{
- return intel_memory_region_create(i915,
- intel_graphics_stolen_res.start,
- resource_size(&intel_graphics_stolen_res),
- PAGE_SIZE, 0,
- &i915_region_stolen_ops);
+ int err;
+
+ if (GEM_WARN_ON(resource_size(&mem->region) == 0))
+ return -ENODEV;
+
+ if (!io_mapping_init_wc(&mem->iomap,
+ mem->io_start,
+ resource_size(&mem->region)))
+ return -EIO;
+
+ /*
+ * TODO: For stolen lmem we mostly just care about populating the
+ * dsm-related bits and setting up the drm_mm allocator for the range.
+ * Perhaps split up i915_gem_init_stolen() for this.
+ */
+ err = i915_gem_init_stolen(mem);
+ if (err)
+ goto err_fini;
+
+ return 0;
+
+err_fini:
+ io_mapping_fini(&mem->iomap);
+ return err;
+}
+
+static void release_stolen_lmem(struct intel_memory_region *mem)
+{
+ io_mapping_fini(&mem->iomap);
+ i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
+ .init = init_stolen_lmem,
+ .release = release_stolen_lmem,
+ .init_object = _i915_gem_object_stolen_init,
+};
+
+struct intel_memory_region *
+i915_gem_stolen_lmem_setup(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct intel_memory_region *mem;
+ resource_size_t io_start;
+ resource_size_t lmem_size;
+ u64 lmem_base;
+
+ lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
+ if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2)))
+ return ERR_PTR(-ENODEV);
+
+ lmem_size = pci_resource_len(pdev, 2) - lmem_base;
+ io_start = pci_resource_start(pdev, 2) + lmem_base;
+
+ mem = intel_memory_region_create(i915, lmem_base, lmem_size,
+ I915_GTT_PAGE_SIZE_4K, io_start,
+ &i915_region_stolen_lmem_ops);
+ if (IS_ERR(mem))
+ return mem;
+
+ /*
+ * TODO: consider creating common helper to just print all the
+ * interesting stuff from intel_memory_region, which we can use for all
+ * our probed regions.
+ */
+
+ drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
+ &mem->io_start);
+
+ intel_memory_region_set_name(mem, "stolen-local");
+
+ mem->private = true;
+
+ return mem;
+}
+
+struct intel_memory_region*
+i915_gem_stolen_smem_setup(struct drm_i915_private *i915)
+{
+ struct intel_memory_region *mem;
+
+ mem = intel_memory_region_create(i915,
+ intel_graphics_stolen_res.start,
+ resource_size(&intel_graphics_stolen_res),
+ PAGE_SIZE, 0,
+ &i915_region_stolen_smem_ops);
+ if (IS_ERR(mem))
+ return mem;
+
+ intel_memory_region_set_name(mem, "stolen-system");
+
+ mem->private = true;
+
+ return mem;
}
struct drm_i915_gem_object *
@@ -728,7 +835,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
resource_size_t stolen_offset,
resource_size_t size)
{
- struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN_SMEM];
+ struct intel_memory_region *mem = i915->mm.stolen_region;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -742,8 +849,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
/* KISS and expect everything to be page-aligned */
if (GEM_WARN_ON(size == 0) ||
- GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
- GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
+ GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
+ GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size)))
return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
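The probe above derives the stolen-local region entirely from GEN12_DSMBASE and PCI BAR 2: the register holds the offset of stolen memory within LMEM, everything from that offset to the end of the BAR belongs to the region, and the CPU-visible window starts at the same offset into the BAR. A minimal userspace model of that arithmetic, with made-up register and BAR values standing in for real hardware state:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical values standing in for GEN12_DSMBASE and BAR 2. */
        uint64_t dsm_base = 0x70000000ull;     /* stolen offset within LMEM */
        uint64_t bar2_start = 0x4000000000ull; /* CPU address of BAR 2 */
        uint64_t bar2_len = 0x100000000ull;    /* 4 GiB of LMEM behind BAR 2 */

        /* Mirrors the GEM_WARN_ON(): stolen must live inside the BAR. */
        assert(dsm_base < bar2_len);

        uint64_t lmem_size = bar2_len - dsm_base; /* spans to end of LMEM */
        uint64_t io_start = bar2_start + dsm_base;

        printf("stolen-local: base=%#llx size=%#llx io=%#llx\n",
               (unsigned long long)dsm_base,
               (unsigned long long)lmem_size,
               (unsigned long long)io_start);
        return 0;
    }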
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index b03489706796..2bec6c367b9c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -21,7 +21,8 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915);
+struct intel_memory_region *i915_gem_stolen_smem_setup(struct drm_i915_private *i915);
+struct intel_memory_region *i915_gem_stolen_lmem_setup(struct drm_i915_private *i915);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index a657b99ec760..b5cbbe659a77 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
return true;
/* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(obj->base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 4b9856d5ba14..1e97520c62b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -73,7 +73,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
*/
prune_fences = count && timeout >= 0;
} else {
- excl = dma_resv_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_unlocked(resv);
}
if (excl && timeout >= 0)
@@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int count, i;
int ret;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+ &shared);
if (ret)
return ret;
@@ -170,7 +170,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
kfree(shared);
} else {
- excl = dma_resv_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_unlocked(obj->base.resv);
}
if (excl) {
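The _rcu suffix drop (dma_resv_get_fences(), dma_resv_get_excl_unlocked(), dma_resv_wait_timeout()) is purely a rename; the wait flow these callers implement is unchanged. A toy model of that flow under stated assumptions — the struct layout and fence semantics below are invented for illustration, not the dma-buf API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins for dma_fence / dma_resv; all names are hypothetical. */
    struct fence { long remaining; };          /* time this fence still needs */
    struct resv { struct fence *excl; struct fence shared[2]; int nshared; };

    static long fence_wait(struct fence *f, long timeout)
    {
        if (!f || f->remaining <= 0)
            return timeout;                    /* already signaled */
        if (timeout < f->remaining)
            return 0;                          /* timed out */
        return timeout - f->remaining;         /* time left over */
    }

    /* Mirrors i915_gem_object_wait_reservation() after the rename: snapshot
     * shared fences when waiting for all, else only the exclusive one, and
     * thread the remaining timeout through each wait. */
    static long wait_reservation(struct resv *r, bool wait_all, long timeout)
    {
        if (wait_all) {
            for (int i = 0; i < r->nshared && timeout > 0; i++)
                timeout = fence_wait(&r->shared[i], timeout);
        }
        if (timeout > 0)
            timeout = fence_wait(r->excl, timeout);
        return timeout;                        /* 0 means timed out */
    }

    int main(void)
    {
        struct fence e = { 10 };
        struct resv r = { &e, { { 5 }, { 3 } }, 2 };

        printf("left=%ld\n", wait_reservation(&r, true, 25)); /* 25-5-3-10 = 7 */
        return 0;
    }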
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 5fef592390cb..ce70d0a3afb2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1740,7 +1740,6 @@ out:
static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
{
struct i915_address_space *vm;
- struct page *page;
u32 *vaddr;
int err = 0;
@@ -1748,24 +1747,18 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
if (!vm)
return -ENODEV;
- page = __px_page(vm->scratch[0]);
- if (!page) {
+ if (!vm->scratch[0]) {
pr_err("No scratch page!\n");
return -EINVAL;
}
- vaddr = kmap(page);
- if (!vaddr) {
- pr_err("No (mappable) scratch page!\n");
- return -EINVAL;
- }
+ vaddr = __px_vaddr(vm->scratch[0]);
memcpy(out, vaddr, sizeof(*out));
if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
pr_err("Inconsistent initial state of scratch page!\n");
err = -EINVAL;
}
- kunmap(page);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5cf6df49c333..05a3b29f545e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -842,6 +842,24 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
return true;
}
+static void object_set_placements(struct drm_i915_gem_object *obj,
+ struct intel_memory_region **placements,
+ unsigned int n_placements)
+{
+ GEM_BUG_ON(!n_placements);
+
+ if (n_placements == 1) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mr = placements[0];
+
+ obj->mm.placements = &i915->mm.regions[mr->id];
+ obj->mm.n_placements = 1;
+ } else {
+ obj->mm.placements = placements;
+ obj->mm.n_placements = n_placements;
+ }
+}
+
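object_set_placements() above special-cases a single placement by pointing at the driver's canonical per-region slot instead of keeping a caller-owned list, so nothing extra has to be freed on the common path. A self-contained model of that design choice with toy types (all names here are stand-ins):

    #include <assert.h>

    struct region { int id; };
    struct device { struct region *regions[4]; };
    struct object {
        struct device *dev;
        struct region **placements;
        unsigned int n_placements;
    };

    static void set_placements(struct object *obj,
                               struct region **placements, unsigned int n)
    {
        assert(n);
        if (n == 1) {
            /* Borrow the device's canonical slot: no list to free later. */
            obj->placements = &obj->dev->regions[placements[0]->id];
            obj->n_placements = 1;
        } else {
            obj->placements = placements;
            obj->n_placements = n;
        }
    }

    int main(void)
    {
        struct region smem = { 0 };
        struct device dev = { { &smem } };
        struct object obj = { &dev };
        struct region *mr = &smem;

        set_placements(&obj, &mr, 1);
        assert(obj.placements == &dev.regions[0]);
        return 0;
    }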
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
@@ -950,6 +968,8 @@ static int igt_mmap(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
@@ -1068,6 +1088,8 @@ static int igt_mmap_access(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
@@ -1211,6 +1233,8 @@ static int igt_mmap_gpu(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
@@ -1354,6 +1378,8 @@ static int igt_mmap_revoke(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index e08dff376339..1aee5e6b1b23 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -96,9 +96,8 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
* entries back to scratch.
*/
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
memset32(vaddr + pte, scratch_pte, count);
- kunmap_atomic(vaddr);
pte = 0;
}
@@ -120,7 +119,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
GEM_BUG_ON(!pd->entry[act_pt]);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
+ vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
do {
GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -136,12 +135,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
}
if (++act_pte == GEN6_PTES) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
+ vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
act_pte = 0;
}
} while (1);
- kunmap_atomic(vaddr);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
@@ -235,7 +232,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
goto err_scratch0;
}
- ret = pin_pt_dma(vm, vm->scratch[1]);
+ ret = map_pt_dma(vm, vm->scratch[1]);
if (ret)
goto err_scratch1;
@@ -346,7 +343,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
if (!vma)
return ERR_PTR(-ENOMEM);
- i915_active_init(&vma->active, NULL, NULL);
+ i915_active_init(&vma->active, NULL, NULL, 0);
kref_init(&vma->ref);
mutex_init(&vma->pages_mutex);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 74bf6fc8461f..e3a8924d2286 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -242,11 +242,10 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
atomic_read(&pt->used));
GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
memset64(vaddr + gen8_pd_index(start, 0),
vm->scratch[0]->encode,
count);
- kunmap_atomic(vaddr);
atomic_sub(count, &pt->used);
start += count;
@@ -375,7 +374,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
gen8_pte_t *vaddr;
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
@@ -402,12 +401,10 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
}
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
return idx;
}
@@ -442,7 +439,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
encode |= GEN8_PDE_PS_2M;
page_size = I915_GTT_PAGE_SIZE_2M;
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
} else {
struct i915_page_table *pt =
i915_pt_entry(pd, __gen8_pte_index(start, 1));
@@ -457,7 +454,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
maybe_64K = __gen8_pte_index(start, 1);
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
}
do {
@@ -491,7 +488,6 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
} while (rem >= page_size && index < I915_PDES);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
/*
* Is it safe to mark the 2M block as 64K? -- Either we have
@@ -505,9 +501,8 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
!iter->sg && IS_ALIGNED(vma->node.start +
vma->node.size,
I915_GTT_PAGE_SIZE_2M)))) {
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
- kunmap_atomic(vaddr);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
@@ -523,12 +518,11 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
u16 i;
encode = vma->vm->scratch[0]->encode;
- vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+ vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
- kunmap_atomic(vaddr);
}
}
@@ -602,7 +596,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (IS_ERR(obj))
goto free_scratch;
- ret = pin_pt_dma(vm, obj);
+ ret = map_pt_dma(vm, obj);
if (ret) {
i915_gem_object_put(obj);
goto free_scratch;
@@ -639,7 +633,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
if (IS_ERR(pde))
return PTR_ERR(pde);
- err = pin_pt_dma(vm, pde->pt.base);
+ err = map_pt_dma(vm, pde->pt.base);
if (err) {
free_pd(vm, pde);
return err;
@@ -674,7 +668,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm)
goto err_pd;
}
- err = pin_pt_dma(vm, pd->pt.base);
+ err = map_pt_dma(vm, pd->pt.base);
if (err)
goto err_pd;
@@ -717,7 +711,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
*/
ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
- ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+ if (HAS_LMEM(gt->i915))
+ ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
+ else
+ ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
err = gen8_init_scratch(&ppgtt->vm);
if (err)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 17cf2640b082..4033184f13b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -326,7 +326,6 @@ void intel_context_unpin(struct intel_context *ce)
intel_context_put(ce);
}
-__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
@@ -385,7 +384,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
mutex_init(&ce->pin_mutex);
i915_active_init(&ce->active,
- __intel_context_active, __intel_context_retire);
+ __intel_context_active, __intel_context_retire, 0);
}
void intel_context_fini(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 47ee8578e511..8d9184920c51 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -13,8 +13,9 @@
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
-#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
+#include "intel_gt_types.h"
+#include "intel_timeline.h"
#include "intel_workarounds.h"
struct drm_printer;
@@ -262,6 +263,11 @@ void intel_engine_init_active(struct intel_engine_cs *engine,
#define ENGINE_MOCK 1
#define ENGINE_VIRTUAL 2
+static inline bool intel_engine_uses_guc(const struct intel_engine_cs *engine)
+{
+ return engine->gt->submission_method >= INTEL_SUBMISSION_GUC;
+}
+
static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 6dbdbde00f14..3f9a811eb02b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -255,6 +255,11 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u);
}
+static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ GEM_DEBUG_WARN_ON(iir);
+}
+
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
@@ -292,6 +297,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->hw_id = info->hw_id;
engine->guc_id = MAKE_GUC_ID(info->class, info->instance);
+ engine->irq_handler = nop_irq_handler;
+
engine->class = info->class;
engine->instance = info->instance;
__sprint_engine_name(engine);
@@ -898,7 +905,7 @@ static int engine_init_common(struct intel_engine_cs *engine)
return 0;
err_context:
- intel_context_put(ce);
+ destroy_pinned_context(ce);
return ret;
}
@@ -909,12 +916,16 @@ int intel_engines_init(struct intel_gt *gt)
enum intel_engine_id id;
int err;
- if (intel_uc_uses_guc_submission(&gt->uc))
+ if (intel_uc_uses_guc_submission(&gt->uc)) {
+ gt->submission_method = INTEL_SUBMISSION_GUC;
setup = intel_guc_submission_setup;
- else if (HAS_EXECLISTS(gt->i915))
+ } else if (HAS_EXECLISTS(gt->i915)) {
+ gt->submission_method = INTEL_SUBMISSION_ELSP;
setup = intel_execlists_submission_setup;
- else
+ } else {
+ gt->submission_method = INTEL_SUBMISSION_RING;
setup = intel_ring_submission_setup;
+ }
for_each_engine(engine, gt, id) {
err = engine_setup_common(engine);
@@ -1479,7 +1490,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (intel_engine_in_guc_submission_mode(engine)) {
+ if (intel_engine_uses_guc(engine)) {
/* nothing to print yet */
} else if (HAS_EXECLISTS(dev_priv)) {
struct i915_request * const *port, *rq;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 7c9af86fdb1e..47f4397095e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -23,7 +23,7 @@ static void dbg_poison_ce(struct intel_context *ce)
if (ce->state) {
struct drm_i915_gem_object *obj = ce->state->obj;
- int type = i915_coherent_map_type(ce->engine->i915);
+ int type = i915_coherent_map_type(ce->engine->i915, obj, true);
void *map;
if (!i915_gem_object_trylock(obj))
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 883bafc44902..9ef349cd5cea 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -402,6 +402,7 @@ struct intel_engine_cs {
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
void (*irq_enable)(struct intel_engine_cs *engine);
void (*irq_disable)(struct intel_engine_cs *engine);
+ void (*irq_handler)(struct intel_engine_cs *engine, u16 iir);
void (*sanitize)(struct intel_engine_cs *engine);
int (*resume)(struct intel_engine_cs *engine);
@@ -481,10 +482,9 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_HAS_TIMESLICES BIT(4)
-#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5)
-#define I915_ENGINE_IS_VIRTUAL BIT(6)
-#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7)
-#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8)
+#define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
unsigned int flags;
/*
@@ -594,12 +594,6 @@ intel_engine_has_timeslices(const struct intel_engine_cs *engine)
}
static inline bool
-intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
-}
-
-static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_IS_VIRTUAL;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index de124870af44..8db200422950 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -118,6 +118,7 @@
#include "intel_engine_stats.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
+#include "intel_gt_irq.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_lrc.h"
@@ -1768,7 +1769,6 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
*/
GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
!reset_in_progress(execlists));
- GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -2385,6 +2385,45 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
rcu_read_unlock();
}
+static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ bool tasklet = false;
+
+ if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+ u32 eir;
+
+ /* Upper 16b are the enabling mask, rsvd for internal errors */
+ eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
+ ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+ /* Disable the error interrupt until after the reset */
+ if (likely(eir)) {
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, eir);
+ WRITE_ONCE(engine->execlists.error_interrupt, eir);
+ tasklet = true;
+ }
+ }
+
+ if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+ WRITE_ONCE(engine->execlists.yield,
+ ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+ ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+ engine->execlists.yield);
+ if (del_timer(&engine->execlists.timer))
+ tasklet = true;
+ }
+
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
+
+ if (iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_signal_breadcrumbs(engine);
+
+ if (tasklet)
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
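Moving the CS interrupt decode into the execlists backend keeps the per-bit policy next to the code it kicks. A compact, self-contained model of the decode above — the bit positions are hypothetical, not the GT_* register layout:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical bit layout standing in for the GT_*_INTERRUPT bits. */
    #define CS_MASTER_ERROR  (1u << 3)
    #define WAIT_SEMAPHORE   (1u << 11)
    #define CONTEXT_SWITCH   (1u << 8)
    #define RENDER_USER      (1u << 0)

    static bool decode_iir(uint16_t iir, bool timer_was_active)
    {
        bool tasklet = false;

        if (iir & CS_MASTER_ERROR)
            tasklet = true;           /* error path schedules a reset */
        if ((iir & WAIT_SEMAPHORE) && timer_was_active)
            tasklet = true;           /* semaphore yield cancelled the timer */
        if (iir & CONTEXT_SWITCH)
            tasklet = true;           /* CSB needs processing */
        /* RENDER_USER only signals breadcrumbs; no tasklet needed. */
        return tasklet;
    }

    int main(void)
    {
        printf("%d %d\n",
               decode_iir(RENDER_USER, false),                  /* 0 */
               decode_iir(CONTEXT_SWITCH | RENDER_USER, false)); /* 1 */
        return 0;
    }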
static void __execlists_kick(struct intel_engine_execlists *execlists)
{
/* Kick the tasklet for some interrupt coalescing and reset handling */
@@ -3076,29 +3115,6 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = execlists_submit_request;
engine->schedule = i915_schedule;
engine->execlists.tasklet.callback = execlists_submission_tasklet;
-
- engine->reset.prepare = execlists_reset_prepare;
- engine->reset.rewind = execlists_reset_rewind;
- engine->reset.cancel = execlists_reset_cancel;
- engine->reset.finish = execlists_reset_finish;
-
- engine->park = execlists_park;
- engine->unpark = NULL;
-
- engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (!intel_vgpu_active(engine->i915)) {
- engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (can_preempt(engine)) {
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
- if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
- engine->flags |= I915_ENGINE_HAS_TIMESLICES;
- }
- }
-
- if (intel_engine_has_preemption(engine))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen8_emit_bb_start_noarb;
}
static void execlists_shutdown(struct intel_engine_cs *engine)
@@ -3129,6 +3145,14 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
+ engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
+ engine->reset.finish = execlists_reset_finish;
+
+ engine->park = execlists_park;
+ engine->unpark = NULL;
+
engine->emit_flush = gen8_emit_flush_xcs;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
@@ -3149,6 +3173,22 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
* until a more refined solution exists.
*/
}
+ intel_engine_set_irq_handler(engine, execlists_irq_handler);
+
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+ if (!intel_vgpu_active(engine->i915)) {
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (can_preempt(engine)) {
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ }
+ }
+
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
}
static void logical_ring_default_irqs(struct intel_engine_cs *engine)
@@ -3884,13 +3924,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&engine->active.lock, flags);
}
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
- return engine->set_default_submission ==
- execlists_set_default_submission;
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_execlists.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
index fd61dae820e9..4ca9b475e252 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -43,7 +43,4 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
const struct intel_engine_cs *sibling);
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
-
#endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 38742bf33fa3..35069ca5d7de 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -658,7 +658,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
goto err_ppgtt;
i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
i915_gem_object_unlock(ppgtt->vm.scratch[0]);
if (err)
goto err_stash;
@@ -907,9 +907,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
- /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
- if (intel_ggtt_update_needs_vtd_wa(i915) ||
- IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
+ /*
+ * Serialize GTT updates with aperture access on BXT if VT-d is on,
+ * and always on CHV.
+ */
+ if (intel_vm_no_concurrent_access_wa(i915)) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
ggtt->vm.bind_async_flags =
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 8a322594210c..7bf84cd21543 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -653,8 +653,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
- if (intel_uncore_read16(uncore, C0DRB3) ==
- intel_uncore_read16(uncore, C1DRB3)) {
+ if (intel_uncore_read16(uncore, C0DRB3_BW) ==
+ intel_uncore_read16(uncore, C1DRB3_BW)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
@@ -867,7 +867,7 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
- i915_active_init(&fence->active, NULL, NULL);
+ i915_active_init(&fence->active, NULL, NULL, 0);
fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 14e2ffb6c0e5..2694dbb9967e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT*/
/*
- * Copyright � 2003-2018 Intel Corporation
+ * Copyright © 2003-2018 Intel Corporation
*/
#ifndef _INTEL_GPU_COMMANDS_H_
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index c59468107598..aa0a59c5b614 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -98,7 +98,6 @@ static void pool_free_work(struct work_struct *wrk)
round_jiffies_up_relative(HZ));
}
-__i915_active_call
static void pool_retire(struct i915_active *ref)
{
struct intel_gt_buffer_pool_node *node =
@@ -154,7 +153,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz,
node->age = 0;
node->pool = pool;
node->pinned = false;
- i915_active_init(&node->active, NULL, pool_retire);
+ i915_active_init(&node->active, NULL, pool_retire, 0);
obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 9fc6c912a4e5..d29126c458ba 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -20,48 +20,6 @@ static void guc_irq_handler(struct intel_guc *guc, u16 iir)
intel_guc_to_host_event_handler(guc);
}
-static void
-cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
-{
- bool tasklet = false;
-
- if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
- u32 eir;
-
- /* Upper 16b are the enabling mask, rsvd for internal errors */
- eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
- ENGINE_TRACE(engine, "CS error: %x\n", eir);
-
- /* Disable the error interrupt until after the reset */
- if (likely(eir)) {
- ENGINE_WRITE(engine, RING_EMR, ~0u);
- ENGINE_WRITE(engine, RING_EIR, eir);
- WRITE_ONCE(engine->execlists.error_interrupt, eir);
- tasklet = true;
- }
- }
-
- if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
- WRITE_ONCE(engine->execlists.yield,
- ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
- ENGINE_TRACE(engine, "semaphore yield: %08x\n",
- engine->execlists.yield);
- if (del_timer(&engine->execlists.timer))
- tasklet = true;
- }
-
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
- tasklet = true;
-
- if (iir & GT_RENDER_USER_INTERRUPT) {
- intel_engine_signal_breadcrumbs(engine);
- tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
- }
-
- if (tasklet)
- tasklet_hi_schedule(&engine->execlists.tasklet);
-}
-
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit)
@@ -122,7 +80,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
engine = NULL;
if (likely(engine))
- return cs_irq_handler(engine, iir);
+ return intel_engine_cs_irq(engine, iir);
WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
class, instance);
@@ -275,9 +233,12 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+ gt_iir);
+
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ gt_iir);
}
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
@@ -301,11 +262,16 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+ gt_iir);
+
if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ gt_iir >> 12);
+
if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);
+ intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
+ gt_iir >> 22);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -324,10 +290,10 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
iir = raw_reg_read(regs, GEN8_GT_IIR(0));
if (likely(iir)) {
- cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
- iir >> GEN8_RCS_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
- iir >> GEN8_BCS_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
+ iir >> GEN8_RCS_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
+ iir >> GEN8_BCS_IRQ_SHIFT);
raw_reg_write(regs, GEN8_GT_IIR(0), iir);
}
}
@@ -335,10 +301,10 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
iir = raw_reg_read(regs, GEN8_GT_IIR(1));
if (likely(iir)) {
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
- iir >> GEN8_VCS0_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
- iir >> GEN8_VCS1_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ iir >> GEN8_VCS0_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
+ iir >> GEN8_VCS1_IRQ_SHIFT);
raw_reg_write(regs, GEN8_GT_IIR(1), iir);
}
}
@@ -346,8 +312,8 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
if (master_ctl & GEN8_GT_VECS_IRQ) {
iir = raw_reg_read(regs, GEN8_GT_IIR(3));
if (likely(iir)) {
- cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
- iir >> GEN8_VECS_IRQ_SHIFT);
+ intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+ iir >> GEN8_VECS_IRQ_SHIFT);
raw_reg_write(regs, GEN8_GT_IIR(3), iir);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
index f667e976fb2b..41cad38668c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+#include "intel_engine_types.h"
+
struct intel_gt;
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
@@ -39,4 +41,25 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl);
void gen8_gt_irq_reset(struct intel_gt *gt);
void gen8_gt_irq_postinstall(struct intel_gt *gt);
+static inline void intel_engine_cs_irq(struct intel_engine_cs *engine, u16 iir)
+{
+ if (iir)
+ engine->irq_handler(engine, iir);
+}
+
+static inline void
+intel_engine_set_irq_handler(struct intel_engine_cs *engine,
+ void (*fn)(struct intel_engine_cs *engine,
+ u16 iir))
+{
+ /*
+ * As the interrupt is live while we allocate and set up the
+ * engines, err on the side of caution and apply barriers when
+ * updating the irq handler callback. This ensures that once we
+ * use the engine, we will receive interrupts only for ourselves,
+ * and not lose any.
+ */
+ smp_store_mb(engine->irq_handler, fn);
+}
+
#endif /* INTEL_GT_IRQ_H */
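intel_engine_set_irq_handler() publishes the callback with smp_store_mb() so that an interrupt racing with engine setup sees either the nop handler or the final one, never a torn pointer. A userspace approximation using C11 atomics in place of the kernel barrier (the structure here is a toy, not the driver's):

    #include <stdatomic.h>
    #include <stdio.h>

    struct engine;
    typedef void (*irq_fn)(struct engine *, unsigned short);

    struct engine { _Atomic(irq_fn) irq_handler; };

    static void nop_handler(struct engine *e, unsigned short iir)
    {
        (void)e; (void)iir;             /* swallow early interrupts */
    }

    static void real_handler(struct engine *e, unsigned short iir)
    {
        (void)e;
        printf("iir=%#x\n", iir);
    }

    /* A seq_cst store approximates smp_store_mb(): publish, then fence. */
    static void set_irq_handler(struct engine *e, irq_fn fn)
    {
        atomic_store_explicit(&e->irq_handler, fn, memory_order_seq_cst);
    }

    static void engine_cs_irq(struct engine *e, unsigned short iir)
    {
        if (iir)
            atomic_load_explicit(&e->irq_handler,
                                 memory_order_acquire)(e, iir);
    }

    int main(void)
    {
        struct engine e = { nop_handler };  /* default during setup */

        engine_cs_irq(&e, 0x100);           /* handled by the nop */
        set_irq_handler(&e, real_handler);  /* backend takes over */
        engine_cs_irq(&e, 0x100);
        return 0;
    }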
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 0caf6ca0a784..fecfacf551d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -31,6 +31,12 @@ struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;
+enum intel_submission_method {
+ INTEL_SUBMISSION_RING,
+ INTEL_SUBMISSION_ELSP,
+ INTEL_SUBMISSION_GUC,
+};
+
struct intel_gt {
struct drm_i915_private *i915;
struct intel_uncore *uncore;
@@ -118,6 +124,7 @@ struct intel_gt {
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
+ enum intel_submission_method submission_method;
/*
* Default address space (either GGTT or ppGTT depending on arch).
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 941f8af016d6..9b98f9d9faa3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -7,10 +7,26 @@
#include <linux/fault-inject.h>
+#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"
+struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_lmem(vm->i915, sz, 0);
+ /*
+ * Ensure all paging structures for this vm share the same dma-resv
+ * object underneath, with the idea that one object_lock() will lock
+ * them all at once.
+ */
+ if (!IS_ERR(obj))
+ obj->base.resv = &vm->resv;
+ return obj;
+}
+
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
struct drm_i915_gem_object *obj;
@@ -19,33 +35,39 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
i915_gem_shrink_all(vm->i915);
obj = i915_gem_object_create_internal(vm->i915, sz);
- /* ensure all dma objects have the same reservation class */
+ /*
+ * Ensure all paging structures for this vm share the same dma-resv
+ * object underneath, with the idea that one object_lock() will lock
+ * them all at once.
+ */
if (!IS_ERR(obj))
obj->base.resv = &vm->resv;
return obj;
}
-int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ enum i915_map_type type;
+ void *vaddr;
- i915_gem_object_lock(obj, NULL);
- err = i915_gem_object_pin_pages(obj);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
+ type = i915_coherent_map_type(vm->i915, obj, true);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, type);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
-int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ enum i915_map_type type;
+ void *vaddr;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ type = i915_coherent_map_type(vm->i915, obj, true);
+ vaddr = i915_gem_object_pin_map(obj, type);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
@@ -132,7 +154,22 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
*/
mutex_init(&vm->mutex);
lockdep_set_subclass(&vm->mutex, subclass);
- i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+
+ if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
+ i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+ } else {
+ /*
+ * The CHV and BXT VT-d workarounds use stop_machine(),
+ * which is allowed to allocate memory. This means &vm->mutex
+ * is the outer lock, and in theory we can allocate memory inside
+ * it through stop_machine().
+ *
+ * Add the annotation for this; the shrinker uses trylock.
+ */
+ mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
+ might_alloc(GFP_KERNEL);
+ mutex_release(&vm->mutex.dep_map, _THIS_IP_);
+ }
dma_resv_init(&vm->resv);
GEM_BUG_ON(!vm->total);
@@ -155,6 +192,14 @@ void clear_pages(struct i915_vma *vma)
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
+void *__px_vaddr(struct drm_i915_gem_object *p)
+{
+ enum i915_map_type type;
+
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return page_unpack_bits(p->mm.mapping, &type);
+}
+
dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
GEM_BUG_ON(!i915_gem_object_has_pages(p));
@@ -170,32 +215,22 @@ struct page *__px_page(struct drm_i915_gem_object *p)
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
- struct page *page = __px_page(p);
- void *vaddr;
+ void *vaddr = __px_vaddr(p);
- vaddr = kmap(page);
memset64(vaddr, val, count);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap(page);
}
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
- struct sgt_iter sgt;
- struct page *page;
+ void *vaddr = __px_vaddr(scratch);
u8 val;
val = 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
val = POISON_FREE;
- for_each_sgt_page(page, sgt, scratch->mm.pages) {
- void *vaddr;
-
- vaddr = kmap(page);
- memset(vaddr, val, PAGE_SIZE);
- kunmap(page);
- }
+ memset(vaddr, val, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
@@ -225,7 +260,7 @@ int setup_scratch_page(struct i915_address_space *vm)
if (IS_ERR(obj))
goto skip;
- if (pin_pt_dma(vm, obj))
+ if (map_pt_dma(vm, obj))
goto skip_obj;
/* We need a single contiguous page for our scratch */
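__px_vaddr() can simply return p->mm.mapping because the mapping pointer is page-aligned and the map type rides in its low bits; page_unpack_bits() strips and reports them. A small model of that tagging trick, assuming at least 4-byte alignment leaves two bits free (names and encodings are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    enum map_type { MAP_WB = 0, MAP_WC = 1 };  /* hypothetical encoding */
    #define TYPE_MASK 0x3ul                    /* two alignment bits spare */

    static void *pack_bits(void *ptr, unsigned long bits)
    {
        assert(((uintptr_t)ptr & TYPE_MASK) == 0);
        return (void *)((uintptr_t)ptr | bits);
    }

    static void *unpack_bits(void *packed, unsigned long *bits)
    {
        *bits = (uintptr_t)packed & TYPE_MASK;
        return (void *)((uintptr_t)packed & ~TYPE_MASK);
    }

    int main(void)
    {
        void *vaddr = aligned_alloc(4096, 4096); /* stand-in for the map */
        unsigned long type;

        void *mapping = pack_bits(vaddr, MAP_WC); /* as in obj->mm.mapping */
        assert(unpack_bits(mapping, &type) == vaddr && type == MAP_WC);

        free(vaddr);
        return 0;
    }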
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 441644f2506a..50a98ce39f74 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -180,6 +180,9 @@ struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))
+void *__px_vaddr(struct drm_i915_gem_object *p);
+#define px_vaddr(px) (__px_vaddr(px_base(px)))
+
#define px_pt(px) \
__px_choose_expr(px, struct i915_page_table *, __x, \
__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
@@ -517,8 +520,6 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
-#define kmap_atomic_px(px) kmap_atomic(__px_page(px_base(px)))
-
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
@@ -532,12 +533,13 @@ int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
+struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);
-int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
-int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
void free_px(struct i915_address_space *vm,
struct i915_page_table *pt, int lvl);
@@ -584,7 +586,7 @@ void setup_private_pat(struct intel_uncore *uncore);
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
u64 size);
-int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index e86897cde984..aafe2a4df496 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -903,7 +903,9 @@ lrc_pre_pin(struct intel_context *ce,
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
*vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915) |
+ i915_coherent_map_type(ce->engine->i915,
+ ce->state->obj,
+ false) |
I915_MAP_OVERRIDE);
return PTR_ERR_OR_ZERO(*vaddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 014ae8ac4480..4e3d80c2295c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -87,11 +87,10 @@ write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
{
- u64 * const vaddr = kmap_atomic(__px_page(pdma));
+ u64 * const vaddr = __px_vaddr(pdma);
vaddr[idx] = encoded_entry;
clflush_cache_range(&vaddr[idx], sizeof(u64));
- kunmap_atomic(vaddr);
}
void
@@ -258,7 +257,7 @@ int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
return 0;
}
-int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash)
{
struct i915_page_table *pt;
@@ -266,7 +265,7 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
for (pt = stash->pt[n]; pt; pt = pt->stash) {
- err = pin_pt_dma_locked(vm, pt->base);
+ err = map_pt_dma_locked(vm, pt->base);
if (err)
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index a377c4588aaa..8091846d955b 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -338,15 +338,69 @@ static int gen6_reset_engines(struct intel_gt *gt,
return gen6_hw_domain_reset(gt, hw_mask);
}
-static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
+static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
+{
+ int vecs_id;
+
+ GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
+
+ vecs_id = _VECS((engine->instance) / 2);
+
+ return engine->gt->engine[vecs_id];
+}
+
+struct sfc_lock_data {
+ i915_reg_t lock_reg;
+ i915_reg_t ack_reg;
+ i915_reg_t usage_reg;
+ u32 lock_bit;
+ u32 ack_bit;
+ u32 usage_bit;
+ u32 reset_bit;
+};
+
+static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
+ struct sfc_lock_data *sfc_lock)
+{
+ switch (engine->class) {
+ default:
+ MISSING_CASE(engine->class);
+ fallthrough;
+ case VIDEO_DECODE_CLASS:
+ sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine);
+ sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+
+ sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
+
+ sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
+ sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
+
+ break;
+ case VIDEO_ENHANCEMENT_CLASS:
+ sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine);
+ sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+
+ sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine);
+ sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
+
+ sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine);
+ sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
+ sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
+
+ break;
+ }
+}
+
+static int gen11_lock_sfc(struct intel_engine_cs *engine,
+ u32 *reset_mask,
+ u32 *unlock_mask)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
- i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
- u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
- i915_reg_t sfc_usage;
- u32 sfc_usage_bit;
- u32 sfc_reset_bit;
+ struct sfc_lock_data sfc_lock;
+ bool lock_obtained, lock_to_other = false;
int ret;
switch (engine->class) {
@@ -354,53 +408,72 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
return 0;
- sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
- sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
-
- sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
- sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
+ fallthrough;
+ case VIDEO_ENHANCEMENT_CLASS:
+ get_sfc_forced_lock_data(engine, &sfc_lock);
- sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
- sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
- sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
break;
+ default:
+ return 0;
+ }
- case VIDEO_ENHANCEMENT_CLASS:
- sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
- sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+ if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
+ struct intel_engine_cs *paired_vecs;
- sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
- sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
+ if (engine->class != VIDEO_DECODE_CLASS ||
+ !IS_GEN(engine->i915, 12))
+ return 0;
- sfc_usage = GEN11_VECS_SFC_USAGE(engine);
- sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
- sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
- break;
+ /*
+ * Wa_14010733141
+ *
+ * If the VCS-MFX isn't using the SFC, we also need to check
+ * whether VCS-HCP is using it. If so, we need to issue a *VE*
+ * forced lock on the VE engine that shares the same SFC.
+ */
+ if (!(intel_uncore_read_fw(uncore,
+ GEN12_HCP_SFC_LOCK_STATUS(engine)) &
+ GEN12_HCP_SFC_USAGE_BIT))
+ return 0;
- default:
- return 0;
+ paired_vecs = find_sfc_paired_vecs_engine(engine);
+ get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
+ lock_to_other = true;
+ *unlock_mask |= paired_vecs->mask;
+ } else {
+ *unlock_mask |= engine->mask;
}
/*
- * If the engine is using a SFC, tell the engine that a software reset
+ * If the engine is using an SFC, tell the engine that a software reset
* is going to happen. The engine will then try to force lock the SFC.
* If SFC ends up being locked to the engine we want to reset, we have
* to reset it as well (we will unlock it once the reset sequence is
* completed).
*/
- if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
- return 0;
-
- rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
+ rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
ret = __intel_wait_for_register_fw(uncore,
- sfc_forced_lock_ack,
- sfc_forced_lock_ack_bit,
- sfc_forced_lock_ack_bit,
+ sfc_lock.ack_reg,
+ sfc_lock.ack_bit,
+ sfc_lock.ack_bit,
1000, 0, NULL);
- /* Was the SFC released while we were trying to lock it? */
- if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
+ /*
+ * Was the SFC released while we were trying to lock it?
+ *
+ * We should reset both the engine and the SFC if:
+ * - We were locking the SFC to this engine and the lock succeeded
+ * OR
+ * - We were locking the SFC to a different engine (Wa_14010733141)
+ * but the SFC was released before the lock was obtained.
+ *
+ * Otherwise we need only reset the engine by itself and we can
+ * leave the SFC alone.
+ */
+ lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
+ sfc_lock.usage_bit) != 0;
+ if (lock_obtained == lock_to_other)
return 0;
if (ret) {
@@ -408,7 +481,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
return ret;
}
- *hw_mask |= sfc_reset_bit;
+ *reset_mask |= sfc_lock.reset_bit;
return 0;
}
@@ -416,28 +489,19 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
- i915_reg_t sfc_forced_lock;
- u32 sfc_forced_lock_bit;
-
- switch (engine->class) {
- case VIDEO_DECODE_CLASS:
- if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
- return;
+ struct sfc_lock_data sfc_lock = {};
- sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
- sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
- break;
-
- case VIDEO_ENHANCEMENT_CLASS:
- sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
- sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
- break;
+ if (engine->class != VIDEO_DECODE_CLASS &&
+ engine->class != VIDEO_ENHANCEMENT_CLASS)
+ return;
- default:
+ if (engine->class == VIDEO_DECODE_CLASS &&
+ (BIT(engine->instance) & vdbox_sfc_access) == 0)
return;
- }
- rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
+ get_sfc_forced_lock_data(engine, &sfc_lock);
+
+ rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
}
static int gen11_reset_engines(struct intel_gt *gt,
@@ -456,23 +520,23 @@ static int gen11_reset_engines(struct intel_gt *gt,
};
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- u32 hw_mask;
+ u32 reset_mask, unlock_mask = 0;
int ret;
if (engine_mask == ALL_ENGINES) {
- hw_mask = GEN11_GRDOM_FULL;
+ reset_mask = GEN11_GRDOM_FULL;
} else {
- hw_mask = 0;
+ reset_mask = 0;
for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
- hw_mask |= hw_engine_mask[engine->id];
- ret = gen11_lock_sfc(engine, &hw_mask);
+ reset_mask |= hw_engine_mask[engine->id];
+ ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
if (ret)
goto sfc_unlock;
}
}
- ret = gen6_hw_domain_reset(gt, hw_mask);
+ ret = gen6_hw_domain_reset(gt, reset_mask);
sfc_unlock:
/*
@@ -480,10 +544,14 @@ sfc_unlock:
* gen11_lock_sfc to make sure that we clean properly if something
* wrong happened during the lock (e.g. lock acquired after timeout
* expiration).
+ *
+ * Due to Wa_14010733141, we may have locked an SFC to an engine that
+ * wasn't being reset. So instead of calling gen11_unlock_sfc()
+ * on engine_mask, we instead call it on the mask of engines that our
+ * gen11_lock_sfc() calls told us actually had locks attempted.
*/
- if (engine_mask != ALL_ENGINES)
- for_each_engine_masked(engine, gt, engine_mask, tmp)
- gen11_unlock_sfc(engine);
+ for_each_engine_masked(engine, gt, unlock_mask, tmp)
+ gen11_unlock_sfc(engine);
return ret;
}
@@ -1118,7 +1186,6 @@ static int intel_gt_reset_engine(struct intel_engine_cs *engine)
int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
- bool uses_guc = intel_engine_in_guc_submission_mode(engine);
int ret;
ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
@@ -1134,10 +1201,10 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
- if (!uses_guc)
- ret = intel_gt_reset_engine(engine);
- else
+ if (intel_engine_uses_guc(engine))
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
+ else
+ ret = intel_gt_reset_engine(engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
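The lock/reset decision above boils down to two booleans: whether the forced lock was aimed at another engine (the Wa_14010733141 path) and whether the hardware reports the lock as obtained; the SFC joins the reset mask exactly when the two disagree. A tiny check of that truth table:

    #include <assert.h>
    #include <stdbool.h>

    /* Mirrors the decision at the end of gen11_lock_sfc(): add the SFC to
     * the reset mask only when lock_obtained != lock_to_other. */
    static bool reset_sfc(bool lock_to_other, bool lock_obtained)
    {
        return lock_obtained != lock_to_other;
    }

    int main(void)
    {
        /* Locked to this engine and the lock stuck: reset engine + SFC. */
        assert(reset_sfc(false, true));
        /* SFC released while locking: engine-only reset. */
        assert(!reset_sfc(false, false));
        /* Wa_14010733141: aimed at the paired VECS but SFC got away first. */
        assert(reset_sfc(true, false));
        /* Wa_14010733141: cross-engine lock stuck; leave the SFC alone. */
        assert(!reset_sfc(true, true));
        return 0;
    }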
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index aee0a77c77e0..7c4d5158e03b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -51,11 +51,14 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
if (unlikely(ret))
goto err_unpin;
- if (i915_vma_is_map_and_fenceable(vma))
+ if (i915_vma_is_map_and_fenceable(vma)) {
addr = (void __force *)i915_vma_pin_iomap(vma);
- else
- addr = i915_gem_object_pin_map(vma->obj,
- i915_coherent_map_type(vma->vm->i915));
+ } else {
+ int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+
+ addr = i915_gem_object_pin_map(vma->obj, type);
+ }
+
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err_ring;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 9585546556ee..2b6dffcc2262 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -12,6 +12,7 @@
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_gt_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
@@ -989,14 +990,10 @@ static void gen6_bsd_submit_request(struct i915_request *request)
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
-
- engine->park = NULL;
- engine->unpark = NULL;
}
static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
- i9xx_set_default_submission(engine);
engine->submit_request = gen6_bsd_submit_request;
}
@@ -1021,10 +1018,17 @@ static void ring_release(struct intel_engine_cs *engine)
intel_timeline_put(engine->legacy.timeline);
}
+static void irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ intel_engine_signal_breadcrumbs(engine);
+}
+
static void setup_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
+ intel_engine_set_irq_handler(engine, irq_handler);
+
if (INTEL_GEN(i915) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
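
This file, the GuC changes and the top-level interrupt changes below all replace direct calls to intel_engine_signal_breadcrumbs() with a per-engine handler installed via intel_engine_set_irq_handler(), to which callers forward the (shifted) IIR bits. A rough user-space model of that indirection, with made-up types:

#include <stdint.h>
#include <stdio.h>

struct engine {
        const char *name;
        void (*irq_handler)(struct engine *e, uint16_t iir);
};

/* The legacy ring handler ignores the bits and just signals. */
static void ring_irq(struct engine *e, uint16_t iir)
{
        (void)iir;
        printf("%s: signal breadcrumbs\n", e->name);
}

/* Model of intel_engine_cs_irq(): forward non-zero IIR bits. */
static void engine_cs_irq(struct engine *e, uint16_t iir)
{
        if (iir)
                e->irq_handler(e, iir);
}

int main(void)
{
        struct engine rcs0 = { .name = "rcs0", .irq_handler = ring_irq };

        engine_cs_irq(&rcs0, 1u << 8);  /* e.g. a user interrupt bit */
        return 0;
}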
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 405d814e9040..97cab1b99871 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1774,7 +1774,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
return;
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(gt->engine[VECS0]);
+ intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index f19cf6d2fa85..c4a126c8caef 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -32,7 +32,6 @@ static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
return vma;
}
-__i915_active_call
static void __timeline_retire(struct i915_active *active)
{
struct intel_timeline *tl =
@@ -104,7 +103,8 @@ static int intel_timeline_init(struct intel_timeline *timeline,
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
- i915_active_init(&timeline->active, __timeline_active, __timeline_retire);
+ i915_active_init(&timeline->active, __timeline_active,
+ __timeline_retire, 0);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 2c6f7217469f..62cb9ee5bfc3 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -607,9 +607,38 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
+/*
+ * These settings aren't actually workarounds, but general tuning settings that
+ * need to be programmed on several platforms.
+ */
+static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ /*
+ * Although some platforms refer to it as Wa_1604555607, we need to
+ * program it even on those that don't explicitly list that
+ * workaround.
+ *
+ * Note that the programming of this register is further modified
+ * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
+ * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
+ * value when read. The default value for this register is zero for all
+ * fields and there are no bit masks. So instead of doing an RMW we
+ * should just write the TDS timer value. For the same reason, read
+ * verification is ignored.
+ */
+ wa_add(wal,
+ FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128,
+ 0);
+}
+
static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ gen12_ctx_gt_tuning_init(engine, wal);
+
/*
* Wa_1409142259:tgl
* Wa_1409347922:tgl
@@ -628,27 +657,17 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
-}
-
-static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
- struct i915_wa_list *wal)
-{
- gen12_ctx_workarounds_init(engine, wal);
/*
- * Wa_1604555607:tgl,rkl
+ * Wa_16011163337
*
- * Note that the implementation of this workaround is further modified
- * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
- * FF_MODE2 register will return the wrong value when read. The default
- * value for this register is zero for all fields and there are no bit
- * masks. So instead of doing a RMW we should just write the GS Timer
- * and TDS timer values for Wa_1604555607 and Wa_16011163337.
+ * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
+ * to Wa_1608008084.
*/
wa_add(wal,
FF_MODE2,
- FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK,
- FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128,
+ FF_MODE2_GS_TIMER_MASK,
+ FF_MODE2_GS_TIMER_224,
0);
}
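
Both wa_add() calls above pass 0 as the final (read-mask) argument because, per Wa_1608008084, FF_MODE2 reads back the wrong value and nothing it returns can be checked. A sketch of what a zero read mask buys, assuming a simplified workaround entry (illustrative field values, not the real register layout):

#include <stdint.h>
#include <stdio.h>

struct wa {
        uint32_t set;           /* value to write */
        uint32_t read_mask;     /* bits to verify; 0 skips verification */
};

static int wa_verify(const struct wa *wa, uint32_t hw_value)
{
        /* Only bits in read_mask are compared, so a zero mask makes
         * the check vacuously pass for registers that misreport. */
        return (hw_value & wa->read_mask) == (wa->set & wa->read_mask);
}

int main(void)
{
        struct wa ff_mode2 = { .set = 4u << 24, .read_mask = 0 };

        /* The register reads back garbage, yet verification passes. */
        printf("verify: %s\n", wa_verify(&ff_mode2, 0xdeadbeef) ? "ok" : "FAIL");
        return 0;
}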
@@ -664,16 +683,6 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_22010493298 */
wa_masked_en(wal, HIZ_CHICKEN,
DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
-
- /*
- * Wa_16011163337
- *
- * Like in tgl_ctx_workarounds_init(), read verification is ignored due
- * to Wa_1608008084.
- */
- wa_add(wal,
- FF_MODE2,
- FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0);
}
static void
@@ -690,9 +699,6 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
- else if (IS_ALDERLAKE_S(i915) || IS_ROCKETLAKE(i915) ||
- IS_TIGERLAKE(i915))
- tgl_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 12))
gen12_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 11))
@@ -1078,11 +1084,37 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}
+/*
+ * Though there are per-engine instances of these registers,
+ * they retain their value through engine resets and should
+ * only be provided on the GT workaround list rather than
+ * the engine-specific workaround list.
+ */
+static void
+wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ struct intel_gt *gt = &i915->gt;
+ int id;
+
+ for_each_engine(engine, gt, id) {
+ if (engine->class != VIDEO_DECODE_CLASS ||
+ (engine->instance % 2))
+ continue;
+
+ wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
+ IECPUNIT_CLKGATE_DIS);
+ }
+}
+
static void
gen12_gt_workarounds_init(struct drm_i915_private *i915,
struct i915_wa_list *wal)
{
wa_init_mcr(i915, wal);
+
+ /* Wa_14011060649:tgl,rkl,dg1,adls */
+ wa_14011060649(i915, wal);
}
static void
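
The engine filter in wa_14011060649() above is easy to tabulate: only even-instance video decode engines receive the clock-gating disable, odd instances are skipped. A tiny sketch with a hypothetical engine set:

#include <stdio.h>

enum engine_class { RENDER, VIDEO_DECODE, VIDEO_ENHANCE };

struct engine { enum engine_class class; int instance; };

int main(void)
{
        /* Hypothetical engine set: one render engine plus four VCS. */
        const struct engine engines[] = {
                { RENDER, 0 },
                { VIDEO_DECODE, 0 }, { VIDEO_DECODE, 1 },
                { VIDEO_DECODE, 2 }, { VIDEO_DECODE, 3 },
        };

        for (size_t i = 0; i < sizeof(engines) / sizeof(engines[0]); i++) {
                const struct engine *e = &engines[i];

                /* Same filter as the hunk: skip non-VCS and odd instances. */
                if (e->class != VIDEO_DECODE || (e->instance % 2))
                        continue;
                printf("apply Wa_14011060649 to vcs%d\n", e->instance);
        }
        return 0;
}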
@@ -1755,11 +1787,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
- /* Wa_22010271021:ehl */
- if (IS_JSL_EHL(i915))
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
+ /* Wa_22010271021 */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN_RANGE(i915, 9, 12)) {
@@ -1828,9 +1859,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
CACHE_MODE_0_GEN7,
/* enable HiZ Raw Stall Optimization */
HIZ_RAW_STALL_OPT_DISABLE);
-
- /* WaDisable4x2SubspanOptimization:hsw */
- wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
}
if (IS_VALLEYVIEW(i915)) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index e1ba03b93ffa..32589c6625e1 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -55,7 +55,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
kfree(ring);
return NULL;
}
- i915_active_init(&ring->vma->active, NULL, NULL);
+ i915_active_init(&ring->vma->active, NULL, NULL, 0);
__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
ring->vma->node.size = sz;
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index b9bdd1d23243..26685b927169 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -88,7 +88,8 @@ static int __live_context_size(struct intel_engine_cs *engine)
goto err;
vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index b2c369317bf1..4896e4ccad50 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -77,7 +77,7 @@ static struct pulse *pulse_create(void)
return p;
kref_init(&p->kref);
- i915_active_init(&p->active, pulse_active, pulse_retire);
+ i915_active_init(&p->active, pulse_active, pulse_retire, 0);
return p;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 1081cd36a2bd..1f93591a8c69 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -4716,7 +4716,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_reset),
};
- if (!HAS_EXECLISTS(i915))
+ if (i915->gt.submission_method != INTEL_SUBMISSION_ELSP)
return 0;
if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 746985971c3a..5b63d4df8c93 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -69,7 +69,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map_unlocked(h->obj,
- i915_coherent_map_type(gt->i915));
+ i915_coherent_map_type(gt->i915, h->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -130,7 +130,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
}
- vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
i915_vm_put(vm);
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 85e7df6a5123..d8f6623524e8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1221,7 +1221,9 @@ static int compare_isolation(struct intel_engine_cs *engine,
}
lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj,
+ false));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
goto err_B1;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index f097e420ac45..710f825f6e5a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -34,6 +34,7 @@ int live_rc6_manual(void *arg)
struct intel_rc6 *rc6 = &gt->rc6;
u64 rc0_power, rc6_power;
intel_wakeref_t wakeref;
+ bool has_power;
ktime_t dt;
u64 res[2];
int err = 0;
@@ -50,6 +51,7 @@ int live_rc6_manual(void *arg)
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
return 0;
+ has_power = librapl_supported(gt->i915);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
/* Force RC6 off for starters */
@@ -71,11 +73,14 @@ int live_rc6_manual(void *arg)
goto out_unlock;
}
- rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt));
- if (!rc0_power) {
- pr_err("No power measured while in RC0\n");
- err = -EINVAL;
- goto out_unlock;
+ if (has_power) {
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
+ ktime_to_ns(dt));
+ if (!rc0_power) {
+ pr_err("No power measured while in RC0\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
}
/* Manually enter RC6 */
@@ -97,13 +102,16 @@ int live_rc6_manual(void *arg)
err = -EINVAL;
}
- rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, ktime_to_ns(dt));
- pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
- rc0_power, rc6_power);
- if (2 * rc6_power > rc0_power) {
- pr_err("GPU leaked energy while in RC6!\n");
- err = -EINVAL;
- goto out_unlock;
+ if (has_power) {
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_power,
+ ktime_to_ns(dt));
+ pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ rc0_power, rc6_power);
+ if (2 * rc6_power > rc0_power) {
+ pr_err("GPU leaked energy while in RC6!\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
}
/* Restore what should have been the original state! */
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 99609271c3a7..c12e74171b63 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -291,7 +291,7 @@ int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_ctx_switch_wa),
};
- if (HAS_EXECLISTS(i915))
+ if (i915->gt.submission_method > INTEL_SUBMISSION_RING)
return 0;
return intel_gt_live_subtests(tests, &i915->gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 967641fee42a..3ca1bd5793c3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -606,7 +606,7 @@ int live_rps_frequency_cs(void *arg)
int err = 0;
/*
- * The premise is that the GPU does change freqency at our behest.
+ * The premise is that the GPU does change frequency at our behest.
* Let's check there is a correspondence between the requested
* frequency, the actual frequency, and the observed clock rate.
*/
@@ -747,7 +747,7 @@ int live_rps_frequency_srm(void *arg)
int err = 0;
/*
- * The premise is that the GPU does change freqency at our behest.
+ * The premise is that the GPU does change frequency at our behest.
* Let's check there is a correspondence between the requested
* frequency, the actual frequency, and the observed clock rate.
*/
@@ -1139,7 +1139,7 @@ int live_rps_power(void *arg)
if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6)
return 0;
- if (!librapl_energy_uJ())
+ if (!librapl_supported(gt->i915))
return 0;
if (igt_spinner_init(&spin, gt))
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index f8f02aab842b..0683b27a3890 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -8,6 +8,7 @@
#include <linux/shmem_fs.h>
#include "gem/i915_gem_object.h"
+#include "gem/i915_gem_lmem.h"
#include "shmem_utils.h"
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
@@ -39,7 +40,8 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
- ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
+ I915_MAP_WC : I915_MAP_WB);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 78305b2ec89d..adae04c47aab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -682,7 +682,9 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
if (IS_ERR(vma))
return PTR_ERR(vma);
- vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(guc_to_gt(guc)->i915,
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 92688a9b6717..335719f17490 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -11,6 +11,7 @@
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc.h"
#include "gt/intel_mocs.h"
@@ -264,6 +265,14 @@ static void guc_submission_tasklet(struct tasklet_struct *t)
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+ if (iir & GT_RENDER_USER_INTERRUPT) {
+ intel_engine_signal_breadcrumbs(engine);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+}
+
static void guc_reset_prepare(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -608,35 +617,6 @@ static int guc_resume(struct intel_engine_cs *engine)
static void guc_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = guc_submit_request;
- engine->schedule = i915_schedule;
- engine->execlists.tasklet.callback = guc_submission_tasklet;
-
- engine->reset.prepare = guc_reset_prepare;
- engine->reset.rewind = guc_reset_rewind;
- engine->reset.cancel = guc_reset_cancel;
- engine->reset.finish = guc_reset_finish;
-
- engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-
- /*
- * TODO: GuC supports timeslicing and semaphores as well, but they're
- * handled by the firmware so some minor tweaks are required before
- * enabling.
- *
- * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
- * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- */
-
- engine->emit_bb_start = gen8_emit_bb_start;
-
- /*
- * For the breadcrumb irq to work we need the interrupts to stay
- * enabled. However, on all platforms on which we'll have support for
- * GuC submission we don't allow disabling the interrupts at runtime, so
- * we're always safe with the current flow.
- */
- GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
static void guc_release(struct intel_engine_cs *engine)
@@ -658,6 +638,13 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->cops = &guc_context_ops;
engine->request_alloc = guc_request_alloc;
+ engine->schedule = i915_schedule;
+
+ engine->reset.prepare = guc_reset_prepare;
+ engine->reset.rewind = guc_reset_rewind;
+ engine->reset.cancel = guc_reset_cancel;
+ engine->reset.finish = guc_reset_finish;
+
engine->emit_flush = gen8_emit_flush_xcs;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
@@ -666,6 +653,19 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen12_emit_flush_xcs;
}
engine->set_default_submission = guc_set_default_submission;
+
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+
+ /*
+ * TODO: GuC supports timeslicing and semaphores as well, but they're
+ * handled by the firmware so some minor tweaks are required before
+ * enabling.
+ *
+ * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ */
+
+ engine->emit_bb_start = gen8_emit_bb_start;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -689,6 +689,7 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
static inline void guc_default_irqs(struct intel_engine_cs *engine)
{
engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
+ intel_engine_set_irq_handler(engine, cs_irq_handler);
}
int intel_guc_submission_setup(struct intel_engine_cs *engine)
@@ -753,8 +754,3 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
{
guc->submission_selected = __guc_submission_selected(guc);
}
-
-bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
-{
- return engine->set_default_submission == guc_set_default_submission;
-}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 5f7b9e6347d0..3f7005018939 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -20,7 +20,6 @@ void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
int intel_guc_submission_setup(struct intel_engine_cs *engine);
-bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 2126dd81ac38..56d2144dc6a0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -82,7 +82,9 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
if (IS_ERR(vma))
return PTR_ERR(vma);
- vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(gt->i915,
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index e7c2babcee8b..cbac409f6c8a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -46,118 +46,6 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
-static struct intel_vgpu_type *
-intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
-{
- if (WARN_ON(type_group_id >= gvt->num_types))
- return NULL;
- return &gvt->types[type_group_id];
-}
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
-{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
- if (!type)
- return 0;
-
- return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\nresolution: %s\n"
- "weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-
-static struct attribute *gvt_type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
-{
- *intel_vgpu_type_groups = gvt_vgpu_type_groups;
- return true;
-}
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (WARN_ON(!group))
- goto unwind;
-
- group->name = type->name;
- group->attrs = gvt_type_attrs;
- gvt_vgpu_type_groups[i] = group;
- }
-
- return 0;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = gvt_vgpu_type_groups[j];
- kfree(group);
- }
-
- return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = gvt_vgpu_type_groups[i];
- gvt_vgpu_type_groups[i] = NULL;
- kfree(group);
- }
-}
-
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -169,8 +57,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
- .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
- .get_gvt_attrs = intel_get_gvt_attrs,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
@@ -274,7 +160,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915)
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
intel_gvt_debugfs_clean(gvt);
@@ -363,12 +248,6 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
if (ret)
goto out_clean_thread;
- ret = intel_gvt_init_vgpu_type_groups(gvt);
- if (ret) {
- gvt_err("failed to init vgpu type groups: %d\n", ret);
- goto out_clean_types;
- }
-
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
@@ -454,7 +333,8 @@ EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
void
intel_gvt_unregister_hypervisor(void)
{
- intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+ void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+ intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 88ab360fcb31..0c0615602343 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -574,9 +574,6 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
- struct intel_vgpu_type *(*gvt_find_vgpu_type)(
- struct intel_gvt *gvt, unsigned int type_group_id);
- bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index b79da5124f83..f33e3cbd0439 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -49,7 +49,7 @@ enum hypervisor_type {
struct intel_gvt_mpt {
enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
- void (*host_exit)(struct device *dev);
+ void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 65ff43cfc0f7..48b4d4cf805d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -144,6 +144,104 @@ static inline bool handle_valid(unsigned long handle)
return !!(handle & ~0xff);
}
+static ssize_t available_instances_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+ type = &gvt->types[mtype_get_type_group_id(mtype)];
+ if (!type)
+ num = 0;
+ else
+ num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ struct intel_vgpu_type *type;
+ struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+ type = &gvt->types[mtype_get_type_group_id(mtype)];
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\nresolution: %s\n"
+ "weight: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence, vgpu_edid_str(type->resolution),
+ type->weight);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *gvt_type_attrs[] = {
+ &mdev_type_attr_available_instances.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
+ NULL,
+};
+
+static struct attribute_group *gvt_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (!group)
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = gvt_type_attrs;
+ gvt_vgpu_type_groups[i] = group;
+ }
+
+ return 0;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = gvt_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return -ENOMEM;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = gvt_vgpu_type_groups[i];
+ gvt_vgpu_type_groups[i] = NULL;
+ kfree(group);
+ }
+}
+
static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
@@ -694,14 +792,13 @@ static int intel_vgpu_create(struct mdev_device *mdev)
struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
struct device *pdev;
- void *gvt;
+ struct intel_gvt *gvt;
int ret;
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
- type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
- mdev_get_type_group_id(mdev));
+ type = &gvt->types[mdev_get_type_group_id(mdev)];
if (!type) {
ret = -EINVAL;
goto out;
@@ -1667,19 +1764,26 @@ static struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- struct attribute_group **kvm_vgpu_type_groups;
+ int ret;
+
+ ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
+ if (ret)
+ return ret;
intel_gvt_ops = ops;
- if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
- return -EFAULT;
- intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
+ intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
- return mdev_register_device(dev, &intel_vgpu_ops);
+ ret = mdev_register_device(dev, &intel_vgpu_ops);
+ if (ret)
+ intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
+
+ return ret;
}
-static void kvmgt_host_exit(struct device *dev)
+static void kvmgt_host_exit(struct device *dev, void *gvt)
{
mdev_unregister_device(dev);
+ intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
}
static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 550a456e936f..e6c5a792a49a 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -63,13 +63,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
- intel_gvt_host.mpt->host_exit(dev);
+ intel_gvt_host.mpt->host_exit(dev, gvt);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index aa573b078ae7..b1aa1c482c32 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -343,18 +343,15 @@ out:
void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
+ unsigned long flags,
struct lock_class_key *mkey,
struct lock_class_key *wkey)
{
- unsigned long bits;
-
debug_active_init(ref);
- ref->flags = 0;
+ ref->flags = flags;
ref->active = active;
- ref->retire = ptr_unpack_bits(retire, &bits, 2);
- if (bits & I915_ACTIVE_MAY_SLEEP)
- ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+ ref->retire = retire;
spin_lock_init(&ref->tree_lock);
ref->tree = RB_ROOT;
@@ -1156,8 +1153,7 @@ static int auto_active(struct i915_active *ref)
return 0;
}
-__i915_active_call static void
-auto_retire(struct i915_active *ref)
+static void auto_retire(struct i915_active *ref)
{
i915_active_put(ref);
}
@@ -1171,7 +1167,7 @@ struct i915_active *i915_active_create(void)
return NULL;
kref_init(&aa->ref);
- i915_active_init(&aa->base, auto_active, auto_retire);
+ i915_active_init(&aa->base, auto_active, auto_retire, 0);
return &aa->base;
}
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index fb165d3f01cf..d0feda68b874 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -152,15 +152,16 @@ i915_active_fence_isset(const struct i915_active_fence *active)
void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
+ unsigned long flags,
struct lock_class_key *mkey,
struct lock_class_key *wkey);
/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
-#define i915_active_init(ref, active, retire) do { \
- static struct lock_class_key __mkey; \
- static struct lock_class_key __wkey; \
- \
- __i915_active_init(ref, active, retire, &__mkey, &__wkey); \
+#define i915_active_init(ref, active, retire, flags) do { \
+ static struct lock_class_key __mkey; \
+ static struct lock_class_key __wkey; \
+ \
+ __i915_active_init(ref, active, retire, flags, &__mkey, &__wkey); \
} while (0)
struct dma_fence *
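
For context on why the extra flags parameter exists: the removed __i915_active_call/ptr_unpack_bits scheme smuggled I915_ACTIVE_MAY_SLEEP in the low bits of the 4-byte-aligned retire pointer, which the explicit argument makes unnecessary. A stand-alone sketch of the old trick, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

typedef void (*retire_fn)(void);

/* Forcing 4-byte alignment frees the low two pointer bits for flags,
 * which is what ptr_pack_bits()/ptr_unpack_bits() exploited. */
__attribute__((aligned(4))) static void my_retire(void)
{
        puts("retire");
}

int main(void)
{
        uintptr_t packed = (uintptr_t)&my_retire | 0x1; /* flag in bit 0 */
        unsigned long flags = packed & 0x3;
        retire_fn fn = (retire_fn)(packed & ~(uintptr_t)0x3);

        printf("flags=%lu\n", flags);
        fn();
        return 0;
}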
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 6360c3e4b765..c149f348a972 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -24,11 +24,6 @@ struct i915_active_fence {
struct active_node;
-#define I915_ACTIVE_MAY_SLEEP BIT(0)
-
-#define __i915_active_call __aligned(4)
-#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
-
struct i915_active {
atomic_t count;
struct mutex mutex;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index c1167bc15964..3992c25a191d 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1369,6 +1369,20 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
return 0;
}
+/**
+ * intel_engine_cmd_parser_alloc_jump_whitelist() - preallocate jump whitelist for intel_engine_cmd_parser()
+ * @batch_length: length of the commands in batch_obj
+ * @trampoline: whether jump trampolines are used
+ *
+ * Preallocates a jump whitelist for parsing the cmd buffer in intel_engine_cmd_parser().
+ * This has to be preallocated because the command parser runs in signaling
+ * context and may not allocate any memory.
+ *
+ * Return: NULL or pointer to a jump whitelist, or ERR_PTR() on failure. Use
+ * IS_ERR() to check for errors. Must be freed with kfree().
+ *
+ * NULL is a valid value, meaning no allocation was required.
+ */
unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
bool trampoline)
{
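
A user-space sketch of the contract the kerneldoc above describes: NULL means "nothing needed", an allocation failure is an error, and otherwise the caller owns a zeroed bitmap it must free. The sizing and the trampoline short-circuit below are assumptions for illustration, not the parser's exact logic:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long *alloc_jump_whitelist(unsigned int batch_length,
                                           int trampoline)
{
        size_t n_bits = batch_length / sizeof(unsigned int);    /* dwords */
        size_t n_words = (n_bits + BITS_PER_LONG - 1) / BITS_PER_LONG;

        if (trampoline)
                return NULL;    /* assumed: trampolines need no bitmap */

        /* Done up front: the parser itself may not sleep or allocate. */
        return calloc(n_words, sizeof(unsigned long));
}

int main(void)
{
        unsigned long *wl = alloc_jump_whitelist(4096, 0);

        printf("whitelist %sallocated\n", wl ? "" : "not ");
        free(wl);       /* kfree() in the kernel */
        return 0;
}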
@@ -1401,7 +1415,9 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
* @batch_offset: byte offset in the batch at which execution starts
* @batch_length: length of the commands in batch_obj
* @shadow: validated copy of the batch buffer in question
- * @trampoline: whether to emit a conditional trampoline at the end of the batch
+ * @jump_whitelist: buffer preallocated with intel_engine_cmd_parser_alloc_jump_whitelist()
+ * @shadow_map: mapping to @shadow vma
+ * @batch_map: mapping to @batch vma
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b5facdd5edec..cc745751ac53 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -633,9 +633,9 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "DDC2 = 0x%08x\n",
intel_uncore_read(uncore, DCC2));
seq_printf(m, "C0DRB3 = 0x%04x\n",
- intel_uncore_read16(uncore, C0DRB3));
+ intel_uncore_read16(uncore, C0DRB3_BW));
seq_printf(m, "C1DRB3 = 0x%04x\n",
- intel_uncore_read16(uncore, C1DRB3));
+ intel_uncore_read16(uncore, C1DRB3_BW));
} else if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
intel_uncore_read(uncore, MAD_DIMM_C0));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6817c612eb78..d5b3c5ba6bd2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1727,6 +1727,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eb3d685cbd22..7b7918f72c41 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -51,7 +51,6 @@
#include <linux/xarray.h>
#include <drm/intel-gtt.h>
-#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
@@ -79,6 +78,7 @@
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
@@ -498,6 +498,13 @@ struct intel_l3_parity {
};
struct i915_gem_mm {
+ /*
+ * Shortcut for the stolen region. This points to
+ * INTEL_REGION_STOLEN_SMEM for integrated platforms,
+ * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device
+ * doesn't support stolen memory.
+ */
+ struct intel_memory_region *stolen_region;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Protects the usage of the GTT stolen memory allocator. This is
@@ -1709,9 +1716,15 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
}
static inline bool
-intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
- return IS_BROXTON(dev_priv) && intel_vtd_active();
+ return IS_BROXTON(i915) && intel_vtd_active();
+}
+
+static inline bool
+intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}
/* i915_drv.c */
@@ -1791,6 +1804,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
+#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -1923,9 +1937,15 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
}
static inline enum i915_map_type
-i915_coherent_map_type(struct drm_i915_private *i915)
+i915_coherent_map_type(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj, bool always_coherent)
{
- return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+ if (i915_gem_object_is_lmem(obj))
+ return I915_MAP_WC;
+ if (HAS_LLC(i915) || always_coherent)
+ return I915_MAP_WB;
+ else
+ return I915_MAP_WC;
}
#endif
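
The reworked helper above collapses to a small decision table: local memory is always write-combined, and everything else is write-back when the platform has an LLC or the caller (GuC/HuC in this series) insists on coherency. A quick stand-alone restatement of that logic:

#include <stdbool.h>
#include <stdio.h>

enum map_type { MAP_WB, MAP_WC };

/* Mirror of the selection logic: lmem is always WC, otherwise LLC
 * platforms (or callers demanding coherency) get WB. */
static enum map_type coherent_map_type(bool has_llc, bool is_lmem,
                                       bool always_coherent)
{
        if (is_lmem)
                return MAP_WC;
        return (has_llc || always_coherent) ? MAP_WB : MAP_WC;
}

int main(void)
{
        printf("lmem:        %s\n", coherent_map_type(true, true, true) == MAP_WC ? "WC" : "WB");
        printf("no-LLC GuC:  %s\n", coherent_map_type(false, false, true) == MAP_WB ? "WB" : "WC");
        printf("no-LLC ring: %s\n", coherent_map_type(false, false, false) == MAP_WC ? "WC" : "WB");
        return 0;
}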
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6fd3ab61de37..1aadc021d92e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -157,8 +157,18 @@ try_again:
if (vma) {
ret = -EBUSY;
if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
- !i915_vma_is_active(vma))
- ret = i915_vma_unbind(vma);
+ !i915_vma_is_active(vma)) {
+ if (flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK) {
+ if (mutex_trylock(&vma->vm->mutex)) {
+ ret = __i915_vma_unbind(vma);
+ mutex_unlock(&vma->vm->mutex);
+ } else {
+ ret = -EBUSY;
+ }
+ } else {
+ ret = i915_vma_unbind(vma);
+ }
+ }
__i915_vma_put(vma);
}
@@ -999,12 +1009,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->mm.madv = args->madv;
if (i915_gem_object_has_pages(obj)) {
- struct list_head *list;
+ unsigned long flags;
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ if (!list_empty(&obj->mm.link)) {
+ struct list_head *list;
if (obj->mm.madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
@@ -1012,8 +1021,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
list = &i915->mm.shrink_list;
list_move_tail(&obj->mm.link, list);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
/* if the object is no longer attached, discard its backing storage */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index eb435f9e0220..b98d8cdbe4f2 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: MIT
*
- * Copyright � 2008-2018 Intel Corporation
+ * Copyright © 2008-2018 Intel Corporation
*/
#ifndef _I915_GPU_ERROR_H_
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index bf93344b8564..a11bdb667241 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -4037,7 +4037,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+ intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4145,7 +4145,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+ intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4290,10 +4290,12 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+ intel_engine_cs_irq(dev_priv->gt.engine[RCS0],
+ iir);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
+ intel_engine_cs_irq(dev_priv->gt.engine[VCS0],
+ iir >> 25);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 25576fa73ff0..9a777b0ff59b 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -28,10 +28,46 @@
#include "i915_drv.h"
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+struct remap_pfn {
+ struct mm_struct *mm;
+ unsigned long pfn;
+ pgprot_t prot;
+
+ struct sgt_iter sgt;
+ resource_size_t iobase;
+};
#define use_dma(io) ((io) != -1)
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+ if (use_dma(r->iobase))
+ return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+ else
+ return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ if (GEM_WARN_ON(!r->sgt.sgp))
+ return -EINVAL;
+
+	/* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte,
+ pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+ r->pfn++; /* track insertions in case we need to unwind later */
+
+ r->sgt.curr += PAGE_SIZE;
+ if (r->sgt.curr >= r->sgt.max)
+ r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+ return 0;
+}
+
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+
/**
* remap_io_sg - remap an IO mapping to userspace
* @vma: user vma to map to
@@ -46,8 +82,13 @@ int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase)
{
- unsigned long pfn, len, remapped = 0;
- int err = 0;
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .prot = vma->vm_page_prot,
+ .sgt = __sgt_iter(sgl, use_dma(iobase)),
+ .iobase = iobase,
+ };
+ int err;
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
@@ -55,25 +96,11 @@ int remap_io_sg(struct vm_area_struct *vma,
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
- do {
- if (use_dma(iobase)) {
- if (!sg_dma_len(sgl))
- break;
- pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
- len = sg_dma_len(sgl);
- } else {
- pfn = page_to_pfn(sg_page(sgl));
- len = sgl->length;
- }
-
- err = remap_pfn_range(vma, addr + remapped, pfn, len,
- vma->vm_page_prot);
- if (err)
- break;
- remapped += len;
- } while ((sgl = __sg_next(sgl)));
-
- if (err)
- zap_vma_ptes(vma, addr, remapped);
- return err;
+ err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
}
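
The rewrite above replaces a per-segment remap_pfn_range() loop with a single apply_to_page_range() walk whose callback fills one PTE and advances a scatterlist iterator. A toy model of that advance logic, using plain arrays instead of scatterlists:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Toy scatterlist segment: a base pfn and a length in bytes. */
struct seg { uint64_t pfn; uint32_t len; };

int main(void)
{
        /* Two non-contiguous segments, as remap_sg() would see them. */
        const struct seg segs[] = { { 100, 2 * PAGE_SIZE }, { 500, PAGE_SIZE } };
        unsigned int si = 0;
        uint32_t curr = 0;

        /* One callback invocation per PTE: emit a pfn, advance the
         * iterator, hop to the next segment when this one is done. */
        for (int pte = 0; pte < 3; pte++) {
                printf("pte %d -> pfn %llu\n", pte,
                       (unsigned long long)(segs[si].pfn + curr / PAGE_SIZE));
                curr += PAGE_SIZE;
                if (curr >= segs[si].len) {
                        si++;
                        curr = 0;
                }
        }
        return 0;
}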
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 14cd64cc61d0..4a114a5ad000 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -71,18 +71,18 @@ struct drm_printer;
param(int, fastboot, -1, 0600) \
param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
- param(unsigned long, fake_lmem_start, 0, 0400) \
- param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, 0600) \
+ param(unsigned long, fake_lmem_start, 0, IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM) ? 0400 : 0) \
+ param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \
/* leave bools at the end to not create holes */ \
param(bool, enable_hangcheck, true, 0600) \
param(bool, load_detect_test, false, 0600) \
param(bool, force_reset_modeset_test, false, 0600) \
- param(bool, error_capture, true, 0600) \
+ param(bool, error_capture, true, IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) ? 0600 : 0) \
param(bool, disable_display, false, 0400) \
param(bool, verbose_state_checks, true, 0) \
param(bool, nuclear_pageflip, false, 0400) \
param(bool, enable_dp_mst, true, 0600) \
- param(bool, enable_gvt, false, 0400)
+ param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0)
#define MEMBER(T, member, ...) T member;
struct i915_params {
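
The mode changes above rely on a module_param() property: a permissions value of 0 keeps the parameter compiled in but creates no /sys/module/i915/parameters entry, so knobs for compiled-out features disappear from sysfs. A minimal sketch of the same gating in a standalone module (CONFIG_MY_FEATURE and fake_knob are hypothetical):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool fake_knob;

#if IS_ENABLED(CONFIG_MY_FEATURE)               /* hypothetical Kconfig gate */
module_param(fake_knob, bool, 0400);            /* visible, root-readable */
#else
module_param(fake_knob, bool, 0);               /* no sysfs node at all */
#endif

MODULE_LICENSE("GPL");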
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f44002abc0d2..83b500bb170c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -908,7 +908,7 @@ static const struct intel_device_info rkl_info = {
};
#define DGFX_FEATURES \
- .memory_regions = REGION_SMEM | REGION_LMEM, \
+ .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
.has_master_unit_irq = 1, \
.has_llc = 0, \
.has_snoop = 1, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 6f79db3ef38c..9f94914958c3 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1257,11 +1257,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
case 8:
case 9:
case 10:
- if (intel_engine_in_execlists_submission_mode(ce->engine)) {
- stream->specific_ctx_id_mask =
- (1U << GEN8_CTX_ID_WIDTH) - 1;
- stream->specific_ctx_id = stream->specific_ctx_id_mask;
- } else {
+ if (intel_engine_uses_guc(ce->engine)) {
/*
* When using GuC, the context descriptor we write in
* i915 is read by GuC and rewritten before it's
@@ -1280,6 +1276,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
stream->specific_ctx_id_mask =
(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+ } else {
+ stream->specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
}
break;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 8164cd763737..22c4d4178766 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -476,6 +476,8 @@ engine_event_status(struct intel_engine_cs *engine,
static int
config_status(struct drm_i915_private *i915, u64 config)
{
+ struct intel_gt *gt = &i915->gt;
+
switch (config) {
case I915_PMU_ACTUAL_FREQUENCY:
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
@@ -489,7 +491,7 @@ config_status(struct drm_i915_private *i915, u64 config)
case I915_PMU_INTERRUPTS:
break;
case I915_PMU_RC6_RESIDENCY:
- if (!HAS_RC6(i915))
+ if (!gt->rc6.supported)
return -ENODEV;
break;
case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index fed337ad7b68..e49da36c62fb 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -419,11 +419,73 @@ static int query_perf_config(struct drm_i915_private *i915,
}
}
+static int query_memregion_info(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct drm_i915_query_memory_regions __user *query_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct drm_i915_memory_region_info __user *info_ptr =
+ &query_ptr->regions[0];
+ struct drm_i915_memory_region_info info = { };
+ struct drm_i915_query_memory_regions query;
+ struct intel_memory_region *mr;
+ u32 total_length;
+ int ret, id, i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM))
+ return -ENODEV;
+
+ if (query_item->flags != 0)
+ return -EINVAL;
+
+ total_length = sizeof(query);
+ for_each_memory_region(mr, i915, id) {
+ if (mr->private)
+ continue;
+
+ total_length += sizeof(info);
+ }
+
+ ret = copy_query_item(&query, sizeof(query), total_length, query_item);
+ if (ret != 0)
+ return ret;
+
+ if (query.num_regions)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
+ if (query.rsvd[i])
+ return -EINVAL;
+ }
+
+ for_each_memory_region(mr, i915, id) {
+ if (mr->private)
+ continue;
+
+ info.region.memory_class = mr->type;
+ info.region.memory_instance = mr->instance;
+ info.probed_size = mr->total;
+ info.unallocated_size = mr->avail;
+
+ if (__copy_to_user(info_ptr, &info, sizeof(info)))
+ return -EFAULT;
+
+ query.num_regions++;
+ info_ptr++;
+ }
+
+ if (__copy_to_user(query_ptr, &query, sizeof(query)))
+ return -EFAULT;
+
+ return total_length;
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
query_engine_info,
query_perf_config,
+ query_memregion_info,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
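
From user space, the new query item follows the usual two-pass i915 query protocol: the first DRM_IOCTL_I915_QUERY call reports the needed size in item.length, the caller allocates a zeroed buffer (num_regions and rsvd[] must be 0, per the checks above), and a second call fills it in. A hedged sketch with error handling trimmed; the fd is assumed to come from an opened DRM render node:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static struct drm_i915_query_memory_regions *query_regions(int fd)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
        };
        struct drm_i915_query q = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_memory_regions *info;

        /* Pass 1: data_ptr is NULL, the kernel fills in the length. */
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
                return NULL;

        /* calloc() zeroes num_regions and rsvd[], as required. */
        info = calloc(1, item.length);
        item.data_ptr = (uintptr_t)info;

        /* Pass 2: the kernel copies out one info struct per region. */
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q)) {
                free(info);
                return NULL;
        }
        return info;
}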
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 996d841c7835..e915ec034c98 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -416,6 +416,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+#define GEN12_HCP_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x2910)
+#define GEN12_HCP_SFC_FORCED_LOCK_BIT REG_BIT(0)
+#define GEN12_HCP_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x2914)
+#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1)
+#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0)
+
#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100)
#define GEN12_SFC_DONE_MAX 4
@@ -487,6 +493,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GAB_CTL _MMIO(0x24000)
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
+#define GU_CNTL _MMIO(0x101010)
+#define LMEM_INIT REG_BIT(7)
+
#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
#define GEN7_STOLEN_RESERVED_ADDR_MASK (0x3FFF << 18)
@@ -2715,6 +2724,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */
+#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10)
+#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
+
#define ERROR_GEN6 _MMIO(0x40a0)
#define GEN7_ERR_INT _MMIO(0x44040)
#define ERR_INT_POISON (1 << 31)
@@ -3790,8 +3802,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define CSHRDDR3CTL_DDR3 (1 << 2)
/* 965 MCH register controlling DRAM channel configuration */
-#define C0DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x206)
-#define C1DRB3 _MMIO(MCHBAR_MIRROR_BASE + 0x606)
+#define C0DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x206)
+#define C1DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x606)
/* snb MCH registers for reading the DRAM channel configuration */
#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004)
@@ -12303,6 +12315,7 @@ enum skl_power_gate {
#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
#define GEN12_GSMBASE _MMIO(0x108100)
+#define GEN12_DSMBASE _MMIO(0x1080C0)
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c8a6ed8617f3..1014c71cf7f5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -929,7 +929,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
u32 seqno;
int ret;
- might_sleep_if(gfpflags_allow_blocking(gfp));
+ might_alloc(gfp);
/* Check that the caller provided an already pinned context */
__intel_context_pin(ce);
@@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(obj->base.resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
+ &shared);
if (ret)
return ret;
@@ -1611,7 +1611,7 @@ i915_request_await_object(struct i915_request *to,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = dma_resv_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_unlocked(obj->base.resv);
}
if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 2744558f3050..c589a681da77 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
+ ret = dma_resv_get_fences(resv, &excl, &count, &shared);
if (ret)
return ret;
@@ -606,7 +606,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = dma_resv_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_unlocked(resv);
}
if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 37a00992cec5..b319fd3f91cc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -27,6 +27,7 @@
#include "display/intel_frontbuffer.h"
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
@@ -93,7 +94,6 @@ static int __i915_vma_active(struct i915_active *ref)
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
-__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));
@@ -124,7 +124,7 @@ vma_create(struct drm_i915_gem_object *obj,
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
+ i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
/* Declare ourselves safe for use inside shrinkers */
if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -448,9 +448,11 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void __iomem *ptr;
int err;
- if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
- err = -ENODEV;
- goto err;
+ if (!i915_gem_object_is_lmem(vma->obj)) {
+ if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+ err = -ENODEV;
+ goto err;
+ }
}
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -458,9 +460,19 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
- vma->node.start,
- vma->node.size);
+ /*
+ * TODO: consider just using i915_gem_object_pin_map() for lmem
+ * instead, which already supports mapping non-contiguous chunks
+ * of pages, that way we can also drop the
+ * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
+ */
+ if (i915_gem_object_is_lmem(vma->obj))
+ ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
+ vma->obj->base.size);
+ else
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
+ vma->node.start,
+ vma->node.size);
if (ptr == NULL) {
err = -ENOMEM;
goto err;
@@ -905,8 +917,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
goto err_fence;
- err = i915_vm_pin_pt_stash(vma->vm,
- &work->stash);
+ err = i915_vm_map_pt_stash(vma->vm, &work->stash);
if (err)
goto err_fence;
}
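
i915_vma_pin_iomap() above gains a local-memory path: lmem objects are mapped through i915_gem_object_lmem_io_map() rather than the GGTT aperture, which also lets the map-and-fenceable check be skipped for them. From a caller's point of view nothing changes; a hedged usage sketch:

	/* Sketch: CPU access through the iomap, lmem or aperture alike.
	 * Assumes the vma is already pinned in the GGTT. */
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	writel(0, ptr);			/* illustrative write */
	i915_vma_unpin_iomap(vma);
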
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index bf837b6bb185..d98e8b81d322 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -22,9 +22,29 @@ static const struct {
.class = INTEL_MEMORY_STOLEN_SYSTEM,
.instance = 0,
},
+ [INTEL_REGION_STOLEN_LMEM] = {
+ .class = INTEL_MEMORY_STOLEN_LOCAL,
+ .instance = 0,
+ },
};
struct intel_memory_region *
+intel_memory_region_lookup(struct drm_i915_private *i915,
+ u16 class, u16 instance)
+{
+ struct intel_memory_region *mr;
+ int id;
+
+ /* XXX: consider converting to an rb tree at some point */

+ for_each_memory_region(mr, i915, id) {
+ if (mr->type == class && mr->instance == instance)
+ return mr;
+ }
+
+ return NULL;
+}
+
+struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
enum intel_memory_type mem_type)
{
@@ -278,8 +298,15 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
case INTEL_MEMORY_SYSTEM:
mem = i915_gem_shmem_setup(i915);
break;
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ mem = i915_gem_stolen_lmem_setup(i915);
+ if (!IS_ERR(mem))
+ i915->mm.stolen_region = mem;
+ break;
case INTEL_MEMORY_STOLEN_SYSTEM:
- mem = i915_gem_stolen_setup(i915);
+ mem = i915_gem_stolen_smem_setup(i915);
+ if (!IS_ERR(mem))
+ i915->mm.stolen_region = mem;
break;
default:
continue;
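
intel_memory_region_lookup() gives uAPI code a way to resolve a userspace-supplied (class, instance) pair with a linear walk over the probed regions; as the XXX notes, an rb tree could replace the scan if the region count grows. A hedged usage sketch, where the args struct is hypothetical:

	struct intel_memory_region *mr;

	mr = intel_memory_region_lookup(i915,
					args->region.memory_class,
					args->region.memory_instance);
	if (!mr || mr->private)
		return -EINVAL;	/* unknown, or not exposed to userspace */
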
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index edd49067c8ca..d24ce5a0b30b 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -11,6 +11,7 @@
#include <linux/mutex.h>
#include <linux/io-mapping.h>
#include <drm/drm_mm.h>
+#include <drm/i915_drm.h>
#include "i915_buddy.h"
@@ -19,25 +20,25 @@ struct drm_i915_gem_object;
struct intel_memory_region;
struct sg_table;
-/**
- * Base memory type
- */
enum intel_memory_type {
- INTEL_MEMORY_SYSTEM = 0,
- INTEL_MEMORY_LOCAL,
+ INTEL_MEMORY_SYSTEM = I915_MEMORY_CLASS_SYSTEM,
+ INTEL_MEMORY_LOCAL = I915_MEMORY_CLASS_DEVICE,
INTEL_MEMORY_STOLEN_SYSTEM,
+ INTEL_MEMORY_STOLEN_LOCAL,
};
enum intel_region_id {
INTEL_REGION_SMEM = 0,
INTEL_REGION_LMEM,
INTEL_REGION_STOLEN_SMEM,
+ INTEL_REGION_STOLEN_LMEM,
INTEL_REGION_UNKNOWN, /* Should be last */
};
#define REGION_SMEM BIT(INTEL_REGION_SMEM)
#define REGION_LMEM BIT(INTEL_REGION_LMEM)
#define REGION_STOLEN_SMEM BIT(INTEL_REGION_STOLEN_SMEM)
+#define REGION_STOLEN_LMEM BIT(INTEL_REGION_STOLEN_LMEM)
#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
#define I915_ALLOC_CONTIGUOUS BIT(1)
@@ -82,7 +83,8 @@ struct intel_memory_region {
u16 type;
u16 instance;
enum intel_region_id id;
- char name[8];
+ char name[16];
+ bool private; /* not for userspace */
struct list_head reserved;
@@ -95,6 +97,10 @@ struct intel_memory_region {
} objects;
};
+struct intel_memory_region *
+intel_memory_region_lookup(struct drm_i915_private *i915,
+ u16 class, u16 instance);
+
int intel_memory_region_init_buddy(struct intel_memory_region *mem);
void intel_memory_region_release_buddy(struct intel_memory_region *mem);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 43701fc90a47..1bed8f666048 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1917,6 +1917,18 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
if (ret)
return ret;
+ /*
+ * The boot firmware initializes local memory and assesses its health.
+ * If memory training fails, the punit will have been instructed to
+ * keep the GT powered down; we won't be able to communicate with it
+ * and we should not continue with driver initialization.
+ */
+ if (IS_DGFX(i915) &&
+ !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
+ drm_err(&i915->drm, "LMEM not initialized by firmware\n");
+ return -ENODEV;
+ }
+
if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
uncore->flags |= UNCORE_HAS_FORCEWAKE;
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 4002c984c2e0..61bf4560d8af 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915)
return NULL;
kref_init(&active->ref);
- i915_active_init(&active->base, __live_active, __live_retire);
+ i915_active_init(&active->base, __live_active, __live_retire, 0);
return active;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index dc394fb7ccfa..152d9ab135b1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -87,14 +87,14 @@ static void simulate_hibernate(struct drm_i915_private *i915)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
-static int pm_prepare(struct drm_i915_private *i915)
+static int igt_pm_prepare(struct drm_i915_private *i915)
{
i915_gem_suspend(i915);
return 0;
}
-static void pm_suspend(struct drm_i915_private *i915)
+static void igt_pm_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
@@ -104,7 +104,7 @@ static void pm_suspend(struct drm_i915_private *i915)
}
}
-static void pm_hibernate(struct drm_i915_private *i915)
+static void igt_pm_hibernate(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
@@ -116,7 +116,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
}
}
-static void pm_resume(struct drm_i915_private *i915)
+static void igt_pm_resume(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
@@ -148,16 +148,16 @@ static int igt_gem_suspend(void *arg)
if (err)
goto out;
- err = pm_prepare(i915);
+ err = igt_pm_prepare(i915);
if (err)
goto out;
- pm_suspend(i915);
+ igt_pm_suspend(i915);
/* Here be dragons! Note that with S3RST any S3 may become S4! */
simulate_hibernate(i915);
- pm_resume(i915);
+ igt_pm_resume(i915);
err = switch_to_context(ctx);
out:
@@ -183,16 +183,16 @@ static int igt_gem_hibernate(void *arg)
if (err)
goto out;
- err = pm_prepare(i915);
+ err = igt_pm_prepare(i915);
if (err)
goto out;
- pm_hibernate(i915);
+ igt_pm_hibernate(i915);
/* Here be dragons! */
simulate_hibernate(i915);
- pm_resume(i915);
+ igt_pm_resume(i915);
err = switch_to_context(ctx);
out:
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 6c2284ef4024..f843a5040706 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -186,7 +186,7 @@ retry:
if (err)
goto err_ppgtt_cleanup;
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
if (err) {
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
@@ -208,7 +208,7 @@ retry:
if (err)
goto err_ppgtt_cleanup;
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
if (err) {
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
@@ -325,11 +325,10 @@ retry:
BIT_ULL(size)))
goto alloc_vm_end;
- err = i915_vm_pin_pt_stash(vm, &stash);
+ err = i915_vm_map_pt_stash(vm, &stash);
if (!err)
vm->allocate_va_range(vm, &stash,
addr, BIT_ULL(size));
-
i915_vm_free_pt_stash(vm, &stash);
alloc_vm_end:
if (err == -EDEADLK) {
@@ -1968,10 +1967,9 @@ retry:
if (err)
goto end_ww;
- err = i915_vm_pin_pt_stash(vm, &stash);
+ err = i915_vm_map_pt_stash(vm, &stash);
if (!err)
vm->allocate_va_range(vm, &stash, offset, chunk_size);
-
i915_vm_free_pt_stash(vm, &stash);
end_ww:
if (err == -EDEADLK) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index bd83a8ec55af..9e9a6cb1d9e5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -307,7 +307,7 @@ static int live_noa_gpr(void *arg)
}
/* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */
- scratch = kmap(__px_page(ce->vm->scratch[0]));
+ scratch = __px_vaddr(ce->vm->scratch[0]);
memset(scratch, POISON_FREE, PAGE_SIZE);
rq = intel_context_create_request(ce);
@@ -405,7 +405,6 @@ static int live_noa_gpr(void *arg)
out_rq:
i915_request_put(rq);
out_ce:
- kunmap(__px_page(ce->vm->scratch[0]));
intel_context_put(ce);
out:
stream_destroy(stream);
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 5fe7b80ca0bd..dd0607254a95 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -967,6 +967,9 @@ static int igt_vma_remapped_gtt(void *arg)
intel_wakeref_t wakeref;
int err = 0;
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
obj = i915_gem_object_create_internal(i915, 10 * 10 * PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 55c9356de5ad..24d87d0fc747 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -94,9 +94,9 @@ int igt_spinner_pin(struct igt_spinner *spin,
}
if (!spin->batch) {
- unsigned int mode =
- i915_coherent_map_type(spin->gt->i915);
+ unsigned int mode;
+ mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index a5fc0bf3feb9..f85fd8cbfbf5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -513,7 +513,7 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
if (err)
return err;
- ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -593,7 +593,9 @@ static int igt_gpu_write(struct i915_gem_context *ctx,
if (err)
break;
+ i915_gem_object_lock(obj, NULL);
err = igt_cpu_check(obj, dword, rng);
+ i915_gem_object_unlock(obj);
if (err)
break;
} while (!__igt_timeout(end_time, NULL));
@@ -629,6 +631,88 @@ out_put:
return err;
}
+static int igt_lmem_create_cleared_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ u32 size, i;
+ int err;
+
+ i915_gem_drain_freed_objects(i915);
+
+ size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
+ size = round_up(size, PAGE_SIZE);
+ i = 0;
+
+ do {
+ struct drm_i915_gem_object *obj;
+ unsigned int flags;
+ u32 dword, val;
+ void *vaddr;
+
+ /*
+ * Alternate between cleared and uncleared allocations, dirtying
+ * the pages each time, to check that pages are always cleared
+ * when requested. Since we are the only user we should see some
+ * overlap of the underlying pages, if not all of them.
+ */
+
+ flags = I915_BO_ALLOC_CPU_CLEAR;
+ if (i & 1)
+ flags = 0;
+
+ obj = i915_gem_object_create_lmem(i915, size, flags);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ i915_gem_object_lock(obj, NULL);
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
+ &prng);
+
+ if (flags & I915_BO_ALLOC_CPU_CLEAR) {
+ err = igt_cpu_check(obj, dword, 0);
+ if (err) {
+ pr_err("%s failed with size=%u, flags=%u\n",
+ __func__, size, flags);
+ goto out_unpin;
+ }
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_unpin;
+ }
+
+ val = prandom_u32_state(&prng);
+
+ memset32(vaddr, val, obj->base.size / sizeof(u32));
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj);
+out_put:
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+
+ if (err)
+ break;
+ ++i;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s completed (%u) iterations\n", __func__, i);
+
+ return err;
+}
+
static int igt_lmem_write_gpu(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -1043,6 +1127,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_create_cleared_cpu),
SUBTEST(igt_lmem_write_cpu),
SUBTEST(igt_lmem_write_gpu),
};
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
index 58710ac3f979..eb03b5b28bad 100644
--- a/drivers/gpu/drm/i915/selftests/librapl.c
+++ b/drivers/gpu/drm/i915/selftests/librapl.c
@@ -5,8 +5,18 @@
#include <asm/msr.h>
+#include "i915_drv.h"
#include "librapl.h"
+bool librapl_supported(const struct drm_i915_private *i915)
+{
+ /* Discrete cards require hwmon integration */
+ if (IS_DGFX(i915))
+ return false;
+
+ return librapl_energy_uJ();
+}
+
u64 librapl_energy_uJ(void)
{
unsigned long long power;
diff --git a/drivers/gpu/drm/i915/selftests/librapl.h b/drivers/gpu/drm/i915/selftests/librapl.h
index 887f3e91dd05..e3b24fad0a7a 100644
--- a/drivers/gpu/drm/i915/selftests/librapl.h
+++ b/drivers/gpu/drm/i915/selftests/librapl.h
@@ -8,6 +8,10 @@
#include <linux/types.h>
+struct drm_i915_private;
+
+bool librapl_supported(const struct drm_i915_private *i915);
+
u64 librapl_energy_uJ(void);
#endif /* SELFTEST_LIBRAPL_H */
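
librapl_supported() gives selftests a single gate for RAPL-based power measurement: discrete cards bail out pending hwmon integration, and on integrated parts a nonzero librapl_energy_uJ() reading doubles as the availability probe. A hypothetical test body using it:

/* Sketch: gating a power-measurement selftest (illustrative test body). */
static int demo_power_test(void *arg)
{
	struct drm_i915_private *i915 = arg;
	u64 before, after;

	if (!librapl_supported(i915))
		return 0;	/* quietly skip: no usable RAPL counters */

	before = librapl_energy_uJ();
	/* ... run the workload being measured ... */
	after = librapl_energy_uJ();

	return after > before ? 0 : -ENODEV;
}
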
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 09225b770bb8..5244f4763477 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -9,6 +9,7 @@
#include <linux/component.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
@@ -23,6 +24,7 @@
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
@@ -57,6 +59,7 @@ struct ingenic_dma_hwdescs {
struct jz_soc_info {
bool needs_dev_clk;
bool has_osd;
+ bool map_noncoherent;
unsigned int max_width, max_height;
const u32 *formats_f0, *formats_f1;
unsigned int num_formats_f0, num_formats_f1;
@@ -342,7 +345,7 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
if (priv->update_clk_rate) {
mutex_lock(&priv->clk_mutex);
clk_set_rate(priv->pix_clk,
- crtc_state->adjusted_mode.clock * 1000);
+ crtc_state->adjusted_mode.crtc_clock * 1000);
priv->update_clk_rate = false;
mutex_unlock(&priv->clk_mutex);
}
@@ -410,6 +413,9 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
old_plane_state->fb->format->format != new_plane_state->fb->format->format))
crtc_state->mode_changed = true;
+ if (priv->soc_info->map_noncoherent)
+ drm_atomic_helper_check_plane_damage(state, new_plane_state);
+
return 0;
}
@@ -526,6 +532,13 @@ void ingenic_drm_plane_config(struct device *dev,
}
}
+bool ingenic_drm_map_noncoherent(const struct device *dev)
+{
+ const struct ingenic_drm *priv = dev_get_drvdata(dev);
+
+ return priv->soc_info->map_noncoherent;
+}
+
static void ingenic_drm_update_palette(struct ingenic_drm *priv,
const struct drm_color_lut *lut)
{
@@ -544,8 +557,8 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
- struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
- plane);
+ struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, plane);
struct drm_crtc_state *crtc_state;
struct ingenic_dma_hwdesc *hwdesc;
unsigned int width, height, cpp, offset;
@@ -553,6 +566,9 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
u32 fourcc;
if (newstate && newstate->fb) {
+ if (priv->soc_info->map_noncoherent)
+ drm_fb_cma_sync_non_coherent(&priv->drm, oldstate, newstate);
+
crtc_state = newstate->crtc->state;
addr = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0);
@@ -742,6 +758,33 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, JZ_LCD_CTRL_EOF_IRQ, 0);
}
+static struct drm_framebuffer *
+ingenic_drm_gem_fb_create(struct drm_device *drm, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct ingenic_drm *priv = drm_device_get_priv(drm);
+
+ if (priv->soc_info->map_noncoherent)
+ return drm_gem_fb_create_with_dirty(drm, file, mode_cmd);
+
+ return drm_gem_fb_create(drm, file, mode_cmd);
+}
+
+static struct drm_gem_object *
+ingenic_drm_gem_create_object(struct drm_device *drm, size_t size)
+{
+ struct ingenic_drm *priv = drm_device_get_priv(drm);
+ struct drm_gem_cma_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->map_noncoherent = priv->soc_info->map_noncoherent;
+
+ return &obj->base;
+}
+
DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops);
static const struct drm_driver ingenic_drm_driver_data = {
@@ -754,6 +797,7 @@ static const struct drm_driver ingenic_drm_driver_data = {
.patchlevel = 0,
.fops = &ingenic_drm_fops,
+ .gem_create_object = ingenic_drm_gem_create_object,
DRM_GEM_CMA_DRIVER_OPS,
.irq_handler = ingenic_drm_irq_handler,
@@ -804,7 +848,7 @@ static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs =
};
static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
+ .fb_create = ingenic_drm_gem_fb_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
@@ -961,6 +1005,9 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
return ret;
}
+ if (soc_info->map_noncoherent)
+ drm_plane_enable_fb_damage_clips(&priv->f1);
+
drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
ret = drm_crtc_init_with_planes(drm, &priv->crtc, primary,
@@ -989,6 +1036,9 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
return ret;
}
+ if (soc_info->map_noncoherent)
+ drm_plane_enable_fb_damage_clips(&priv->f0);
+
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) {
ret = component_bind_all(dev, drm);
if (ret) {
@@ -1245,6 +1295,7 @@ static const u32 jz4770_formats_f0[] = {
static const struct jz_soc_info jz4740_soc_info = {
.needs_dev_clk = true,
.has_osd = false,
+ .map_noncoherent = false,
.max_width = 800,
.max_height = 600,
.formats_f1 = jz4740_formats,
@@ -1255,6 +1306,7 @@ static const struct jz_soc_info jz4740_soc_info = {
static const struct jz_soc_info jz4725b_soc_info = {
.needs_dev_clk = false,
.has_osd = true,
+ .map_noncoherent = false,
.max_width = 800,
.max_height = 600,
.formats_f1 = jz4725b_formats_f1,
@@ -1266,6 +1318,7 @@ static const struct jz_soc_info jz4725b_soc_info = {
static const struct jz_soc_info jz4770_soc_info = {
.needs_dev_clk = false,
.has_osd = true,
+ .map_noncoherent = true,
.max_width = 1280,
.max_height = 720,
.formats_f1 = jz4770_formats_f1,
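
With map_noncoherent set, the driver allocates CMA buffers with a non-coherent mapping and keeps CPU caches in sync manually: damage clips are enabled on each plane, accepted in atomic_check, and flushed in atomic_update via drm_fb_cma_sync_non_coherent(), so only the dirty regions pay the cache-maintenance cost. The pattern, condensed from the hooks above:

	/* atomic_check: let userspace damage clips through */
	if (priv->soc_info->map_noncoherent)
		drm_atomic_helper_check_plane_damage(state, new_plane_state);

	/* atomic_update: sync CPU caches for the damaged regions only */
	if (priv->soc_info->map_noncoherent)
		drm_fb_cma_sync_non_coherent(&priv->drm, oldstate, newstate);
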
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h
index 1b4347f7f084..22654ac1dde1 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.h
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.h
@@ -184,6 +184,7 @@ struct platform_driver;
void ingenic_drm_plane_config(struct device *dev,
struct drm_plane *plane, u32 fourcc);
void ingenic_drm_plane_disable(struct device *dev, struct drm_plane *plane);
+bool ingenic_drm_map_noncoherent(const struct device *dev);
extern struct platform_driver *ingenic_ipu_driver_ptr;
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index 3b1091e7c0cd..61b6d9fdbba1 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -20,10 +20,13 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_property.h>
@@ -285,8 +288,8 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
- struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
- plane);
+ struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, plane);
const struct drm_format_info *finfo;
u32 ctrl, stride = 0, coef_index = 0, format = 0;
bool needs_modeset, upscaling_w, upscaling_h;
@@ -317,6 +320,9 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
JZ_IPU_CTRL_CHIP_EN | JZ_IPU_CTRL_LCDC_SEL);
}
+ if (ingenic_drm_map_noncoherent(ipu->master))
+ drm_fb_cma_sync_non_coherent(ipu->drm, oldstate, newstate);
+
/* New addresses will be committed in vblank handler... */
ipu->addr_y = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0);
if (finfo->num_planes > 1)
@@ -541,7 +547,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->crtc ||
!crtc_state->mode.hdisplay || !crtc_state->mode.vdisplay)
- return 0;
+ goto out_check_damage;
/* Plane must be fully visible */
if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0 ||
@@ -558,7 +564,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
if (!osd_changed(new_plane_state, old_plane_state))
- return 0;
+ goto out_check_damage;
crtc_state->mode_changed = true;
@@ -592,6 +598,10 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
ipu->denom_w = denom_w;
ipu->denom_h = denom_h;
+out_check_damage:
+ if (ingenic_drm_map_noncoherent(ipu->master))
+ drm_atomic_helper_check_plane_damage(state, new_plane_state);
+
return 0;
}
@@ -773,6 +783,9 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
return err;
}
+ if (ingenic_drm_map_noncoherent(master))
+ drm_plane_enable_fb_damage_clips(plane);
+
/*
* Sharpness settings range is [0,32]
* 0 : nearest-neighbor
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index b3fd3501c412..5aade1d13961 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1196,10 +1196,8 @@ static int mcde_dsi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
d->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(d->regs)) {
- dev_err(dev, "no DSI regs\n");
+ if (IS_ERR(d->regs))
return PTR_ERR(d->regs);
- }
dsi_id = readl(d->regs + DSI_ID_REG);
dev_info(dev, "HW revision 0x%08x\n", dsi_id);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 63f411ab393b..6f4c80bbc0eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -30,9 +30,8 @@ struct mtk_disp_color_data {
unsigned int color_offset;
};
-/**
+/*
* struct mtk_disp_color - DISP_COLOR driver structure
- * @ddp_comp: structure containing type enum and hardware resources
* @crtc: associated crtc to report irq events to
* @data: platform colour driver data
*/
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index 3ebf91e0ab41..3a5815ab4079 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -29,10 +29,8 @@ struct mtk_disp_gamma_data {
bool has_dither;
};
-/**
+/*
* struct mtk_disp_gamma - DISP_GAMMA driver structure
- * @ddp_comp - structure containing type enum and hardware resources
- * @crtc - associated crtc to report irq events to
*/
struct mtk_disp_gamma {
struct clk *clk;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 961f87f8d4d1..fa9d79963cd3 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -66,9 +66,8 @@ struct mtk_disp_ovl_data {
bool smi_id_en;
};
-/**
+/*
* struct mtk_disp_ovl - DISP_OVL driver structure
- * @ddp_comp: structure containing type enum and hardware resources
* @crtc: associated crtc to report vblank events to
* @data: platform data
*/
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 728aaadfea8c..705f28ceb4dd 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -55,10 +55,8 @@ struct mtk_disp_rdma_data {
unsigned int fifo_size;
};
-/**
+/*
* struct mtk_disp_rdma - DISP_RDMA driver structure
- * @ddp_comp: structure containing type enum and hardware resources
- * @crtc: associated crtc to report irq events to
* @data: local driver data
*/
struct mtk_disp_rdma {
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 10f693ea89d3..52536e7adb95 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -7,6 +7,8 @@ config DRM_MSM
depends on IOMMU_SUPPORT
depends on OF && COMMON_CLK
depends on QCOM_OCMEM || QCOM_OCMEM=n
+ depends on QCOM_LLCC || QCOM_LLCC=n
+ depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
@@ -15,7 +17,6 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM if ARCH_QCOM
- select QCOM_COMMAND_DB if ARCH_QCOM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 56df86e5f740..a94a43de95ef 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -817,9 +817,9 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
struct dma_fence *fence;
int i, ret;
- fobj = dma_resv_get_list(obj->resv);
+ fobj = dma_resv_shared_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
- fence = dma_resv_get_excl(obj->resv);
+ fence = dma_resv_excl_fence(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
@@ -915,8 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = dma_resv_wait_timeout_rcu(obj->resv, write,
- true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
else if (ret < 0)
@@ -1025,7 +1024,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
rcu_read_lock();
- fobj = rcu_dereference(robj->fence);
+ fobj = dma_resv_shared_list(robj);
if (fobj) {
unsigned int i, shared_count = fobj->shared_count;
@@ -1035,7 +1034,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
}
- fence = rcu_dereference(robj->fence_excl);
+ fence = dma_resv_excl_fence(robj);
if (fence)
describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
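
msm picks up the same dma-resv renames: dma_resv_get_list() becomes dma_resv_shared_list(), dma_resv_get_excl() becomes dma_resv_excl_fence(), and dma_resv_wait_timeout_rcu() becomes dma_resv_wait_timeout(). The wait helper keeps its three-way result, sketched here with an illustrative timeout:

	long ret;

	/* wait_all = true, interruptible = true */
	ret = dma_resv_wait_timeout(obj->resv, true, true,
				    msecs_to_jiffies(500));
	if (ret == 0)
		return -ETIMEDOUT;	/* fences still pending at timeout */
	if (ret < 0)
		return ret;		/* interrupted, or other error */
	/* ret > 0: all fences signalled, jiffies left over */
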
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index c46d0374b6e6..f949767698fc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1617,8 +1617,9 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
mstm->mgr.cbs = &nv50_mstm;
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
- (u8)max_payloads, outp->dcb->dpconf.link_nr,
- (u8)outp->dcb->dpconf.link_bw, conn_base_id);
+ max_payloads, outp->dcb->dpconf.link_nr,
+ drm_dp_bw_code_to_link_rate(outp->dcb->dpconf.link_bw),
+ conn_base_id);
if (ret)
return ret;
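
drm_dp_mst_topology_mgr_init() now expects a link rate in kHz instead of a raw DPCD bandwidth code, so the VBIOS value is passed through drm_dp_bw_code_to_link_rate(). The helper is the standard 27 MHz-per-unit conversion:

	/* e.g. DP_LINK_BW_2_7 (0x0a) -> 270000 kHz */
	int rate = drm_dp_bw_code_to_link_rate(outp->dcb->dpconf.link_bw);
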
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 0cb1f9d848d3..8d048bacd6f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -561,7 +561,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+ asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv);
asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 0a9334deffe2..b45ec3086285 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -312,7 +312,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
else
- if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
+ if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7a2624c0ba4c..520b1ea9d16c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -433,7 +433,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
if (nvbo->bo.pin_count) {
bool error = evict;
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
break;
@@ -446,7 +446,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
if (error) {
NV_ERROR(drm, "bo %p pinned elsewhere: "
"0x%08x vs 0x%08x\n", bo,
- bo->mem.mem_type, domain);
+ bo->resource->mem_type, domain);
ret = -EBUSY;
}
ttm_bo_pin(&nvbo->bo);
@@ -467,7 +467,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
ttm_bo_pin(&nvbo->bo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available -= bo->base.size;
break;
@@ -498,7 +498,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
ttm_bo_unpin(&nvbo->bo);
if (!nvbo->bo.pin_count) {
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available += bo->base.size;
break;
@@ -523,7 +523,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
ttm_bo_unreserve(&nvbo->bo);
return ret;
@@ -737,7 +737,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
NOUVEAU_GEM_DOMAIN_CPU);
@@ -754,7 +754,7 @@ static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_resource *reg)
{
- struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+ struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
struct nouveau_mem *new_mem = nouveau_mem(reg);
struct nvif_vmm *vmm = &drm->client.vmm.vmm;
int ret;
@@ -809,7 +809,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
- ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
+ ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
@@ -918,12 +918,8 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
}
}
- if (new_reg) {
- if (new_reg->mm_node)
- nvbo->offset = (new_reg->start << PAGE_SHIFT);
- else
- nvbo->offset = 0;
- }
+ if (new_reg)
+ nvbo->offset = (new_reg->start << PAGE_SHIFT);
}
@@ -955,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+ struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -969,7 +965,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_resource *old_reg = &bo->mem;
+ struct ttm_resource *old_reg = bo->resource;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
@@ -1009,7 +1005,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
if (old_reg->mem_type == TTM_PL_TT &&
new_reg->mem_type == TTM_PL_SYSTEM) {
nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_reg);
goto out;
}
@@ -1045,20 +1041,11 @@ out:
}
out_ntfy:
if (ret) {
- nouveau_bo_move_ntfy(bo, &bo->mem);
+ nouveau_bo_move_ntfy(bo, bo->resource);
}
return ret;
}
-static int
-nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct nouveau_bo *nvbo = nouveau_bo(bo);
-
- return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
- filp->private_data);
-}
-
static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
struct ttm_resource *reg)
@@ -1179,7 +1166,7 @@ out:
list_del_init(&nvbo->io_reserve_lru);
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
- nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+ nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
goto retry;
}
@@ -1209,12 +1196,12 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
- if (bo->mem.mem_type != TTM_PL_VRAM) {
+ if (bo->resource->mem_type != TTM_PL_VRAM) {
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nvbo->kind)
return 0;
- if (bo->mem.mem_type != TTM_PL_SYSTEM)
+ if (bo->resource->mem_type != TTM_PL_SYSTEM)
return 0;
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
@@ -1222,7 +1209,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
} else {
/* make sure bo is in mappable vram */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
- bo->mem.start + bo->mem.num_pages < mappable)
+ bo->resource->start + bo->resource->num_pages < mappable)
return 0;
for (i = 0; i < nvbo->placement.num_placement; ++i) {
@@ -1331,7 +1318,6 @@ struct ttm_device_funcs nouveau_bo_driver = {
.evict_flags = nouveau_bo_evict_flags,
.delete_mem_notify = nouveau_bo_delete_mem_notify,
.move = nouveau_bo_move,
- .verify_access = nouveau_bo_verify_access,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 7cfac265fd45..40362600eed2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -212,7 +212,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
} else
- if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+ if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access..
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3204fc0a90d2..a616cf4573b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1179,7 +1179,7 @@ nouveau_driver_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = nouveau_drm_ioctl,
- .mmap = nouveau_ttm_mmap,
+ .mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#if defined(CONFIG_COMPAT)
@@ -1212,6 +1212,7 @@ driver_stub = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 93ac78bda750..4f9b3aa5deda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -378,7 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.mem.bus.offset;
+ info->fix.smem_start = nvbo->bo.resource->bus.offset;
info->fix.smem_len = nvbo->bo.base.size;
info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5dcbf67de7e..6b43918035df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -355,8 +355,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
return ret;
}
- fobj = dma_resv_get_list(resv);
- fence = dma_resv_get_excl(resv);
+ fobj = dma_resv_shared_list(resv);
+ fence = dma_resv_excl_fence(resv);
if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
struct nouveau_channel *prev = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index a70e82413fa7..5b27845075a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -39,6 +39,40 @@
#include <nvif/class.h>
#include <nvif/push206e.h>
+static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ pgprot_t prot;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ttm_fault_reserve_notify(bo);
+ if (ret)
+ goto error_unlock;
+
+ nouveau_bo_del_io_reserve_lru(bo);
+ prot = vm_get_page_prot(vma->vm_flags);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ nouveau_bo_add_io_reserve_lru(bo);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+error_unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+static const struct vm_operations_struct nouveau_ttm_vm_ops = {
+ .fault = nouveau_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
@@ -180,6 +214,8 @@ const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
.get_sg_table = nouveau_gem_prime_get_sg_table,
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .vm_ops = &nouveau_ttm_vm_ops,
};
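
The fault handler moves here from nouveau_ttm.c so that mmap can be routed entirely through GEM: fops.mmap becomes the generic drm_gem_mmap(), which resolves the object and dispatches to the .mmap/.vm_ops pair installed above. The wiring, reduced to its shape (the demo_* names are illustrative):

static const struct vm_operations_struct demo_vm_ops = {
	.fault	= nouveau_ttm_fault,	/* reserve, notify, then TTM fault */
	.open	= ttm_bo_vm_open,
	.close	= ttm_bo_vm_close,
	.access	= ttm_bo_vm_access,
};

static const struct drm_gem_object_funcs demo_gem_funcs = {
	/* ... other hooks elided ... */
	.mmap	= drm_gem_ttm_mmap,	/* generic TTM-backed GEM mmap */
	.vm_ops	= &demo_vm_ops,
};
/* fops.mmap is plain drm_gem_mmap(); no driver-private mmap path remains. */
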
int
@@ -240,7 +276,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
if (is_power_of_2(nvbo->valid_domains))
rep->domain = nvbo->valid_domains;
- else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -311,11 +347,11 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
- bo->mem.mem_type == TTM_PL_VRAM)
+ bo->resource->mem_type == TTM_PL_VRAM)
pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
- bo->mem.mem_type == TTM_PL_TT)
+ bo->resource->mem_type == TTM_PL_TT)
pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
@@ -525,13 +561,13 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->offset == b->presumed.offset &&
- ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+ ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
- (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ (nvbo->bo.resource->mem_type == TTM_PL_TT &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
continue;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ if (nvbo->bo.resource->mem_type == TTM_PL_TT)
b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
else
b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -645,7 +681,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
}
if (!nvbo->kmap.virtual) {
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
&nvbo->kmap);
if (ret) {
NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -834,7 +870,7 @@ revalidate:
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0,
- nvbo->bo.mem.
+ nvbo->bo.resource->
num_pages,
&nvbo->kmap);
if (ret) {
@@ -928,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
- no_wait ? 0 : 30 * HZ);
+ lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+ no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
else if (lret > 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a1049e9feee1..0de6549fb875 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -178,25 +178,24 @@ void
nouveau_mem_del(struct ttm_resource *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- if (!mem)
- return;
+
nouveau_mem_fini(mem);
- kfree(reg->mm_node);
- reg->mm_node = NULL;
+ kfree(mem);
}
int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_mem *mem;
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
+
mem->cli = cli;
mem->kind = kind;
mem->comp = comp;
- reg->mm_node = mem;
+ *res = &mem->base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 7df3848e85aa..2c01166a90f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -6,13 +6,8 @@ struct ttm_tt;
#include <nvif/mem.h>
#include <nvif/vmm.h>
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
- return reg->mm_node;
-}
-
struct nouveau_mem {
+ struct ttm_resource base;
struct nouveau_cli *cli;
u8 kind;
u8 comp;
@@ -20,8 +15,14 @@ struct nouveau_mem {
struct nvif_vma vma[2];
};
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+ return container_of(reg, struct nouveau_mem, base);
+}
+
int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
- struct ttm_resource *);
+ struct ttm_resource **);
void nouveau_mem_del(struct ttm_resource *);
int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
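
nouveau_mem now embeds its ttm_resource instead of dangling off reg->mm_node, so the downcast is a type-safe container_of() and nouveau_mem_new() hands the embedded base back through a ttm_resource **. The generic embed-and-downcast shape, with illustrative demo_* names:

struct demo_mem {
	struct ttm_resource base;	/* embedded, freed with the wrapper */
	/* ... driver-private fields ... */
};

static inline struct demo_mem *to_demo_mem(struct ttm_resource *res)
{
	return container_of(res, struct demo_mem, base);
}
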
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e8b506a6685b..f4c2e46b6fe1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -26,13 +26,13 @@
#include <linux/limits.h>
#include <linux/swiotlb.h>
+#include <drm/ttm/ttm_range_manager.h>
+
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"
-#include <drm/drm_legacy.h>
-
#include <core/tegra.h>
static void
@@ -45,7 +45,7 @@ static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -54,13 +54,15 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
- ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+ ttm_resource_init(bo, place, *res);
+
+ ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
if (ret) {
- nouveau_mem_del(reg);
+ nouveau_mem_del(*res);
return ret;
}
@@ -76,17 +78,18 @@ static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
- reg->start = 0;
+ ttm_resource_init(bo, place, *res);
+ (*res)->start = 0;
return 0;
}
@@ -99,26 +102,27 @@ static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *reg)
+ struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_mem *mem;
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
- mem = nouveau_mem(reg);
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
+ mem = nouveau_mem(*res);
+ ttm_resource_init(bo, place, *res);
ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
- (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+ (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
- nouveau_mem_del(reg);
+ nouveau_mem_del(*res);
return ret;
}
- reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+ (*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
return 0;
}
@@ -127,55 +131,6 @@ const struct ttm_resource_manager_func nv04_gart_manager = {
.free = nouveau_manager_del,
};
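
All three resource managers follow the new TTM convention: the alloc hook receives a ttm_resource ** and must call ttm_resource_init() on the freshly allocated, embedded resource before filling it in. Condensed shape of such a hook (demo_mem as in the sketch above):

static int demo_manager_new(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource **res)
{
	struct demo_mem *mem = kzalloc(sizeof(*mem), GFP_KERNEL);

	if (!mem)
		return -ENOMEM;

	*res = &mem->base;
	ttm_resource_init(bo, place, *res);	/* size, placement, bookkeeping */
	(*res)->start = 0;			/* manager-specific setup */
	return 0;
}
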
-static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct ttm_buffer_object *bo = vma->vm_private_data;
- pgprot_t prot;
- vm_fault_t ret;
-
- ret = ttm_bo_vm_reserve(bo, vmf);
- if (ret)
- return ret;
-
- ret = nouveau_ttm_fault_reserve_notify(bo);
- if (ret)
- goto error_unlock;
-
- nouveau_bo_del_io_reserve_lru(bo);
- prot = vm_get_page_prot(vma->vm_flags);
- ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
- nouveau_bo_add_io_reserve_lru(bo);
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
-
-error_unlock:
- dma_resv_unlock(bo->base.resv);
- return ret;
-}
-
-static const struct vm_operations_struct nouveau_ttm_vm_ops = {
- .fault = nouveau_ttm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
-};
-
-int
-nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
- int ret;
-
- ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
- if (ret)
- return ret;
-
- vma->vm_ops = &nouveau_ttm_vm_ops;
- return 0;
-}
-
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index dbf6dc238efd..2f0efda7ccdb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -17,7 +17,6 @@ struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo,
int nouveau_ttm_init(struct nouveau_drm *drm);
void nouveau_ttm_fini(struct nouveau_drm *drm);
-int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
int nouveau_ttm_global_init(struct nouveau_drm *);
void nouveau_ttm_global_release(struct nouveau_drm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index a49e88129c92..67d6619fcd5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -77,7 +77,7 @@ int
nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
struct nouveau_vma **pvma)
{
- struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+ struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
struct nouveau_vma *vma;
struct nvif_vma tmp;
int ret;
@@ -96,7 +96,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
vma->fence = NULL;
list_add_tail(&vma->head, &nvbo->vma_list);
- if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+ if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
mem->mem.size, &tmp);
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index b1cd8d7dd87d..07c2e0878c24 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -77,8 +77,8 @@ static int
nv17_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
+ struct ttm_resource *reg = priv->bo->bo.resource;
struct nv10_fence_chan *fctx;
- struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 1625826505f6..ea1e1f480bfe 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- struct ttm_resource *reg = &priv->bo->bo.mem;
+ struct ttm_resource *reg = priv->bo->bo.resource;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index 58db83ebadc5..a96084b34a78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -46,7 +46,7 @@ tu102_mc_intr_update(struct tu102_mc *mc)
nvkm_wr32(device, 0xb81610, 0x6);
}
-void
+static void
tu102_mc_intr_unarm(struct nvkm_mc *base)
{
struct tu102_mc *mc = tu102_mc(base);
@@ -58,7 +58,7 @@ tu102_mc_intr_unarm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-void
+static void
tu102_mc_intr_rearm(struct nvkm_mc *base)
{
struct tu102_mc *mc = tu102_mc(base);
@@ -70,7 +70,7 @@ tu102_mc_intr_rearm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-void
+static void
tu102_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
{
struct tu102_mc *mc = tu102_mc(base);
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index fe5ac3ef9018..4787f0833264 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -42,6 +42,7 @@ struct kd35t133 {
struct gpio_desc *reset_gpio;
struct regulator *vdd;
struct regulator *iovcc;
+ enum drm_panel_orientation orientation;
bool prepared;
};
@@ -216,6 +217,7 @@ static int kd35t133_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
+ drm_connector_set_panel_orientation(connector, ctx->orientation);
return 1;
}
@@ -258,6 +260,12 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
return ret;
}
+ ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret);
+ return ret;
+ }
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
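
The panel reads its orientation from the devicetree once at probe and republishes it on the connector from get_modes(), both via long-standing DRM helpers; condensed to its two calls:

	/* probe: */
	ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
	if (ret < 0)
		return ret;

	/* get_modes: */
	drm_connector_set_panel_orientation(connector, ctx->orientation);
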
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 5e9ccefb88f6..2229f1af2ca8 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -29,7 +29,7 @@
* DEALINGS IN THE SOFTWARE.
*/
-/**
+/*
* Raspberry Pi 7" touchscreen panel driver.
*
* The 7" touchscreen consists of a DPI LCD panel, a Toshiba
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
index 9c3563c61e8c..07a48f621289 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
@@ -27,7 +27,7 @@ static int s6e63m0_dsi_dcs_read(struct device *dev, const u8 cmd, u8 *data)
return ret;
}
- dev_info(dev, "DSI read CMD %02x = %02x\n", cmd, *data);
+ dev_dbg(dev, "DSI read CMD %02x = %02x\n", cmd, *data);
return 0;
}
@@ -42,7 +42,7 @@ static int s6e63m0_dsi_dcs_write(struct device *dev, const u8 *data, size_t len)
int chunk;
int ret;
- dev_info(dev, "DSI writing dcs seq: %*ph\n", (int)len, data);
+ dev_dbg(dev, "DSI writing dcs seq: %*ph\n", (int)len, data);
/* Pick out and skip past the DCS command */
cmd = *seqp;
@@ -80,7 +80,7 @@ static int s6e63m0_dsi_dcs_write(struct device *dev, const u8 *data, size_t len)
cmdwritten += chunk;
seqp += chunk;
}
- dev_info(dev, "sent command %02x %02x bytes\n", cmd, cmdwritten);
+ dev_dbg(dev, "sent command %02x %02x bytes\n", cmd, cmdwritten);
usleep_range(8000, 9000);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9be050ab372f..21939d4352cf 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -798,6 +798,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return 0;
disable_pm_runtime:
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
free_ddc:
if (panel->ddc)
@@ -814,6 +815,7 @@ static int panel_simple_remove(struct device *dev)
drm_panel_disable(&panel->base);
drm_panel_unprepare(&panel->base);
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
if (panel->ddc)
put_device(&panel->ddc->dev);
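
The fix balances pm_runtime_use_autosuspend() with pm_runtime_dont_use_autosuspend() on both the probe error path and remove, as the runtime-PM rules require. The general pairing, in a hypothetical driver:

static int demo_probe(struct device *dev)
{
	int err;

	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	err = demo_hw_init(dev);	/* hypothetical init step */
	if (err) {
		pm_runtime_dont_use_autosuspend(dev);	/* undo use_autosuspend */
		pm_runtime_disable(dev);
	}
	return err;
}
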
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 4d2a149b202c..320a2a8fd459 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -38,7 +38,7 @@
#define DSI_CMD2_BK1_SPD2 0xC2 /* Source EQ2 Setting */
#define DSI_CMD2_BK1_MIPISET1 0xD0 /* MIPI Setting 1 */
-/**
+/*
* Command2 with BK function selection.
*
* BIT[4, 0]: [CN2, BKXSEL]
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 597cf1459b0a..f614e98771e4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -45,6 +45,7 @@ struct panfrost_features {
u32 thread_max_workgroup_sz;
u32 thread_max_barrier_sz;
u32 coherency_features;
+ u32 afbc_features;
u32 texture_features[4];
u32 js_features[16];
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index ca07098a6141..075ec0ef746c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -63,6 +63,7 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
thread_max_barrier_sz);
PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
+ PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
@@ -311,8 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
if (!gem_obj)
return -ENOENT;
- ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
- true, timeout);
+ ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
@@ -547,6 +547,7 @@ DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
* Panfrost driver version:
* - 1.0 - initial interface
* - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
+ * - 1.2 - adds AFBC_FEATURES query
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -559,7 +560,7 @@ static const struct drm_driver panfrost_drm_driver = {
.desc = "panfrost DRM",
.date = "20180908",
.major = 1,
- .minor = 1,
+ .minor = 2,
.gem_create_object = panfrost_gem_create_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 2aae636f1cf5..0e70e27fd8c3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -228,6 +228,7 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
+ pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
for (i = 0; i < 4; i++)
pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 6003cfeb1322..2df3e999a38d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -203,7 +203,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
int i;
for (i = 0; i < bo_count; i++)
- implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
+ implicit_fences[i] = dma_resv_get_excl_unlocked(bos[i]->resv);
}
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index eddaa62ad8b0..dc9df5457f1c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -82,6 +82,7 @@
#define GPU_TEXTURE_FEATURES(n) (0x0B0 + ((n) * 4))
#define GPU_JS_FEATURES(n) (0x0C0 + ((n) * 4))
+#define GPU_AFBC_FEATURES (0x4C) /* (RO) AFBC support on Bifrost */
#define GPU_SHADER_PRESENT_LO 0x100 /* (RO) Shader core present bitmap, low word */
#define GPU_SHADER_PRESENT_HI 0x104 /* (RO) Shader core present bitmap, high word */
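Taken together, the panfrost hunks above read GPU_AFBC_FEATURES at init, expose it through GET_PARAM and bump the driver minor to 1.2. A hedged userspace sketch of the new query, assuming the matching DRM_PANFROST_PARAM_AFBC_FEATURES uapi value from this series and an already-open render-node fd:

#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

static int query_afbc_features(int fd, __u64 *out)
{
	struct drm_panfrost_get_param get = {
		.param = DRM_PANFROST_PARAM_AFBC_FEATURES,
	};

	/* kernels older than driver version 1.2 reject this with EINVAL */
	if (ioctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get))
		return -1;
	*out = get.value;
	return 0;
}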
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index 80f6748055e3..3aae387a96af 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -3,6 +3,7 @@ config DRM_PL111
tristate "DRM Support for PL111 CLCD Controller"
depends on DRM
depends on ARM || ARM64 || COMPILE_TEST
+ depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
depends on COMMON_CLK
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 183d15e2cf58..1f9a59601bb1 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
int rel;
rcu_read_lock();
- fobj = rcu_dereference(bo->tbo.base.resv->fence);
+ fobj = dma_resv_shared_list(bo->tbo.base.resv);
rel = fobj ? fobj->shared_count : 0;
rcu_read_unlock();
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 20a0f3ab84ad..dd6abee55f56 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -292,12 +292,12 @@ qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
unsigned long offset)
{
struct qxl_memslot *slot =
- (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ (bo->tbo.resource->mem_type == TTM_PL_VRAM)
? &qdev->main_slot : &qdev->surfaces_slot;
- /* TODO - need to hold one of the locks to read bo->tbo.mem.start */
+ /* TODO - need to hold one of the locks to read bo->tbo.resource->start */
- return slot->high_bits | ((bo->tbo.mem.start << PAGE_SHIFT) + offset);
+ return slot->high_bits | ((bo->tbo.resource->start << PAGE_SHIFT) + offset);
}
/* qxl_display.c */
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index a635d9fdf8ac..d636ba685451 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -58,6 +58,8 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
surf.height = args->height;
surf.stride = pitch;
surf.format = format;
+ surf.data = 0;
+
r = qxl_gem_object_create_with_handle(qdev, file_priv,
QXL_GEM_DOMAIN_CPU,
args->size, &surf, &qobj,
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6e26d70f2f07..fbb36e3e8564 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -212,14 +212,14 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct io_mapping *map;
struct dma_buf_map bo_map;
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
map = qdev->vram_mapping;
- else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
+ else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
map = qdev->surface_mapping;
else
goto fallback;
- offset = bo->tbo.mem.start << PAGE_SHIFT;
+ offset = bo->tbo.resource->start << PAGE_SHIFT;
return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
if (bo->kptr) {
@@ -266,8 +266,8 @@ int qxl_bo_vunmap(struct qxl_bo *bo)
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
- if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
- (bo->tbo.mem.mem_type != TTM_PL_PRIV))
+ if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
+ (bo->tbo.resource->mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 47afe95d04a1..19fd39d9a00c 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -32,6 +32,7 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -131,7 +132,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
qbo = to_qxl_bo(bo);
qdev = to_qxl(qbo->tbo.base.dev);
- if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
+ if (bo->resource->mem_type == TTM_PL_PRIV && qbo->surface_id)
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}
@@ -140,7 +141,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
int ret;
qxl_bo_move_notify(bo, new_mem);
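The qxl changes above are part of the tree-wide TTM rework that replaces the struct ttm_resource embedded in every buffer object with a pointer, so placement checks now dereference bo->resource. A one-function sketch of the conversion:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static bool example_bo_is_vram(struct ttm_buffer_object *bo)
{
	/* before this series: bo->mem.mem_type == TTM_PL_VRAM */
	return bo->resource->mem_type == TTM_PL_VRAM;
}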
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index 5d73043446e3..0ecccf25a3c7 100644
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -71,6 +71,8 @@ static void drm_ati_free_pcigart_table(struct drm_device *dev,
drm_dma_handle_t *dmah = gart_info->table_handle;
dma_free_coherent(dev->dev, dmah->size, dmah->vaddr, dmah->busaddr);
+ kfree(dmah);
+
gart_info->table_handle = NULL;
}
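The kfree() added above plugs a leak: dma_free_coherent() releases only the DMA buffer that the drm_dma_handle_t describes, not the handle allocation itself. A minimal sketch of the corrected pairing, assuming the handle came from a kmalloc()-style allocation:

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <drm/drm_legacy.h>

static void example_free_table(struct device *dev, drm_dma_handle_t *dmah)
{
	dma_free_coherent(dev, dmah->size, dmah->vaddr, dmah->busaddr);
	kfree(dmah);	/* previously leaked */
}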
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 8b256123cf2b..2e1bc01aa5c9 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -29,7 +29,7 @@
* Rickard E. (Rik) Faith <[email protected]>
* Kevin E. Martin <[email protected]>
* Gareth Hughes <[email protected]>
- * Michel D�zer <[email protected]>
+ * Michel Dänzer <[email protected]>
*/
#ifndef __R128_DRV_H__
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 42a8afa839cb..73ea5189dfb1 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7439,7 +7439,7 @@ static void cik_irq_disable(struct radeon_device *rdev)
}
/**
- * cik_irq_disable - disable interrupts for suspend
+ * cik_irq_suspend - disable interrupts for suspend
*
* @rdev: radeon_device pointer
*
diff --git a/drivers/gpu/drm/radeon/evergreen.h b/drivers/gpu/drm/radeon/evergreen.h
index 4025a4e474d1..b07befe14458 100644
--- a/drivers/gpu/drm/radeon/evergreen.h
+++ b/drivers/gpu/drm/radeon/evergreen.h
@@ -45,7 +45,6 @@ void sumo_rlc_fini(struct radeon_device *rdev);
int sumo_rlc_init(struct radeon_device *rdev);
void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
-void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
int evergreen_rlc_resume(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index fcfcaec25a9e..3c4e7c15fd15 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1406,7 +1406,7 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
}
/**
- * r100_cs_packet_next_vline() - parse userspace VLINE packet
+ * r100_cs_packet_parse_vline() - parse userspace VLINE packet
* @p: parser structure holding parsing context.
*
* Userspace sends a special sequence for VLINE waits.
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 1cf2a5e0d91d..1e00f6b99f94 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -187,7 +187,6 @@ extern int rv370_pcie_gart_init(struct radeon_device *rdev);
extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
-extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
/*
* r420,r423,rv410
@@ -404,7 +403,6 @@ void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
-int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
int rv6xx_get_temp(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 48162501c1ee..9ed2b2700e0a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -400,12 +400,12 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
- return (int)la->robj->tbo.mem.num_pages -
- (int)lb->robj->tbo.mem.num_pages;
+ return (int)la->robj->tbo.resource->num_pages -
+ (int)lb->robj->tbo.resource->num_pages;
}
/**
- * cs_parser_fini() - clean parser states
+ * radeon_cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
* @backoff: indicator to backoff the reservation
@@ -516,7 +516,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
}
r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
- &rdev->ring_tmp_bo.bo->tbo.mem);
+ rdev->ring_tmp_bo.bo->tbo.resource);
if (r)
return r;
@@ -530,7 +530,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
+ r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 652af7a134bd..406681317419 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
+ work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 13072c2a6502..ec867fa880a4 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -642,7 +642,7 @@ radeon_dp_mst_init(struct radeon_connector *radeon_connector)
radeon_connector->mst_mgr.cbs = &mst_cbs;
return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev,
&radeon_connector->ddc_bus->aux, 16, 6,
- 4, (u8)max_link_rate,
+ 4, drm_dp_bw_code_to_link_rate(max_link_rate),
radeon_connector->base.base.id);
}
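drm_dp_bw_code_to_link_rate() expands a DPCD bandwidth code into a link rate in kHz, which is what drm_dp_mst_topology_mgr_init() expects; passing the raw code truncated to u8, as before, made every MST link look impossibly slow. For illustration:

#include <linux/bug.h>
#include <drm/drm_dp_helper.h>

static void example_link_rates(void)
{
	/* DP_LINK_BW_1_62 (0x06) -> 162000 kHz; DP_LINK_BW_2_7 (0x0a) -> 270000 kHz */
	WARN_ON(drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62) != 162000);
	WARN_ON(drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7) != 270000);
}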
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 31d3dd0e5258..8cd135fa6dcd 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -545,7 +545,7 @@ static const struct file_operations radeon_driver_kms_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = radeon_drm_ioctl,
- .mmap = radeon_mmap,
+ .mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#ifdef CONFIG_COMPAT
@@ -620,6 +620,7 @@ static const struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 05ea2f39f626..458f92a70887 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,6 +44,42 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj);
const struct drm_gem_object_funcs radeon_gem_object_funcs;
+static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+ struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
+ vm_fault_t ret;
+
+ down_read(&rdev->pm.mclk_lock);
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ goto unlock_mclk;
+
+ ret = radeon_bo_fault_reserve_notify(bo);
+ if (ret)
+ goto unlock_resv;
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ goto unlock_mclk;
+
+unlock_resv:
+ dma_resv_unlock(bo->base.resv);
+
+unlock_mclk:
+ up_read(&rdev->pm.mclk_lock);
+ return ret;
+}
+
+static const struct vm_operations_struct radeon_gem_vm_ops = {
+ .fault = radeon_gem_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
@@ -125,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (!r)
r = -EBUSY;
@@ -226,6 +262,17 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
return r;
}
+static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);
+
+ if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
+ return -EPERM;
+
+ return drm_gem_ttm_mmap(obj, vma);
+}
+
const struct drm_gem_object_funcs radeon_gem_object_funcs = {
.free = radeon_gem_object_free,
.open = radeon_gem_object_open,
@@ -236,6 +283,8 @@ const struct drm_gem_object_funcs radeon_gem_object_funcs = {
.get_sg_table = radeon_gem_prime_get_sg_table,
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
+ .mmap = radeon_gem_object_mmap,
+ .vm_ops = &radeon_gem_vm_ops,
};
/*
@@ -474,13 +523,13 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+ r = dma_resv_test_signaled(robj->tbo.base.resv, true);
if (r == 0)
r = -EBUSY;
else
r = 0;
- cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_put(gobj);
return r;
@@ -503,14 +552,14 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
r = ret;
/* Flush HDP cache via MMIO if necessary */
- cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
@@ -594,7 +643,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
goto error_free;
list_for_each_entry(entry, &list, head) {
- domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+ domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (domain == RADEON_GEM_DOMAIN_CPU)
@@ -607,7 +656,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
goto error_unlock;
if (bo_va->it.start)
- r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+ r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
@@ -811,7 +860,7 @@ static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
unsigned domain;
const char *placement;
- domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+ domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
switch (domain) {
case RADEON_GEM_DOMAIN_VRAM:
placement = "VRAM";
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index e37c9a57a7c3..9fa88549c89e 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
return true;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index cee11c55fd15..bfaaa3c969a3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -76,7 +76,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct radeon_bo, tbo);
- radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+ radeon_update_memory_usage(bo, bo->tbo.resource->mem_type, -1);
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
@@ -250,7 +250,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
}
return 0;
}
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
if (r) {
return r;
}
@@ -359,7 +359,7 @@ void radeon_bo_unpin(struct radeon_bo *bo)
{
ttm_bo_unpin(&bo->tbo);
if (!bo->tbo.pin_count) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
bo->rdev->vram_pin_size -= radeon_bo_size(bo);
else
bo->rdev->gart_pin_size -= radeon_bo_size(bo);
@@ -506,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u32 domain = lobj->preferred_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
- radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+ radeon_mem_type_to_domain(bo->tbo.resource->mem_type);
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
@@ -605,7 +605,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
out:
radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
- bo->tbo.mem.start << PAGE_SHIFT,
+ bo->tbo.resource->start << PAGE_SHIFT,
bo->tbo.base.size);
return 0;
}
@@ -711,7 +711,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
return 0;
}
- if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+ if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
if (!has_moved)
return 0;
@@ -743,7 +743,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
if (!new_mem)
return;
- radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+ radeon_update_memory_usage(rbo, bo->resource->mem_type, -1);
radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
@@ -760,11 +760,11 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
rdev = rbo->rdev;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
+ size = bo->resource->num_pages << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
if ((offset + size) <= rdev->mc.visible_vram_size)
return 0;
@@ -786,7 +786,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(bo, &rbo->placement, &ctx);
} else if (likely(!r)) {
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
if ((offset + size) > rdev->mc.visible_vram_size)
return VM_FAULT_SIGBUS;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index fd4116bdde0f..1739c6a142cd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -95,7 +95,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
rdev = radeon_get_rdev(bo->tbo.bdev);
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
start = rdev->mc.gtt_start;
break;
@@ -104,7 +104,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
break;
}
- return (bo->tbo.mem.start << PAGE_SHIFT) + start;
+ return (bo->tbo.resource->start << PAGE_SHIFT) + start;
}
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3861c0b98fcf..c67b6ddb29a4 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -154,7 +154,7 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
return;
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
ttm_bo_unmap_virtual(&bo->tbo);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 5d3302945076..9257b60144c4 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -98,14 +98,14 @@ int radeon_sync_resv(struct radeon_device *rdev,
int r = 0;
/* always sync to the exclusive fence */
- f = dma_resv_get_excl(resv);
+ f = dma_resv_excl_fence(resv);
fence = f ? to_radeon_fence(f) : NULL;
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
r = dma_fence_wait(f, true);
- flist = dma_resv_get_list(resv);
+ flist = dma_resv_shared_list(resv);
if (shared || !flist || r)
return r;
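The renames above belong to the series-wide dma-resv cleanup; semantics are unchanged. The mapping visible in these hunks: dma_resv_get_excl() -> dma_resv_excl_fence(), dma_resv_get_excl_rcu() -> dma_resv_get_excl_unlocked(), dma_resv_get_list() -> dma_resv_shared_list(), dma_resv_test_signaled_rcu() -> dma_resv_test_signaled(), and dma_resv_wait_timeout_rcu() -> dma_resv_wait_timeout(). A minimal usage sketch of the new wait helper, mirroring the 30-second waits above:

#include <linux/dma-resv.h>
#include <linux/jiffies.h>

static long example_wait_idle(struct dma_resv *resv)
{
	/* wait_all = true, interruptible = true, 30 second timeout */
	return dma_resv_wait_timeout(resv, true, true, 30 * HZ);
}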
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 1729cb9a95c5..c9fed5f2b870 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.mem.num_pages;
+ __entry->pages = bo->tbo.resource->num_pages;
),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3361d11769a2..ad2a5a791bba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -45,6 +45,7 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -98,12 +99,12 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
return;
}
rbo = container_of(bo, struct radeon_bo, tbo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
- bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+ bo->resource->start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
int i;
@@ -135,17 +136,6 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
*placement = rbo->placement;
}
-static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
- struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
-
- if (radeon_ttm_tt_has_userptr(rdev, bo->ttm))
- return -EPERM;
- return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
- filp->private_data);
-}
-
static int radeon_move_blit(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem,
@@ -206,9 +196,9 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
+ struct ttm_resource *old_mem = bo->resource;
struct radeon_device *rdev;
struct radeon_bo *rbo;
- struct ttm_resource *old_mem = &bo->mem;
int r;
if (new_mem->mem_type == TTM_PL_TT) {
@@ -240,7 +230,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (old_mem->mem_type == TTM_PL_TT &&
new_mem->mem_type == TTM_PL_SYSTEM) {
radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
@@ -703,7 +693,6 @@ static struct ttm_device_funcs radeon_bo_driver = {
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
- .verify_access = &radeon_verify_access,
.delete_mem_notify = &radeon_bo_delete_mem_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
};
@@ -800,59 +789,6 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
man->size = size >> PAGE_SHIFT;
}
-static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
- struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
- vm_fault_t ret;
-
- down_read(&rdev->pm.mclk_lock);
-
- ret = ttm_bo_vm_reserve(bo, vmf);
- if (ret)
- goto unlock_mclk;
-
- ret = radeon_bo_fault_reserve_notify(bo);
- if (ret)
- goto unlock_resv;
-
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT, 1);
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- goto unlock_mclk;
-
-unlock_resv:
- dma_resv_unlock(bo->base.resv);
-
-unlock_mclk:
- up_read(&rdev->pm.mclk_lock);
- return ret;
-}
-
-static const struct vm_operations_struct radeon_ttm_vm_ops = {
- .fault = radeon_ttm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
-};
-
-int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int r;
- struct drm_file *file_priv = filp->private_data;
- struct radeon_device *rdev = file_priv->minor->dev->dev_private;
-
- if (rdev == NULL)
- return -EINVAL;
-
- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
- if (unlikely(r != 0))
- return r;
-
- vma->vm_ops = &radeon_ttm_vm_ops;
- return 0;
-}
-
#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_vram_dump_table_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.h b/drivers/gpu/drm/radeon/radeon_ttm.h
index 4d7b90ee2774..91ea7141bc81 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.h
+++ b/drivers/gpu/drm/radeon/radeon_ttm.h
@@ -32,6 +32,5 @@ struct radeon_device;
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
-int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
#endif /* __RADEON_TTM_H__ */
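With the radeon_gem.c, radeon_drv.c and radeon_ttm.c hunks taken together, radeon drops its private file-level mmap (radeon_mmap() plus ttm_bo_mmap()) in favour of the generic GEM path: drm_gem_mmap() in file_operations looks up the object and dispatches to drm_gem_object_funcs.mmap, where the driver can still veto userptr BOs and install its own vm_ops. A minimal sketch of that wiring, with illustrative names:

#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_ttm_helper.h>

static const struct drm_gem_object_funcs example_gem_funcs = {
	.mmap = drm_gem_ttm_mmap,	/* or a wrapper rejecting userptr BOs */
	/* .vm_ops supplies fault/open/close/access, as radeon does above */
};

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.mmap = drm_gem_mmap,		/* looks up the BO, calls funcs->mmap */
};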
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index dfa9fdbe98da..1f5b1a5c0a09 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- f = dma_resv_get_excl(bo->tbo.base.resv);
+ f = dma_resv_excl_fence(bo->tbo.base.resv);
if (f) {
r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2dc9c9f98049..36a38adaaea9 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -51,7 +51,7 @@
*/
/**
- * radeon_vm_num_pde - return the number of page directory entries
+ * radeon_vm_num_pdes - return the number of page directory entries
*
* @rdev: radeon_device pointer
*
@@ -626,7 +626,7 @@ static uint32_t radeon_vm_page_flags(uint32_t flags)
}
/**
- * radeon_vm_update_pdes - make sure that page directory is valid
+ * radeon_vm_update_page_directory - make sure that page directory is valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a4a45daf93f2..8ab3247dbc4a 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -42,6 +42,7 @@
#define CDN_FW_TIMEOUT_MS (64 * 1000)
#define CDN_DPCD_TIMEOUT_MS 5000
#define CDN_DP_FIRMWARE "rockchip/dptx.bin"
+MODULE_FIRMWARE(CDN_DP_FIRMWARE);
struct cdn_dp_data {
u8 max_phy;
@@ -73,6 +74,7 @@ static int cdn_dp_grf_write(struct cdn_dp_device *dp,
ret = regmap_write(dp->grf, reg, val);
if (ret) {
DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
+ clk_disable_unprepare(dp->grf_clk);
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 9d2163ef4d6e..33fb4d05c506 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -658,7 +658,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
*/
do {
tu_size_reg += 2;
- symbol = tu_size_reg * mode->clock * bit_per_pix;
+ symbol = (u64)tu_size_reg * mode->clock * bit_per_pix;
do_div(symbol, dp->max_lanes * link_rate * 8);
rem = do_div(symbol, 1000);
if (tu_size_reg > 64) {
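The (u64) cast above matters because tu_size_reg, mode->clock and bit_per_pix are all 32-bit: without it the product is evaluated in 32-bit arithmetic and wraps before do_div() ever sees it. A sketch of the safe form, with hypothetical values:

#include <linux/types.h>

static u64 example_symbol(u32 tu_size_reg, u32 clock_khz, u32 bit_per_pix)
{
	/* widening the first operand makes the whole chain 64-bit */
	return (u64)tu_size_reg * clock_khz * bit_per_pix;
}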
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 24a71091759c..ec7729d18cb8 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -243,7 +243,6 @@ struct dw_mipi_dsi_rockchip {
struct dw_mipi_dsi *dmd;
const struct rockchip_dw_dsi_chip_data *cdata;
struct dw_mipi_dsi_plat_data pdata;
- int devcnt;
};
struct dphy_pll_parameter_map {
@@ -317,11 +316,6 @@ static inline u32 dsi_read(struct dw_mipi_dsi_rockchip *dsi, u32 reg)
return readl(dsi->base + reg);
}
-static inline void dsi_set(struct dw_mipi_dsi_rockchip *dsi, u32 reg, u32 mask)
-{
- dsi_write(dsi, reg, dsi_read(dsi, reg) | mask);
-}
-
static inline void dsi_update_bits(struct dw_mipi_dsi_rockchip *dsi, u32 reg,
u32 mask, u32 val)
{
@@ -692,13 +686,8 @@ static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
.get_timing = dw_mipi_dsi_phy_get_timing,
};
-static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
- int mux)
+static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi)
{
- if (dsi->cdata->lcdsel_grf_reg)
- regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
- mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
-
if (dsi->cdata->lanecfg1_grf_reg)
regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
dsi->cdata->lanecfg1);
@@ -712,6 +701,13 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
dsi->cdata->enable);
}
+static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
+ int mux)
+{
+ regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
+ mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
+}
+
static int
dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -767,9 +763,9 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
return;
}
- dw_mipi_dsi_rockchip_config(dsi, mux);
+ dw_mipi_dsi_rockchip_set_lcdsel(dsi, mux);
if (dsi->slave)
- dw_mipi_dsi_rockchip_config(dsi->slave, mux);
+ dw_mipi_dsi_rockchip_set_lcdsel(dsi->slave, mux);
clk_disable_unprepare(dsi->grf_clk);
}
@@ -923,6 +919,24 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
return ret;
}
+ /*
+ * With the GRF clock running, write lane and dual-mode configurations
+ * that won't change immediately. If we waited until enable() to do
+ * this, things like panel preparation would not be able to send
+ * commands over DSI.
+ */
+ ret = clk_prepare_enable(dsi->grf_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
+ return ret;
+ }
+
+ dw_mipi_dsi_rockchip_config(dsi);
+ if (dsi->slave)
+ dw_mipi_dsi_rockchip_config(dsi->slave);
+
+ clk_disable_unprepare(dsi->grf_clk);
+
ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
@@ -1121,9 +1135,6 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
{
struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
- if (dsi->devcnt == 0)
- component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
-
dw_mipi_dsi_remove(dsi->dmd);
return 0;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 212bd87c0c4a..b730b8d5d949 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -16,6 +16,7 @@
#include <linux/console.h>
#include <linux/iommu.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -114,6 +115,15 @@ static int rockchip_drm_bind(struct device *dev)
struct rockchip_drm_private *private;
int ret;
+ /* Remove existing drivers that may own the framebuffer memory. */
+ ret = drm_aperture_remove_framebuffers(false, "rockchip-drm-fb");
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "Failed to remove existing framebuffers - %d.\n",
+ ret);
+ return ret;
+ }
+
drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
if (IS_ERR(drm_dev))
return PTR_ERR(drm_dev);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 64469439ddf2..f5b9028a16a3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1022,6 +1022,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
VOP_WIN_SET(vop, win, alpha_en, 1);
} else {
VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
+ VOP_WIN_SET(vop, win, alpha_en, 0);
}
VOP_WIN_SET(vop, win, enable, 1);
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index bd5ba10822c2..489d63c05c0d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -499,11 +499,11 @@ static int px30_lvds_probe(struct platform_device *pdev,
if (IS_ERR(lvds->dphy))
return PTR_ERR(lvds->dphy);
- phy_init(lvds->dphy);
+ ret = phy_init(lvds->dphy);
if (ret)
return ret;
- phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
+ ret = phy_set_mode(lvds->dphy, PHY_MODE_LVDS);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 80053d91a301..ca7cc82125cb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -77,15 +77,20 @@ static const uint64_t format_modifiers_win_lite[] = {
DRM_FORMAT_MOD_INVALID,
};
-static const struct vop_scl_regs rk3036_win_scl = {
+static const struct vop_scl_regs rk3036_win0_scl = {
.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
.scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
};
+static const struct vop_scl_regs rk3036_win1_scl = {
+ .scale_yrgb_x = VOP_REG(RK3036_WIN1_SCL_FACTOR_YRGB, 0xffff, 0x0),
+ .scale_yrgb_y = VOP_REG(RK3036_WIN1_SCL_FACTOR_YRGB, 0xffff, 16),
+};
+
static const struct vop_win_phy rk3036_win0_data = {
- .scl = &rk3036_win_scl,
+ .scl = &rk3036_win0_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
@@ -99,9 +104,13 @@ static const struct vop_win_phy rk3036_win0_data = {
.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
.uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
+ .alpha_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 18),
+ .alpha_en = VOP_REG(RK3036_ALPHA_CTRL, 0x1, 0),
+ .alpha_pre_mul = VOP_REG(RK3036_DSP_CTRL0, 0x1, 29),
};
static const struct vop_win_phy rk3036_win1_data = {
+ .scl = &rk3036_win1_scl,
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
.format_modifiers = format_modifiers_win_lite,
@@ -113,6 +122,9 @@ static const struct vop_win_phy rk3036_win1_data = {
.dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
+ .alpha_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 19),
+ .alpha_en = VOP_REG(RK3036_ALPHA_CTRL, 0x1, 1),
+ .alpha_pre_mul = VOP_REG(RK3036_DSP_CTRL0, 0x1, 29),
};
static const struct vop_win_data rk3036_vop_win_data[] = {
@@ -179,6 +191,9 @@ static const struct vop_win_phy rk3126_win1_data = {
.dsp_st = VOP_REG(RK3126_WIN1_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3126_WIN1_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
+ .alpha_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 19),
+ .alpha_en = VOP_REG(RK3036_ALPHA_CTRL, 0x1, 1),
+ .alpha_pre_mul = VOP_REG(RK3036_DSP_CTRL0, 0x1, 29),
};
static const struct vop_win_data rk3126_vop_win_data[] = {
@@ -312,6 +327,7 @@ static const struct vop_win_data px30_vop_big_win_data[] = {
};
static const struct vop_data px30_vop_big = {
+ .version = VOP_VERSION(2, 6),
.intr = &px30_intr,
.feature = VOP_FEATURE_INTERNAL_RGB,
.common = &px30_common,
@@ -327,6 +343,7 @@ static const struct vop_win_data px30_vop_lit_win_data[] = {
};
static const struct vop_data px30_vop_lit = {
+ .version = VOP_VERSION(2, 5),
.intr = &px30_intr,
.feature = VOP_FEATURE_INTERNAL_RGB,
.common = &px30_common,
@@ -349,8 +366,8 @@ static const struct vop_win_phy rk3066_win0_data = {
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
- .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
- .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
+ .format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 4),
+ .rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 19),
.act_info = VOP_REG(RK3066_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3066_WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -358,16 +375,17 @@ static const struct vop_win_phy rk3066_win0_data = {
.uv_mst = VOP_REG(RK3066_WIN0_CBR_MST0, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3066_WIN0_VIR, 0xffff, 0),
.uv_vir = VOP_REG(RK3066_WIN0_VIR, 0x1fff, 16),
+ .alpha_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 21),
+ .alpha_en = VOP_REG(RK3066_BLEND_CTRL, 0x1, 0),
};
static const struct vop_win_phy rk3066_win1_data = {
- .scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
- .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
- .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
+ .format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 7),
+ .rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 23),
.act_info = VOP_REG(RK3066_WIN1_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3066_WIN1_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN1_DSP_ST, 0x1fff1fff, 0),
@@ -375,6 +393,8 @@ static const struct vop_win_phy rk3066_win1_data = {
.uv_mst = VOP_REG(RK3066_WIN1_CBR_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3066_WIN1_VIR, 0xffff, 0),
.uv_vir = VOP_REG(RK3066_WIN1_VIR, 0x1fff, 16),
+ .alpha_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 22),
+ .alpha_en = VOP_REG(RK3066_BLEND_CTRL, 0x1, 1),
};
static const struct vop_win_phy rk3066_win2_data = {
@@ -382,12 +402,14 @@ static const struct vop_win_phy rk3066_win2_data = {
.nformats = ARRAY_SIZE(formats_win_lite),
.format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
- .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
- .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
+ .format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 10),
+ .rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 27),
.dsp_info = VOP_REG(RK3066_WIN2_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN2_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3066_WIN2_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3066_WIN2_VIR, 0xffff, 0),
+ .alpha_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 23),
+ .alpha_en = VOP_REG(RK3066_BLEND_CTRL, 0x1, 2),
};
static const struct vop_modeset rk3066_modeset = {
@@ -408,6 +430,9 @@ static const struct vop_common rk3066_common = {
.dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11),
.dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
+ .dither_up = VOP_REG(RK3066_DSP_CTRL0, 0x1, 9),
+ .dsp_lut_en = VOP_REG(RK3066_SYS_CTRL1, 0x1, 31),
+ .data_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 25),
};
static const struct vop_win_data rk3066_vop_win_data[] = {
@@ -470,6 +495,9 @@ static const struct vop_win_phy rk3188_win0_data = {
.yrgb_mst = VOP_REG(RK3188_WIN0_YRGB_MST0, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3188_WIN0_CBR_MST0, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 0),
+ .alpha_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 18),
+ .alpha_en = VOP_REG(RK3188_ALPHA_CTRL, 0x1, 0),
+ .alpha_pre_mul = VOP_REG(RK3188_DSP_CTRL0, 0x1, 29),
};
static const struct vop_win_phy rk3188_win1_data = {
@@ -484,6 +512,9 @@ static const struct vop_win_phy rk3188_win1_data = {
.dsp_st = VOP_REG(RK3188_WIN1_DSP_ST, 0x0fff0fff, 0),
.yrgb_mst = VOP_REG(RK3188_WIN1_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 16),
+ .alpha_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 19),
+ .alpha_en = VOP_REG(RK3188_ALPHA_CTRL, 0x1, 1),
+ .alpha_pre_mul = VOP_REG(RK3188_DSP_CTRL0, 0x1, 29),
};
static const struct vop_modeset rk3188_modeset = {
@@ -505,7 +536,10 @@ static const struct vop_common rk3188_common = {
.dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27),
.dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11),
.dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10),
- .dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24),
+ .dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 24),
+ .dither_up = VOP_REG(RK3188_DSP_CTRL0, 0x1, 9),
+ .dsp_lut_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 28),
+ .data_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 25),
};
static const struct vop_win_data rk3188_vop_win_data[] = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index 6e9fa5815d4d..0b3cd65ba5c1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -955,6 +955,7 @@
#define RK3188_DSP_CTRL0 0x04
#define RK3188_DSP_CTRL1 0x08
#define RK3188_INT_STATUS 0x10
+#define RK3188_ALPHA_CTRL 0x14
#define RK3188_WIN0_YRGB_MST0 0x20
#define RK3188_WIN0_CBR_MST0 0x24
#define RK3188_WIN0_YRGB_MST1 0x28
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 0249c7450188..79554aa4dbb1 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -116,7 +116,8 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
rmb(); /* for list_empty to work without lock */
if (list_empty(&entity->list) ||
- spsc_queue_count(&entity->job_queue) == 0)
+ spsc_queue_count(&entity->job_queue) == 0 ||
+ entity->stopped)
return true;
return false;
@@ -221,11 +222,16 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
struct drm_sched_job *job;
+ struct dma_fence *f;
int r;
while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
struct drm_sched_fence *s_fence = job->s_fence;
+ /* Wait for all dependencies to avoid data corruption */
+ while ((f = job->sched->ops->dependency(job, entity)))
+ dma_fence_wait(f, false);
+
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f4f474944169..a2a953693b45 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -314,6 +314,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
+ enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
@@ -331,7 +332,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
- job->sched->ops->timedout_job(job);
+ status = job->sched->ops->timedout_job(job);
/*
* Guilty job did complete and hence needs to be manually removed
@@ -345,9 +346,11 @@ static void drm_sched_job_timedout(struct work_struct *work)
spin_unlock(&sched->job_list_lock);
}
- spin_lock(&sched->job_list_lock);
- drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ if (status != DRM_GPU_SCHED_STAT_ENODEV) {
+ spin_lock(&sched->job_list_lock);
+ drm_sched_start_timeout(sched);
+ spin_unlock(&sched->job_list_lock);
+ }
}
/**
@@ -895,9 +898,33 @@ EXPORT_SYMBOL(drm_sched_init);
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
+ struct drm_sched_entity *s_entity;
+ int i;
+
if (sched->thread)
kthread_stop(sched->thread);
+ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ if (!rq)
+ continue;
+
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list)
+ /*
+ * Prevents reinsertion and marks job_queue as idle;
+ * it will be removed from rq in drm_sched_entity_fini()
+ * eventually.
+ */
+ s_entity->stopped = true;
+ spin_unlock(&rq->lock);
+ }
+
+ /* Wake up everyone stuck in drm_sched_entity_flush for this scheduler */
+ wake_up_all(&sched->job_scheduled);
+
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&sched->work_tdr);
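Under the new contract above, timedout_job() reports back to the core via enum drm_gpu_sched_stat: DRM_GPU_SCHED_STAT_ENODEV means the device is gone and the TDR timer must not be re-armed. A hedged sketch of a driver-side handler; example_device_is_unplugged() is hypothetical:

#include <drm/gpu_scheduler.h>

static enum drm_gpu_sched_stat example_timedout_job(struct drm_sched_job *job)
{
	if (example_device_is_unplugged(job->sched))	/* hypothetical check */
		return DRM_GPU_SCHED_STAT_ENODEV;	/* core skips drm_sched_start_timeout() */

	/* ... reset the GPU, then resubmit or skip the guilty job ... */
	return DRM_GPU_SCHED_STAT_NOMINAL;		/* core re-arms the timeout */
}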
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 5c2b650b561d..03f3377f918c 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -272,7 +272,7 @@ static void hda_write(struct sti_hda *hda, u32 val, int offset)
}
/**
- * Search for a video mode in the supported modes table
+ * hda_get_mode_idx - Search for a video mode in the supported modes table
*
* @mode: mode being searched
* @idx: index of the found mode
@@ -292,7 +292,7 @@ static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
}
/**
- * Enable the HD DACS
+ * hda_enable_hd_dacs - Enable the HD DACS
*
* @hda: pointer to HD analog structure
* @enable: true if HD DACS need to be enabled, else false
@@ -380,7 +380,7 @@ static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
}
/**
- * Configure AWG, writing instructions
+ * sti_hda_configure_awg - Configure AWG, writing instructions
*
* @hda: pointer to HD analog structure
* @awg_instr: pointer to AWG instructions table
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
index d5f94dca0d32..d25ecd4f4b67 100644
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
@@ -67,7 +67,7 @@ static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
};
/**
- * Start hdmi phy macro cell tx3g4c28
+ * sti_hdmi_tx3g4c28phy_start - Start hdmi phy macro cell tx3g4c28
*
* @hdmi: pointer on the hdmi internal structure
*
@@ -179,7 +179,7 @@ err:
}
/**
- * Stop hdmi phy macro cell tx3g4c28
+ * sti_hdmi_tx3g4c28phy_stop - Stop hdmi phy macro cell tx3g4c28
*
* @hdmi: pointer on the hdmi internal structure
*/
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index edbb99f53de1..d09b08995b12 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -782,7 +782,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
}
/**
- * sti_vdp_vtg_cb
+ * sti_hqvdp_vtg_cb
* @nb: notifier block
* @evt: event message
* @data: private data
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index df3817f0fd30..2499715a69b7 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -153,7 +153,7 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
}
/**
- * Set the clipping mode of a VIP
+ * tvout_vip_set_color_order - Set the clipping mode of a VIP
*
* @tvout: tvout structure
* @reg: register to set
@@ -177,7 +177,7 @@ static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
}
/**
- * Set the clipping mode of a VIP
+ * tvout_vip_set_clip_mode - Set the clipping mode of a VIP
*
* @tvout: tvout structure
* @reg: register to set
@@ -193,7 +193,7 @@ static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
}
/**
- * Set the rounded value of a VIP
+ * tvout_vip_set_rnd - Set the rounded value of a VIP
*
* @tvout: tvout structure
* @reg: register to set
@@ -209,7 +209,7 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
}
/**
- * Select the VIP input
+ * tvout_vip_set_sel_input - Select the VIP input
*
* @tvout: tvout structure
* @reg: register to set
@@ -247,7 +247,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
}
/**
- * Select the input video signed or unsigned
+ * tvout_vip_set_in_vid_fmt - Select the input video signed or unsigned
*
* @tvout: tvout structure
* @reg: register to set
@@ -264,7 +264,7 @@ static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
}
/**
- * Set preformatter matrix
+ * tvout_preformatter_set_matrix - Set preformatter matrix
*
* @tvout: tvout structure
* @mode: display mode structure
@@ -289,7 +289,7 @@ static void tvout_preformatter_set_matrix(struct sti_tvout *tvout,
}
/**
- * Start VIP block for DVO output
+ * tvout_dvo_start - Start VIP block for DVO output
*
* @tvout: pointer on tvout structure
* @main_path: true if main path has to be used in the vip configuration
@@ -343,7 +343,7 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
}
/**
- * Start VIP block for HDMI output
+ * tvout_hdmi_start - Start VIP block for HDMI output
*
* @tvout: pointer on tvout structure
* @main_path: true if main path has to be used in the vip configuration
@@ -392,7 +392,7 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
}
/**
- * Start HDF VIP and HD DAC
+ * tvout_hda_start - Start HDF VIP and HD DAC
*
* @tvout: pointer on tvout structure
* @main_path: true if main path has to be used in the vip configuration
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index e99771b947b6..08b71248044d 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -531,7 +531,6 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_encoder *encoder = NULL;
struct drm_bridge *bridge = NULL;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- struct videomode vm;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
u32 bus_flags = 0;
@@ -570,31 +569,33 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
}
- drm_display_mode_to_videomode(mode, &vm);
-
DRM_DEBUG_DRIVER("CRTC:%d mode:%s\n", crtc->base.id, mode->name);
- DRM_DEBUG_DRIVER("Video mode: %dx%d", vm.hactive, vm.vactive);
+ DRM_DEBUG_DRIVER("Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
DRM_DEBUG_DRIVER(" hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
- vm.hfront_porch, vm.hback_porch, vm.hsync_len,
- vm.vfront_porch, vm.vback_porch, vm.vsync_len);
+ mode->hsync_start - mode->hdisplay,
+ mode->htotal - mode->hsync_end,
+ mode->hsync_end - mode->hsync_start,
+ mode->vsync_start - mode->vdisplay,
+ mode->vtotal - mode->vsync_end,
+ mode->vsync_end - mode->vsync_start);
/* Convert video timings to ltdc timings */
- hsync = vm.hsync_len - 1;
- vsync = vm.vsync_len - 1;
- accum_hbp = hsync + vm.hback_porch;
- accum_vbp = vsync + vm.vback_porch;
- accum_act_w = accum_hbp + vm.hactive;
- accum_act_h = accum_vbp + vm.vactive;
- total_width = accum_act_w + vm.hfront_porch;
- total_height = accum_act_h + vm.vfront_porch;
+ hsync = mode->hsync_end - mode->hsync_start - 1;
+ vsync = mode->vsync_end - mode->vsync_start - 1;
+ accum_hbp = mode->htotal - mode->hsync_start - 1;
+ accum_vbp = mode->vtotal - mode->vsync_start - 1;
+ accum_act_w = accum_hbp + mode->hdisplay;
+ accum_act_h = accum_vbp + mode->vdisplay;
+ total_width = mode->htotal - 1;
+ total_height = mode->vtotal - 1;
/* Configures the HS, VS, DE and PC polarities. Default Active Low */
val = 0;
- if (vm.flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= GCR_HSPOL;
- if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= GCR_VSPOL;
if (bus_flags & DRM_BUS_FLAG_DE_LOW)
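The ltdc rewrite above drops the intermediate struct videomode and derives the timings straight from struct drm_display_mode using the standard identities: front porch = sync_start - display, sync length = sync_end - sync_start, back porch = total - sync_end. Pulled out as a sketch (horizontal case; vertical is analogous):

#include <drm/drm_modes.h>

static void example_h_timings(const struct drm_display_mode *m,
			      u32 *hfp, u32 *hsync, u32 *hbp)
{
	*hfp   = m->hsync_start - m->hdisplay;	/* horizontal front porch */
	*hsync = m->hsync_end - m->hsync_start;	/* hsync length */
	*hbp   = m->htotal - m->hsync_end;	/* horizontal back porch */
}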
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 0db164a774a1..e779855bcd6e 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -370,6 +370,11 @@ static const u32 sun8i_ui_layer_formats[] = {
DRM_FORMAT_XRGB8888,
};
+static const uint64_t sun8i_layer_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
struct sun8i_mixer *mixer,
int index)
@@ -392,7 +397,7 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
&sun8i_ui_layer_funcs,
sun8i_ui_layer_formats,
ARRAY_SIZE(sun8i_ui_layer_formats),
- NULL, type, NULL);
+ sun8i_layer_modifiers, type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 46420780db59..1c86c2dd0bbf 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -534,6 +534,11 @@ static const u32 sun8i_vi_layer_de3_formats[] = {
DRM_FORMAT_YVU422,
};
+static const uint64_t sun8i_layer_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
struct sun8i_mixer *mixer,
int index)
@@ -560,7 +565,8 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_vi_layer_funcs,
formats, format_count,
- NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
+ sun8i_layer_modifiers,
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 4f605c5fe856..08ae66b1e6f5 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -468,7 +468,6 @@ static int simpledrm_device_init_fb(struct simpledrm_device *sdev)
{
int width, height, stride;
const struct drm_format_info *format;
- struct drm_format_name_buf buf;
struct drm_device *dev = &sdev->dev;
struct platform_device *pdev = sdev->pdev;
const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
@@ -512,9 +511,8 @@ static int simpledrm_device_init_fb(struct simpledrm_device *sdev)
drm_dbg_kms(dev, "display mode={" DRM_MODE_FMT "}\n",
DRM_MODE_ARG(&sdev->mode));
drm_dbg_kms(dev,
- "framebuffer format=\"%s\", size=%dx%d, stride=%d byte\n",
- drm_get_format_name(format->format, &buf), width,
- height, stride);
+ "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
+ &format->format, width, height, stride);
return 0;
}
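The %p4cc printk extension used above takes a pointer to the u32 fourcc and renders it as the four character code plus endianness and raw value, replacing the drm_get_format_name()/buffer dance. A minimal sketch of the idiom:

u32 fourcc = DRM_FORMAT_XRGB8888;

/* Prints something like "XR24 little-endian (0x34325258)". */
printk(KERN_DEBUG "format=%p4cc\n", &fourcc);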
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ca1b098b6a56..db53fecca696 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -58,7 +58,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
int i, mem_type;
drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
- bo, bo->mem.num_pages, bo->base.size >> 10,
+ bo, bo->resource->num_pages, bo->base.size >> 10,
bo->base.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
mem_type = placement->placement[i].mem_type;
@@ -109,7 +109,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
bdev->funcs->del_from_lru_notify(bo);
if (bulk && !bo->pin_count) {
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_TT:
ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
break;
@@ -163,11 +163,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
+ struct ttm_resource_manager *old_man, *new_man;
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
- struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
int ret;
+ old_man = ttm_manager_type(bdev, bo->resource->mem_type);
+ new_man = ttm_manager_type(bdev, mem->mem_type);
+
ttm_bo_unmap_virtual(bo);
/*
@@ -200,7 +202,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
return 0;
out_err:
- new_man = ttm_manager_type(bdev, bo->mem.mem_type);
+ new_man = ttm_manager_type(bdev, bo->resource->mem_type);
if (!new_man->use_tt)
ttm_bo_tt_destroy(bo);
@@ -221,7 +223,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
bo->bdev->funcs->delete_mem_notify(bo);
ttm_bo_tt_destroy(bo);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -259,8 +261,8 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
int i;
rcu_read_lock();
- fobj = rcu_dereference(resv->fence);
- fence = rcu_dereference(resv->fence_excl);
+ fobj = dma_resv_shared_list(resv);
+ fence = dma_resv_excl_fence(resv);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -294,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
struct dma_resv *resv = &bo->base._resv;
int ret;
- if (dma_resv_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled(resv, true))
ret = 0;
else
ret = -EBUSY;
@@ -306,8 +308,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
- lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
- 30 * HZ);
+ lret = dma_resv_wait_timeout(resv, true, interruptible,
+ 30 * HZ);
if (lret < 0)
return lret;
@@ -409,18 +411,18 @@ static void ttm_bo_release(struct kref *kref)
/* Last resort, if we fail to allocate memory for the
* fences block for the BO to become idle
*/
- dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
- 30 * HZ);
+ dma_resv_wait_timeout(bo->base.resv, true, false,
+ 30 * HZ);
}
if (bo->bdev->funcs->release_notify)
bo->bdev->funcs->release_notify(bo);
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
- ttm_mem_io_free(bdev, &bo->mem);
+ ttm_mem_io_free(bdev, bo->resource);
}
- if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+ if (!dma_resv_test_signaled(bo->base.resv, true) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -438,7 +440,7 @@ static void ttm_bo_release(struct kref *kref)
*/
if (bo->pin_count) {
bo->pin_count = 0;
- ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+ ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
}
kref_init(&bo->kref);
@@ -487,7 +489,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource evict_mem;
+ struct ttm_resource *evict_mem;
struct ttm_placement placement;
struct ttm_place hop;
int ret = 0;
@@ -501,10 +503,15 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
bdev->funcs->evict_flags(bo, &placement);
if (!placement.num_placement && !placement.num_busy_placement) {
- ttm_bo_wait(bo, false, false);
+ ret = ttm_bo_wait(bo, true, false);
+ if (ret)
+ return ret;
- ttm_bo_cleanup_memtype_use(bo);
- return ttm_tt_create(bo, false);
+ /*
+ * Since we've already synced, this frees backing store
+ * immediately.
+ */
+ return ttm_bo_pipeline_gutting(bo);
}
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
@@ -517,7 +524,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
goto out;
}
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
if (unlikely(ret)) {
WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n");
if (ret != -ERESTARTSYS)
@@ -531,11 +538,15 @@ out:
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
+ dma_resv_assert_held(bo->base.resv);
+ if (bo->resource->mem_type == TTM_PL_SYSTEM)
+ return true;
+
/* Don't evict this BO if it's outside of the
* requested placement range
*/
- if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
- (place->lpfn && place->lpfn <= bo->mem.start))
+ if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
+ (place->lpfn && place->lpfn <= bo->resource->start))
return false;
return true;
@@ -553,7 +564,9 @@ EXPORT_SYMBOL(ttm_bo_eviction_valuable);
* b. Otherwise, trylock it.
*/
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
+ struct ttm_operation_ctx *ctx,
+ const struct ttm_place *place,
+ bool *locked, bool *busy)
{
bool ret = false;
@@ -571,6 +584,14 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
*busy = !ret;
}
+ if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) {
+ ret = false;
+ if (*locked) {
+ dma_resv_unlock(bo->base.resv);
+ *locked = false;
+ }
+ }
+
return ret;
}
@@ -625,20 +646,14 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
- &busy)) {
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
+ &locked, &busy)) {
if (busy && !busy_bo && ticket !=
dma_resv_locking_ctx(bo->base.resv))
busy_bo = bo;
continue;
}
- if (place && !bdev->funcs->eviction_valuable(bo,
- place)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- continue;
- }
if (!ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
@@ -682,7 +697,9 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
}
/*
- * Add the last move fence to the BO and reserve a new shared slot.
+ * Add the last move fence to the BO and reserve a new shared slot. We only use
+ * a shared slot to avoid unnecessary sync and rely on the subsequent bo move to
+ * either stall or use an exclusive fence and set bo->moving.
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man,
@@ -724,14 +741,15 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
+ struct ttm_resource_manager *man;
struct ww_acquire_ctx *ticket;
int ret;
+ man = ttm_manager_type(bdev, place->mem_type);
ticket = dma_resv_locking_ctx(bo->base.resv);
do {
ret = ttm_resource_alloc(bo, place, mem);
@@ -745,37 +763,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
} while (1);
- return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
-}
-
-/**
- * ttm_bo_mem_placement - check if placement is compatible
- * @bo: BO to find memory for
- * @place: where to search
- * @mem: the memory object to fill in
- *
- * Check if placement is compatible and fill in mem structure.
- * Returns -EBUSY if placement won't work or negative error code.
- * 0 when placement can be used.
- */
-static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource *mem)
-{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
-
- man = ttm_manager_type(bdev, place->mem_type);
- if (!man || !ttm_resource_manager_used(man))
- return -EBUSY;
-
- mem->mem_type = place->mem_type;
- mem->placement = place->flags;
-
- spin_lock(&bo->bdev->lru_lock);
- ttm_bo_move_to_lru_tail(bo, mem, NULL);
- spin_unlock(&bo->bdev->lru_lock);
- return 0;
+ return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}
/*
@@ -788,7 +776,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_device *bdev = bo->bdev;
@@ -803,8 +791,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
- ret = ttm_bo_mem_placement(bo, place, mem);
- if (ret)
+ man = ttm_manager_type(bdev, place->mem_type);
+ if (!man || !ttm_resource_manager_used(man))
continue;
type_found = true;
@@ -814,8 +802,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret))
goto error;
- man = ttm_manager_type(bdev, mem->mem_type);
- ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+ ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
if (unlikely(ret)) {
ttm_resource_free(bo, mem);
if (ret == -EBUSY)
@@ -828,9 +815,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
+ struct ttm_resource_manager *man;
- ret = ttm_bo_mem_placement(bo, place, mem);
- if (ret)
+ man = ttm_manager_type(bdev, place->mem_type);
+ if (!man || !ttm_resource_manager_used(man))
continue;
type_found = true;
@@ -849,7 +837,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
error:
- if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
+ if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
ttm_bo_move_to_lru_tail_unlocked(bo);
return ret;
@@ -857,12 +845,12 @@ error:
EXPORT_SYMBOL(ttm_bo_mem_space);
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
struct ttm_placement hop_placement;
- struct ttm_resource hop_mem;
+ struct ttm_resource *hop_mem;
int ret;
hop_placement.num_placement = hop_placement.num_busy_placement = 1;
@@ -873,7 +861,7 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
if (ret)
return ret;
/* move to the bounce domain */
- ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
+ ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
if (ret) {
ttm_resource_free(bo, &hop_mem);
return ret;
@@ -885,14 +873,12 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_resource *mem;
struct ttm_place hop;
- struct ttm_resource mem;
int ret;
dma_resv_assert_held(bo->base.resv);
- memset(&hop, 0, sizeof(hop));
-
/*
* Determine where to move the buffer.
*
@@ -906,7 +892,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
if (ret)
return ret;
bounce:
- ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
if (ret == -EMULTIHOP) {
ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
if (ret)
@@ -974,18 +960,13 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Remove the backing store if no placement is given.
*/
- if (!placement->num_placement && !placement->num_busy_placement) {
- ret = ttm_bo_pipeline_gutting(bo);
- if (ret)
- return ret;
-
- return ttm_tt_create(bo, false);
- }
+ if (!placement->num_placement && !placement->num_busy_placement)
+ return ttm_bo_pipeline_gutting(bo);
/*
* Check whether we need to move buffer.
*/
- if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+ if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
@@ -993,7 +974,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* We might need to add a TTM.
*/
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ if (bo->resource->mem_type == TTM_PL_SYSTEM) {
ret = ttm_tt_create(bo, true);
if (ret)
return ret;
@@ -1015,7 +996,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
bool locked;
- int ret = 0;
+ int ret;
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
@@ -1025,7 +1006,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = page_alignment;
- ttm_resource_alloc(bo, &sys_mem, &bo->mem);
bo->moving = NULL;
bo->pin_count = 0;
bo->sg = sg;
@@ -1037,6 +1017,12 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
}
atomic_inc(&ttm_glob.bo_count);
+ ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
+ if (unlikely(ret)) {
+ ttm_bo_put(bo);
+ return ret;
+ }
+
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
@@ -1044,7 +1030,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
if (bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg)
ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
- bo->mem.num_pages);
+ bo->resource->num_pages);
/* passed reservation objects should already be locked,
* since otherwise lockdep will be angered in radeon.
@@ -1106,7 +1092,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
struct ttm_device *bdev = bo->bdev;
drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
- ttm_mem_io_free(bdev, &bo->mem);
+ ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
@@ -1116,14 +1102,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (dma_resv_test_signaled_rcu(bo->base.resv, true))
+ if (dma_resv_test_signaled(bo->base.resv, true))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
- interruptible, timeout);
+ timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
+ timeout);
if (timeout < 0)
return timeout;
@@ -1138,10 +1124,19 @@ EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
+ struct ttm_place place;
bool locked;
int ret;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
+ /*
+ * While the bo may already reside in SYSTEM placement, set
+ * SYSTEM as new placement to also cover the move further below.
+ * The driver may use the fact that we're moving from SYSTEM
+ * as an indication that we're about to swap out.
+ */
+ memset(&place, 0, sizeof(place));
+ place.mem_type = TTM_PL_SYSTEM;
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
return -EBUSY;
if (!ttm_bo_get_unless_zero(bo)) {
@@ -1163,21 +1158,17 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
/*
* Move to system cached
*/
- if (bo->mem.mem_type != TTM_PL_SYSTEM) {
+ if (bo->resource->mem_type != TTM_PL_SYSTEM) {
struct ttm_operation_ctx ctx = { false, false };
- struct ttm_resource evict_mem;
- struct ttm_place place, hop;
+ struct ttm_resource *evict_mem;
+ struct ttm_place hop;
- memset(&place, 0, sizeof(place));
memset(&hop, 0, sizeof(hop));
-
- place.mem_type = TTM_PL_SYSTEM;
-
ret = ttm_resource_alloc(bo, &place, &evict_mem);
if (unlikely(ret))
goto out;
- ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
if (unlikely(ret != 0)) {
WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
goto out;
@@ -1200,7 +1191,8 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
+ if (ttm_tt_is_populated(bo->ttm))
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:
/*
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ae8b61460724..2f57f824e6db 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -31,6 +31,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
@@ -72,190 +73,112 @@ void ttm_mem_io_free(struct ttm_device *bdev,
mem->bus.addr = NULL;
}
-static int ttm_resource_ioremap(struct ttm_device *bdev,
- struct ttm_resource *mem,
- void **virtual)
+/**
+ * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
+ * @bo: The struct ttm_buffer_object.
+ * @num_pages: Number of pages to copy.
+ * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
+ * @src_iter: A struct ttm_kmap_iter representing the source resource.
+ *
+ * This function is intended to be able to run asynchronously under a
+ * dma-fence if desired.
+ */
+void ttm_move_memcpy(struct ttm_buffer_object *bo,
+ u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter)
{
- int ret;
- void *addr;
-
- *virtual = NULL;
- ret = ttm_mem_io_reserve(bdev, mem);
- if (ret || !mem->bus.is_iomem)
- return ret;
+ const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
+ const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
+ struct ttm_tt *ttm = bo->ttm;
+ struct dma_buf_map src_map, dst_map;
+ pgoff_t i;
- if (mem->bus.addr) {
- addr = mem->bus.addr;
- } else {
- size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+ /* Single TTM move. NOP */
+ if (dst_ops->maps_tt && src_ops->maps_tt)
+ return;
- if (mem->bus.caching == ttm_write_combined)
- addr = ioremap_wc(mem->bus.offset, bus_size);
-#ifdef CONFIG_X86
- else if (mem->bus.caching == ttm_cached)
- addr = ioremap_cache(mem->bus.offset, bus_size);
-#endif
- else
- addr = ioremap(mem->bus.offset, bus_size);
- if (!addr) {
- ttm_mem_io_free(bdev, mem);
- return -ENOMEM;
+ /* Don't move nonexistent data. Clear destination instead. */
+ if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
+ if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+ return;
+
+ for (i = 0; i < num_pages; ++i) {
+ dst_ops->map_local(dst_iter, &dst_map, i);
+ if (dst_map.is_iomem)
+ memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
+ else
+ memset(dst_map.vaddr, 0, PAGE_SIZE);
+ if (dst_ops->unmap_local)
+ dst_ops->unmap_local(dst_iter, &dst_map);
}
+ return;
}
- *virtual = addr;
- return 0;
-}
-
-static void ttm_resource_iounmap(struct ttm_device *bdev,
- struct ttm_resource *mem,
- void *virtual)
-{
- if (virtual && mem->bus.addr == NULL)
- iounmap(virtual);
- ttm_mem_io_free(bdev, mem);
-}
-
-static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
-{
- uint32_t *dstP =
- (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
- uint32_t *srcP =
- (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
-
- int i;
- for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
- iowrite32(ioread32(srcP++), dstP++);
- return 0;
-}
-
-static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
- unsigned long page,
- pgprot_t prot)
-{
- struct page *d = ttm->pages[page];
- void *dst;
-
- if (!d)
- return -ENOMEM;
-
- src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
- dst = kmap_atomic_prot(d, prot);
- if (!dst)
- return -ENOMEM;
-
- memcpy_fromio(dst, src, PAGE_SIZE);
-
- kunmap_atomic(dst);
-
- return 0;
-}
-
-static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
- unsigned long page,
- pgprot_t prot)
-{
- struct page *s = ttm->pages[page];
- void *src;
-
- if (!s)
- return -ENOMEM;
-
- dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
- src = kmap_atomic_prot(s, prot);
- if (!src)
- return -ENOMEM;
- memcpy_toio(dst, src, PAGE_SIZE);
+ for (i = 0; i < num_pages; ++i) {
+ dst_ops->map_local(dst_iter, &dst_map, i);
+ src_ops->map_local(src_iter, &src_map, i);
- kunmap_atomic(src);
+ drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);
- return 0;
+ if (src_ops->unmap_local)
+ src_ops->unmap_local(src_iter, &src_map);
+ if (dst_ops->unmap_local)
+ dst_ops->unmap_local(dst_iter, &dst_map);
+ }
}
+EXPORT_SYMBOL(ttm_move_memcpy);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem)
+ struct ttm_resource *dst_mem)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ struct ttm_resource_manager *dst_man =
+ ttm_manager_type(bo->bdev, dst_mem->mem_type);
struct ttm_tt *ttm = bo->ttm;
- struct ttm_resource *old_mem = &bo->mem;
- struct ttm_resource old_copy = *old_mem;
- void *old_iomap;
- void *new_iomap;
- int ret;
- unsigned long i;
-
- ret = ttm_bo_wait_ctx(bo, ctx);
- if (ret)
- return ret;
-
- ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
- if (ret)
- return ret;
- ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
- if (ret)
- goto out;
-
- /*
- * Single TTM move. NOP.
- */
- if (old_iomap == NULL && new_iomap == NULL)
- goto out2;
-
- /*
- * Don't move nonexistent data. Clear destination instead.
- */
- if (old_iomap == NULL &&
- (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
- !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
- memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
- goto out2;
- }
+ struct ttm_resource *src_mem = bo->resource;
+ struct ttm_resource_manager *src_man =
+ ttm_manager_type(bdev, src_mem->mem_type);
+ struct ttm_resource src_copy = *src_mem;
+ union {
+ struct ttm_kmap_iter_tt tt;
+ struct ttm_kmap_iter_linear_io io;
+ } _dst_iter, _src_iter;
+ struct ttm_kmap_iter *dst_iter, *src_iter;
+ int ret = 0;
- /*
- * TTM might be null for moves within the same region.
- */
- if (ttm) {
+ if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
+ dst_man->use_tt)) {
ret = ttm_tt_populate(bdev, ttm, ctx);
if (ret)
- goto out1;
+ return ret;
}
- for (i = 0; i < new_mem->num_pages; ++i) {
- if (old_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
- ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
- prot);
- } else if (new_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
- ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
- prot);
- } else {
- ret = ttm_copy_io_page(new_iomap, old_iomap, i);
- }
- if (ret)
- goto out1;
+ dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
+ if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
+ dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
+ if (IS_ERR(dst_iter))
+ return PTR_ERR(dst_iter);
+
+ src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
+ if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
+ src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
+ if (IS_ERR(src_iter)) {
+ ret = PTR_ERR(src_iter);
+ goto out_src_iter;
}
- mb();
-out2:
- old_copy = *old_mem;
- ttm_bo_assign_mem(bo, new_mem);
+ ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
+ src_copy = *src_mem;
+ ttm_bo_move_sync_cleanup(bo, dst_mem);
- if (!man->use_tt)
- ttm_bo_tt_destroy(bo);
+ if (!src_iter->ops->maps_tt)
+ ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
+out_src_iter:
+ if (!dst_iter->ops->maps_tt)
+ ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
-out1:
- ttm_resource_iounmap(bdev, old_mem, new_iomap);
-out:
- ttm_resource_iounmap(bdev, &old_copy, old_iomap);
-
- /*
- * On error, keep the mm node!
- */
- if (!ret)
- ttm_resource_free(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
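Driver-side, the memcpy fallback keeps the same entry point; a hedged sketch of a move callback for hardware without a copy engine (driver name hypothetical):

static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *new_mem,
			 struct ttm_place *hop)
{
	/* No blitter available: let TTM copy page by page on the CPU. */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}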
@@ -336,27 +259,7 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
man = ttm_manager_type(bo->bdev, res->mem_type);
caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
- /* Cached mappings need no adjustment */
- if (caching == ttm_cached)
- return tmp;
-
-#if defined(__i386__) || defined(__x86_64__)
- if (caching == ttm_write_combined)
- tmp = pgprot_writecombine(tmp);
- else if (boot_cpu_data.x86 > 3)
- tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__) || defined(__mips__)
- if (caching == ttm_write_combined)
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__sparc__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
+ return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
@@ -365,24 +268,23 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_resource *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
- if (bo->mem.bus.addr) {
+ if (bo->resource->bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+ map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
} else {
+ resource_size_t res = bo->resource->bus.offset + offset;
+
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->bus.caching == ttm_write_combined)
- map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
- size);
+ map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
else if (mem->bus.caching == ttm_cached)
- map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
- size);
+ map->virtual = ioremap_cache(res, size);
#endif
else
- map->virtual = ioremap(bo->mem.bus.offset + offset,
- size);
+ map->virtual = ioremap(res, size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -392,7 +294,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_resource *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
@@ -438,15 +340,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
map->virtual = NULL;
map->bo = bo;
- if (num_pages > bo->mem.num_pages)
+ if (num_pages > bo->resource->num_pages)
return -EINVAL;
- if ((start_page + num_pages) > bo->mem.num_pages)
+ if ((start_page + num_pages) > bo->resource->num_pages)
return -EINVAL;
- ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+ ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
if (ret)
return ret;
- if (!bo->mem.bus.is_iomem) {
+ if (!bo->resource->bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
offset = start_page << PAGE_SHIFT;
@@ -475,7 +377,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
default:
BUG();
}
- ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+ ttm_mem_io_free(map->bo->bdev, map->bo->resource);
map->virtual = NULL;
map->page = NULL;
}
@@ -483,7 +385,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
- struct ttm_resource *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
int ret;
ret = ttm_mem_io_reserve(bo->bdev, mem);
@@ -542,7 +444,7 @@ EXPORT_SYMBOL(ttm_bo_vmap);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
- struct ttm_resource *mem = &bo->mem;
+ struct ttm_resource *mem = bo->resource;
if (dma_buf_map_is_null(map))
return;
@@ -553,7 +455,7 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
iounmap(map->vaddr_iomem);
dma_buf_map_clear(map);
- ttm_mem_io_free(bo->bdev, &bo->mem);
+ ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
@@ -567,7 +469,7 @@ static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
if (!dst_use_tt)
ttm_bo_tt_destroy(bo);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
return 0;
}
@@ -605,6 +507,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
ghost_obj->ttm = NULL;
else
bo->ttm = NULL;
+ bo->resource = NULL;
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
@@ -615,7 +518,9 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
struct dma_fence *fence)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *from;
+
+ from = ttm_manager_type(bdev, bo->resource->mem_type);
/**
* BO doesn't have a TTM we need to bind/unbind. Just remember
@@ -628,7 +533,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
}
spin_unlock(&from->move_lock);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
dma_fence_put(bo->moving);
bo->moving = dma_fence_get(fence);
@@ -641,7 +546,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
int ret = 0;
@@ -662,26 +567,82 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+/**
+ * ttm_bo_pipeline_gutting - purge the contents of a bo
+ * @bo: The buffer object
+ *
+ * Purge the contents of a bo, async if the bo is not idle.
+ * After a successful call, the bo is left unpopulated in
+ * system placement. The function may wait uninterruptibly
+ * for idle on OOM.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
struct ttm_buffer_object *ghost;
+ struct ttm_resource *sys_res;
+ struct ttm_tt *ttm;
int ret;
- ret = ttm_buffer_object_transfer(bo, &ghost);
+ ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
if (ret)
return ret;
+ /* If already idle, no need for ghost object dance. */
+ ret = ttm_bo_wait(bo, false, true);
+ if (ret != -EBUSY) {
+ if (!bo->ttm) {
+ /* See comment below about clearing. */
+ ret = ttm_tt_create(bo, true);
+ if (ret)
+ goto error_free_sys_mem;
+ } else {
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+ if (bo->type == ttm_bo_type_device)
+ ttm_tt_mark_for_clear(bo->ttm);
+ }
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, sys_res);
+ return 0;
+ }
+
+ /*
+ * We need an unpopulated ttm_tt after giving our current one,
+ * if any, to the ghost object. And we can't afford to fail
+ * creating one *after* the operation. If the bo subsequently gets
+ * resurrected, make sure it's cleared (if ttm_bo_type_device)
+ * to avoid leaking sensitive information to user-space.
+ */
+
+ ttm = bo->ttm;
+ bo->ttm = NULL;
+ ret = ttm_tt_create(bo, true);
+ swap(bo->ttm, ttm);
+ if (ret)
+ goto error_free_sys_mem;
+
+ ret = ttm_buffer_object_transfer(bo, &ghost);
+ if (ret)
+ goto error_destroy_tt;
+
ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
- ttm_resource_alloc(bo, &sys_mem, &bo->mem);
- bo->ttm = NULL;
-
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
-
+ bo->ttm = ttm;
+ bo->resource = NULL;
+ ttm_bo_assign_mem(bo, sys_res);
return 0;
+
+error_destroy_tt:
+ ttm_tt_destroy(bo->bdev, ttm);
+
+error_free_sys_mem:
+ ttm_resource_free(bo, &sys_res);
+ return ret;
}
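As the ttm_bo_validate() hunk earlier in this series shows, gutting is what now backs validation against an empty placement; a minimal usage sketch:

struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false };
struct ttm_placement empty = {};	/* no placements at all */
int ret;

/* Purge the bo's backing store; async if the bo is still busy. */
ret = ttm_bo_validate(bo, &empty, &ctx);
/* On success the bo sits unpopulated in TTM_PL_SYSTEM. */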
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b31b18058965..f56be5bc0861 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -34,6 +34,8 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
@@ -100,7 +102,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
if (bdev->funcs->io_mem_pfn)
return bdev->funcs->io_mem_pfn(bo, page_offset);
- return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
+ return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
}
/**
@@ -198,10 +200,10 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
/* Fault should not cross bo boundary. */
page_offset &= ~(fault_page_size - 1);
- if (page_offset + fault_page_size > bo->mem.num_pages)
+ if (page_offset + fault_page_size > bo->resource->num_pages)
goto out_fallback;
- if (bo->mem.bus.is_iomem)
+ if (bo->resource->bus.is_iomem)
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
else
pfn = page_to_pfn(ttm->pages[page_offset]);
@@ -211,7 +213,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
goto out_fallback;
/* Check that memory is contiguous. */
- if (!bo->mem.bus.is_iomem) {
+ if (!bo->resource->bus.is_iomem) {
for (i = 1; i < fault_page_size; ++i) {
if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
goto out_fallback;
@@ -297,7 +299,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
if (unlikely(ret != 0))
return ret;
- err = ttm_mem_io_reserve(bdev, &bo->mem);
+ err = ttm_mem_io_reserve(bdev, bo->resource);
if (unlikely(err != 0))
return VM_FAULT_SIGBUS;
@@ -306,11 +308,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->mem.num_pages))
+ if (unlikely(page_offset >= bo->resource->num_pages))
return VM_FAULT_SIGBUS;
- prot = ttm_io_prot(bo, &bo->mem, prot);
- if (!bo->mem.bus.is_iomem) {
+ prot = ttm_io_prot(bo, bo->resource, prot);
+ if (!bo->resource->bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
@@ -335,7 +337,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
* first page.
*/
for (i = 0; i < num_prefault; ++i) {
- if (bo->mem.bus.is_iomem) {
+ if (bo->resource->bus.is_iomem) {
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
@@ -357,12 +359,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
* at arbitrary times while the data is mmap'ed.
* See vmf_insert_mixed_prot() for a discussion.
*/
- if (vma->vm_flags & VM_MIXEDMAP)
- ret = vmf_insert_mixed_prot(vma, address,
- __pfn_to_pfn_t(pfn, PFN_DEV),
- prot);
- else
- ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
+ ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
/* Never error on prefaulted PTEs */
if (unlikely((ret & VM_FAULT_ERROR))) {
@@ -380,19 +377,63 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
+static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
+{
+ struct page *dummy_page = (struct page *)res;
+
+ __free_page(dummy_page);
+}
+
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct drm_device *ddev = bo->base.dev;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long address;
+ unsigned long pfn;
+ struct page *page;
+
+	/* Allocate a new dummy page to map the whole VA range of this VMA to it */
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+
+ /* Set the page to be freed using drmm release action */
+ if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
+ return VM_FAULT_OOM;
+
+ pfn = page_to_pfn(page);
+
+ /* Prefault the entire VMA range right away to avoid further faults */
+ for (address = vma->vm_start; address < vma->vm_end;
+ address += PAGE_SIZE)
+ ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_dummy_page);
+
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
pgprot_t prot;
struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct drm_device *ddev = bo->base.dev;
vm_fault_t ret;
+ int idx;
ret = ttm_bo_vm_reserve(bo, vmf);
if (ret)
return ret;
prot = vma->vm_page_prot;
- ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ if (drm_dev_enter(ddev, &idx)) {
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ drm_dev_exit(idx);
+ } else {
+ ret = ttm_bo_vm_dummy_page(vmf, prot);
+ }
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
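Drivers that install their own fault handler can follow the same unplug-safe pattern shown in ttm_bo_vm_fault() above; a hedged sketch (driver name hypothetical):

static vm_fault_t mydrv_vm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	pgprot_t prot = vmf->vma->vm_page_prot;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		ret = ttm_bo_vm_fault_reserved(vmf, prot,
					       TTM_BO_VM_NUM_PREFAULT, 1);
		drm_dev_exit(idx);
	} else {
		/* Device unplugged: back the whole VMA with a dummy page. */
		ret = ttm_bo_vm_dummy_page(vmf, prot);
	}

	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}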
@@ -469,14 +510,14 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
<< PAGE_SHIFT);
int ret;
- if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
+ if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
return -EIO;
ret = ttm_bo_reserve(bo, true, false, NULL);
if (ret)
return ret;
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_SYSTEM:
if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(bo->ttm);
@@ -508,33 +549,20 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.access = ttm_bo_vm_access,
};
-static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_device *bdev,
- unsigned long offset,
- unsigned long pages)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
- struct drm_vma_offset_node *node;
- struct ttm_buffer_object *bo = NULL;
-
- drm_vma_offset_lock_lookup(bdev->vma_manager);
-
- node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
- if (likely(node)) {
- bo = container_of(node, struct ttm_buffer_object,
- base.vma_node);
- bo = ttm_bo_get_unless_zero(bo);
- }
-
- drm_vma_offset_unlock_lookup(bdev->vma_manager);
-
- if (!bo)
- pr_err("Could not find buffer object to map\n");
+	/* Enforce no COW since it would have really strange behavior. */
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
- return bo;
-}
+ ttm_bo_get(bo);
-static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
-{
- vma->vm_ops = &ttm_bo_vm_ops;
+ /*
+ * Drivers may want to override the vm_ops field. Otherwise we
+ * use TTM's default callbacks.
+ */
+ if (!vma->vm_ops)
+ vma->vm_ops = &ttm_bo_vm_ops;
/*
* Note: We're transferring the bo reference to
@@ -543,50 +571,8 @@ static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_s
vma->vm_private_data = bo;
- /*
- * We'd like to use VM_PFNMAP on shared mappings, where
- * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
- * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
- * bad for performance. Until that has been sorted out, use
- * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
- */
- vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_PFNMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-}
-
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_device *bdev)
-{
- struct ttm_buffer_object *bo;
- int ret;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
- return -EINVAL;
-
- bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
- if (unlikely(!bo))
- return -EINVAL;
-
- if (unlikely(!bo->bdev->funcs->verify_access)) {
- ret = -EPERM;
- goto out_unref;
- }
- ret = bo->bdev->funcs->verify_access(bo, filp);
- if (unlikely(ret != 0))
- goto out_unref;
-
- ttm_bo_mmap_vma_setup(bo, vma);
- return 0;
-out_unref:
- ttm_bo_put(bo);
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_mmap);
-
-int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
-{
- ttm_bo_get(bo);
- ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
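With ttm_bo_mmap() removed, offset lookup and access checks move to the GEM layer; a hedged sketch of how a GEM-based driver might route its object mmap callback to the remaining helper (function name hypothetical):

static int mydrv_gem_object_mmap(struct drm_gem_object *obj,
				 struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
		container_of(obj, struct ttm_buffer_object, base);

	/*
	 * drm_gem_mmap() has already looked the object up by its fake
	 * offset and checked access rights; only VMA setup is left.
	 */
	return ttm_bo_mmap_obj(vma, bo);
}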
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 56b0efdba1a9..997c458f68a9 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -31,12 +31,47 @@
*/
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/pgtable.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <drm/drm_sysfs.h>
+#include <drm/ttm/ttm_caching.h>
#include "ttm_module.h"
+/**
+ * ttm_prot_from_caching - Modify the page protection according to the
+ * ttm caching mode
+ * @caching: The ttm caching mode
+ * @tmp: The original page protection
+ *
+ * Return: The modified page protection
+ */
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
+{
+ /* Cached mappings need no adjustment */
+ if (caching == ttm_cached)
+ return tmp;
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (caching == ttm_write_combined)
+ tmp = pgprot_writecombine(tmp);
+ else if (boot_cpu_data.x86 > 3)
+ tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+ defined(__powerpc__) || defined(__mips__)
+ if (caching == ttm_write_combined)
+ tmp = pgprot_writecombine(tmp);
+ else
+ tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__sparc__)
+ tmp = pgprot_noncached(tmp);
+#endif
+ return tmp;
+}
+
struct dentry *ttm_debugfs_root;
static int __init ttm_init(void)
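A one-line usage sketch of the moved helper, mirroring what ttm_io_prot() and the tt kmap iterator now do:

/* Adjust a kernel mapping's protection for write-combined memory. */
pgprot_t prot = ttm_prot_from_caching(ttm_write_combined, PAGE_KERNEL);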
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index b9d5da6e6a81..03395386e8a7 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -29,12 +29,13 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_bo_api.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
/*
* Currently we use a spinlock for the lock, but a mutex *may* be
@@ -57,11 +58,11 @@ to_range_manager(struct ttm_resource_manager *man)
static int ttm_range_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct ttm_range_manager *rman = to_range_manager(man);
+ struct ttm_range_mgr_node *node;
struct drm_mm *mm = &rman->mm;
- struct drm_mm_node *node;
enum drm_mm_insert_mode mode;
unsigned long lpfn;
int ret;
@@ -70,7 +71,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
if (!lpfn)
lpfn = man->size;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
if (!node)
return -ENOMEM;
@@ -78,35 +79,36 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
+ ttm_resource_init(bo, place, &node->base);
+
spin_lock(&rman->lock);
- ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+ node->base.num_pages,
bo->page_alignment, 0,
place->fpfn, lpfn, mode);
spin_unlock(&rman->lock);
if (unlikely(ret)) {
kfree(node);
- } else {
- mem->mm_node = node;
- mem->start = node->start;
+ return ret;
}
- return ret;
+ node->base.start = node->mm_nodes[0].start;
+ *res = &node->base;
+ return 0;
}
static void ttm_range_man_free(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct ttm_range_manager *rman = to_range_manager(man);
- if (mem->mm_node) {
- spin_lock(&rman->lock);
- drm_mm_remove_node(mem->mm_node);
- spin_unlock(&rman->lock);
+ spin_lock(&rman->lock);
+ drm_mm_remove_node(&node->mm_nodes[0]);
+ spin_unlock(&rman->lock);
- kfree(mem->mm_node);
- mem->mm_node = NULL;
- }
+ kfree(node);
}
static void ttm_range_man_debug(struct ttm_resource_manager *man,
@@ -125,6 +127,17 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
.debug = ttm_range_man_debug
};
+/**
+ * ttm_range_man_init
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ * @use_tt: if the memory manager uses tt
+ * @p_size: size of area to be managed in pages.
+ *
+ * Initialise a generic range manager for the selected memory type.
+ * The range manager is installed for this device in the type slot.
+ */
int ttm_range_man_init(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size)
@@ -152,6 +165,14 @@ int ttm_range_man_init(struct ttm_device *bdev,
}
EXPORT_SYMBOL(ttm_range_man_init);
+/**
+ * ttm_range_man_fini
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ *
+ * Remove the generic range manager from a slot and tear it down.
+ */
int ttm_range_man_fini(struct ttm_device *bdev,
unsigned type)
{
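The to_ttm_range_mgr_node() cast used in ttm_range_man_free() above is the usual container_of pattern over the embedded base resource, with the drm_mm nodes in a flexible array sized by struct_size() at allocation; roughly, per ttm_range_manager.h:

struct ttm_range_mgr_node {
	struct ttm_resource base;
	struct drm_mm_node mm_nodes[];	/* sized at alloc via struct_size() */
};

static inline struct ttm_range_mgr_node *
to_ttm_range_mgr_node(struct ttm_resource *res)
{
	return container_of(res, struct ttm_range_mgr_node, base);
}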
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 59e2b7157e41..2431717376e7 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -22,17 +22,17 @@
* Authors: Christian König
*/
+#include <linux/dma-buf-map.h>
+#include <linux/io-mapping.h>
+#include <linux/scatterlist.h>
+
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>
-int ttm_resource_alloc(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_resource *res)
+void ttm_resource_init(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res)
{
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, place->mem_type);
-
- res->mm_node = NULL;
res->start = 0;
res->num_pages = PFN_UP(bo->base.size);
res->mem_type = place->mem_type;
@@ -41,18 +41,29 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
res->bus.offset = 0;
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
-
- return man->func->alloc(man, bo, place, res);
}
+EXPORT_SYMBOL(ttm_resource_init);
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res)
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res_ptr)
{
struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, res->mem_type);
+ ttm_manager_type(bo->bdev, place->mem_type);
- man->func->free(man, res);
- res->mm_node = NULL;
- res->mem_type = TTM_PL_SYSTEM;
+ return man->func->alloc(man, bo, place, res_ptr);
+}
+
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
+{
+ struct ttm_resource_manager *man;
+
+ if (!*res)
+ return;
+
+ man = ttm_manager_type(bo->bdev, (*res)->mem_type);
+ man->func->free(man, *res);
+ *res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
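With managers now allocating the resource objects themselves, callers switch to a pointer-based pattern throughout; a minimal sketch:

struct ttm_resource *res;
int ret;

ret = ttm_resource_alloc(bo, &place, &res);
if (ret)
	return ret;
/* ... use res ... */
ttm_resource_free(bo, &res);	/* NULL-safe; clears the pointer */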
@@ -147,3 +158,192 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+
+static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_iomap *iter_io =
+ container_of(iter, typeof(*iter_io), base);
+ void __iomem *addr;
+
+retry:
+ while (i >= iter_io->cache.end) {
+ iter_io->cache.sg = iter_io->cache.sg ?
+ sg_next(iter_io->cache.sg) : iter_io->st->sgl;
+ iter_io->cache.i = iter_io->cache.end;
+ iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
+ PAGE_SHIFT;
+ iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
+ iter_io->start;
+ }
+
+ if (i < iter_io->cache.i) {
+ iter_io->cache.end = 0;
+ iter_io->cache.sg = NULL;
+ goto retry;
+ }
+
+ addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
+ (((resource_size_t)i - iter_io->cache.i)
+ << PAGE_SHIFT));
+ dma_buf_map_set_vaddr_iomem(dmap, addr);
+}
+
+static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *map)
+{
+ io_mapping_unmap_local(map->vaddr_iomem);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
+ .map_local = ttm_kmap_iter_iomap_map_local,
+ .unmap_local = ttm_kmap_iter_iomap_unmap_local,
+ .maps_tt = false,
+};
+
+/**
+ * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
+ * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
+ * @iomap: The struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct
+ * ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ *
+ * Return: Pointer to the embedded struct ttm_kmap_iter.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start)
+{
+ iter_io->base.ops = &ttm_kmap_iter_io_ops;
+ iter_io->iomap = iomap;
+ iter_io->st = st;
+ iter_io->start = start;
+ memset(&iter_io->cache, 0, sizeof(iter_io->cache));
+
+ return &iter_io->base;
+}
+EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
+
+/**
+ * DOC: Linear io iterator
+ *
+ * This code should die in the not too distant future. Best would be if we could
+ * make io-mapping use memremap for all io memory, and have memremap
+ * implement a kmap_local functionality. We could then strip a huge amount of
+ * code. These linear io iterators are implemented to mimic old functionality,
+ * and they don't use kmap_local semantics at all internally. Rather they use
+ * ioremap or friends, and at least on 32-bit that adds global TLB flushes and
+ * points of failure.
+ */
+
+static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_linear_io *iter_io =
+ container_of(iter, typeof(*iter_io), base);
+
+ *dmap = iter_io->dmap;
+ dma_buf_map_incr(dmap, i * PAGE_SIZE);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
+ .map_local = ttm_kmap_iter_linear_io_map_local,
+ .maps_tt = false,
+};
+
+/**
+ * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
+ * @iter_io: The iterator to initialize
+ * @bdev: The TTM device
+ * @mem: The ttm resource representing the iomap.
+ *
+ * This function is for internal TTM use only. It sets up a memcpy kmap iterator
+ * pointing at a linear chunk of io memory.
+ *
+ * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
+ * failure.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem)
+{
+ int ret;
+
+ ret = ttm_mem_io_reserve(bdev, mem);
+ if (ret)
+ goto out_err;
+ if (!mem->bus.is_iomem) {
+ ret = -EINVAL;
+ goto out_io_free;
+ }
+
+ if (mem->bus.addr) {
+ dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
+ iter_io->needs_unmap = false;
+ } else {
+ size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+
+ iter_io->needs_unmap = true;
+ memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
+ if (mem->bus.caching == ttm_write_combined)
+ dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap_wc(mem->bus.offset,
+ bus_size));
+ else if (mem->bus.caching == ttm_cached)
+ dma_buf_map_set_vaddr(&iter_io->dmap,
+ memremap(mem->bus.offset, bus_size,
+ MEMREMAP_WB |
+ MEMREMAP_WT |
+ MEMREMAP_WC));
+
+ /* If uncached requested or if mapping cached or wc failed */
+ if (dma_buf_map_is_null(&iter_io->dmap))
+ dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap(mem->bus.offset,
+ bus_size));
+
+ if (dma_buf_map_is_null(&iter_io->dmap)) {
+ ret = -ENOMEM;
+ goto out_io_free;
+ }
+ }
+
+ iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
+ return &iter_io->base;
+
+out_io_free:
+ ttm_mem_io_free(bdev, mem);
+out_err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
+ * @iter_io: The iterator to clean up
+ * @bdev: The TTM device
+ * @mem: The ttm resource representing the iomap.
+ *
+ * This function is for internal TTM use only. It cleans up a memcpy kmap
+ * iterator initialized by ttm_kmap_iter_linear_io_init.
+ */
+void
+ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem)
+{
+ if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) {
+ if (iter_io->dmap.is_iomem)
+ iounmap(iter_io->dmap.vaddr_iomem);
+ else
+ memunmap(iter_io->dmap.vaddr);
+ }
+
+ ttm_mem_io_free(bdev, mem);
+}
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
index 474221e863d0..63aca52f75e1 100644
--- a/drivers/gpu/drm/ttm/ttm_sys_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -3,20 +3,27 @@
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
+#include <linux/slab.h>
#include "ttm_module.h"
static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, *res);
return 0;
}
static void ttm_sys_man_free(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ kfree(res);
}
static const struct ttm_resource_manager_func ttm_sys_manager_func = {
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 539e0232cb3b..24031a8acd2d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -415,7 +415,7 @@ DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
#endif
-/**
+/*
* ttm_tt_mgr_init - register with the MM shrinker
*
* Register with the MM shrinker for swapping out BOs.
@@ -433,3 +433,48 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
if (!ttm_dma32_pages_limit)
ttm_dma32_pages_limit = num_dma32_pages;
}
+
+static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *dmap,
+ pgoff_t i)
+{
+ struct ttm_kmap_iter_tt *iter_tt =
+ container_of(iter, typeof(*iter_tt), base);
+
+ dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
+ iter_tt->prot));
+}
+
+static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
+ struct dma_buf_map *map)
+{
+ kunmap_local(map->vaddr);
+}
+
+static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
+ .map_local = ttm_kmap_iter_tt_map_local,
+ .unmap_local = ttm_kmap_iter_tt_unmap_local,
+ .maps_tt = true,
+};
+
+/**
+ * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
+ * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
+ * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
+ *
+ * Return: Pointer to the embedded struct ttm_kmap_iter.
+ */
+struct ttm_kmap_iter *
+ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+ struct ttm_tt *tt)
+{
+ iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
+ iter_tt->tt = tt;
+ if (tt)
+ iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
+ else
+ iter_tt->prot = PAGE_KERNEL;
+
+ return &iter_tt->base;
+}
+EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
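Putting the iterator flavours together, a copy from a populated ttm_tt to linear io memory might look roughly like this (bdev, bo and dst_mem assumed to be set up):

struct ttm_kmap_iter_tt src_tt;
struct ttm_kmap_iter_linear_io dst_io;
struct ttm_kmap_iter *src, *dst;

src = ttm_kmap_iter_tt_init(&src_tt, bo->ttm);
dst = ttm_kmap_iter_linear_io_init(&dst_io, bdev, dst_mem);
if (IS_ERR(dst))
	return PTR_ERR(dst);

/* Each page is mapped locally, copied, then unmapped again. */
ttm_move_memcpy(bo, dst_mem->num_pages, dst, src);

ttm_kmap_iter_linear_io_fini(&dst_io, bdev, dst_mem);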
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
index 361d3193258e..8c041d7ce4f1 100644
--- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
@@ -9,7 +9,8 @@
#include "hgsmi_ch_setup.h"
/**
- * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
+ * hgsmi_report_flags_location - Inform the host of the location of
+ * the host flags in VRAM via an HGSMI cmd.
* Return: 0 or negative errno value.
* @ctx: The context of the guest heap to use.
* @location: The offset chosen for the flags within guest VRAM.
@@ -33,7 +34,8 @@ int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
}
/**
- * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
+ * hgsmi_send_caps_info - Notify the host of HGSMI-related guest capabilities
+ * via an HGSMI command.
* Return: 0 or negative errno value.
* @ctx: The context of the guest heap to use.
* @caps: The capabilities to report, see vbva_caps.
@@ -71,7 +73,8 @@ int hgsmi_test_query_conf(struct gen_pool *ctx)
}
/**
- * Query the host for an HGSMI configuration parameter via an HGSMI command.
+ * hgsmi_query_conf - Query the host for an HGSMI configuration
+ * parameter via an HGSMI command.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap used.
* @index: The index of the parameter to query.
@@ -99,7 +102,8 @@ int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
}
/**
- * Pass the host a new mouse pointer shape via an HGSMI command.
+ * hgsmi_update_pointer_shape - Pass the host a new mouse pointer shape
+ * via an HGSMI command.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap to be used.
* @flags: Cursor flags.
@@ -171,9 +175,10 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
}
/**
- * Report the guest cursor position. The host may wish to use this information
- * to re-position its own cursor (though this is currently unlikely). The
- * current host cursor position is returned.
+ * hgsmi_cursor_position - Report the guest cursor position. The host may
+ * wish to use this information to re-position its
+ * own cursor (though this is currently unlikely).
+ * The current host cursor position is returned.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap used.
* @report_position: Are we reporting a position?
diff --git a/drivers/gpu/drm/vboxvideo/modesetting.c b/drivers/gpu/drm/vboxvideo/modesetting.c
index 7580b9002379..10b32d986b95 100644
--- a/drivers/gpu/drm/vboxvideo/modesetting.c
+++ b/drivers/gpu/drm/vboxvideo/modesetting.c
@@ -8,9 +8,11 @@
#include "hgsmi_channels.h"
/**
- * Set a video mode via an HGSMI request. The views must have been
- * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being
- * set on the first display then it must be set first using registers.
+ * hgsmi_process_display_info - Set a video mode via an HGSMI request.
+ * The views must have been initialised first
+ * using @a VBoxHGSMISendViewInfo and if the mode
+ * is being set on the first display then it must
+ * be set first using registers.
* @ctx: The context containing the heap to use.
* @display: The screen number.
* @origin_x: The horizontal displacement relative to the first scrn.
@@ -51,10 +53,12 @@ void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
}
/**
- * Report the rectangle relative to which absolute pointer events should be
- * expressed. This information remains valid until the next VBVA resize event
- * for any screen, at which time it is reset to the bounding rectangle of all
- * virtual screens.
+ * hgsmi_update_input_mapping - Report the rectangle relative to which absolute
+ * pointer events should be expressed. This
+ * information remains valid until the next VBVA
+ * resize event for any screen, at which time it is
+ * reset to the bounding rectangle of all virtual
+ * screens.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap to use.
* @origin_x: Upper left X co-ordinate relative to the first screen.
@@ -84,7 +88,7 @@ int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
}
/**
- * Get most recent video mode hints.
+ * hgsmi_get_mode_hints - Get most recent video mode hints.
* Return: 0 or negative errno value.
* @ctx: The context containing the heap to use.
* @screens: The number of screens to query hints for, starting at 0.
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 76657dcdf9b0..18f5009ce90e 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -279,14 +279,22 @@ static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc,
* allows drivers to push pixels to more than one encoder from the
* same CRTC.
*/
-static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
+static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ struct drm_connector_state *(*get_state)(struct drm_atomic_state *state,
+ struct drm_connector *connector))
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
drm_connector_list_iter_begin(crtc->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->state->crtc == crtc) {
+ struct drm_connector_state *conn_state = get_state(state, connector);
+
+ if (!conn_state)
+ continue;
+
+ if (conn_state->crtc == crtc) {
drm_connector_list_iter_end(&conn_iter);
return connector->encoder;
}
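The extra get_state callback lets one helper serve both halves of an
atomic commit. A sketch of the two call patterns, mirroring the call
sites changed later in this patch:

	struct drm_encoder *enc;

	/* enable path: look at the connector states being committed */
	enc = vc4_get_crtc_encoder(crtc, state,
				   drm_atomic_get_new_connector_state);

	/* disable path: look at the connector states being torn down */
	enc = vc4_get_crtc_encoder(crtc, state,
				   drm_atomic_get_old_connector_state);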
@@ -305,16 +313,17 @@ static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
}
-static void vc4_crtc_config_pv(struct drm_crtc *crtc)
+static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
+ drm_atomic_get_new_connector_state);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
- struct drm_crtc_state *state = crtc->state;
- struct drm_display_mode *mode = &state->adjusted_mode;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
@@ -421,10 +430,10 @@ static void require_hvs_enabled(struct drm_device *dev)
}
static int vc4_crtc_disable(struct drm_crtc *crtc,
+ struct drm_encoder *encoder,
struct drm_atomic_state *state,
unsigned int channel)
{
- struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -465,10 +474,29 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
return 0;
}
+static struct drm_encoder *vc4_crtc_get_encoder_by_type(struct drm_crtc *crtc,
+ enum vc4_encoder_type type)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+
+ if (vc4_encoder->type == type)
+ return encoder;
+ }
+
+ return NULL;
+}
+
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
{
struct drm_device *drm = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ enum vc4_encoder_type encoder_type;
+ const struct vc4_pv_data *pv_data;
+ struct drm_encoder *encoder;
+ unsigned int encoder_sel;
int channel;
if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
@@ -487,7 +515,17 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (channel < 0)
return 0;
- return vc4_crtc_disable(crtc, NULL, channel);
+ encoder_sel = VC4_GET_FIELD(CRTC_READ(PV_CONTROL), PV_CONTROL_CLK_SELECT);
+ if (WARN_ON(encoder_sel != 0))
+ return 0;
+
+ pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+ encoder_type = pv_data->encoder_types[encoder_sel];
+ encoder = vc4_crtc_get_encoder_by_type(crtc, encoder_type);
+ if (WARN_ON(!encoder))
+ return 0;
+
+ return vc4_crtc_disable(crtc, encoder, NULL, channel);
}
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -496,6 +534,8 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
+ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
+ drm_atomic_get_old_connector_state);
struct drm_device *dev = crtc->dev;
require_hvs_enabled(dev);
@@ -503,7 +543,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- vc4_crtc_disable(crtc, state, old_vc4_state->assigned_channel);
+ vc4_crtc_disable(crtc, encoder, state, old_vc4_state->assigned_channel);
/*
* Make sure we issue a vblank event after disabling the CRTC if
@@ -524,7 +564,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state,
+ drm_atomic_get_new_connector_state);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
require_hvs_enabled(dev);
@@ -539,7 +580,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
if (vc4_encoder->pre_crtc_configure)
vc4_encoder->pre_crtc_configure(encoder, state);
- vc4_crtc_config_pv(crtc);
+ vc4_crtc_config_pv(crtc, state);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
@@ -994,7 +1035,7 @@ static const struct vc4_pv_data bcm2711_pv3_data = {
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
- [0] = VC4_ENCODER_TYPE_VEC,
+ [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
},
};
@@ -1035,6 +1076,9 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
struct vc4_encoder *vc4_encoder;
int i;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ continue;
+
vc4_encoder = to_vc4_encoder(encoder);
for (i = 0; i < ARRAY_SIZE(pv_data->encoder_types); i++) {
if (vc4_encoder->type == encoder_types[i]) {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 9eff45b48869..8a60fb8ad370 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -304,12 +304,21 @@ static const struct component_master_ops vc4_drm_ops = {
.unbind = vc4_drm_unbind,
};
+/*
+ * This list determines the binding order of our components, and we have
+ * a few constraints:
+ * - The TXP driver needs to be bound before the PixelValves (CRTC)
+ * but after the HVS, to set the possible_crtcs field properly.
+ * - The HDMI driver needs to be bound after the HVS so that we can
+ * look up the HVS maximum core clock rate and figure out whether we
+ * support 4kp60 or not.
+ */
static struct platform_driver *const component_drivers[] = {
+ &vc4_hvs_driver,
&vc4_hdmi_driver,
&vc4_vec_driver,
&vc4_dpi_driver,
&vc4_dsi_driver,
- &vc4_hvs_driver,
&vc4_txp_driver,
&vc4_crtc_driver,
&vc4_v3d_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index c27b287d2053..3c4cc133e3df 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -35,6 +35,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_scdc_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/i2c.h>
@@ -76,6 +77,8 @@
#define VC5_HDMI_VERTB_VSPO_SHIFT 16
#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16)
+#define VC5_HDMI_SCRAMBLER_CTL_ENABLE BIT(0)
+
#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT 8
#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK VC4_MASK(10, 8)
@@ -91,10 +94,14 @@
# define VC4_HD_M_ENABLE BIT(0)
#define CEC_CLOCK_FREQ 40000
-#define VC4_HSM_MID_CLOCK 149985000
#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000)
+static bool vc4_hdmi_mode_needs_scrambling(const struct drm_display_mode *mode)
+{
+ return (mode->clock * 1000) > HDMI_14_MAX_TMDS_CLK;
+}
+
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -159,10 +166,9 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
- if (vc4_hdmi->hpd_gpio) {
- if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
- vc4_hdmi->hpd_active_low)
- connected = true;
+ if (vc4_hdmi->hpd_gpio &&
+ gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
+ connected = true;
} else if (drm_probe_ddc(vc4_hdmi->ddc)) {
connected = true;
} else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
@@ -211,6 +217,18 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
+ if (vc4_hdmi->disable_4kp60) {
+ struct drm_device *drm = connector->dev;
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (vc4_hdmi_mode_needs_scrambling(mode)) {
+ drm_warn_once(drm, "The core clock cannot reach frequencies high enough to support 4k @ 60Hz.");
+ drm_warn_once(drm, "Please change your config.txt file to add hdmi_enable_4kp60.");
+ }
+ }
+ }
+
return ret;
}
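To make the threshold concrete: struct drm_display_mode stores the
dotclock in kHz, so vc4_hdmi_mode_needs_scrambling() compares against the
HDMI 1.4 TMDS ceiling of 340 MHz. Worked values (modes assumed for
illustration):

	/*
	 * 4096x2160 @ 60Hz: mode->clock = 594000 kHz -> 594 MHz
	 *	594 MHz > 340 MHz -> scrambling needed (and the warning
	 *	above fires when 4kp60 support is disabled)
	 * 1920x1080 @ 60Hz: mode->clock = 148500 kHz -> 148.5 MHz
	 *	148.5 MHz <= 340 MHz -> no scrambling needed
	 */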
@@ -502,6 +520,88 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
vc4_hdmi_set_hdr_infoframe(encoder);
}
+static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_display_info *display = &vc4_hdmi->connector.display_info;
+
+ if (!vc4_encoder->hdmi_monitor)
+ return false;
+
+ if (!display->hdmi.scdc.supported ||
+ !display->hdmi.scdc.scrambling.supported)
+ return false;
+
+ return true;
+}
+
+#define SCRAMBLING_POLLING_DELAY_MS 1000
+
+static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
+{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+
+ if (!vc4_hdmi_supports_scrambling(encoder, mode))
+ return;
+
+ if (!vc4_hdmi_mode_needs_scrambling(mode))
+ return;
+
+ drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true);
+ drm_scdc_set_scrambling(vc4_hdmi->ddc, true);
+
+ HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) |
+ VC5_HDMI_SCRAMBLER_CTL_ENABLE);
+
+ queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
+ msecs_to_jiffies(SCRAMBLING_POLLING_DELAY_MS));
+}
+
+static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
+{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+
+ /*
+ * At boot, encoder->crtc will be NULL. Since we don't know the
+ * state of the scrambler in that case, disable it unconditionally
+ * to avoid any inconsistency.
+ */
+ if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
+ return;
+
+ if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
+ return;
+
+ if (delayed_work_pending(&vc4_hdmi->scrambling_work))
+ cancel_delayed_work_sync(&vc4_hdmi->scrambling_work);
+
+ HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) &
+ ~VC5_HDMI_SCRAMBLER_CTL_ENABLE);
+
+ drm_scdc_set_scrambling(vc4_hdmi->ddc, false);
+ drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, false);
+}
+
+static void vc4_hdmi_scrambling_wq(struct work_struct *work)
+{
+ struct vc4_hdmi *vc4_hdmi = container_of(to_delayed_work(work),
+ struct vc4_hdmi,
+ scrambling_work);
+
+ if (drm_scdc_get_scrambling_status(vc4_hdmi->ddc))
+ return;
+
+ drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true);
+ drm_scdc_set_scrambling(vc4_hdmi->ddc, true);
+
+ queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
+ msecs_to_jiffies(SCRAMBLING_POLLING_DELAY_MS));
+}
+
static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
@@ -514,6 +614,8 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
+
+ vc4_hdmi_disable_scrambling(encoder);
}
static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
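The worker above is an instance of a self-rearming delayed work: poll,
bail out once the sink reports scrambling took effect, otherwise retry
and queue again. A stripped-down sketch of the pattern, with stubbed,
illustrative helpers that are not part of this patch:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static bool example_sink_ok(void) { return true; }	/* stub */
static void example_retry(void) { }			/* stub */

static void example_poll(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	if (example_sink_ok())
		return;		/* converged, stop polling */

	example_retry();
	queue_delayed_work(system_wq, dwork, msecs_to_jiffies(1000));
}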
@@ -795,10 +897,10 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- unsigned long pixel_rate, hsm_rate;
+ unsigned long bvb_rate, pixel_rate, hsm_rate;
int ret;
- ret = pm_runtime_get_sync(&vc4_hdmi->pdev->dev);
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
return;
@@ -849,12 +951,14 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
- /*
- * FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup
- * at 300MHz.
- */
- ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock,
- (hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000));
+ if (pixel_rate > 297000000)
+ bvb_rate = 300000000;
+ else if (pixel_rate > 148500000)
+ bvb_rate = 150000000;
+ else
+ bvb_rate = 75000000;
+
+ ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
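The fixed thresholds replace the old HSM-based heuristic with tiers keyed
directly off the pixel rate. Worked examples (rates assumed for
illustration):

	/*
	 * 594000000 Hz (4k @ 60)    -> > 297 MHz           -> bvb_rate = 300 MHz
	 * 297000000 Hz (4k @ 30)    -> <= 297, > 148.5 MHz -> bvb_rate = 150 MHz
	 * 148500000 Hz (1080p @ 60) -> <= 148.5 MHz        -> bvb_rate = 75 MHz
	 */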
@@ -962,6 +1066,7 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
}
vc4_hdmi_recenter_fifo(vc4_hdmi);
+ vc4_hdmi_enable_scrambling(encoder);
}
static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -1014,6 +1119,9 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
return -EINVAL;
+ if (vc4_hdmi->disable_4kp60 && (pixel_rate > HDMI_14_MAX_TMDS_CLK))
+ return -EINVAL;
+
vc4_state->pixel_rate = pixel_rate;
return 0;
@@ -1033,6 +1141,9 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
return MODE_CLOCK_HIGH;
+ if (vc4_hdmi->disable_4kp60 && vc4_hdmi_mode_needs_scrambling(mode))
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -1993,12 +2104,12 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
struct vc4_hdmi *vc4_hdmi;
struct drm_encoder *encoder;
struct device_node *ddc_node;
- u32 value;
int ret;
vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL);
if (!vc4_hdmi)
return -ENOMEM;
+ INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq);
dev_set_drvdata(dev, vc4_hdmi);
encoder = &vc4_hdmi->encoder.base.base;
@@ -2031,26 +2142,34 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
/* Only use the GPIO HPD pin if present in the DT, otherwise
* we'll use the HDMI core's register.
*/
- if (of_find_property(dev->of_node, "hpd-gpios", &value)) {
- enum of_gpio_flags hpd_gpio_flags;
-
- vc4_hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node,
- "hpd-gpios", 0,
- &hpd_gpio_flags);
- if (vc4_hdmi->hpd_gpio < 0) {
- ret = vc4_hdmi->hpd_gpio;
- goto err_unprepare_hsm;
- }
-
- vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
+ vc4_hdmi->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(vc4_hdmi->hpd_gpio)) {
+ ret = PTR_ERR(vc4_hdmi->hpd_gpio);
+ goto err_put_ddc;
}
vc4_hdmi->disable_wifi_frequencies =
of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+ if (variant->max_pixel_clock == 600000000) {
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ long max_rate = clk_round_rate(vc4->hvs->core_clk, 550000000);
+
+ if (max_rate < 550000000)
+ vc4_hdmi->disable_4kp60 = true;
+ }
+
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
+ if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
+ of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
+ HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
+ clk_prepare_enable(vc4_hdmi->pixel_clock);
+ clk_prepare_enable(vc4_hdmi->hsm_clock);
+ clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
+ }
+
pm_runtime_enable(dev);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
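The disable_4kp60 decision hinges on clk_round_rate(), which reports the
closest rate the clock framework could actually deliver. A minimal sketch
of the same probe, with an illustrative helper name:

#include <linux/clk.h>

static bool example_core_clock_too_slow(struct clk *core_clk)
{
	/*
	 * If the firmware caps the HVS core clock below 550 MHz
	 * (e.g. no hdmi_enable_4kp60 in config.txt), rounding returns
	 * the capped rate and 4kp60 modes get filtered out later in
	 * mode_valid/atomic_check.
	 */
	return clk_round_rate(core_clk, 550000000) < 550000000;
}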
@@ -2080,8 +2199,8 @@ err_destroy_conn:
vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
err_destroy_encoder:
drm_encoder_cleanup(encoder);
-err_unprepare_hsm:
pm_runtime_disable(dev);
+err_put_ddc:
put_device(&vc4_hdmi->ddc->dev);
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 060bcaefbeb5..884d245507a9 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -129,6 +129,8 @@ struct vc4_hdmi {
struct vc4_hdmi_encoder encoder;
struct drm_connector connector;
+ struct delayed_work scrambling_work;
+
struct i2c_adapter *ddc;
void __iomem *hdmicore_regs;
void __iomem *hd_regs;
@@ -146,8 +148,7 @@ struct vc4_hdmi {
/* VC5 Only */
void __iomem *rm_regs;
- int hpd_gpio;
- bool hpd_active_low;
+ struct gpio_desc *hpd_gpio;
/*
* On some systems (like the RPi4), some modes are in the same
@@ -157,6 +158,14 @@ struct vc4_hdmi {
*/
bool disable_wifi_frequencies;
+ /*
+ * Even though HDMI0 on the RPi4 can output modes requiring a
+ * pixel rate higher than 297MHz, doing so needs adjustments in
+ * the config.txt file, so those modes won't always be available.
+ */
+ bool disable_4kp60;
+
struct cec_adapter *cec_adap;
struct cec_msg cec_rx_msg;
bool cec_tx_ok;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index e1b58eac766f..19d2fdc446bc 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -100,6 +100,7 @@ enum vc4_hdmi_field {
HDMI_RM_FORMAT,
HDMI_RM_OFFSET,
HDMI_SCHEDULER_CONTROL,
+ HDMI_SCRAMBLER_CTL,
HDMI_SW_RESET_CONTROL,
HDMI_TX_PHY_CHANNEL_SWAP,
HDMI_TX_PHY_CLK_DIV,
@@ -238,6 +239,7 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi0_fields[] = {
VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
+ VC4_HDMI_REG(HDMI_SCRAMBLER_CTL, 0x1c4),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f0),
@@ -317,6 +319,7 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178),
VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c),
VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
+ VC4_HDMI_REG(HDMI_SCRAMBLER_CTL, 0x1c4),
VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f0),
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index c0122d83b651..2fc7f4b5fa09 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -507,7 +507,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
return ret;
encoder = &txp->connector.encoder;
- encoder->possible_crtcs |= drm_crtc_mask(crtc);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
dev_name(dev), txp);
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 090529d0d5dc..11fc3d6f66b1 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -154,9 +154,14 @@
#define VEC_DAC_MISC_DAC_RST_N BIT(0)
+struct vc4_vec_variant {
+ u32 dac_config;
+};
+
/* General VEC hardware state. */
struct vc4_vec {
struct platform_device *pdev;
+ const struct vc4_vec_variant *variant;
struct drm_encoder *encoder;
struct drm_connector *connector;
@@ -445,10 +450,7 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
VEC_WRITE(VEC_CONFIG2,
VEC_CONFIG2_UV_DIG_DIS | VEC_CONFIG2_RGB_DIG_DIS);
VEC_WRITE(VEC_CONFIG3, VEC_CONFIG3_HORIZ_LEN_STD);
- VEC_WRITE(VEC_DAC_CONFIG,
- VEC_DAC_CONFIG_DAC_CTRL(0xc) |
- VEC_DAC_CONFIG_DRIVER_CTRL(0xc) |
- VEC_DAC_CONFIG_LDO_BIAS_CTRL(0x46));
+ VEC_WRITE(VEC_DAC_CONFIG, vec->variant->dac_config);
/* Mask all interrupts. */
VEC_WRITE(VEC_MASK0, 0);
@@ -501,8 +503,21 @@ static const struct drm_encoder_helper_funcs vc4_vec_encoder_helper_funcs = {
.atomic_mode_set = vc4_vec_encoder_atomic_mode_set,
};
+static const struct vc4_vec_variant bcm2835_vec_variant = {
+ .dac_config = VEC_DAC_CONFIG_DAC_CTRL(0xc) |
+ VEC_DAC_CONFIG_DRIVER_CTRL(0xc) |
+ VEC_DAC_CONFIG_LDO_BIAS_CTRL(0x46)
+};
+
+static const struct vc4_vec_variant bcm2711_vec_variant = {
+ .dac_config = VEC_DAC_CONFIG_DAC_CTRL(0x0) |
+ VEC_DAC_CONFIG_DRIVER_CTRL(0x80) |
+ VEC_DAC_CONFIG_LDO_BIAS_CTRL(0x61)
+};
+
static const struct of_device_id vc4_vec_dt_match[] = {
- { .compatible = "brcm,bcm2835-vec", .data = NULL },
+ { .compatible = "brcm,bcm2835-vec", .data = &bcm2835_vec_variant },
+ { .compatible = "brcm,bcm2711-vec", .data = &bcm2711_vec_variant },
{ /* sentinel */ },
};
@@ -540,6 +555,8 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
vec->encoder = &vc4_vec_encoder->base.base;
vec->pdev = pdev;
+ vec->variant = (const struct vc4_vec_variant *)
+ of_device_get_match_data(dev);
vec->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vec->regs))
return PTR_ERR(vec->regs);
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index a0e75f1d5d01..bf38a7e319d1 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -25,7 +25,7 @@
* Ben Widawsky <[email protected]>
*/
-/**
+/*
* This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
* software renderer and the X server for efficient buffer sharing.
*/
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 2902dc6e64fa..bd6f75285fd9 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -151,8 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Check for a conflicting fence */
resv = obj->resv;
- if (!dma_resv_test_signaled_rcu(resv,
- arg->flags & VGEM_FENCE_WRITE)) {
+ if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
ret = -EBUSY;
goto err_fence;
}
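These hunks are mechanical: the dma-resv helpers dropped their _rcu
suffix in this series, with unchanged arguments. Side by side, for the
call above:

	/* before */
	if (!dma_resv_test_signaled_rcu(resv, arg->flags & VGEM_FENCE_WRITE))
	/* after */
	if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE))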
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 33bf5f53ae31..ca77edbc5ea0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -125,11 +125,13 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_free;
+ goto err_deinit;
drm_fbdev_generic_setup(vdev->priv, 32);
return 0;
+err_deinit:
+ virtio_gpu_deinit(dev);
err_free:
drm_dev_put(dev);
return ret;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 669f2ee39515..5c1ad1596889 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -451,10 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (args->flags & VIRTGPU_WAIT_NOWAIT) {
- ret = dma_resv_test_signaled_rcu(obj->resv, true);
+ ret = dma_resv_test_signaled(obj->resv, true);
} else {
- ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
- timeout);
+ ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
}
if (ret == 0)
ret = -EBUSY;
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index b375394193be..f3379059f324 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -234,6 +234,7 @@ err_scanouts:
err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
+ dev->dev_private = NULL;
kfree(vgdev);
return ret;
}
@@ -264,6 +265,9 @@ void virtio_gpu_release(struct drm_device *dev)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
+ if (!vgdev)
+ return;
+
virtio_gpu_modeset_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
virtio_gpu_cleanup_cap_cache(vgdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index cdbd5a870711..09fe20e918f9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -483,10 +483,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
- d.dst_num_pages = dst->mem.num_pages;
- d.src_num_pages = src->mem.num_pages;
- d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
- d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
+ d.dst_num_pages = dst->resource->num_pages;
+ d.src_num_pages = src->resource->num_pages;
+ d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
+ d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
d.diff = diff;
for (j = 0; j < h; ++j) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 04dd49c4c257..362f56d5b12b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -103,7 +103,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(placement, &bo->mem,
+ ret = ttm_bo_mem_compat(placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);
@@ -145,7 +145,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0) {
- ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
goto out_unreserve;
}
@@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
uint32_t new_flags;
place = vmw_vram_placement.placement[0];
- place.lpfn = bo->mem.num_pages;
+ place.lpfn = bo->resource->num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;
@@ -227,22 +227,22 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* In that case, evict it first because TTM isn't good at handling
* that situation.
*/
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start < bo->mem.num_pages &&
- bo->mem.start > 0 &&
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
+ bo->resource->start < bo->resource->num_pages &&
+ bo->resource->start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
}
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ ret = ttm_bo_mem_compat(&placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);
/* For some reason we didn't end up at the start of vram */
- WARN_ON(ret == 0 && bo->mem.start != 0);
+ WARN_ON(ret == 0 && bo->resource->start != 0);
if (!ret)
vmw_bo_pin_reserved(buf, true);
@@ -293,11 +293,11 @@ err:
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
SVGAGuestPtr *ptr)
{
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ if (bo->resource->mem_type == TTM_PL_VRAM) {
ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
- ptr->offset = bo->mem.start << PAGE_SHIFT;
+ ptr->offset = bo->resource->start << PAGE_SHIFT;
} else {
- ptr->gmrId = bo->mem.start;
+ ptr->gmrId = bo->resource->start;
ptr->offset = 0;
}
}
@@ -316,7 +316,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
- uint32_t old_mem_type = bo->mem.mem_type;
+ uint32_t old_mem_type = bo->resource->mem_type;
int ret;
dma_resv_assert_held(bo->base.resv);
@@ -326,8 +326,8 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
pl.fpfn = 0;
pl.lpfn = 0;
- pl.mem_type = bo->mem.mem_type;
- pl.flags = bo->mem.placement;
+ pl.mem_type = bo->resource->mem_type;
+ pl.flags = bo->resource->placement;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
@@ -335,7 +335,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
ret = ttm_bo_validate(bo, &placement, &ctx);
- BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+ BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);
if (pin)
ttm_bo_pin(bo);
@@ -369,7 +369,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
@@ -743,9 +743,9 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
if (flags & drm_vmw_synccpu_allow_cs) {
long lret;
- lret = dma_resv_wait_timeout_rcu
- (bo->base.resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ lret = dma_resv_wait_timeout(bo->base.resv, true, true,
+ nonblock ? 0 :
+ MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
@@ -1197,7 +1197,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
* With other types of moves, the underlying pages stay the same,
* and the map can be kept.
*/
- if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+ if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
vmw_bo_unmap(vbo);
/*
@@ -1205,6 +1205,6 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
* read back all resource content first, and unbind the MOB from
* the resource.
*/
- if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+ if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 9c89189a226d..956b85e35cef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -576,11 +576,11 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ if (bo->resource->mem_type == TTM_PL_VRAM) {
cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
- cmd->body.guestResult.offset = bo->mem.start << PAGE_SHIFT;
+ cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
} else {
- cmd->body.guestResult.gmrId = bo->mem.start;
+ cmd->body.guestResult.gmrId = bo->resource->start;
cmd->body.guestResult.offset = 0;
}
@@ -621,8 +621,8 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd->body.mobid = bo->mem.start;
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
+ cmd->body.mobid = bo->resource->start;
cmd->body.offset = 0;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 05ca310ed61a..6bb4961e64a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -889,7 +889,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
header->cmd = man->map + offset;
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
- cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+ cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 3ed9914cb994..dffe3804ad3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -346,7 +346,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -355,7 +355,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -385,7 +385,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
uint8_t *cmd;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_state_scrub(uctx->cbs);
@@ -513,7 +513,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -522,7 +522,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -594,7 +594,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
uint8_t *cmd;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_context_scrub_cotables(res, readback);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index b40aa002bf2b..c84a16c1def0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -173,7 +173,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
SVGA3dCmdDXSetCOTable body;
} *cmd;
- WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
@@ -181,12 +181,12 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
return -ENOMEM;
WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
- WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.validSizeInBytes = vcotbl->size_read_back;
vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
@@ -315,7 +315,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
if (!vmw_resource_mob_attached(res))
return 0;
- WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+ WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);
mutex_lock(&dev_priv->binding_mutex);
@@ -431,7 +431,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
- for (i = 0; i < old_bo->mem.num_pages; ++i) {
+ for (i = 0; i < old_bo->resource->num_pages; ++i) {
bool dummy;
ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 32a84dff3fbf..a2b8464b3f56 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -735,7 +735,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
- cmd->body.mobid = dx_query_mob->base.mem.start;
+ cmd->body.mobid = dx_query_mob->base.resource->start;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1046,7 +1046,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
+ if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -3710,16 +3710,16 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
bo = &reloc->vbo->base;
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
- reloc->location->offset += bo->mem.start << PAGE_SHIFT;
+ reloc->location->offset += bo->resource->start << PAGE_SHIFT;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
break;
case VMW_PL_GMR:
- reloc->location->gmrId = bo->mem.start;
+ reloc->location->gmrId = bo->resource->start;
break;
case VMW_PL_MOB:
- *reloc->mob_loc = bo->mem.start;
+ *reloc->mob_loc = bo->resource->start;
break;
default:
BUG();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 1774960d1b89..28ceb749a733 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -52,11 +52,17 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *ma
static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
int id;
+ *res = kmalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(bo, place, *res);
+
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0)
return id;
@@ -64,36 +70,34 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) {
- gman->used_gmr_pages += mem->num_pages;
+ gman->used_gmr_pages += (*res)->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto nospace;
}
- mem->mm_node = gman;
- mem->start = id;
+ (*res)->start = id;
spin_unlock(&gman->lock);
return 0;
nospace:
- gman->used_gmr_pages -= mem->num_pages;
+ gman->used_gmr_pages -= (*res)->num_pages;
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
+ kfree(*res);
return -ENOSPC;
}
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
- if (mem->mm_node) {
- ida_free(&gman->gmr_ida, mem->start);
- spin_lock(&gman->lock);
- gman->used_gmr_pages -= mem->num_pages;
- spin_unlock(&gman->lock);
- mem->mm_node = NULL;
- }
+ ida_free(&gman->gmr_ida, res->start);
+ spin_lock(&gman->lock);
+ gman->used_gmr_pages -= res->num_pages;
+ spin_unlock(&gman->lock);
+ kfree(res);
}
static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
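With mm_node gone, resource managers now allocate and free the struct
ttm_resource themselves. The minimal shape of the new contract, reduced
from the gmrid code above (illustrative names, no driver-specific
bookkeeping):

#include <linux/slab.h>
#include <drm/ttm/ttm_resource.h>

static int example_man_alloc(struct ttm_resource_manager *man,
			     struct ttm_buffer_object *bo,
			     const struct ttm_place *place,
			     struct ttm_resource **res)
{
	*res = kmalloc(sizeof(**res), GFP_KERNEL);
	if (!*res)
		return -ENOMEM;

	ttm_resource_init(bo, place, *res);
	(*res)->start = 0;	/* driver-specific placement goes here */
	return 0;
}

static void example_man_free(struct ttm_resource_manager *man,
			     struct ttm_resource *res)
{
	kfree(res);	/* paired with the kmalloc() in the alloc hook */
}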
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 45c9c6a7f1d6..e5a9a5cbd01a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t num_pages = vbo->base.mem.num_pages;
+ pgoff_t num_pages = vbo->base.resource->num_pages;
size_t size, acc_size;
int ret;
static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
return ret;
page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->mem.num_pages)) {
+ if (unlikely(page_offset >= bo->resource->num_pages)) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->mem.num_pages ||
+ if (page_offset >= bo->resource->num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
@@ -529,7 +529,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (page_offset >= bo->mem.num_pages ||
+ if (page_offset >= bo->resource->num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 62ea920addc3..7b45393ad98e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
if (bo->moving)
dma_fence_put(bo->moving);
bo->moving = dma_fence_get
- (dma_resv_get_excl(bo->base.resv));
+ (dma_resv_excl_fence(bo->base.resv));
}
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index b391975871a5..b3c8d2da6f1a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -254,7 +254,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -263,7 +263,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
- cmd->body.mobid = bo->mem.start;
+ cmd->body.mobid = bo->resource->start;
cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -282,7 +282,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
} *cmd;
struct vmw_fence_obj *fence;
- BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+ BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -402,7 +402,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = shader->ctx->id;
cmd->body.shid = shader->id;
- cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.mobid = res->backup->base.resource->start;
cmd->body.offsetInBytes = res->backup_offset;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -450,7 +450,7 @@ static int vmw_dx_shader_bind(struct vmw_resource *res,
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_shader_unscrub(res);
mutex_unlock(&dev_priv->binding_mutex);
@@ -513,7 +513,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
struct vmw_fence_obj *fence;
int ret;
- BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+ BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_scrub(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 1dd042a20a66..c8efa4a6c995 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -106,7 +106,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body);
cmd->body.soid = so->id;
- cmd->body.mobid = res->backup->base.mem.start;
+ cmd->body.mobid = res->backup->base.resource->start;
cmd->body.offsetInBytes = res->backup_offset;
cmd->body.sizeInBytes = so->size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -142,7 +142,7 @@ static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
struct ttm_buffer_object *bo = val_buf->bo;
int ret;
- if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB))
+ if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB))
return -EINVAL;
mutex_lock(&dev_priv->binding_mutex);
@@ -197,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
struct vmw_fence_obj *fence;
int ret;
- if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB))
+ if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
return -EINVAL;
mutex_lock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 8ead06574850..0835468bb2ee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1212,7 +1212,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
uint32_t submit_size;
struct ttm_buffer_object *bo = val_buf->bo;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
@@ -1223,7 +1223,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
- cmd1->body.mobid = bo->mem.start;
+ cmd1->body.mobid = bo->resource->start;
if (res->backup_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
@@ -1266,7 +1266,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
uint8_t *cmd;
- BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+ BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 5ccc35b3194c..2a3d3468e4e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -7,6 +7,7 @@
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
/**
* struct vmw_thp_manager - Range manager implementing huge page alignment
@@ -50,20 +51,22 @@ static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
static int vmw_thp_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem)
+ struct ttm_resource **res)
{
struct vmw_thp_manager *rman = to_thp_manager(man);
struct drm_mm *mm = &rman->mm;
- struct drm_mm_node *node;
+ struct ttm_range_mgr_node *node;
unsigned long align_pages;
unsigned long lpfn;
enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
int ret;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
if (!node)
return -ENOMEM;
+ ttm_resource_init(bo, place, &node->base);
+
lpfn = place->lpfn;
if (!lpfn)
lpfn = man->size;
@@ -75,23 +78,26 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
spin_lock(&rman->lock);
if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
- if (mem->num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
- place, mem, lpfn, mode);
+ if (node->base.num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+ align_pages, place,
+ &node->base, lpfn, mode);
if (!ret)
goto found_unlock;
}
}
align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
- if (mem->num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
- mem, lpfn, mode);
+ if (node->base.num_pages >= align_pages) {
+ ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+ align_pages, place, &node->base,
+ lpfn, mode);
if (!ret)
goto found_unlock;
}
- ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+ node->base.num_pages,
bo->page_alignment, 0,
place->fpfn, lpfn, mode);
found_unlock:
@@ -100,28 +106,24 @@ found_unlock:
if (unlikely(ret)) {
kfree(node);
} else {
- mem->mm_node = node;
- mem->start = node->start;
+ node->base.start = node->mm_nodes[0].start;
+ *res = &node->base;
}
return ret;
}
-
-
static void vmw_thp_put_node(struct ttm_resource_manager *man,
- struct ttm_resource *mem)
+ struct ttm_resource *res)
{
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct vmw_thp_manager *rman = to_thp_manager(man);
- if (mem->mm_node) {
- spin_lock(&rman->lock);
- drm_mm_remove_node(mem->mm_node);
- spin_unlock(&rman->lock);
+ spin_lock(&rman->lock);
+ drm_mm_remove_node(&node->mm_nodes[0]);
+ spin_unlock(&rman->lock);
- kfree(mem->mm_node);
- mem->mm_node = NULL;
- }
+ kfree(node);
}
int vmw_thp_init(struct vmw_private *dev_priv)
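For range-based managers like THP, the drm_mm node now lives inside a
struct ttm_range_mgr_node wrapping the ttm_resource, recovered via
container_of. The access pattern used by the put path above, shown in
isolation:

	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
	struct drm_mm_node *mm_node = &node->mm_nodes[0];

	drm_mm_remove_node(mm_node);	/* under the manager's lock */
	kfree(node);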
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 7bfe83c936ff..0488042fb287 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -661,14 +661,6 @@ static void vmw_evict_flags(struct ttm_buffer_object *bo,
*placement = vmw_sys_placement;
}
-static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct ttm_object_file *tfile =
- vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
-
- return vmw_user_bo_verify_access(bo, tfile);
-}
-
static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
@@ -727,7 +719,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
- struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
int ret;
@@ -737,11 +729,11 @@ static int vmw_move(struct ttm_buffer_object *bo,
return ret;
}
- vmw_move_notify(bo, &bo->mem, new_mem);
+ vmw_move_notify(bo, bo->resource, new_mem);
if (old_man->use_tt && new_man->use_tt) {
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- ttm_bo_assign_mem(bo, new_mem);
+ if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
ret = ttm_bo_wait_ctx(bo, ctx);
@@ -749,7 +741,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
goto fail;
vmw_ttm_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
return 0;
} else {
@@ -759,7 +751,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
}
return 0;
fail:
- vmw_move_notify(bo, new_mem, &bo->mem);
+ vmw_move_notify(bo, new_mem, bo->resource);
return ret;
}
@@ -771,7 +763,6 @@ struct ttm_device_funcs vmw_bo_driver = {
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = vmw_move,
- .verify_access = vmw_verify_access,
.swap_notify = vmw_swap_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index cb9975889e2f..e6b1f98ec99f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -27,6 +27,32 @@
#include "vmwgfx_drv.h"
+static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev,
+ unsigned long offset,
+ unsigned long pages)
+{
+ struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+ struct drm_device *drm = &dev_priv->drm;
+ struct drm_vma_offset_node *node;
+ struct ttm_buffer_object *bo = NULL;
+
+ drm_vma_offset_lock_lookup(bdev->vma_manager);
+
+ node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
+ if (likely(node)) {
+ bo = container_of(node, struct ttm_buffer_object,
+ base.vma_node);
+ bo = ttm_bo_get_unless_zero(bo);
+ }
+
+ drm_vma_offset_unlock_lookup(bdev->vma_manager);
+
+ if (!bo)
+ drm_err(drm, "Could not find buffer object to map\n");
+
+ return bo;
+}
+
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
static const struct vm_operations_struct vmw_vm_ops = {
@@ -41,10 +67,25 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
};
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_device *bdev = &dev_priv->bdev;
+ struct ttm_buffer_object *bo;
+ int ret;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
+ return -EINVAL;
+
+ bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+ if (unlikely(!bo))
+ return -EINVAL;
- if (ret)
- return ret;
+ ret = vmw_user_bo_verify_access(bo, tfile);
+ if (unlikely(ret != 0))
+ goto out_unref;
+
+ ret = ttm_bo_mmap_obj(vma, bo);
+ if (unlikely(ret != 0))
+ goto out_unref;
vma->vm_ops = &vmw_vm_ops;
@@ -52,7 +93,13 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
if (!is_cow_mapping(vma->vm_flags))
vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+ ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
+
return 0;
+
+out_unref:
+ ttm_bo_put(bo);
+ return ret;
}
/* struct vmw_validation_mem callback */
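The reference flow of the reworked mmap path above, summarised:

	/*
	 * vmw_bo_vm_lookup()  -> +1 ref (ttm_bo_get_unless_zero)
	 * ttm_bo_mmap_obj()   -> +1 ref held by the VMA on success
	 * ttm_bo_put()        -> -1 ref, drops the lookup reference
	 *
	 * leaving exactly one reference pinned for the VMA's lifetime;
	 * on error, the single ttm_bo_put() in out_unref undoes the
	 * lookup reference instead.
	 */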
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 5180c5687ee5..949fde433ea2 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -50,6 +50,7 @@
#include <linux/screen_info.h>
#include <linux/vt.h>
#include <linux/console.h>
+#include <linux/acpi.h>
#include <linux/uaccess.h>
@@ -1450,9 +1451,23 @@ static struct miscdevice vga_arb_device = {
MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
};
+#if defined(CONFIG_ACPI)
+static bool vga_arb_integrated_gpu(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ return adev && !strcmp(acpi_device_hid(adev), ACPI_VIDEO_HID);
+}
+#else
+static bool vga_arb_integrated_gpu(struct device *dev)
+{
+ return false;
+}
+#endif
+
static void __init vga_arb_select_default_device(void)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev, *found = NULL;
struct vga_device *vgadev;
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
@@ -1505,20 +1520,26 @@ static void __init vga_arb_select_default_device(void)
#endif
if (!vga_default_device()) {
- list_for_each_entry(vgadev, &vga_list, list) {
+ list_for_each_entry_reverse(vgadev, &vga_list, list) {
struct device *dev = &vgadev->pdev->dev;
u16 cmd;
pdev = vgadev->pdev;
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
- vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
- vga_set_default_device(pdev);
- break;
+ found = pdev;
+ if (vga_arb_integrated_gpu(dev))
+ break;
}
}
}
+ if (found) {
+ vgaarb_info(&found->dev, "setting as boot device (VGA legacy resources not available)\n");
+ vga_set_default_device(found);
+ return;
+ }
+
if (!vga_default_device()) {
vgadev = list_first_entry_or_null(&vga_list,
struct vga_device, list);
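
The reverse walk changes the selection policy: every device still decoding
legacy I/O and memory is remembered, but an integrated GPU (identified via
its ACPI video companion, as above) terminates the scan immediately, so it
wins whenever present; otherwise the walk completes and 'found' ends up
holding the first enabled device in probe order. A sketch of the policy in
isolation, assuming the vga_list and helper from the hunks above:

    static struct pci_dev *pick_default_vga_sketch(void)
    {
            struct vga_device *vgadev;
            struct pci_dev *found = NULL;
            u16 cmd;

            list_for_each_entry_reverse(vgadev, &vga_list, list) {
                    pci_read_config_word(vgadev->pdev, PCI_COMMAND, &cmd);
                    if (!(cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)))
                            continue; /* not decoding, ignore */
                    found = vgadev->pdev;
                    if (vga_arb_integrated_gpu(&vgadev->pdev->dev))
                            break; /* integrated GPU wins outright */
            }
            return found;
    }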
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index ac4adb44b224..97ab491d2922 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm80_data *data;
- int rv;
data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
if (!data)
@@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
lm80_init_client(client);
/* A few vars need to be filled upon startup */
- rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
- if (rv < 0)
- return rv;
- data->fan[f_min][0] = rv;
- rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
- if (rv < 0)
- return rv;
- data->fan[f_min][1] = rv;
+ data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+ data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, lm80_groups);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2b9ffc21cbc4..ab148a696c0c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -473,6 +473,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
list_del(&id_priv->list);
cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
+ id_priv->id.device = NULL;
if (id_priv->id.route.addr.dev_addr.sgid_attr) {
rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
@@ -1860,6 +1861,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
iw_destroy_cm_id(id_priv->cm_id.iw);
}
cma_leave_mc_groups(id_priv);
+ rdma_restrack_del(&id_priv->res);
cma_release_dev(id_priv);
}
@@ -1873,7 +1875,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
kfree(id_priv->id.route.path_rec);
put_net(id_priv->id.route.addr.dev_addr.net);
- rdma_restrack_del(&id_priv->res);
kfree(id_priv);
}
@@ -3774,7 +3775,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
}
id_priv->backlog = backlog;
- if (id->device) {
+ if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id->device, 1)) {
ret = cma_ib_listen(id_priv);
if (ret)
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 0d65ce146fc4..c6e875619fac 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -66,7 +66,7 @@ wait_fence:
* may not be up-to-date. Wait for the exporter to finish
* the migration.
*/
- fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
+ fence = dma_resv_excl_fence(umem_dmabuf->attach->dmabuf->resv);
if (fence)
return dma_fence_wait(fence, false);
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 9ec6971056fa..049684880ae0 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
return ret;
uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
- if (!uapi_object)
- return -EINVAL;
+ if (IS_ERR(uapi_object))
+ return PTR_ERR(uapi_object);
handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
out_len, &total);
@@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
if (ret)
return ret;
+ if (!user_entry_size)
+ return -EINVAL;
+
max_entries = uverbs_attr_ptr_get_array_size(
attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
user_entry_size);
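
The fix works because uapi_get_object() reports failure through an
ERR_PTR()-encoded pointer, never NULL, so the old NULL test could not fire
and an invalid object_id sailed through. The general idiom, with a
hypothetical lookup helper standing in:

    #include <linux/err.h>

    /* foo_lookup() is hypothetical; like uapi_get_object() it encodes
     * errors in the returned pointer instead of returning NULL. */
    static int use_lookup_sketch(int id)
    {
            struct foo *obj = foo_lookup(id);

            if (IS_ERR(obj))             /* a NULL check never fires */
                    return PTR_ERR(obj); /* recover the negative errno */
            /* ... use obj ... */
            return 0;
    }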
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index a0b677accd96..eb9b0a2707f8 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
case UVERBS_OBJECT_QP:
{
struct mlx5_ib_qp *qp = to_mqp(uobj->object);
- enum ib_qp_type qp_type = qp->ibqp.qp_type;
- if (qp_type == IB_QPT_RAW_PACKET ||
+ if (qp->type == IB_QPT_RAW_PACKET ||
(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
struct mlx5_ib_raw_packet_qp *raw_packet_qp =
&qp->raw_packet_qp;
@@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
sq->tisn) == obj_id);
}
- if (qp_type == MLX5_IB_QPT_DCT)
+ if (qp->type == MLX5_IB_QPT_DCT)
return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
qp->dct.mdct.mqp.qpn) == obj_id;
-
return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
qp->ibqp.qp_num) == obj_id;
}
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
index 094bf85589db..001d766cf291 100644
--- a/drivers/infiniband/hw/mlx5/dm.c
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -217,6 +217,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
if (err)
return err;
+ if (op >= BITS_PER_TYPE(u32))
+ return -EOPNOTSUPP;
+
if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
return -EOPNOTSUPP;
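
The added range check matters because 'op' is user-controlled and BIT(op)
is a plain left shift: shifting a 32-bit capability word by 32 or more is
undefined behaviour in C, so the bounds test must come before the
capability test. As a standalone sketch:

    #include <linux/bits.h>
    #include <linux/errno.h>

    /* Reject out-of-range bit indexes before forming BIT(op); shifting
     * by >= the type width is undefined behaviour. */
    static int check_op_supported(u32 caps, u32 op)
    {
            if (op >= BITS_PER_TYPE(u32))
                    return -EOPNOTSUPP;
            return (caps & BIT(op)) ? 0 : -EOPNOTSUPP;
    }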
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6d1dd09a4388..644d5d0ac544 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4419,6 +4419,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
if (bound) {
rdma_roce_rescan_device(&dev->ib_dev);
+ mpi->ibdev->ib_active = true;
break;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 2af26737d32d..a6712e373eed 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -346,13 +346,15 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
payload_size(pkt), to_mr_obj, NULL);
- if (ret)
+ if (ret) {
+ wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
+ }
if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
return COMPST_COMP_ACK;
- else
- return COMPST_UPDATE_COMP;
+
+ return COMPST_UPDATE_COMP;
}
static inline enum comp_state do_atomic(struct rxe_qp *qp,
@@ -366,10 +368,12 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
sizeof(u64), to_mr_obj, NULL);
- if (ret)
+ if (ret) {
+ wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
- else
- return COMPST_COMP_ACK;
+ }
+
+ return COMPST_COMP_ACK;
}
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 34ae957a315c..b0f350d674fd 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
+ qp->sq.queue = NULL;
return err;
}
@@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
+ qp->rq.queue = NULL;
return err;
}
}
@@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
err2:
rxe_queue_cleanup(qp->sq.queue);
err1:
+ qp->pd = NULL;
+ qp->rcq = NULL;
+ qp->scq = NULL;
+ qp->srq = NULL;
+
if (srq)
rxe_drop_ref(srq);
rxe_drop_ref(scq);
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index d2313efb26db..3f175f220a22 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct siw_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- struct siw_cq *scq = NULL, *rcq = NULL;
unsigned long flags;
int num_sqe, num_rqe, rv = 0;
size_t length;
@@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
rv = -EINVAL;
goto err_out;
}
- scq = to_siw_cq(attrs->send_cq);
- rcq = to_siw_cq(attrs->recv_cq);
- if (!scq || (!rcq && !attrs->srq)) {
+ if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
rv = -EINVAL;
goto err_out;
@@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
else {
/* Zero sized SQ is not supported */
rv = -EINVAL;
- goto err_out;
+ goto err_out_xa;
}
if (num_rqe)
num_rqe = roundup_pow_of_two(num_rqe);
@@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
}
}
qp->pd = pd;
- qp->scq = scq;
- qp->rcq = rcq;
+ qp->scq = to_siw_cq(attrs->send_cq);
+ qp->rcq = to_siw_cq(attrs->recv_cq);
if (attrs->srq) {
/*
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index b90e825df7e1..62543a4eccc0 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -596,7 +596,7 @@ config IRQ_IDT3243X
config APPLE_AIC
bool "Apple Interrupt Controller (AIC)"
depends on ARM64
- default ARCH_APPLE
+ depends on ARCH_APPLE || COMPILE_TEST
help
Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
such as the M1.
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 91adf771f185..090bc3f4f7d8 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -359,10 +359,8 @@ static int mvebu_icu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
icu->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(icu->base)) {
- dev_err(&pdev->dev, "Failed to map icu base address.\n");
+ if (IS_ERR(icu->base))
return PTR_ERR(icu->base);
- }
/*
* Legacy bindings: ICU is one node with one MSI parent: force manually
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index 18832ccc8ff8..3a7b7a7f20ca 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -384,10 +384,8 @@ static int mvebu_sei_probe(struct platform_device *pdev)
sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sei->base = devm_ioremap_resource(sei->dev, sei->res);
- if (IS_ERR(sei->base)) {
- dev_err(sei->dev, "Failed to remap SEI resource\n");
+ if (IS_ERR(sei->base))
return PTR_ERR(sei->base);
- }
/* Retrieve the SEI capabilities with the interrupt ranges */
sei->caps = of_device_get_match_data(&pdev->dev);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index b9db90c4aa56..4704f2ee5797 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -892,10 +892,8 @@ static int stm32_exti_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host_data->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(host_data->base)) {
- dev_err(dev, "Unable to map registers\n");
+ if (IS_ERR(host_data->base))
return PTR_ERR(host_data->base);
- }
for (i = 0; i < drv_data->bank_nr; i++)
stm32_exti_chip_init(host_data, i, np);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 70061991915a..cd5642cef01f 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -46,7 +46,7 @@ static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
static int hfcsusb_setup_bch(struct bchannel *bch, int protocol);
static void deactivate_bchannel(struct bchannel *bch);
-static void hfcsusb_ph_info(struct hfcsusb *hw);
+static int hfcsusb_ph_info(struct hfcsusb *hw);
/* start next background transfer for control channel */
static void
@@ -241,7 +241,7 @@ hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
* send full D/B channel status information
* as MPH_INFORMATION_IND
*/
-static void
+static int
hfcsusb_ph_info(struct hfcsusb *hw)
{
struct ph_info *phi;
@@ -250,7 +250,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
if (!phi)
- return;
+ return -ENOMEM;
phi->dch.ch.protocol = hw->protocol;
phi->dch.ch.Flags = dch->Flags;
@@ -263,6 +263,8 @@ hfcsusb_ph_info(struct hfcsusb *hw)
_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC);
kfree(phi);
+
+ return 0;
}
/*
@@ -347,8 +349,7 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
ret = l1_event(dch->l1, hh->prim);
break;
case MPH_INFORMATION_REQ:
- hfcsusb_ph_info(hw);
- ret = 0;
+ ret = hfcsusb_ph_info(hw);
break;
}
@@ -403,8 +404,7 @@ hfc_l1callback(struct dchannel *dch, u_int cmd)
hw->name, __func__, cmd);
return -1;
}
- hfcsusb_ph_info(hw);
- return 0;
+ return hfcsusb_ph_info(hw);
}
static int
@@ -746,8 +746,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol)
handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
LED_B2_OFF);
}
- hfcsusb_ph_info(hw);
- return 0;
+ return hfcsusb_ph_info(hw);
}
static void
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index a16c7a2a7f3d..88d592bafdb0 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -630,17 +630,19 @@ static void
release_io(struct inf_hw *hw)
{
if (hw->cfg.mode) {
- if (hw->cfg.p) {
+ if (hw->cfg.mode == AM_MEMIO) {
release_mem_region(hw->cfg.start, hw->cfg.size);
- iounmap(hw->cfg.p);
+ if (hw->cfg.p)
+ iounmap(hw->cfg.p);
} else
release_region(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = AM_NONE;
}
if (hw->addr.mode) {
- if (hw->addr.p) {
+ if (hw->addr.mode == AM_MEMIO) {
release_mem_region(hw->addr.start, hw->addr.size);
- iounmap(hw->addr.p);
+ if (hw->addr.p)
+ iounmap(hw->addr.p);
} else
release_region(hw->addr.start, hw->addr.size);
hw->addr.mode = AM_NONE;
@@ -670,9 +672,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->cfg.start, (ulong)hw->cfg.size);
return err;
}
- if (hw->ci->cfg_mode == AM_MEMIO)
- hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = hw->ci->cfg_mode;
+ if (hw->ci->cfg_mode == AM_MEMIO) {
+ hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
+ if (!hw->cfg.p)
+ return -ENOMEM;
+ }
if (debug & DEBUG_HW)
pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->cfg.start,
@@ -697,12 +702,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->addr.start, (ulong)hw->addr.size);
return err;
}
+ hw->addr.mode = hw->ci->addr_mode;
if (hw->ci->addr_mode == AM_MEMIO) {
hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
- if (unlikely(!hw->addr.p))
+ if (!hw->addr.p)
return -ENOMEM;
}
- hw->addr.mode = hw->ci->addr_mode;
if (debug & DEBUG_HW)
pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->addr.start,
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index fc433e63b1dc..b1590cb4a188 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -307,7 +307,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
usleep_range(3000, 6000);
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret)
- return ret;
+ goto out;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 781942aeddd1..20f2510db1f6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -66,14 +66,14 @@ struct superblock {
__u8 magic[8];
__u8 version;
__u8 log2_interleave_sectors;
- __u16 integrity_tag_size;
- __u32 journal_sections;
- __u64 provided_data_sectors; /* userspace uses this value */
- __u32 flags;
+ __le16 integrity_tag_size;
+ __le32 journal_sections;
+ __le64 provided_data_sectors; /* userspace uses this value */
+ __le32 flags;
__u8 log2_sectors_per_block;
__u8 log2_blocks_per_bitmap_bit;
__u8 pad[2];
- __u64 recalc_sector;
+ __le64 recalc_sector;
__u8 pad2[8];
__u8 salt[SALT_SIZE];
};
@@ -86,16 +86,16 @@ struct superblock {
#define JOURNAL_ENTRY_ROUNDUP 8
-typedef __u64 commit_id_t;
+typedef __le64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR 8
struct journal_entry {
union {
struct {
- __u32 sector_lo;
- __u32 sector_hi;
+ __le32 sector_lo;
+ __le32 sector_hi;
} s;
- __u64 sector;
+ __le64 sector;
} u;
commit_id_t last_bytes[];
/* __u8 tag[0]; */
@@ -806,7 +806,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
}
if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
- uint64_t section_le;
+ __le64 section_le;
r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
if (unlikely(r < 0)) {
@@ -1640,7 +1640,7 @@ static void integrity_end_io(struct bio *bio)
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
const char *data, char *result)
{
- __u64 sector_le = cpu_to_le64(sector);
+ __le64 sector_le = cpu_to_le64(sector);
SHASH_DESC_ON_STACK(req, ic->internal_hash);
int r;
unsigned digest_size;
@@ -2689,30 +2689,26 @@ next_chunk:
if (unlikely(dm_integrity_failed(ic)))
goto err;
- if (!ic->discard) {
- io_req.bi_op = REQ_OP_READ;
- io_req.bi_op_flags = 0;
- io_req.mem.type = DM_IO_VMA;
- io_req.mem.ptr.addr = ic->recalc_buffer;
- io_req.notify.fn = NULL;
- io_req.client = ic->io;
- io_loc.bdev = ic->dev->bdev;
- io_loc.sector = get_data_sector(ic, area, offset);
- io_loc.count = n_sectors;
+ io_req.bi_op = REQ_OP_READ;
+ io_req.bi_op_flags = 0;
+ io_req.mem.type = DM_IO_VMA;
+ io_req.mem.ptr.addr = ic->recalc_buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = get_data_sector(ic, area, offset);
+ io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
- if (unlikely(r)) {
- dm_integrity_io_error(ic, "reading data", r);
- goto err;
- }
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+ }
- t = ic->recalc_tags;
- for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
- t += ic->tag_size;
- }
- } else {
- t = ic->recalc_tags + (n_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+ t = ic->recalc_tags;
+ for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
+ integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+ t += ic->tag_size;
}
metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
@@ -3826,7 +3822,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
for (i = 0; i < ic->journal_sections; i++) {
struct scatterlist sg;
struct skcipher_request *section_req;
- __u32 section_le = cpu_to_le32(i);
+ __le32 section_le = cpu_to_le32(i);
memset(crypt_iv, 0x00, ivsize);
memset(crypt_data, 0x00, crypt_len);
@@ -4368,13 +4364,11 @@ try_smaller_buffer:
goto bad;
}
INIT_WORK(&ic->recalc_work, integrity_recalc);
- if (!ic->discard) {
- ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
- if (!ic->recalc_buffer) {
- ti->error = "Cannot allocate buffer for recalculating";
- r = -ENOMEM;
- goto bad;
- }
+ ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
+ if (!ic->recalc_buffer) {
+ ti->error = "Cannot allocate buffer for recalculating";
+ r = -ENOMEM;
+ goto bad;
}
ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
ic->tag_size, GFP_KERNEL);
@@ -4383,9 +4377,6 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- if (ic->discard)
- memset(ic->recalc_tags, DISCARD_FILLER,
- (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size);
} else {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ti->error = "Recalculate can only be specified with internal_hash";
@@ -4579,7 +4570,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
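
Retyping the on-disk fields from __u16/__u32/__u64 to __le16/__le32/__le64
does not change the generated code; it gives sparse (make C=1) the
information it needs to flag any access that misses a
cpu_to_le*()/le*_to_cpu() conversion. The access pattern, as a minimal
sketch:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct on_disk_hdr {
            __le64 sector; /* stored little-endian regardless of host */
    };

    static void hdr_set_sector(struct on_disk_hdr *h, u64 sector)
    {
            h->sector = cpu_to_le64(sector); /* CPU -> disk order */
    }

    static u64 hdr_get_sector(const struct on_disk_hdr *h)
    {
            return le64_to_cpu(h->sector);   /* disk -> CPU order */
    }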
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2acb014c13a..b8e4d31124ea 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -855,12 +855,11 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
static uint32_t __minimum_chunk_size(struct origin *o)
{
struct dm_snapshot *snap;
- unsigned chunk_size = 0;
+ unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
if (o)
list_for_each_entry(snap, &o->snapshots, list)
- chunk_size = min_not_zero(chunk_size,
- snap->store->chunk_size);
+ chunk_size = min(chunk_size, snap->store->chunk_size);
return (uint32_t) chunk_size;
}
@@ -1409,6 +1408,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!s->store->chunk_size) {
ti->error = "Chunk size not set";
+ r = -EINVAL;
goto bad_read_metadata;
}
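
Seeding the running minimum with rounddown_pow_of_two(UINT_MAX) (i.e.
1U << 31, the largest power-of-two u32) rather than 0 means a plain min()
suffices and, when the origin has no snapshots, the function still returns
a power of two, which the chunk-size math downstream expects. The shape of
the computation, as a hypothetical standalone helper:

    #include <linux/kernel.h>
    #include <linux/log2.h>
    #include <linux/minmax.h>

    /* Hypothetical flattening of __minimum_chunk_size(): every size is
     * a power of two, so the minimum stays one even for an empty list. */
    static u32 minimum_chunk_size_sketch(const u32 *sizes, int n)
    {
            u32 m = rounddown_pow_of_two(UINT_MAX); /* == 1U << 31 */
            int i;

            for (i = 0; i < n; i++)
                    m = min(m, sizes[i]);
            return m;
    }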
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index 655db8272268..9767159aeb9b 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -281,7 +281,7 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
// read status reg in order to clear pending irqs
err = sp8870_readreg(state, 0x200);
- if (err)
+ if (err < 0)
return err;
// system controller start
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 83bd9a412a56..1e3b68a8743a 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -915,7 +915,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index a4f7431486f3..d93d384286c1 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -1424,7 +1424,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
- int ret;
sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
reset_camera_params(gspca_dev);
@@ -1436,10 +1435,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = mode;
cam->nmodes = ARRAY_SIZE(mode);
- ret = goto_low_power(gspca_dev);
- if (ret)
- gspca_err(gspca_dev, "Cannot go to low power mode: %d\n",
- ret);
+ goto_low_power(gspca_dev);
/* Check the firmware version. */
sd->params.version.firmwareVersion = 0;
get_version_information(gspca_dev);
diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
index bfa3b381d8a2..bf1af6ed9131 100644
--- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
@@ -195,7 +195,7 @@ static const struct v4l2_ctrl_config mt9m111_greenbal_cfg = {
int mt9m111_probe(struct sd *sd)
{
u8 data[2] = {0x00, 0x00};
- int i, rc = 0;
+ int i, err;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -213,18 +213,18 @@ int mt9m111_probe(struct sd *sd)
/* Do the preinit */
for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) {
if (preinit_mt9m111[i][0] == BRIDGE) {
- rc |= m5602_write_bridge(sd,
- preinit_mt9m111[i][1],
- preinit_mt9m111[i][2]);
+ err = m5602_write_bridge(sd,
+ preinit_mt9m111[i][1],
+ preinit_mt9m111[i][2]);
} else {
data[0] = preinit_mt9m111[i][2];
data[1] = preinit_mt9m111[i][3];
- rc |= m5602_write_sensor(sd,
- preinit_mt9m111[i][1], data, 2);
+ err = m5602_write_sensor(sd,
+ preinit_mt9m111[i][1], data, 2);
}
+ if (err < 0)
+ return err;
}
- if (rc < 0)
- return rc;
if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2))
return -ENODEV;
diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c
index d680b777f097..8fd99ceee4b6 100644
--- a/drivers/media/usb/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c
@@ -154,8 +154,8 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = {
int po1030_probe(struct sd *sd)
{
- int rc = 0;
u8 dev_id_h = 0, i;
+ int err;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -174,14 +174,14 @@ int po1030_probe(struct sd *sd)
for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) {
u8 data = preinit_po1030[i][2];
if (preinit_po1030[i][0] == SENSOR)
- rc |= m5602_write_sensor(sd,
- preinit_po1030[i][1], &data, 1);
+ err = m5602_write_sensor(sd, preinit_po1030[i][1],
+ &data, 1);
else
- rc |= m5602_write_bridge(sd, preinit_po1030[i][1],
- data);
+ err = m5602_write_bridge(sd, preinit_po1030[i][1],
+ data);
+ if (err < 0)
+ return err;
}
- if (rc < 0)
- return rc;
if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
return -ENODEV;
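
Both m5602 probe fixes replace the same anti-pattern: OR-ing return codes
across a loop ("rc |= write(...)") smears negative errnos together and can
mask which call failed, whereas testing each call lets the first real
error propagate intact. The corrected loop shape, with write_reg() as a
hypothetical stand-in for the bridge/sensor writes:

    /* write_reg() is hypothetical; it returns 0 or a negative errno,
     * like m5602_write_bridge()/m5602_write_sensor(). */
    static int write_table_sketch(const u8 (*tbl)[4], int n)
    {
            int i, err;

            for (i = 0; i < n; i++) {
                    err = write_reg(tbl[i][1], tbl[i][2]);
                    if (err < 0)
                            return err; /* first failure, unmangled */
            }
            return 0;
    }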
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 926408b41270..7a6f01ace78a 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -763,7 +763,8 @@ static int at24_probe(struct i2c_client *client)
at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
if (IS_ERR(at24->nvmem)) {
pm_runtime_disable(dev);
- regulator_disable(at24->vcc_reg);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
return PTR_ERR(at24->nvmem);
}
@@ -774,7 +775,8 @@ static int at24_probe(struct i2c_client *client)
err = at24_read(at24, 0, &test_byte, 1);
if (err) {
pm_runtime_disable(dev);
- regulator_disable(at24->vcc_reg);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
return -ENODEV;
}
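
With runtime PM enabled, at24's runtime-suspend callback may already have
disabled the vcc regulator by the time a probe error path runs; disabling
it again would unbalance the regulator's enable count. The guard checks
the runtime-PM status first. A sketch of the error-path shape, under that
assumption:

    #include <linux/pm_runtime.h>
    #include <linux/regulator/consumer.h>

    /* Only drop the enable count if runtime suspend has not already
     * done so; otherwise regulator_disable() would go unbalanced. */
    static int probe_fail_cleanup_sketch(struct device *dev,
                                         struct regulator *vcc)
    {
            pm_runtime_disable(dev);
            if (!pm_runtime_status_suspended(dev))
                    regulator_disable(vcc);
            return -ENODEV;
    }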
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index ff8791a651fd..af3c497defb1 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -2017,7 +2017,7 @@ wait_again:
if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
} else {
- timeout -= jiffies_to_usecs(completion_rc);
+ timeout = completion_rc;
goto wait_again;
}
} else {
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 832dd5c5bb06..0713b2c12d54 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -362,12 +362,9 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
}
if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
- dev_warn(hdev->dev,
+ dev_err(hdev->dev,
"Device boot warning - security not ready\n");
- /* This is a warning so we don't want it to disable the
- * device
- */
- err_val &= ~CPU_BOOT_ERR0_SECURITY_NOT_RDY;
+ err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
@@ -403,7 +400,8 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
err_exists = true;
}
- if (err_exists)
+ if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
+ lower_32_bits(hdev->boot_error_status_mask)))
return -EIO;
return 0;
@@ -661,18 +659,13 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
return rc;
}
-int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
enum pll_index *pll_index)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u8 pll_byte, pll_bit_off;
bool dynamic_pll;
-
- if (input_pll_index >= PLL_MAX) {
- dev_err(hdev->dev, "PLL index %d is out of range\n",
- input_pll_index);
- return -EINVAL;
- }
+ int fw_pll_idx;
dynamic_pll = prop->fw_security_status_valid &&
(prop->fw_app_security_map & CPU_BOOT_DEV_STS0_DYN_PLL_EN);
@@ -680,28 +673,39 @@ int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
if (!dynamic_pll) {
/*
* in case we are working with legacy FW (each asic has unique
- * PLL numbering) extract the legacy numbering
+	 * PLL numbering) use the driver-based index, as it is
+	 * aligned with the FW legacy numbering
*/
- *pll_index = hdev->legacy_pll_map[input_pll_index];
+ *pll_index = input_pll_index;
return 0;
}
+	/* retrieve a FW-compatible PLL index based on the
+	 * ASIC-specific user request
+ */
+ fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
+ if (fw_pll_idx < 0) {
+ dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
+ input_pll_index, fw_pll_idx);
+ return -EINVAL;
+ }
+
/* PLL map is a u8 array */
- pll_byte = prop->cpucp_info.pll_map[input_pll_index >> 3];
- pll_bit_off = input_pll_index & 0x7;
+ pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
+ pll_bit_off = fw_pll_idx & 0x7;
if (!(pll_byte & BIT(pll_bit_off))) {
dev_err(hdev->dev, "PLL index %d is not supported\n",
- input_pll_index);
+ fw_pll_idx);
return -EINVAL;
}
- *pll_index = input_pll_index;
+ *pll_index = fw_pll_idx;
return 0;
}
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
u16 *pll_freq_arr)
{
struct cpucp_packet pkt;
@@ -844,8 +848,13 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
if (rc) {
dev_err(hdev->dev, "Failed to read preboot version\n");
detect_cpu_boot_status(hdev, status);
- fw_read_errors(hdev, boot_err0_reg,
- cpu_security_boot_status_reg);
+
+		/* If we read all FF, then something is totally wrong and
+		 * there is no point in reading specific errors
+ */
+ if (status != -1)
+ fw_read_errors(hdev, boot_err0_reg,
+ cpu_security_boot_status_reg);
return -EIO;
}
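
The new boot_error_status_mask plumbing means a boot error is fatal only
if its bit survives two filters: CPU_BOOT_ERR0_ENABLED (the register-valid
bit) is always stripped, and the user-supplied module-parameter mask can
clear individual error bits for debugging. The gating condition in
isolation, as a sketch:

    /* Sketch of the final check in fw_read_errors(); the mask comes
     * from the boot_error_status_mask module parameter. */
    static bool boot_errors_fatal(u32 err_val, u64 mask)
    {
            return (err_val & ~CPU_BOOT_ERR0_ENABLED) &
                   lower_32_bits(mask);
    }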
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 44e89da30b4a..6579f8767abd 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -930,6 +930,9 @@ enum div_select_defs {
* driver is ready to receive asynchronous events. This
* function should be called during the first init and
* after every hard-reset of the device
+ * @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event
+ * @map_pll_idx_to_fw_idx: convert a driver-specific, per-ASIC PLL index to
+ * a generic f/w compatible PLL index
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1054,6 +1057,7 @@ struct hl_asic_funcs {
u32 block_id, u32 block_size);
void (*enable_events_from_fw)(struct hl_device *hdev);
void (*get_msi_info)(u32 *table);
+ int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
};
@@ -1950,8 +1954,6 @@ struct hl_mmu_funcs {
* @aggregated_cs_counters: aggregated cs counters among all contexts
* @mmu_priv: device-specific MMU data.
* @mmu_func: device-related MMU functions.
- * @legacy_pll_map: map holding map between dynamic (common) PLL indexes and
- * static (asic specific) PLL indexes.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1960,6 +1962,12 @@ struct hl_mmu_funcs {
* @clock_gating_mask: is clock gating enabled. bitmask that represents the
* different engines. See debugfs-driver-habanalabs for
* details.
+ * @boot_error_status_mask: contains a mask of the device boot error status.
+ * Each bit represents a different error, according to
+ * the defines in hl_boot_if.h. If the bit is cleared,
+ * the error will be ignored by the driver during
+ * device initialization. Mainly used to debug and
+ * work around firmware bugs
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @card_type: Various ASICs have several card types. This indicates the card
@@ -2071,12 +2079,11 @@ struct hl_device {
struct hl_mmu_priv mmu_priv;
struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
- enum pll_index *legacy_pll_map;
-
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
u64 clock_gating_mask;
+ u64 boot_error_status_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
enum cpucp_card_types card_type;
@@ -2387,9 +2394,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters);
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
u64 *total_energy);
-int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
enum pll_index *pll_index);
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
u16 *pll_freq_arr);
int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
@@ -2411,9 +2418,9 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
-long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
bool curr);
-void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
u64 freq);
int hl_get_temperature(struct hl_device *hdev,
int sensor_index, u32 attr, long *value);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 7135f1e03864..64d1530db985 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -30,6 +30,7 @@ static DEFINE_MUTEX(hl_devs_idr_lock);
static int timeout_locked = 30;
static int reset_on_lockup = 1;
static int memory_scrub = 1;
+static ulong boot_error_status_mask = ULONG_MAX;
module_param(timeout_locked, int, 0444);
MODULE_PARM_DESC(timeout_locked,
@@ -43,6 +44,10 @@ module_param(memory_scrub, int, 0444);
MODULE_PARM_DESC(memory_scrub,
"Scrub device memory in various states (0 = no, 1 = yes, default yes)");
+module_param(boot_error_status_mask, ulong, 0444);
+MODULE_PARM_DESC(boot_error_status_mask,
+ "Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");
+
#define PCI_VENDOR_ID_HABANALABS 0x1da3
#define PCI_IDS_GOYA 0x0001
@@ -319,6 +324,8 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->major = hl_major;
hdev->reset_on_lockup = reset_on_lockup;
hdev->memory_scrub = memory_scrub;
+ hdev->boot_error_status_mask = boot_error_status_mask;
+
hdev->pldm = 0;
set_driver_behavior_per_device(hdev);
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 9fa61573a89d..c9f649b31e3a 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -9,7 +9,7 @@
#include <linux/pci.h>
-long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
bool curr)
{
struct cpucp_packet pkt;
@@ -44,7 +44,7 @@ long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
return (long) result;
}
-void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
u64 freq)
{
struct cpucp_packet pkt;
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index b751652f80a8..9e4a6bb3acd1 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -105,36 +105,6 @@
#define GAUDI_PLL_MAX 10
-/*
- * this enum kept here for compatibility with old FW (in which each asic has
- * unique PLL numbering
- */
-enum gaudi_pll_index {
- GAUDI_CPU_PLL = 0,
- GAUDI_PCI_PLL,
- GAUDI_SRAM_PLL,
- GAUDI_HBM_PLL,
- GAUDI_NIC_PLL,
- GAUDI_DMA_PLL,
- GAUDI_MESH_PLL,
- GAUDI_MME_PLL,
- GAUDI_TPC_PLL,
- GAUDI_IF_PLL,
-};
-
-static enum pll_index gaudi_pll_map[PLL_MAX] = {
- [CPU_PLL] = GAUDI_CPU_PLL,
- [PCI_PLL] = GAUDI_PCI_PLL,
- [SRAM_PLL] = GAUDI_SRAM_PLL,
- [HBM_PLL] = GAUDI_HBM_PLL,
- [NIC_PLL] = GAUDI_NIC_PLL,
- [DMA_PLL] = GAUDI_DMA_PLL,
- [MESH_PLL] = GAUDI_MESH_PLL,
- [MME_PLL] = GAUDI_MME_PLL,
- [TPC_PLL] = GAUDI_TPC_PLL,
- [IF_PLL] = GAUDI_IF_PLL,
-};
-
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -810,7 +780,7 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
freq = 0;
}
} else {
- rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
+ rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
if (rc)
return rc;
@@ -1652,9 +1622,6 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->asic_specific = gaudi;
- /* store legacy PLL map */
- hdev->legacy_pll_map = gaudi_pll_map;
-
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
@@ -5612,6 +5579,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
struct hl_cs_job *job;
u32 cb_size, ctl, err_cause;
struct hl_cb *cb;
+ u64 id;
int rc;
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
@@ -5678,8 +5646,9 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
}
release_cb:
+ id = cb->id;
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
return rc;
}
@@ -8783,6 +8752,23 @@ static void gaudi_enable_events_from_fw(struct hl_device *hdev)
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
}
+static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
+{
+ switch (pll_idx) {
+ case HL_GAUDI_CPU_PLL: return CPU_PLL;
+ case HL_GAUDI_PCI_PLL: return PCI_PLL;
+ case HL_GAUDI_NIC_PLL: return NIC_PLL;
+ case HL_GAUDI_DMA_PLL: return DMA_PLL;
+ case HL_GAUDI_MESH_PLL: return MESH_PLL;
+ case HL_GAUDI_MME_PLL: return MME_PLL;
+ case HL_GAUDI_TPC_PLL: return TPC_PLL;
+ case HL_GAUDI_IF_PLL: return IF_PLL;
+ case HL_GAUDI_SRAM_PLL: return SRAM_PLL;
+ case HL_GAUDI_HBM_PLL: return HBM_PLL;
+ default: return -EINVAL;
+ }
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -8866,7 +8852,8 @@ static const struct hl_asic_funcs gaudi_funcs = {
.ack_protection_bits_errors = gaudi_ack_protection_bits_errors,
.get_hw_block_id = gaudi_get_hw_block_id,
.hw_block_mmap = gaudi_block_mmap,
- .enable_events_from_fw = gaudi_enable_events_from_fw
+ .enable_events_from_fw = gaudi_enable_events_from_fw,
+ .map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx
};
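
With the per-ASIC map_pll_idx_to_fw_idx() hooks in place, the static
legacy_pll_map tables become redundant: user space passes an
HL_GAUDI_*/HL_GOYA_* index, and the hook both translates it to the generic
firmware numbering and validates it, since unknown values yield -EINVAL. A
condensed sketch of how get_used_pll_index() consumes the hook:

    /* Condensed sketch; hdev->asic_funcs is per-ASIC, as wired up in
     * gaudi_funcs/goya_funcs. */
    static int used_pll_index_sketch(struct hl_device *hdev,
                                     u32 user_idx, u32 *fw_idx)
    {
            int idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(user_idx);

            if (idx < 0)
                    return -EINVAL; /* translation doubles as validation */
            *fw_idx = idx;
            return 0;
    }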
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
index 8c49da4bcbd5..9b60eadd4c35 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
@@ -13,7 +13,7 @@ void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
struct gaudi_device *gaudi = hdev->asic_specific;
if (freq == PLL_LAST)
- hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
}
int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
@@ -23,7 +23,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -33,7 +33,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
*max_clk = (value / 1000 / 1000);
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
if (value < 0) {
dev_err(hdev->dev,
@@ -57,7 +57,7 @@ static ssize_t clk_max_freq_mhz_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
gaudi->max_freq_value = value;
@@ -85,7 +85,7 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev,
gaudi->max_freq_value = value * 1000 * 1000;
- hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
fail:
return count;
@@ -100,7 +100,7 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
return sprintf(buf, "%lu\n", (value / 1000 / 1000));
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index e27338f4aad2..e0ad2a269779 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -118,30 +118,6 @@
#define IS_MME_IDLE(mme_arch_sts) \
(((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
-/*
- * this enum kept here for compatibility with old FW (in which each asic has
- * unique PLL numbering
- */
-enum goya_pll_index {
- GOYA_CPU_PLL = 0,
- GOYA_IC_PLL,
- GOYA_MC_PLL,
- GOYA_MME_PLL,
- GOYA_PCI_PLL,
- GOYA_EMMC_PLL,
- GOYA_TPC_PLL,
-};
-
-static enum pll_index goya_pll_map[PLL_MAX] = {
- [CPU_PLL] = GOYA_CPU_PLL,
- [IC_PLL] = GOYA_IC_PLL,
- [MC_PLL] = GOYA_MC_PLL,
- [MME_PLL] = GOYA_MME_PLL,
- [PCI_PLL] = GOYA_PCI_PLL,
- [EMMC_PLL] = GOYA_EMMC_PLL,
- [TPC_PLL] = GOYA_TPC_PLL,
-};
-
static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
"goya cq 4", "goya cpu eq"
@@ -775,7 +751,8 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
freq = 0;
}
} else {
- rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+ rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
+ pll_freq_arr);
if (rc)
return;
@@ -897,9 +874,6 @@ static int goya_sw_init(struct hl_device *hdev)
hdev->asic_specific = goya;
- /* store legacy PLL map */
- hdev->legacy_pll_map = goya_pll_map;
-
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
@@ -5512,6 +5486,20 @@ static void goya_enable_events_from_fw(struct hl_device *hdev)
GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
}
+static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
+{
+ switch (pll_idx) {
+ case HL_GOYA_CPU_PLL: return CPU_PLL;
+ case HL_GOYA_PCI_PLL: return PCI_PLL;
+ case HL_GOYA_MME_PLL: return MME_PLL;
+ case HL_GOYA_TPC_PLL: return TPC_PLL;
+ case HL_GOYA_IC_PLL: return IC_PLL;
+ case HL_GOYA_MC_PLL: return MC_PLL;
+ case HL_GOYA_EMMC_PLL: return EMMC_PLL;
+ default: return -EINVAL;
+ }
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5595,7 +5583,8 @@ static const struct hl_asic_funcs goya_funcs = {
.ack_protection_bits_errors = goya_ack_protection_bits_errors,
.get_hw_block_id = goya_get_hw_block_id,
.hw_block_mmap = goya_block_mmap,
- .enable_events_from_fw = goya_enable_events_from_fw
+ .enable_events_from_fw = goya_enable_events_from_fw,
+ .map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx
};
/*
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 3acb36a1a902..7d007125727f 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -13,19 +13,19 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
switch (freq) {
case PLL_HIGH:
- hl_set_frequency(hdev, MME_PLL, hdev->high_pll);
- hl_set_frequency(hdev, TPC_PLL, hdev->high_pll);
- hl_set_frequency(hdev, IC_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, hdev->high_pll);
break;
case PLL_LOW:
- hl_set_frequency(hdev, MME_PLL, GOYA_PLL_FREQ_LOW);
- hl_set_frequency(hdev, TPC_PLL, GOYA_PLL_FREQ_LOW);
- hl_set_frequency(hdev, IC_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, GOYA_PLL_FREQ_LOW);
break;
case PLL_LAST:
- hl_set_frequency(hdev, MME_PLL, goya->mme_clk);
- hl_set_frequency(hdev, TPC_PLL, goya->tpc_clk);
- hl_set_frequency(hdev, IC_PLL, goya->ic_clk);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, goya->mme_clk);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, goya->tpc_clk);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, goya->ic_clk);
break;
default:
dev_err(hdev->dev, "unknown frequency setting\n");
@@ -39,7 +39,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -49,7 +49,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
*max_clk = (value / 1000 / 1000);
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
if (value < 0) {
dev_err(hdev->dev,
@@ -72,7 +72,7 @@ static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
if (value < 0)
return value;
@@ -105,7 +105,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- hl_set_frequency(hdev, MME_PLL, value);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, value);
goya->mme_clk = value;
fail:
@@ -121,7 +121,7 @@ static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, TPC_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_TPC_PLL, false);
if (value < 0)
return value;
@@ -154,7 +154,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- hl_set_frequency(hdev, TPC_PLL, value);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, value);
goya->tpc_clk = value;
fail:
@@ -170,7 +170,7 @@ static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, IC_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_IC_PLL, false);
if (value < 0)
return value;
@@ -203,7 +203,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- hl_set_frequency(hdev, IC_PLL, value);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, value);
goya->ic_clk = value;
fail:
@@ -219,7 +219,7 @@ static ssize_t mme_clk_curr_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
if (value < 0)
return value;
@@ -236,7 +236,7 @@ static ssize_t tpc_clk_curr_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, TPC_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_TPC_PLL, true);
if (value < 0)
return value;
@@ -253,7 +253,7 @@ static ssize_t ic_clk_curr_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, IC_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_IC_PLL, true);
if (value < 0)
return value;
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 2bdf560ee681..0f9ea75b0b18 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -134,7 +134,7 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
for (i = 0; i < NUM_MIRRORED_REGS; i++) {
temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
if (temp < 0)
- data->regs[regs_to_copy[i]] = 0;
+ temp = 0;
data->regs[regs_to_copy[i]] = temp >> 8;
}
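
The one-line fix matters because the old code zeroed the cache slot on a
failed read and then immediately overwrote it with temp >> 8, shifting a
negative errno into the cache anyway. Clamping temp itself keeps the
subsequent shift well-defined. The corrected step as a sketch:

    #include <linux/i2c.h>

    /* Clamp the raw SMBus value, not the cache slot, so the shift
     * below never operates on a negative errno. */
    static void cache_reg_sketch(struct i2c_client *client,
                                 u8 *cache, u8 reg)
    {
            s32 temp = i2c_smbus_read_word_data(client, reg);

            if (temp < 0)
                    temp = 0;        /* failed read caches as 0x00 */
            cache[reg] = temp >> 8;  /* keep the high byte */
    }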
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index c394c0b08519..7ac788fae1b8 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -271,6 +271,7 @@ struct lis3lv02d {
int regs_size;
u8 *reg_cache;
bool regs_stored;
+ bool init_required;
u8 odr_mask; /* ODR bit mask */
u8 whoami; /* indicates measurement precision */
s16 (*read_data) (struct lis3lv02d *lis3, int reg);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b8b771b643cc..016a6106151a 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -236,7 +236,8 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
if (host->dram_access_quirk)
return;
- if (data->blocks > 1) {
+ /* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
+ if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
/*
* In block mode DMA descriptor format, "length" field indicates
* number of blocks and there is no way to pass DMA size that
@@ -258,7 +259,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
for_each_sg(data->sg, sg, data->sg_len, i) {
/* check for 8 byte alignment */
if (sg->offset % 8) {
- WARN_ONCE(1, "unaligned scatterlist buffer\n");
+ dev_warn_once(mmc_dev(mmc),
+ "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
+ sg->offset);
return;
}
}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 592d79082f58..061618aa247f 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -627,8 +627,13 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
*
* Wait 5ms after setting 1.8V signal enable in Host Control 2 register
* to ensure 1.8V signal enable bit is set by GL9750/GL9755.
+ *
+ * ...however, the controller in the NUC10i3FNK4 (a 9755) requires
+ * slightly longer than 5ms before the control register reports that
+ * 1.8V is ready, and far longer still before the card will actually
+ * work reliably.
*/
- usleep_range(5000, 5500);
+ usleep_range(100000, 110000);
}
static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index da6fffb4d5a8..d17482395a4d 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -269,9 +269,6 @@ static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
- if (WARN_ON(!dev))
- return -EINVAL;
-
ser = netdev_priv(dev);
/* Send flow off once, on high water mark */
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7c5af4beedc6..591229b96257 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1153,7 +1153,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
* @lio: per-network private data
* @start_stop: whether to start or stop
*/
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_soft_command *sc;
union octnet_cmd *ncmd;
@@ -1161,15 +1161,15 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
int retval;
if (oct->props[lio->ifidx].rx_on == start_stop)
- return;
+ return 0;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
if (!sc) {
netif_info(lio, rx_err, lio->netdev,
- "Failed to allocate octeon_soft_command\n");
- return;
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
}
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -1192,18 +1192,19 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
octeon_free_soft_command(oct, sc);
- return;
} else {
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0);
if (retval)
- return;
+ return retval;
oct->props[lio->ifidx].rx_on = start_stop;
WRITE_ONCE(sc->caller_is_done, true);
}
+
+ return retval;
}
/**
@@ -1778,6 +1779,7 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
if (oct->props[lio->ifidx].napi_enabled == 0) {
tasklet_disable(&oct_priv->droq_tasklet);
@@ -1813,7 +1815,9 @@ static int liquidio_open(struct net_device *netdev)
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
/* tell Octeon to start forwarding packets to host */
- send_rx_ctrl_cmd(lio, 1);
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ return ret;
/* start periodical statistics fetch */
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1824,7 +1828,7 @@ static int liquidio_open(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
- return 0;
+ return ret;
}
/**
@@ -1838,6 +1842,7 @@ static int liquidio_stop(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
@@ -1854,7 +1859,9 @@ static int liquidio_stop(struct net_device *netdev)
lio->link_changes++;
/* Tell Octeon that nic interface is down. */
- send_rx_ctrl_cmd(lio, 0);
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
if (OCTEON_CN23XX_PF(oct)) {
if (!oct->msix_on)
@@ -1889,7 +1896,7 @@ static int liquidio_stop(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 516f166ceff8..ffddb3126a32 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -595,7 +595,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
* @lio: per-network private data
* @start_stop: whether to start or stop
*/
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
struct octeon_soft_command *sc;
@@ -603,11 +603,16 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
int retval;
if (oct->props[lio->ifidx].rx_on == start_stop)
- return;
+ return 0;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
+ }
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -635,11 +640,13 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0);
if (retval)
- return;
+ return retval;
oct->props[lio->ifidx].rx_on = start_stop;
WRITE_ONCE(sc->caller_is_done, true);
}
+
+ return retval;
}
/**
@@ -906,6 +913,7 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
if (!oct->props[lio->ifidx].napi_enabled) {
tasklet_disable(&oct_priv->droq_tasklet);
@@ -932,11 +940,13 @@ static int liquidio_open(struct net_device *netdev)
(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
/* tell Octeon to start forwarding packets to host */
- send_rx_ctrl_cmd(lio, 1);
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ return ret;
dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
- return 0;
+ return ret;
}
/**
@@ -950,9 +960,12 @@ static int liquidio_stop(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
/* tell Octeon to stop forwarding packets to host */
- send_rx_ctrl_cmd(lio, 0);
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
/* Inform that netif carrier is down */
@@ -986,7 +999,7 @@ static int liquidio_stop(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a7b7a4aace79..b0c0504950d8 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -548,8 +548,8 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
if (!base) {
- pcmcia_release_window(link, link->resource[2]);
- return -ENOMEM;
+ pcmcia_release_window(link, link->resource[2]);
+ return -1;
}
pcmcia_map_mem_page(link, link->resource[2], 0);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index d8a3ecaed3fc..d8f0863b3934 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1048,7 +1048,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
if (!skb)
- break;
+ goto error;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;
@@ -1072,6 +1072,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
cnt++;
}
if (cnt != i) {
+error:
dev_err(&adapter->pdev->dev,
"LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
if (mode != QLCNIC_ILB_MODE)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 527077c98ebc..fc3b0acc8f99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -30,7 +30,7 @@ struct sunxi_priv_data {
static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
- int ret;
+ int ret = 0;
if (gmac->regulator) {
ret = regulator_enable(gmac->regulator);
@@ -51,11 +51,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
} else {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
ret = clk_prepare(gmac->tx_clk);
- if (ret)
- return ret;
+ if (ret && gmac->regulator)
+ regulator_disable(gmac->regulator);
}
- return 0;
+ return ret;
}
static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 707ccdd03b19..74e748662ec0 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8144,10 +8144,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
"VPD_SCAN: Reading in property [%s] len[%d]\n",
namebuf, prop_len);
for (i = 0; i < prop_len; i++) {
- err = niu_pci_eeprom_read(np, off + i);
- if (err >= 0)
- *prop_buf = err;
- ++prop_buf;
+ err = niu_pci_eeprom_read(np, off + i);
+ if (err < 0)
+ return err;
+ *prop_buf++ = err;
}
}
@@ -8158,14 +8158,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
}
/* ESPC_PIO_EN_ENABLE must be set */
-static void niu_pci_vpd_fetch(struct niu *np, u32 start)
+static int niu_pci_vpd_fetch(struct niu *np, u32 start)
{
u32 offset;
int err;
err = niu_pci_eeprom_read16_swp(np, start + 1);
if (err < 0)
- return;
+ return err;
offset = err + 3;
@@ -8174,12 +8174,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
u32 end;
err = niu_pci_eeprom_read(np, here);
+ if (err < 0)
+ return err;
if (err != 0x90)
- return;
+ return -EINVAL;
err = niu_pci_eeprom_read16_swp(np, here + 1);
if (err < 0)
- return;
+ return err;
here = start + offset + 3;
end = start + offset + err;
@@ -8187,9 +8189,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
offset += err;
err = niu_pci_vpd_scan_props(np, here, end);
- if (err < 0 || err == 1)
- return;
+ if (err < 0)
+ return err;
+ if (err == 1)
+ return -EINVAL;
}
+ return 0;
}
/* ESPC_PIO_EN_ENABLE must be set */
@@ -9280,8 +9285,11 @@ static int niu_get_invariants(struct niu *np)
offset = niu_pci_vpd_offset(np);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"%s() VPD offset [%08x]\n", __func__, offset);
- if (offset)
- niu_pci_vpd_fetch(np, offset);
+ if (offset) {
+ err = niu_pci_vpd_fetch(np, offset);
+ if (err < 0)
+ return err;
+ }
nw64(ESPC_PIO_EN, 0);
if (np->flags & NIU_FLAGS_VPD_VALID) {
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 7506cea46f58..433a047f3747 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -1027,14 +1027,17 @@ static ssize_t ath6kl_lrssi_roam_write(struct file *file,
{
struct ath6kl *ar = file->private_data;
unsigned long lrssi_roam_threshold;
+ int ret;
if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold))
return -EINVAL;
ar->lrssi_roam_threshold = lrssi_roam_threshold;
- ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+ ret = ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index ce8c102df7b3..633d0ab19031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1217,13 +1217,9 @@ static struct sdio_driver brcmf_sdmmc_driver = {
},
};
-void brcmf_sdio_register(void)
+int brcmf_sdio_register(void)
{
- int ret;
-
- ret = sdio_register_driver(&brcmf_sdmmc_driver);
- if (ret)
- brcmf_err("sdio_register_driver failed: %d\n", ret);
+ return sdio_register_driver(&brcmf_sdmmc_driver);
}
void brcmf_sdio_exit(void)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 08f9d47f2e5c..3f5da3bb6aa5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -275,11 +275,26 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
#ifdef CONFIG_BRCMFMAC_SDIO
void brcmf_sdio_exit(void);
-void brcmf_sdio_register(void);
+int brcmf_sdio_register(void);
+#else
+static inline void brcmf_sdio_exit(void) { }
+static inline int brcmf_sdio_register(void) { return 0; }
#endif
+
#ifdef CONFIG_BRCMFMAC_USB
void brcmf_usb_exit(void);
-void brcmf_usb_register(void);
+int brcmf_usb_register(void);
+#else
+static inline void brcmf_usb_exit(void) { }
+static inline int brcmf_usb_register(void) { return 0; }
+#endif
+
+#ifdef CONFIG_BRCMFMAC_PCIE
+void brcmf_pcie_exit(void);
+int brcmf_pcie_register(void);
+#else
+static inline void brcmf_pcie_exit(void) { }
+static inline int brcmf_pcie_register(void) { return 0; }
#endif
#endif /* BRCMFMAC_BUS_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 838b09b23abf..cee1682d2333 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1518,40 +1518,34 @@ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
}
}
-static void brcmf_driver_register(struct work_struct *work)
-{
-#ifdef CONFIG_BRCMFMAC_SDIO
- brcmf_sdio_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
- brcmf_usb_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
- brcmf_pcie_register();
-#endif
-}
-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
-
int __init brcmf_core_init(void)
{
- if (!schedule_work(&brcmf_driver_work))
- return -EBUSY;
+ int err;
+ err = brcmf_sdio_register();
+ if (err)
+ return err;
+
+ err = brcmf_usb_register();
+ if (err)
+ goto error_usb_register;
+
+ err = brcmf_pcie_register();
+ if (err)
+ goto error_pcie_register;
return 0;
+
+error_pcie_register:
+ brcmf_usb_exit();
+error_usb_register:
+ brcmf_sdio_exit();
+ return err;
}
void __exit brcmf_core_exit(void)
{
- cancel_work_sync(&brcmf_driver_work);
-
-#ifdef CONFIG_BRCMFMAC_SDIO
brcmf_sdio_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
brcmf_usb_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
brcmf_pcie_exit();
-#endif
}
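
The core.c rework above replaces the fire-and-forget workqueue with direct, ordered registration. A minimal sketch of that register/unwind ladder, with hypothetical bus_a/bus_b/bus_c helpers standing in for the sdio/usb/pcie calls (not the brcmfmac API):

	static int core_init(void)
	{
		int err;

		err = bus_a_register();
		if (err)
			return err;		/* nothing registered yet */

		err = bus_b_register();
		if (err)
			goto err_b;		/* unwind bus A */

		err = bus_c_register();
		if (err)
			goto err_c;		/* unwind bus B, then bus A */

		return 0;

	err_c:
		bus_b_exit();
	err_b:
		bus_a_exit();
		return err;
	}

The static inline stubs added to bus.h are what let such a ladder run unconditionally: a compiled-out bus type registers as a successful no-op.
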
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index ad79e3b7e74a..143a705b5cb3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2140,15 +2140,10 @@ static struct pci_driver brcmf_pciedrvr = {
};
-void brcmf_pcie_register(void)
+int brcmf_pcie_register(void)
{
- int err;
-
brcmf_dbg(PCIE, "Enter\n");
- err = pci_register_driver(&brcmf_pciedrvr);
- if (err)
- brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
- err);
+ return pci_register_driver(&brcmf_pciedrvr);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
index d026401d2001..8e6c227e8315 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
@@ -11,9 +11,4 @@ struct brcmf_pciedev {
struct brcmf_pciedev_info *devinfo;
};
-
-void brcmf_pcie_exit(void);
-void brcmf_pcie_register(void);
-
-
#endif /* BRCMFMAC_PCIE_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 586f4dfc638b..9fb68c2dc7e3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1584,12 +1584,8 @@ void brcmf_usb_exit(void)
usb_deregister(&brcmf_usbdrvr);
}
-void brcmf_usb_register(void)
+int brcmf_usb_register(void)
{
- int ret;
-
brcmf_dbg(USB, "Enter\n");
- ret = usb_register(&brcmf_usbdrvr);
- if (ret)
- brcmf_err("usb_register failed %d\n", ret);
+ return usb_register(&brcmf_usbdrvr);
}
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index f5b78257d551..c68814841583 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -801,24 +801,6 @@ static const struct attribute_group mesh_ie_group = {
.attrs = mesh_ie_attrs,
};
-static void lbs_persist_config_init(struct net_device *dev)
-{
- int ret;
- ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
- if (ret)
- pr_err("failed to create boot_opts_group.\n");
-
- ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
- if (ret)
- pr_err("failed to create mesh_ie_group.\n");
-}
-
-static void lbs_persist_config_remove(struct net_device *dev)
-{
- sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
- sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
-}
-
/***************************************************************************
* Initializing and starting, stopping mesh
@@ -1014,6 +996,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+ mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group;
+ mesh_dev->sysfs_groups[1] = &boot_opts_group;
+ mesh_dev->sysfs_groups[2] = &mesh_ie_group;
+
/* Register virtual mesh interface */
ret = register_netdev(mesh_dev);
if (ret) {
@@ -1021,19 +1007,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
goto err_free_netdev;
}
- ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- if (ret)
- goto err_unregister;
-
- lbs_persist_config_init(mesh_dev);
-
/* Everything successful */
ret = 0;
goto done;
-err_unregister:
- unregister_netdev(mesh_dev);
-
err_free_netdev:
free_netdev(mesh_dev);
@@ -1054,8 +1031,6 @@ void lbs_remove_mesh(struct lbs_private *priv)
netif_stop_queue(mesh_dev);
netif_carrier_off(mesh_dev);
- sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- lbs_persist_config_remove(mesh_dev);
unregister_netdev(mesh_dev);
priv->mesh_dev = NULL;
kfree(mesh_dev->ieee80211_ptr);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 2a7ee90a3f54..ffd150ec181f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -440,9 +440,14 @@ static void rtl_watchdog_wq_callback(struct work_struct *work);
static void rtl_fwevt_wq_callback(struct work_struct *work);
static void rtl_c2hcmd_wq_callback(struct work_struct *work);
-static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct workqueue_struct *wq;
+
+ wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+ if (!wq)
+ return -ENOMEM;
/* <1> timer */
timer_setup(&rtlpriv->works.watchdog_timer,
@@ -451,11 +456,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
rtl_easy_concurrent_retrytimer_callback, 0);
/* <2> work queue */
rtlpriv->works.hw = hw;
- rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
- if (unlikely(!rtlpriv->works.rtl_wq)) {
- pr_err("Failed to allocate work queue\n");
- return;
- }
+ rtlpriv->works.rtl_wq = wq;
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
rtl_watchdog_wq_callback);
@@ -466,6 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
rtl_swlps_rfon_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
+ return 0;
}
void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
@@ -564,9 +566,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
rtlmac->link_state = MAC80211_NOLINK;
/* <6> init deferred work */
- _rtl_init_deferred_work(hw);
-
- return 0;
+ return _rtl_init_deferred_work(hw);
}
EXPORT_SYMBOL_GPL(rtl_init_core);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d9ab9e7871d0..256e87721a01 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2461,6 +2461,18 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
static void
__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{
+ int q;
+
+ /*
+ * if aborting io, the queues are no longer good, mark them
+ * all as not live.
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ for (q = 1; q < ctrl->ctrl.queue_count; q++)
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
+ }
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
/*
* If io queues are present, stop them and terminate all outstanding
* ios on them. As FC allocates FC exchange for each io, the
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 0222e23f5936..34f4b3402f7c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -943,7 +943,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
if (ret <= 0)
return ret;
- nvme_tcp_advance_req(req, ret);
if (queue->data_digest)
nvme_tcp_ddgst_update(queue->snd_hash, page,
offset, ret);
@@ -960,6 +959,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
}
return 1;
}
+ nvme_tcp_advance_req(req, ret);
}
return -EAGAIN;
}
@@ -1140,7 +1140,8 @@ static void nvme_tcp_io_work(struct work_struct *w)
pending = true;
else if (unlikely(result < 0))
break;
- }
+ } else
+ pending = !llist_empty(&queue->req_list);
result = nvme_tcp_try_recv(queue);
if (result > 0)
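
The try_send_data reorder above moves the request advance to the end of the loop body; the point is that per-chunk state must only be committed once every early return for that chunk has been passed. A generic sketch of the idiom, with hypothetical types and helpers:

	static int try_send(struct req *req)
	{
		int sent;

		while (have_data(req)) {
			sent = send_chunk(req);		/* hypothetical */
			if (sent <= 0)
				return sent;		/* nothing committed yet */

			update_digest(req, sent);	/* uses pre-advance offsets */

			if (last_chunk(req, sent))
				return 1;		/* done: nothing to advance */

			advance(req, sent);		/* commit on success only */
		}
		return -EAGAIN;
	}
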
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 25cc2ee8de3f..1853db38b682 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1372,7 +1372,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
goto out_free_changed_ns_list;
if (subsys->cntlid_min > subsys->cntlid_max)
- goto out_free_changed_ns_list;
+ goto out_free_sqs;
ret = ida_simple_get(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 74b3b150e1a5..cb30cb942e1d 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -590,8 +590,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
0 /* no quirks, we're perfect! */);
- if (ret)
+ if (ret) {
+ kfree(ctrl);
goto out;
+ }
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
WARN_ON_ONCE(1);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index ec44a79e951a..3a72352aa5cf 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1385,6 +1385,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
drv->driver.owner = owner;
drv->driver.mod_name = mod_name;
drv->driver.groups = drv->groups;
+ drv->driver.dev_groups = drv->dev_groups;
spin_lock_init(&drv->dynids.lock);
INIT_LIST_HEAD(&drv->dynids.list);
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index bbc4e71a16ff..38800e86ed8a 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -294,6 +294,9 @@ mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
return NULL;
+ /* Make sure 'avail->idx' is visible already. */
+ virtio_rmb(false);
+
idx = vring->next_avail % vr->num;
head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
if (WARN_ON(head >= vr->num))
@@ -322,7 +325,7 @@ static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
* done or not. Add a memory barrier here to make sure the update above
* completes before updating the idx.
*/
- mb();
+ virtio_mb(false);
vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}
@@ -733,6 +736,12 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
desc = NULL;
fifo->vring[is_rx] = NULL;
+ /*
+ * Make sure the load/store are in order before
+ * returning back to virtio.
+ */
+ virtio_mb(false);
+
/* Notify upper layer that packet is done. */
spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
vring_interrupt(0, vring->vq);
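
The virtio_mb()/virtio_rmb() calls above enforce the classic publish order on the ring indices. A reduced sketch of the producer side, using smp_wmb()/WRITE_ONCE() as stand-ins for the virtio barrier helpers (an assumption for illustration, not the driver's exact code):

	struct ring {
		unsigned int idx;		/* consumer polls this */
		unsigned int slot[256];
	};

	static void publish(struct ring *r, unsigned int val)
	{
		unsigned int i = r->idx;

		r->slot[i % 256] = val;		/* 1. write the payload */
		smp_wmb();			/* 2. payload before index */
		WRITE_ONCE(r->idx, i + 1);	/* 3. make it visible */
	}

The consumer needs the mirror-image ordering, which is what the added virtio_rmb() provides: read the index first, barrier, then read the entry it covers.
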
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index 69e86cd599d3..8a70df60142c 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -2483,8 +2483,7 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
* interrupt, and let the SAM resume callback during the controller
* resume process clear it.
*/
- const int irqf = IRQF_SHARED | IRQF_ONESHOT |
- IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
+ const int irqf = IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
if (IS_ERR(gpiod))
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 63ce587e79e3..5d9b758a99bb 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -527,20 +527,14 @@ static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt
struct sdtx_client *client = file->private_data;
__poll_t events = 0;
- if (down_read_killable(&client->ddev->lock))
- return -ERESTARTSYS;
-
- if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
- up_read(&client->ddev->lock);
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
return EPOLLHUP | EPOLLERR;
- }
poll_wait(file, &client->ddev->waitq, pt);
if (!kfifo_is_empty(&client->buffer))
events |= EPOLLIN | EPOLLRDNORM;
- up_read(&client->ddev->lock);
return events;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 2714f7c3843e..60592fb88e7a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -711,7 +711,7 @@ config INTEL_HID_EVENT
config INTEL_INT0002_VGPIO
tristate "Intel ACPI INT0002 Virtual GPIO driver"
- depends on GPIOLIB && ACPI
+ depends on GPIOLIB && ACPI && PM_SLEEP
select GPIOLIB_IRQCHIP
help
Some peripherals on Bay Trail and Cherry Trail platforms signal a
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index a1753485159c..33f823772733 100644
--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -270,7 +270,8 @@ int init_dell_smbios_wmi(void)
void exit_dell_smbios_wmi(void)
{
- wmi_driver_unregister(&dell_smbios_wmi_driver);
+ if (wmi_supported)
+ wmi_driver_unregister(&dell_smbios_wmi_driver);
}
MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 13d57434e60f..5529d7b0abea 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -133,31 +133,21 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
return r;
}
+#define DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME(name) \
+ { .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }}
+
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550 GAMING X V2"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M AORUS PRO-P"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M DS3H"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "Z390 I AORUS PRO WIFI-CF"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 AORUS ELITE"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 I AORUS PRO WIFI"),
- }},
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
{ }
};
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
index 12c31fd5d5ae..0753ef18e721 100644
--- a/drivers/platform/x86/hp-wireless.c
+++ b/drivers/platform/x86/hp-wireless.c
@@ -17,12 +17,14 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
MODULE_ALIAS("acpi*:HPQ6001:*");
MODULE_ALIAS("acpi*:WSTADEF:*");
+MODULE_ALIAS("acpi*:AMDI0051:*");
static struct input_dev *hpwl_input_dev;
static const struct acpi_device_id hpwl_ids[] = {
{"HPQ6001", 0},
{"WSTADEF", 0},
+ {"AMDI0051", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 799cbe2ffcf3..8c0867bda828 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -88,6 +88,9 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
{
struct acpi_device *dev = lis3->bus_priv;
+ if (!lis3->init_required)
+ return 0;
+
if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
NULL, NULL) != AE_OK)
return -EINVAL;
@@ -356,6 +359,7 @@ static int lis3lv02d_add(struct acpi_device *device)
}
/* call the core layer do its init */
+ lis3_dev.init_required = true;
ret = lis3lv02d_init_device(&lis3_dev);
if (ret)
return ret;
@@ -403,11 +407,27 @@ static int lis3lv02d_suspend(struct device *dev)
static int lis3lv02d_resume(struct device *dev)
{
+ lis3_dev.init_required = false;
+ lis3lv02d_poweron(&lis3_dev);
+ return 0;
+}
+
+static int lis3lv02d_restore(struct device *dev)
+{
+ lis3_dev.init_required = true;
lis3lv02d_poweron(&lis3_dev);
return 0;
}
-static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
+static const struct dev_pm_ops hp_accel_pm = {
+ .suspend = lis3lv02d_suspend,
+ .resume = lis3lv02d_resume,
+ .freeze = lis3lv02d_suspend,
+ .thaw = lis3lv02d_resume,
+ .poweroff = lis3lv02d_suspend,
+ .restore = lis3lv02d_restore,
+};
+
#define HP_ACCEL_PM (&hp_accel_pm)
#else
#define HP_ACCEL_PM NULL
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 6cb5ad4be231..387817290921 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -57,8 +57,8 @@ enum {
};
enum {
- SMBC_CONSERVATION_ON = 3,
- SMBC_CONSERVATION_OFF = 5,
+ SBMC_CONSERVATION_ON = 3,
+ SBMC_CONSERVATION_OFF = 5,
};
enum {
@@ -182,9 +182,9 @@ static int eval_gbmd(acpi_handle handle, unsigned long *res)
return eval_int(handle, "GBMD", res);
}
-static int exec_smbc(acpi_handle handle, unsigned long arg)
+static int exec_sbmc(acpi_handle handle, unsigned long arg)
{
- return exec_simple_method(handle, "SMBC", arg);
+ return exec_simple_method(handle, "SBMC", arg);
}
static int eval_hals(acpi_handle handle, unsigned long *res)
@@ -477,7 +477,7 @@ static ssize_t conservation_mode_store(struct device *dev,
if (err)
return err;
- err = exec_smbc(priv->adev->handle, state ? SMBC_CONSERVATION_ON : SMBC_CONSERVATION_OFF);
+ err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
if (err)
return err;
@@ -809,6 +809,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
{
struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
struct ideapad_private *priv = dytc->priv;
+ unsigned long output;
int err;
err = mutex_lock_interruptible(&dytc->mutex);
@@ -829,7 +830,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
/* Determine if we are in CQL mode. This alters the commands we do */
err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
- NULL);
+ &output);
if (err)
goto unlock;
}
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index 289c6655d425..569342aa8926 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -51,6 +51,12 @@
#define GPE0A_STS_PORT 0x420
#define GPE0A_EN_PORT 0x428
+struct int0002_data {
+ struct gpio_chip chip;
+ int parent_irq;
+ int wake_enable_count;
+};
+
/*
* As this is not a real GPIO at all, but just a hack to model an event in
* ACPI the get / set functions are dummy functions.
@@ -98,14 +104,16 @@ static void int0002_irq_mask(struct irq_data *data)
static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct platform_device *pdev = to_platform_device(chip->parent);
- int irq = platform_get_irq(pdev, 0);
+ struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
- /* Propagate to parent irq */
+ /*
+ * Applying the wakeup flag to our parent IRQ is delayed till system
+ * suspend, because we only want to do this when using s2idle.
+ */
if (on)
- enable_irq_wake(irq);
+ int0002->wake_enable_count++;
else
- disable_irq_wake(irq);
+ int0002->wake_enable_count--;
return 0;
}
@@ -135,7 +143,7 @@ static bool int0002_check_wake(void *data)
return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
}
-static struct irq_chip int0002_byt_irqchip = {
+static struct irq_chip int0002_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
.irq_mask = int0002_irq_mask,
@@ -143,21 +151,9 @@ static struct irq_chip int0002_byt_irqchip = {
.irq_set_wake = int0002_irq_set_wake,
};
-static struct irq_chip int0002_cht_irqchip = {
- .name = DRV_NAME,
- .irq_ack = int0002_irq_ack,
- .irq_mask = int0002_irq_mask,
- .irq_unmask = int0002_irq_unmask,
- /*
- * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
- * and we don't want to mess with the ACPI SCI irq settings.
- */
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
static const struct x86_cpu_id int0002_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &int0002_byt_irqchip),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &int0002_cht_irqchip),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};
@@ -172,8 +168,9 @@ static int int0002_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct x86_cpu_id *cpu_id;
- struct gpio_chip *chip;
+ struct int0002_data *int0002;
struct gpio_irq_chip *girq;
+ struct gpio_chip *chip;
int irq, ret;
/* Menlow has a different INT0002 device? <sigh> */
@@ -185,10 +182,13 @@ static int int0002_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
+ int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
+ if (!int0002)
return -ENOMEM;
+ int0002->parent_irq = irq;
+
+ chip = &int0002->chip;
chip->label = DRV_NAME;
chip->parent = dev;
chip->owner = THIS_MODULE;
@@ -214,7 +214,7 @@ static int int0002_probe(struct platform_device *pdev)
}
girq = &chip->irq;
- girq->chip = (struct irq_chip *)cpu_id->driver_data;
+ girq->chip = &int0002_irqchip;
/* This let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -230,6 +230,7 @@ static int int0002_probe(struct platform_device *pdev)
acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
device_init_wakeup(dev, true);
+ dev_set_drvdata(dev, int0002);
return 0;
}
@@ -240,6 +241,36 @@ static int int0002_remove(struct platform_device *pdev)
return 0;
}
+static int int0002_suspend(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ /*
+ * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ, don't
+ * muck with it when firmware based suspend is used, otherwise we may
+ * cause spurious wakeups from firmware managed suspend.
+ */
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ enable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static int int0002_resume(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ disable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops int0002_pm_ops = {
+ .suspend = int0002_suspend,
+ .resume = int0002_resume,
+};
+
static const struct acpi_device_id int0002_acpi_ids[] = {
{ "INT0002", 0 },
{ },
@@ -250,6 +281,7 @@ static struct platform_driver int0002_driver = {
.driver = {
.name = DRV_NAME,
.acpi_match_table = int0002_acpi_ids,
+ .pm = &int0002_pm_ops,
},
.probe = int0002_probe,
.remove = int0002_remove,
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index 05cced59e251..f58b8543f6ac 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -312,6 +312,7 @@ static const struct acpi_device_id punit_ipc_acpi_ids[] = {
{ "INT34D4", 0 },
{ }
};
+MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids);
static struct platform_driver intel_punit_ipc_driver = {
.probe = intel_punit_ipc_probe,
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 90fe4f8f3c2c..bde740d6120e 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -115,6 +115,32 @@ static const struct ts_dmi_data chuwi_hi10_plus_data = {
.properties = chuwi_hi10_plus_props,
};
+static const struct property_entry chuwi_hi10_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1912),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1272),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi10_pro_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hi10-pro.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 42504,
+ .sha256 = { 0xdb, 0x92, 0x68, 0xa8, 0xdb, 0x81, 0x31, 0x00,
+ 0x1f, 0x58, 0x89, 0xdb, 0x19, 0x1b, 0x15, 0x8c,
+ 0x05, 0x14, 0xf4, 0x95, 0xba, 0x15, 0x45, 0x98,
+ 0x42, 0xa3, 0xbb, 0x65, 0xe3, 0x30, 0xa5, 0x93 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi10_pro_props,
+};
+
static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -916,6 +942,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Hi10 Prus (CWI597) */
+ .driver_data = (void *)&chuwi_hi10_pro_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
@@ -1097,6 +1132,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "WinPad 7 W10 - WPW700"),
+ },
+ },
+ {
/* Mediacom Flexbook Edge 11 (same hw as TS Primebook C11) */
.driver_data = (void *)&trekstor_primebook_c11_data,
.matches = {
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 50ec53d67a4c..db4c265287ae 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -2127,6 +2127,14 @@ static int riocm_add_mport(struct device *dev,
return -ENODEV;
}
+ cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+ if (!cm->rx_wq) {
+ rio_release_inb_mbox(mport, cmbox);
+ rio_release_outb_mbox(mport, cmbox);
+ kfree(cm);
+ return -ENOMEM;
+ }
+
/*
* Allocate and register inbound messaging buffers to be ready
* to receive channel and system management requests
@@ -2137,15 +2145,6 @@ static int riocm_add_mport(struct device *dev,
cm->rx_slots = RIOCM_RX_RING_SIZE;
mutex_init(&cm->rx_lock);
riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
- cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
- if (!cm->rx_wq) {
- riocm_error("failed to allocate IBMBOX_%d on %s",
- cmbox, mport->name);
- rio_release_outb_mbox(mport, cmbox);
- kfree(cm);
- return -ENOMEM;
- }
-
INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
cm->tx_slot = 0;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 3ee46a843cb5..adddcd589941 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2926,11 +2926,11 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
ccb->opcode = BLOGIC_INITIATOR_CCB_SG;
ccb->datalen = count * sizeof(struct blogic_sg_seg);
if (blogic_multimaster_type(adapter))
- ccb->data = (void *)((unsigned int) ccb->dma_handle +
+ ccb->data = (unsigned int) ccb->dma_handle +
((unsigned long) &ccb->sglist -
- (unsigned long) ccb));
+ (unsigned long) ccb);
else
- ccb->data = ccb->sglist;
+ ccb->data = virt_to_32bit_virt(ccb->sglist);
scsi_for_each_sg(command, sg, count, i) {
ccb->sglist[i].segbytes = sg_dma_len(sg);
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index a8e4a19788a7..7d1ec10f2430 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -806,7 +806,7 @@ struct blogic_ccb {
unsigned char cdblen; /* Byte 2 */
unsigned char sense_datalen; /* Byte 3 */
u32 datalen; /* Bytes 4-7 */
- void *data; /* Bytes 8-11 */
+ u32 data; /* Bytes 8-11 */
unsigned char:8; /* Byte 12 */
unsigned char:8; /* Byte 13 */
enum blogic_adapter_status adapter_status; /* Byte 14 */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index ecd06d2d7e81..71aa6af08340 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3765,11 +3765,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PHY_START_STATUS:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n",
status);
- if (status == 0) {
+ if (status == 0)
phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL)
- complete(phy->enable_completion);
+
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL) {
+ complete(phy->enable_completion);
+ phy->enable_completion = NULL;
}
break;
case HW_EVENT_SAS_PHY_UP:
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 390c33df0357..af09bd282cb9 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1151,8 +1151,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_shost;
}
list_add_tail(&pm8001_ha->list, &hba_list);
- scsi_scan_host(pm8001_ha->shost);
pm8001_ha->flags = PM8001F_RUN_TIME;
+ scsi_scan_host(pm8001_ha->shost);
return 0;
err_out_shost:
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index d28af413b93a..335cf37e6cb9 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -264,12 +264,17 @@ void pm8001_scan_start(struct Scsi_Host *shost)
int i;
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ DECLARE_COMPLETION_ONSTACK(completion);
pm8001_ha = sha->lldd_ha;
/* SAS_RE_INITIALIZATION not available in SPCv/ve */
if (pm8001_ha->chip_id == chip_8001)
PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
- for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
+ for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
+ pm8001_ha->phy[i].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+ wait_for_completion(&completion);
+ msleep(300);
+ }
}
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
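
pm8001_scan_start() above serializes the PHY start requests with a single on-stack completion. A sketch of the wait-per-iteration idiom with generic names — phys[] and start_phy_req() are hypothetical, and the reinit_completion() call is defensive good practice rather than something the patch itself adds:

	static void start_all_phys(int nr_phys)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		int i;

		for (i = 0; i < nr_phys; i++) {
			reinit_completion(&done);
			phys[i].enable_completion = &done;
			start_phy_req(i);	/* IRQ path calls complete(&done) */
			wait_for_completion(&done);
		}
	}

Clearing enable_completion in the response handlers, as the pm8001_hwi.c and pm80xx_hwi.c hunks do, keeps a later unsolicited event from completing a stack variable that has already gone out of scope.
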
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 4e980830f9f5..700530e969ac 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3487,13 +3487,13 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, INIT,
"phy start resp status:0x%x, phyid:0x%x\n",
status, phy_id);
- if (status == 0) {
+ if (status == 0)
phy->phy_state = PHY_LINK_DOWN;
- if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL) {
- complete(phy->enable_completion);
- phy->enable_completion = NULL;
- }
+
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL) {
+ complete(phy->enable_completion);
+ phy->enable_completion = NULL;
}
return 0;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 69f7784233f9..756231151882 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -536,7 +536,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
- fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
+ if (lport->host && lport->host->shost_data)
+ fc_host_supported_speeds(lport->host) =
+ lport->link_supported_speeds;
}
static void qedf_bw_update(void *dev)
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 0677295957bc..615e44af1ca6 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1063,7 +1063,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
return ret;
}
- if (qla82xx_flash_set_write_enable(ha))
+ ret = qla82xx_flash_set_write_enable(ha);
+ if (ret < 0)
goto done_write;
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 0aa58131e791..d0626773eb38 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -467,21 +467,24 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
- host->rst = devm_reset_control_get(dev, "rst");
+ host->rst = devm_reset_control_get(dev, "rst");
if (IS_ERR(host->rst)) {
dev_err(dev, "%s: failed to get reset control\n", __func__);
- return PTR_ERR(host->rst);
+ err = PTR_ERR(host->rst);
+ goto error;
}
ufs_hisi_set_pm_lvl(hba);
err = ufs_hisi_get_resource(host);
- if (err) {
- ufshcd_set_variant(hba, NULL);
- return err;
- }
+ if (err)
+ goto error;
return 0;
+
+error:
+ ufshcd_set_variant(hba, NULL);
+ return err;
}
static int ufs_hi3660_init(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index a981f261b304..aee3cfc7142a 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -922,6 +922,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
+ struct arm_smccc_res res;
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba);
@@ -941,6 +942,9 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto fail;
}
+ if (ufshcd_is_link_off(hba))
+ ufs_mtk_device_reset_ctrl(0, res);
+
return 0;
fail:
/*
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3eb54937f1d8..72fd41bfbd54 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2842,7 +2842,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
* @cmd_type: specifies the type (NOP, Query...)
- * @timeout: time in seconds
+ * @timeout: timeout in milliseconds
*
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
@@ -2872,6 +2872,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
}
tag = req->tag;
WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+ /* Set the timeout such that the SCSI error handler is not activated. */
+ req->timeout = msecs_to_jiffies(2 * timeout);
+ blk_mq_start_request(req);
init_completion(&wait);
lrbp = &hba->lrb[tag];
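
The two added lines give the device-management request an explicit block-layer budget before it is started, so the SCSI error handler never races the driver's own wait. A sketch of the shape, assuming a pre-allocated struct request *req, a millisecond timeout, and a hypothetical issue_cmd() helper:

	static int exec_dev_cmd(struct request *req, unsigned int timeout_ms)
	{
		DECLARE_COMPLETION_ONSTACK(done);

		/* Twice the driver's budget: the wait below always expires
		 * first, so the block timeout handler never wins the race. */
		req->timeout = msecs_to_jiffies(2 * timeout_ms);
		blk_mq_start_request(req);

		issue_cmd(req, &done);		/* hypothetical: completes 'done' */

		if (!wait_for_completion_timeout(&done,
						 msecs_to_jiffies(timeout_ms)))
			return -ETIMEDOUT;	/* driver-owned timeout path */
		return 0;
	}
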
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
index 337c8d82f74e..6d0f7062bb87 100644
--- a/drivers/tee/amdtee/amdtee_private.h
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -21,6 +21,7 @@
#define TEEC_SUCCESS 0x00000000
#define TEEC_ERROR_GENERIC 0xFFFF0000
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ORIGIN_COMMS 0x00000002
@@ -93,6 +94,18 @@ struct amdtee_shm_data {
u32 buf_id;
};
+/**
+ * struct amdtee_ta_data - Keeps track of all TAs loaded in AMD Secure
+ * Processor
+ * @ta_handle: Handle to TA loaded in TEE
+ * @refcount: Reference count for the loaded TA
+ */
+struct amdtee_ta_data {
+ struct list_head list_node;
+ u32 ta_handle;
+ u32 refcount;
+};
+
#define LOWER_TWO_BYTE_MASK 0x0000FFFF
/**
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
index 096dd4d92d39..07f36ac834c8 100644
--- a/drivers/tee/amdtee/call.c
+++ b/drivers/tee/amdtee/call.c
@@ -121,15 +121,69 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
return ret;
}
+static DEFINE_MUTEX(ta_refcount_mutex);
+static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
+
+static u32 get_ta_refcount(u32 ta_handle)
+{
+ struct amdtee_ta_data *ta_data;
+ u32 count = 0;
+
+ /* Caller must hold a mutex */
+ list_for_each_entry(ta_data, &ta_list, list_node)
+ if (ta_data->ta_handle == ta_handle)
+ return ++ta_data->refcount;
+
+ ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
+ if (ta_data) {
+ ta_data->ta_handle = ta_handle;
+ ta_data->refcount = 1;
+ count = ta_data->refcount;
+ list_add(&ta_data->list_node, &ta_list);
+ }
+
+ return count;
+}
+
+static u32 put_ta_refcount(u32 ta_handle)
+{
+ struct amdtee_ta_data *ta_data;
+ u32 count = 0;
+
+ /* Caller must hold a mutex */
+ list_for_each_entry(ta_data, &ta_list, list_node)
+ if (ta_data->ta_handle == ta_handle) {
+ count = --ta_data->refcount;
+ if (count == 0) {
+ list_del(&ta_data->list_node);
+ kfree(ta_data);
+ break;
+ }
+ }
+
+ return count;
+}
+
int handle_unload_ta(u32 ta_handle)
{
struct tee_cmd_unload_ta cmd = {0};
- u32 status;
+ u32 status, count;
int ret;
if (!ta_handle)
return -EINVAL;
+ mutex_lock(&ta_refcount_mutex);
+
+ count = put_ta_refcount(ta_handle);
+
+ if (count) {
+ pr_debug("unload ta: not unloading %u count %u\n",
+ ta_handle, count);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
cmd.ta_handle = ta_handle;
ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
@@ -137,8 +191,12 @@ int handle_unload_ta(u32 ta_handle)
if (!ret && status != 0) {
pr_err("unload ta: status = 0x%x\n", status);
ret = -EBUSY;
+ } else {
+ pr_debug("unloaded ta handle %u\n", ta_handle);
}
+unlock:
+ mutex_unlock(&ta_refcount_mutex);
return ret;
}
@@ -340,7 +398,8 @@ int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
{
- struct tee_cmd_load_ta cmd = {0};
+ struct tee_cmd_unload_ta unload_cmd = {};
+ struct tee_cmd_load_ta load_cmd = {};
phys_addr_t blob;
int ret;
@@ -353,21 +412,36 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
return -EINVAL;
}
- cmd.hi_addr = upper_32_bits(blob);
- cmd.low_addr = lower_32_bits(blob);
- cmd.size = size;
+ load_cmd.hi_addr = upper_32_bits(blob);
+ load_cmd.low_addr = lower_32_bits(blob);
+ load_cmd.size = size;
- ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
- sizeof(cmd), &arg->ret);
+ mutex_lock(&ta_refcount_mutex);
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
+ sizeof(load_cmd), &arg->ret);
if (ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
arg->ret = TEEC_ERROR_COMMUNICATION;
- } else {
- set_session_id(cmd.ta_handle, 0, &arg->session);
+ } else if (arg->ret == TEEC_SUCCESS) {
+ ret = get_ta_refcount(load_cmd.ta_handle);
+ if (!ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+
+ /* Unload the TA on error */
+ unload_cmd.ta_handle = load_cmd.ta_handle;
+ psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+ (void *)&unload_cmd,
+ sizeof(unload_cmd), &ret);
+ } else {
+ set_session_id(load_cmd.ta_handle, 0, &arg->session);
+ }
}
+ mutex_unlock(&ta_refcount_mutex);
pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
- cmd.ta_handle, arg->ret_origin, arg->ret);
+ load_cmd.ta_handle, arg->ret_origin, arg->ret);
return 0;
}
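
Both handle_load_ta() and handle_unload_ta() above take ta_refcount_mutex around the firmware command and the count update, so the count can never disagree with what the Secure Processor has loaded. The load-side shape, reduced to a sketch with hypothetical fw_* helpers:

	static int load_ta_tracked(u32 *handle)
	{
		int ret;

		mutex_lock(&ta_refcount_mutex);

		ret = fw_load_ta(handle);		/* hypothetical helper */
		if (!ret && !get_ta_refcount(*handle)) {
			/* Loaded but untrackable: unload at once so a later
			 * handle_unload_ta() cannot underflow a missing count. */
			fw_unload_ta(*handle);		/* hypothetical helper */
			ret = -ENOMEM;
		}

		mutex_unlock(&ta_refcount_mutex);
		return ret;
	}
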
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 8a6a8f30bb42..da6b88e80dc0 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -59,10 +59,9 @@ static void release_session(struct amdtee_session *sess)
continue;
handle_close_session(sess->ta_handle, sess->session_info[i]);
+ handle_unload_ta(sess->ta_handle);
}
- /* Unload Trusted Application once all sessions are closed */
- handle_unload_ta(sess->ta_handle);
kfree(sess);
}
@@ -224,8 +223,6 @@ static void destroy_session(struct kref *ref)
struct amdtee_session *sess = container_of(ref, struct amdtee_session,
refcount);
- /* Unload the TA from TEE */
- handle_unload_ta(sess->ta_handle);
mutex_lock(&session_list_mutex);
list_del(&sess->list_node);
mutex_unlock(&session_list_mutex);
@@ -238,7 +235,7 @@ int amdtee_open_session(struct tee_context *ctx,
{
struct amdtee_context_data *ctxdata = ctx->data;
struct amdtee_session *sess = NULL;
- u32 session_info;
+ u32 session_info, ta_handle;
size_t ta_size;
int rc, i;
void *ta;
@@ -259,11 +256,14 @@ int amdtee_open_session(struct tee_context *ctx,
if (arg->ret != TEEC_SUCCESS)
goto out;
+ ta_handle = get_ta_handle(arg->session);
+
mutex_lock(&session_list_mutex);
sess = alloc_session(ctxdata, arg->session);
mutex_unlock(&session_list_mutex);
if (!sess) {
+ handle_unload_ta(ta_handle);
rc = -ENOMEM;
goto out;
}
@@ -277,6 +277,7 @@ int amdtee_open_session(struct tee_context *ctx,
if (i >= TEE_NUM_SESSIONS) {
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
rc = -ENOMEM;
goto out;
@@ -289,12 +290,13 @@ int amdtee_open_session(struct tee_context *ctx,
spin_lock(&sess->lock);
clear_bit(i, sess->sess_mask);
spin_unlock(&sess->lock);
+ handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
goto out;
}
sess->session_info[i] = session_info;
- set_session_id(sess->ta_handle, i, &arg->session);
+ set_session_id(ta_handle, i, &arg->session);
out:
free_pages((u64)ta, get_order(ta_size));
return rc;
@@ -329,6 +331,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
/* Close the session */
handle_close_session(ta_handle, session_info);
+ handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 8534d6e45a1d..3cbc757d7be7 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1519,6 +1519,8 @@ static int __init max310x_uart_init(void)
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
+ if (ret)
+ uart_unregister_driver(&max310x_uart);
#endif
return ret;
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index e0c00a1b0763..51b0ecabf2ec 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -818,9 +818,6 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!match)
- return -ENODEV;
-
/* Assume that all UART ports have a DT alias or none has */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 0330ba99730e..652fe2547587 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -291,13 +291,15 @@ hv_uio_probe(struct hv_device *dev,
pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
if (pdata->recv_buf == NULL) {
ret = -ENOMEM;
- goto fail_close;
+ goto fail_free_ring;
}
ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
RECV_BUFFER_SIZE, &pdata->recv_gpadl);
- if (ret)
+ if (ret) {
+ vfree(pdata->recv_buf);
goto fail_close;
+ }
/* put Global Physical Address Label in name */
snprintf(pdata->recv_name, sizeof(pdata->recv_name),
@@ -316,8 +318,10 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_establish_gpadl(channel, pdata->send_buf,
SEND_BUFFER_SIZE, &pdata->send_gpadl);
- if (ret)
+ if (ret) {
+ vfree(pdata->send_buf);
goto fail_close;
+ }
snprintf(pdata->send_name, sizeof(pdata->send_name),
"send:%u", pdata->send_gpadl);
@@ -347,6 +351,8 @@ hv_uio_probe(struct hv_device *dev,
fail_close:
hv_uio_cleanup(dev, pdata);
+fail_free_ring:
+ vmbus_free_ring(dev->channel);
return ret;
}
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index c7d681fef198..3bb0b0075467 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -82,7 +82,7 @@ static int probe(struct pci_dev *pdev,
}
if (pdev->irq && !pci_intx_mask_supported(pdev))
- return -ENOMEM;
+ return -ENODEV;
gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
if (!gdev)
diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
index b06fa6e42e6e..4ad0331a8c57 100644
--- a/drivers/video/fbdev/aty/mach64_cursor.c
+++ b/drivers/video/fbdev/aty/mach64_cursor.c
@@ -46,7 +46,7 @@
* The Screen position of the top left corner of the displayed
* cursor is specified by CURS_HORZ_VERT_POSN. Care must be taken
* when the cursor hot spot is not the top left corner and the
- * physical cursor position becomes negative. It will be be displayed
+ * physical cursor position becomes negative. It will be displayed
* if either the horizontal or vertical cursor position is negative
*
* If x becomes negative the cursor manager must adjust the CURS_HORZ_OFFSET
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 8bbac7182ad3..cc8e62ae93f6 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -286,7 +286,7 @@ static int hga_card_detect(void)
hga_vram = ioremap(0xb0000, hga_vram_len);
if (!hga_vram)
- goto error;
+ return -ENOMEM;
if (request_region(0x3b0, 12, "hgafb"))
release_io_ports = 1;
@@ -346,13 +346,18 @@ static int hga_card_detect(void)
hga_type_name = "Hercules";
break;
}
- return 1;
+ return 0;
error:
if (release_io_ports)
release_region(0x3b0, 12);
if (release_io_port)
release_region(0x3bf, 1);
- return 0;
+
+ iounmap(hga_vram);
+
+ pr_err("hgafb: HGA card not detected.\n");
+
+ return -EINVAL;
}
/**
@@ -550,13 +555,11 @@ static const struct fb_ops hgafb_ops = {
static int hgafb_probe(struct platform_device *pdev)
{
struct fb_info *info;
+ int ret;
- if (! hga_card_detect()) {
- printk(KERN_INFO "hgafb: HGA card not detected.\n");
- if (hga_vram)
- iounmap(hga_vram);
- return -EINVAL;
- }
+ ret = hga_card_detect();
+ if (ret)
+ return ret;
printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
hga_type_name, hga_vram_len/1024);
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 3ac053b88495..16f272a50811 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1469,6 +1469,7 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct imstt_par *par;
struct fb_info *info;
struct device_node *dp;
+ int ret = -ENOMEM;
dp = pci_device_to_OF_node(pdev);
if(dp)
@@ -1504,28 +1505,37 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
default:
printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
"contact maintainer.\n", pdev->device);
- release_mem_region(addr, size);
- framebuffer_release(info);
- return -ENODEV;
+ ret = -ENODEV;
+ goto error;
}
info->fix.smem_start = addr;
info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
0x400000 : 0x800000);
- if (!info->screen_base) {
- release_mem_region(addr, size);
- framebuffer_release(info);
- return -ENOMEM;
- }
+ if (!info->screen_base)
+ goto error;
info->fix.mmio_start = addr + 0x800000;
par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ if (!par->dc_regs)
+ goto error;
par->cmap_regs_phys = addr + 0x840000;
par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+ if (!par->cmap_regs)
+ goto error;
info->pseudo_palette = par->palette;
init_imstt(info);
pci_set_drvdata(pdev, info);
return 0;
+
+error:
+ if (par->dc_regs)
+ iounmap(par->dc_regs);
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ release_mem_region(addr, size);
+ framebuffer_release(info);
+ return ret;
}
static void imsttfb_remove(struct pci_dev *pdev)
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 4325bf7f388c..5c82611e93d9 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2486,8 +2486,6 @@ static int __init matroxfb_init(void)
return err;
}
-module_init(matroxfb_init);
-
#else
/* *************************** init module code **************************** */
@@ -2572,7 +2570,7 @@ module_param_named(cmode, default_cmode, int, 0);
MODULE_PARM_DESC(cmode, "Specify the video depth that should be used (8bit default)");
#endif
-int __init init_module(void){
+static int __init matroxfb_init(void){
DBG(__func__)
@@ -2603,6 +2601,7 @@ int __init init_module(void){
}
#endif /* MODULE */
+module_init(matroxfb_init);
module_exit(matrox_done);
EXPORT_SYMBOL(matroxfb_register_driver);
EXPORT_SYMBOL(matroxfb_unregister_driver);
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 52755b591c14..63721337a377 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -542,8 +542,8 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
/*
* show some display controller and cursor registers
*/
-static ssize_t mb862xxfb_show_dispregs(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t dispregs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct mb862xxfb_par *par = fbi->par;
@@ -577,7 +577,7 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
return ptr - buf;
}
-static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
+static DEVICE_ATTR_RO(dispregs);
static irqreturn_t mb862xx_intr(int irq, void *dev_id)
{
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 4162d0e7e00d..cc7450f2b2a9 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -70,7 +70,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev, int devid,
publish_pci_dev_cb publish_cb)
{
- int err = 0, slot, func = -1;
+ int err = 0, slot, func = PCI_FUNC(dev->devfn);
struct pci_dev_entry *t, *dev_entry;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
@@ -95,22 +95,25 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
/*
* Keep multi-function devices together on the virtual PCI bus, except
- * virtual functions.
+ * that we want to keep virtual functions at func 0 on their own. They
+ * aren't multi-function devices and hence their presence at func 0
+ * may cause guests to not scan the other functions.
*/
- if (!dev->is_virtfn) {
+ if (!dev->is_virtfn || func) {
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (list_empty(&vpci_dev->dev_list[slot]))
continue;
t = list_entry(list_first(&vpci_dev->dev_list[slot]),
struct pci_dev_entry, list);
+ if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
+ continue;
if (match_slot(dev, t->dev)) {
dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
- slot, PCI_FUNC(dev->devfn));
+ slot, func);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
- func = PCI_FUNC(dev->devfn);
goto unlock;
}
}
@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
slot);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
- func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
goto unlock;
}
}
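For reference on the slot/function arithmetic the hunk above hoists: devfn packs the slot into bits 7..3 and the function into bits 2..0, and PCI_SLOT()/PCI_FUNC() decode it. Illustrative only:

#include <linux/pci.h>

static void decode_devfn(const struct pci_dev *dev)
{
	unsigned int slot = PCI_SLOT(dev->devfn);	/* devfn >> 3 */
	unsigned int func = PCI_FUNC(dev->devfn);	/* devfn & 0x07 */

	pr_info("slot %u func %u\n", slot, func);
}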
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 5188f02e75fb..c09c7ebd6968 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -359,7 +359,8 @@ out:
return err;
}
-static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
+static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev,
+ enum xenbus_state state)
{
int err = 0;
int num_devs;
@@ -373,9 +374,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
mutex_lock(&pdev->dev_lock);
- /* Make sure we only reconfigure once */
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
- XenbusStateReconfiguring)
+ if (xenbus_read_driver_state(pdev->xdev->nodename) != state)
goto out;
err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
@@ -500,6 +499,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
}
}
+ if (state != XenbusStateReconfiguring)
+ /* Make sure we only reconfigure once. */
+ goto out;
+
err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
@@ -525,7 +528,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
break;
case XenbusStateReconfiguring:
- xen_pcibk_reconfigure(pdev);
+ xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring);
break;
case XenbusStateConnected:
@@ -664,6 +667,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch,
xen_pcibk_setup_backend(pdev);
break;
+ case XenbusStateInitialised:
+ /*
+ * We typically move to Initialised when the first device is
+ * added. Hence devices added later may need reconfiguring.
+ */
+ xen_pcibk_reconfigure(pdev, XenbusStateInitialised);
+ break;
+
default:
break;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b8abccd03e5d..6cc4d4cfe0c2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1244,6 +1244,9 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate)
lockdep_assert_held(&bdev->bd_mutex);
+ if (!(disk->flags & GENHD_FL_UP))
+ return -ENXIO;
+
rescan:
if (bdev->bd_part_count)
return -EBUSY;
@@ -1298,6 +1301,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode)
struct gendisk *disk = bdev->bd_disk;
int ret = 0;
+ if (!(disk->flags & GENHD_FL_UP))
+ return -ENXIO;
+
if (!bdev->bd_openers) {
if (!bdev_is_partition(bdev)) {
ret = 0;
@@ -1332,8 +1338,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode)
whole->bd_part_count++;
mutex_unlock(&whole->bd_mutex);
- if (!(disk->flags & GENHD_FL_UP) ||
- !bdev_nr_sectors(bdev)) {
+ if (!bdev_nr_sectors(bdev)) {
__blkdev_put(whole, mode, 1);
bdput(whole);
return -ENXIO;
@@ -1364,16 +1369,12 @@ struct block_device *blkdev_get_no_open(dev_t dev)
struct block_device *bdev;
struct gendisk *disk;
- down_read(&bdev_lookup_sem);
bdev = bdget(dev);
if (!bdev) {
- up_read(&bdev_lookup_sem);
blk_request_module(dev);
- down_read(&bdev_lookup_sem);
-
bdev = bdget(dev);
if (!bdev)
- goto unlock;
+ return NULL;
}
disk = bdev->bd_disk;
@@ -1383,14 +1384,11 @@ struct block_device *blkdev_get_no_open(dev_t dev)
goto put_disk;
if (!try_module_get(bdev->bd_disk->fops->owner))
goto put_disk;
- up_read(&bdev_lookup_sem);
return bdev;
put_disk:
put_disk(disk);
bdput:
bdput(bdev);
-unlock:
- up_read(&bdev_lookup_sem);
return NULL;
}
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 2bea01d23a5b..d17ac301032e 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -28,6 +28,7 @@
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
+#include "zoned.h"
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
@@ -349,6 +350,7 @@ static void end_compressed_bio_write(struct bio *bio)
*/
inode = cb->inode;
cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
+ btrfs_record_physical_zoned(inode, cb->start, bio);
btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
cb->start, cb->start + cb->len - 1,
bio->bi_status == BLK_STS_OK);
@@ -401,6 +403,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
u64 first_byte = disk_start;
blk_status_t ret;
int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
+ const bool use_append = btrfs_use_zone_append(inode, disk_start);
+ const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
WARN_ON(!PAGE_ALIGNED(start));
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
@@ -418,10 +422,31 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
cb->nr_pages = nr_pages;
bio = btrfs_bio_alloc(first_byte);
- bio->bi_opf = REQ_OP_WRITE | write_flags;
+ bio->bi_opf = bio_op | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
+ if (use_append) {
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct block_device *bdev;
+
+ em = btrfs_get_chunk_map(fs_info, disk_start, PAGE_SIZE);
+ if (IS_ERR(em)) {
+ kfree(cb);
+ bio_put(bio);
+ return BLK_STS_NOTSUPP;
+ }
+
+ map = em->map_lookup;
+ /* We only support a single profile for now */
+ ASSERT(map->num_stripes == 1);
+ bdev = map->stripes[0].dev->bdev;
+
+ bio_set_dev(bio, bdev);
+ free_extent_map(em);
+ }
+
if (blkcg_css) {
bio->bi_opf |= REQ_CGROUP_PUNT;
kthread_associate_blkcg(blkcg_css);
@@ -432,6 +457,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
bytes_left = compressed_len;
for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
int submit = 0;
+ int len;
page = compressed_pages[pg_index];
page->mapping = inode->vfs_inode.i_mapping;
@@ -439,9 +465,13 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
0);
+ if (pg_index == 0 && use_append)
+ len = bio_add_zone_append_page(bio, page, PAGE_SIZE, 0);
+ else
+ len = bio_add_page(bio, page, PAGE_SIZE, 0);
+
page->mapping = NULL;
- if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
- PAGE_SIZE) {
+ if (submit || len < PAGE_SIZE) {
/*
* inc the count before we submit the bio so
* we know the end IO handler won't happen before
@@ -465,11 +495,15 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
}
bio = btrfs_bio_alloc(first_byte);
- bio->bi_opf = REQ_OP_WRITE | write_flags;
+ bio->bi_opf = bio_op | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
if (blkcg_css)
bio->bi_opf |= REQ_CGROUP_PUNT;
+ /*
+ * Use bio_add_page() to ensure the bio has at least one
+ * page.
+ */
bio_add_page(bio, page, PAGE_SIZE, 0);
}
if (bytes_left < PAGE_SIZE) {
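A hedged sketch of the submission pattern the hunks above introduce: pick the opcode once, point the bio at the single underlying device, and add pages with the zone-append-aware helper so the bio never grows past a zone's limits. fake_submit() and the single-page shape are illustrative, not the btrfs code path:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int fake_submit(struct block_device *bdev, struct page *page,
		       sector_t sector, bool use_append)
{
	struct bio *bio = bio_alloc(GFP_NOFS, 1);
	int len;

	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
	bio_set_dev(bio, bdev);		/* zone append needs the real bdev */

	if (use_append)
		len = bio_add_zone_append_page(bio, page, PAGE_SIZE, 0);
	else
		len = bio_add_page(bio, page, PAGE_SIZE, 0);
	if (len < PAGE_SIZE) {		/* page didn't fit; caller re-bios */
		bio_put(bio);
		return -EAGAIN;
	}

	submit_bio(bio);
	return 0;
}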
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 074a78a202b8..dee2dafbc872 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3753,7 +3753,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
/* Note that em_end from extent_map_end() is exclusive */
iosize = min(em_end, end + 1) - cur;
- if (btrfs_use_zone_append(inode, em))
+ if (btrfs_use_zone_append(inode, em->block_start))
opf = REQ_OP_ZONE_APPEND;
free_extent_map(em);
@@ -5196,7 +5196,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
int ret = 0;
- u64 off = start;
+ u64 off;
u64 max = start + len;
u32 flags = 0;
u32 found_type;
@@ -5231,6 +5231,11 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
goto out_free_ulist;
}
+ /*
+ * We can't initialize 'off' to 'start', as that could miss extents
+ * due to extent item merging.
+ */
+ off = 0;
start = round_down(start, btrfs_inode_sectorsize(inode));
len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index eb6fddf40841..33f14573f2ec 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3241,6 +3241,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
inode = list_first_entry(&fs_info->delayed_iputs,
struct btrfs_inode, delayed_iput);
run_delayed_iput_locked(fs_info, inode);
+ cond_resched_lock(&fs_info->delayed_iput_lock);
}
spin_unlock(&fs_info->delayed_iput_lock);
}
@@ -7785,7 +7786,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
iomap->bdev = fs_info->fs_devices->latest_bdev;
iomap->length = len;
- if (write && btrfs_use_zone_append(BTRFS_I(inode), em))
+ if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start))
iomap->flags |= IOMAP_F_ZONE_APPEND;
free_extent_map(em);
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 3928ecc40d7b..d434dc78dadf 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -282,6 +282,11 @@ copy_inline_extent:
out:
if (!ret && !trans) {
/*
+ * Release path before starting a new transaction so we don't
+ * hold locks that would confuse lockdep.
+ */
+ btrfs_release_path(path);
+ /*
* No transaction here means we copied the inline extent into a
* page of the destination inode.
*
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 95a600034d61..326be57f2828 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1858,8 +1858,6 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
} else if (ret == -EEXIST) {
ret = 0;
- } else {
- BUG(); /* Logic Error */
}
iput(inode);
@@ -6463,6 +6461,24 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
(!old_dir || old_dir->logged_trans < trans->transid))
return;
+ /*
+ * If we are doing a rename (old_dir is not NULL) from a directory that
+ * was previously logged, make sure the next log attempt on the directory
+ * is not skipped and logs the inode again. This is because the log may
+ * not currently be authoritative for a range including the old
+ * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
+ * sure after a log replay we do not end up with both the new and old
+ * dentries around (in case the inode is a directory we would have a
+ * directory with two hard links and 2 inode references for different
+ * parents). The next log attempt of old_dir will happen at
+ * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
+ * below, because we have previously set inode->last_unlink_trans to the
+ * current transaction ID, either here or at btrfs_record_unlink_dir() in
+ * case inode is a directory.
+ */
+ if (old_dir)
+ old_dir->logged_trans = 0;
+
btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
ctx.logging_new_name = true;
/*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9a1ead0c4a31..47d27059d064 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1459,7 +1459,7 @@ static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
/* Given hole range was invalid (outside of device) */
if (ret == -ERANGE) {
*hole_start += *hole_size;
- *hole_size = false;
+ *hole_size = 0;
return true;
}
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 304ce64c70a4..1bb8ee97aae0 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1278,7 +1278,7 @@ void btrfs_free_redirty_list(struct btrfs_transaction *trans)
spin_unlock(&trans->releasing_ebs_lock);
}
-bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
+bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_block_group *cache;
@@ -1293,7 +1293,7 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
if (!is_data_inode(&inode->vfs_inode))
return false;
- cache = btrfs_lookup_block_group(fs_info, em->block_start);
+ cache = btrfs_lookup_block_group(fs_info, start);
ASSERT(cache);
if (!cache)
return false;
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 5e41a74a9cb2..e55d32595c2c 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -53,7 +53,7 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
void btrfs_redirty_list_add(struct btrfs_transaction *trans,
struct extent_buffer *eb);
void btrfs_free_redirty_list(struct btrfs_transaction *trans);
-bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
+bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start);
void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
struct bio *bio);
void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
@@ -152,8 +152,7 @@ static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
struct extent_buffer *eb) { }
static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }
-static inline bool btrfs_use_zone_append(struct btrfs_inode *inode,
- struct extent_map *em)
+static inline bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
{
return false;
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d7ea9c5fe0f8..2ffcb29d5c8f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -133,7 +133,7 @@ struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
-struct workqueue_struct *deferredclose_wq;
+struct workqueue_struct *deferredclose_wq;
__u32 cifs_lock_secret;
/*
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d88b4b523dcc..8488d7024462 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1257,8 +1257,7 @@ struct cifsFileInfo {
struct work_struct oplock_break; /* work for oplock breaks */
struct work_struct put; /* work for the final part of _put */
struct delayed_work deferred;
- bool oplock_break_received; /* Flag to indicate oplock break */
- bool deferred_scheduled;
+ bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
};
struct cifs_io_parms {
@@ -1418,6 +1417,7 @@ struct cifsInodeInfo {
struct inode vfs_inode;
struct list_head deferred_closes; /* list of deferred closes */
spinlock_t deferred_lock; /* protection on deferred list */
+ bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
};
static inline struct cifsInodeInfo *
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 6caad100c3f3..379a427f3c2f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -323,8 +323,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->dentry = dget(dentry);
cfile->f_flags = file->f_flags;
cfile->invalidHandle = false;
- cfile->oplock_break_received = false;
- cfile->deferred_scheduled = false;
+ cfile->deferred_close_scheduled = false;
cfile->tlink = cifs_get_tlink(tlink);
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
INIT_WORK(&cfile->put, cifsFileInfo_put_work);
@@ -574,21 +573,18 @@ int cifs_open(struct inode *inode, struct file *file)
file->f_op = &cifs_file_direct_ops;
}
- spin_lock(&CIFS_I(inode)->deferred_lock);
/* Get the cached handle as SMB2 close is deferred */
rc = cifs_get_readable_path(tcon, full_path, &cfile);
if (rc == 0) {
if (file->f_flags == cfile->f_flags) {
file->private_data = cfile;
+ spin_lock(&CIFS_I(inode)->deferred_lock);
cifs_del_deferred_close(cfile);
spin_unlock(&CIFS_I(inode)->deferred_lock);
goto out;
} else {
- spin_unlock(&CIFS_I(inode)->deferred_lock);
_cifsFileInfo_put(cfile, true, false);
}
- } else {
- spin_unlock(&CIFS_I(inode)->deferred_lock);
}
if (server->oplocks)
@@ -878,12 +874,8 @@ void smb2_deferred_work_close(struct work_struct *work)
struct cifsFileInfo, deferred.work);
spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
- if (!cfile->deferred_scheduled) {
- spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
- return;
- }
cifs_del_deferred_close(cfile);
- cfile->deferred_scheduled = false;
+ cfile->deferred_close_scheduled = false;
spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
_cifsFileInfo_put(cfile, true, false);
}
@@ -900,19 +892,26 @@ int cifs_close(struct inode *inode, struct file *file)
file->private_data = NULL;
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
+ cinode->lease_granted &&
dclose) {
if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
inode->i_ctime = inode->i_mtime = current_time(inode);
spin_lock(&cinode->deferred_lock);
cifs_add_deferred_close(cfile, dclose);
- if (cfile->deferred_scheduled) {
- mod_delayed_work(deferredclose_wq,
- &cfile->deferred, cifs_sb->ctx->acregmax);
+ if (cfile->deferred_close_scheduled &&
+ delayed_work_pending(&cfile->deferred)) {
+ /*
+ * If there is no pending work, mod_delayed_work queues new work.
+ * So, increase the ref count to avoid a use-after-free.
+ */
+ if (!mod_delayed_work(deferredclose_wq,
+ &cfile->deferred, cifs_sb->ctx->acregmax))
+ cifsFileInfo_get(cfile);
} else {
/* Deferred close for files */
queue_delayed_work(deferredclose_wq,
&cfile->deferred, cifs_sb->ctx->acregmax);
- cfile->deferred_scheduled = true;
+ cfile->deferred_close_scheduled = true;
spin_unlock(&cinode->deferred_lock);
return 0;
}
@@ -2020,8 +2019,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
- if ((!open_file->invalidHandle) &&
- (!open_file->oplock_break_received)) {
+ if (!open_file->invalidHandle) {
/* found a good file */
/* lock it so it will not be closed on us */
cifsFileInfo_get(open_file);
@@ -4874,14 +4872,20 @@ oplock_break_ack:
}
/*
* When oplock break is received and there are no active
- * file handles but cached, then set the flag oplock_break_received.
+ * file handles but cached, then schedule deferred close immediately.
* So, new open will not use cached handle.
*/
spin_lock(&CIFS_I(inode)->deferred_lock);
is_deferred = cifs_is_deferred_close(cfile, &dclose);
- if (is_deferred && cfile->deferred_scheduled) {
- cfile->oplock_break_received = true;
- mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+ if (is_deferred &&
+ cfile->deferred_close_scheduled &&
+ delayed_work_pending(&cfile->deferred)) {
+ /*
+ * If there is no pending work, mod_delayed_work queues new work.
+ * So, increase the ref count to avoid a use-after-free.
+ */
+ if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
+ cifsFileInfo_get(cfile);
}
spin_unlock(&CIFS_I(inode)->deferred_lock);
_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
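The refcount rule the comments above encode, in isolation: mod_delayed_work() returns false when the work item was idle and a brand-new instance was queued, and that instance needs its own reference so the final put in the work handler cannot free the object early. A generic sketch (get_ref() is a placeholder):

#include <linux/workqueue.h>

static void reschedule_put_work(struct workqueue_struct *wq,
				struct delayed_work *dwork,
				void (*get_ref)(void))
{
	/* false: dwork was idle, so this call queued new work */
	if (!mod_delayed_work(wq, dwork, 0))
		get_ref();
}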
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 5d21cd905315..92d4ab029c91 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -1145,7 +1145,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
/* if iocharset not set then load_nls_default
* is used by caller
*/
- cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
+ cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
break;
case Opt_netbiosname:
memset(ctx->source_rfc1001_name, 0x20,
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 524dbdfb7184..7207a63819cb 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -672,6 +672,11 @@ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
+/*
+ * Must be called with deferred_lock held. As there is no reference
+ * count on cifs_deferred_close, pdclose must not be used outside
+ * the deferred_lock critical section.
+ */
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
@@ -688,6 +693,9 @@ cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **
return false;
}
+/*
+ * Must be called with deferred_lock held.
+ */
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
@@ -707,6 +715,9 @@ cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *
list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}
+/*
+ * Must be called with deferred_lock held.
+ */
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
@@ -738,15 +749,19 @@ void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
struct cifsFileInfo *cfile;
- struct cifsInodeInfo *cinode;
struct list_head *tmp;
spin_lock(&tcon->open_file_lock);
list_for_each(tmp, &tcon->openFileList) {
cfile = list_entry(tmp, struct cifsFileInfo, tlist);
- cinode = CIFS_I(d_inode(cfile->dentry));
- if (delayed_work_pending(&cfile->deferred))
- mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+ if (delayed_work_pending(&cfile->deferred)) {
+ /*
+ * If there is no pending work, mod_delayed_work queues new work.
+ * So, increase the ref count to avoid a use-after-free.
+ */
+ if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
+ cifsFileInfo_get(cfile);
+ }
}
spin_unlock(&tcon->open_file_lock);
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index dd0eb665b680..21ef51d338e0 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1861,6 +1861,8 @@ smb2_copychunk_range(const unsigned int xid,
cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
/* Request server copy to target from src identified by key */
+ kfree(retbuf);
+ retbuf = NULL;
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
true /* is_fsctl */, (char *)pcchunk,
@@ -3981,6 +3983,7 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int epoch, bool *purge_cache)
{
oplock &= 0xFF;
+ cinode->lease_granted = false;
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
@@ -4007,6 +4010,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int new_oplock = 0;
oplock &= 0xFF;
+ cinode->lease_granted = true;
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index a8bf43184773..9f24eb88297a 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3900,10 +3900,10 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
* Related requests use info from previous read request
* in chain.
*/
- shdr->SessionId = 0xFFFFFFFF;
+ shdr->SessionId = 0xFFFFFFFFFFFFFFFF;
shdr->TreeId = 0xFFFFFFFF;
- req->PersistentFileId = 0xFFFFFFFF;
- req->VolatileFileId = 0xFFFFFFFF;
+ req->PersistentFileId = 0xFFFFFFFFFFFFFFFF;
+ req->VolatileFileId = 0xFFFFFFFFFFFFFFFF;
}
}
if (remaining_bytes > io_parms->length)
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 345f8061e3b4..e3f5d7f3c8a0 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -296,10 +296,6 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
struct extent_crypt_result ecr;
int rc = 0;
- if (!crypt_stat || !crypt_stat->tfm
- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
- return -EINVAL;
-
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
crypt_stat->key_size);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9d9e0097c1d3..55efd3dd04f6 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -529,7 +529,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
* the subpool and global reserve usage count can need
* to be adjusted.
*/
- VM_BUG_ON(PagePrivate(page));
+ VM_BUG_ON(HPageRestoreReserve(page));
remove_huge_page(page);
freed++;
if (!truncate_op) {
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e481ac8a757a..5f82954004f6 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5019,10 +5019,10 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
* Can't handle multishot for double wait for now, turn it
* into one-shot mode.
*/
- if (!(req->poll.events & EPOLLONESHOT))
- req->poll.events |= EPOLLONESHOT;
+ if (!(poll_one->events & EPOLLONESHOT))
+ poll_one->events |= EPOLLONESHOT;
/* double add on the same waitqueue head, ignore */
- if (poll->head == head)
+ if (poll_one->head == head)
return;
poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
if (!poll) {
@@ -9035,15 +9035,15 @@ static void io_uring_del_task_file(unsigned long index)
static void io_uring_clean_tctx(struct io_uring_task *tctx)
{
+ struct io_wq *wq = tctx->io_wq;
struct io_tctx_node *node;
unsigned long index;
+ tctx->io_wq = NULL;
xa_for_each(&tctx->xa, index, node)
io_uring_del_task_file(index);
- if (tctx->io_wq) {
- io_wq_put_and_exit(tctx->io_wq);
- tctx->io_wq = NULL;
- }
+ if (wq)
+ io_wq_put_and_exit(wq);
}
static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
diff --git a/fs/namespace.c b/fs/namespace.c
index f63337828e1c..c3f1a78ba369 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3855,8 +3855,12 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
return -EINVAL;
+ /* Don't yet support filesystem mountable in user namespaces. */
+ if (m->mnt_sb->s_user_ns != &init_user_ns)
+ return -EINVAL;
+
/* We're not controlling the superblock. */
- if (!ns_capable(m->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* Mount has already been visible in the filesystem hierarchy. */
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 4f1373463766..22d904bde6ab 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -288,14 +288,12 @@ static inline void remove_dquot_hash(struct dquot *dquot)
static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
struct kqid qid)
{
- struct hlist_node *node;
struct dquot *dquot;
- hlist_for_each (node, dquot_hash+hashent) {
- dquot = hlist_entry(node, struct dquot, dq_hash);
+ hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
return dquot;
- }
+
return NULL;
}
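hlist_for_each_entry() folds the hlist_entry() container lookup into the loop header, which is what lets the hunk above drop the explicit hlist_node cursor. A self-contained toy version:

#include <linux/list.h>

struct toy {
	int id;
	struct hlist_node hash;
};

static struct toy *toy_find(struct hlist_head *head, int id)
{
	struct toy *t;

	hlist_for_each_entry(t, head, hash)
		if (t->id == id)
			return t;

	return NULL;
}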
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 040a1142915f..167b5889db4b 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -114,29 +114,24 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
break;
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
+ case SIL_PERF_EVENT:
/*
- * Fall through to the SIL_FAULT case. Both SIL_FAULT_BNDERR
- * and SIL_FAULT_PKUERR are only generated by faults that
- * deliver them synchronously to userspace. In case someone
- * injects one of these signals and signalfd catches it treat
- * it as SIL_FAULT.
+ * Fall through to the SIL_FAULT case. SIL_FAULT_BNDERR,
+ * SIL_FAULT_PKUERR, and SIL_PERF_EVENT are only
+ * generated by faults that deliver them synchronously to
+ * userspace. In case someone injects one of these signals
+ * and signalfd catches it treat it as SIL_FAULT.
*/
case SIL_FAULT:
new.ssi_addr = (long) kinfo->si_addr;
-#ifdef __ARCH_SI_TRAPNO
- new.ssi_trapno = kinfo->si_trapno;
-#endif
break;
- case SIL_FAULT_MCEERR:
+ case SIL_FAULT_TRAPNO:
new.ssi_addr = (long) kinfo->si_addr;
-#ifdef __ARCH_SI_TRAPNO
new.ssi_trapno = kinfo->si_trapno;
-#endif
- new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
break;
- case SIL_PERF_EVENT:
+ case SIL_FAULT_MCEERR:
new.ssi_addr = (long) kinfo->si_addr;
- new.ssi_perf = kinfo->si_perf;
+ new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
break;
case SIL_CHLD:
new.ssi_pid = kinfo->si_pid;
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index a83bdd0c47a8..bde2b4c64dbe 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -770,6 +770,8 @@ struct xfs_scrub_metadata {
/*
* ioctl commands that are used by Linux filesystems
*/
+#define XFS_IOC_GETXFLAGS FS_IOC_GETFLAGS
+#define XFS_IOC_SETXFLAGS FS_IOC_SETFLAGS
#define XFS_IOC_GETVERSION FS_IOC_GETVERSION
/*
@@ -780,6 +782,8 @@ struct xfs_scrub_metadata {
#define XFS_IOC_ALLOCSP _IOW ('X', 10, struct xfs_flock64)
#define XFS_IOC_FREESP _IOW ('X', 11, struct xfs_flock64)
#define XFS_IOC_DIOINFO _IOR ('X', 30, struct dioattr)
+#define XFS_IOC_FSGETXATTR FS_IOC_FSGETXATTR
+#define XFS_IOC_FSSETXATTR FS_IOC_FSSETXATTR
#define XFS_IOC_ALLOCSP64 _IOW ('X', 36, struct xfs_flock64)
#define XFS_IOC_FREESP64 _IOW ('X', 37, struct xfs_flock64)
#define XFS_IOC_GETBMAP _IOWR('X', 38, struct getbmap)
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index aa874607618a..be38c960da85 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -74,7 +74,9 @@ __xchk_process_error(
return true;
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(
+ sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
+ sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index a5e9d7d34023..0936f3a96fe6 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -71,18 +71,24 @@ xfs_zero_extent(
#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
- struct xfs_bmalloca *ap) /* bmap alloc argument struct */
+ struct xfs_bmalloca *ap)
{
- int error; /* error return value */
- xfs_mount_t *mp; /* mount point structure */
- xfs_extlen_t prod = 0; /* product factor for allocators */
- xfs_extlen_t mod = 0; /* product factor for allocators */
- xfs_extlen_t ralen = 0; /* realtime allocation length */
- xfs_extlen_t align; /* minimum allocation alignment */
- xfs_rtblock_t rtb;
-
- mp = ap->ip->i_mount;
+ struct xfs_mount *mp = ap->ip->i_mount;
+ xfs_fileoff_t orig_offset = ap->offset;
+ xfs_rtblock_t rtb;
+ xfs_extlen_t prod = 0; /* product factor for allocators */
+ xfs_extlen_t mod = 0; /* product factor for allocators */
+ xfs_extlen_t ralen = 0; /* realtime allocation length */
+ xfs_extlen_t align; /* minimum allocation alignment */
+ xfs_extlen_t orig_length = ap->length;
+ xfs_extlen_t minlen = mp->m_sb.sb_rextsize;
+ xfs_extlen_t raminlen;
+ bool rtlocked = false;
+ bool ignore_locality = false;
+ int error;
+
align = xfs_get_extsz_hint(ap->ip);
+retry:
prod = align / mp->m_sb.sb_rextsize;
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
align, 1, ap->eof, 0,
@@ -93,6 +99,15 @@ xfs_bmap_rtalloc(
ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
/*
+ * If we shifted the file offset downward to satisfy an extent size
+ * hint, increase minlen by that amount so that the allocator won't
+ * give us an allocation that's too short to cover at least one of the
+ * blocks that the caller asked for.
+ */
+ if (ap->offset != orig_offset)
+ minlen += orig_offset - ap->offset;
+
+ /*
* If the offset & length are not perfectly aligned
* then kill prod, it will just get us in trouble.
*/
@@ -116,10 +131,13 @@ xfs_bmap_rtalloc(
/*
* Lock out modifications to both the RT bitmap and summary inodes
*/
- xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
- xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
- xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
- xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
+ if (!rtlocked) {
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
+ xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
+ xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
+ rtlocked = true;
+ }
/*
* If it's an allocation to an empty file at offset 0,
@@ -141,33 +159,59 @@ xfs_bmap_rtalloc(
/*
* Realtime allocation, done through xfs_rtallocate_extent.
*/
- do_div(ap->blkno, mp->m_sb.sb_rextsize);
+ if (ignore_locality)
+ ap->blkno = 0;
+ else
+ do_div(ap->blkno, mp->m_sb.sb_rextsize);
rtb = ap->blkno;
ap->length = ralen;
- error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
- &ralen, ap->wasdel, prod, &rtb);
+ raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
+ error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
+ &ralen, ap->wasdel, prod, &rtb);
if (error)
return error;
- ap->blkno = rtb;
- if (ap->blkno != NULLFSBLOCK) {
- ap->blkno *= mp->m_sb.sb_rextsize;
- ralen *= mp->m_sb.sb_rextsize;
- ap->length = ralen;
- ap->ip->i_nblocks += ralen;
+ if (rtb != NULLRTBLOCK) {
+ ap->blkno = rtb * mp->m_sb.sb_rextsize;
+ ap->length = ralen * mp->m_sb.sb_rextsize;
+ ap->ip->i_nblocks += ap->length;
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
if (ap->wasdel)
- ap->ip->i_delayed_blks -= ralen;
+ ap->ip->i_delayed_blks -= ap->length;
/*
* Adjust the disk quota also. This was reserved
* earlier.
*/
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
- XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
- } else {
- ap->length = 0;
+ XFS_TRANS_DQ_RTBCOUNT, ap->length);
+ return 0;
}
+
+ if (align > mp->m_sb.sb_rextsize) {
+ /*
+ * We previously enlarged the request length to try to satisfy
+ * an extent size hint. The allocator didn't return anything,
+ * so reset the parameters to the original values and try again
+ * without alignment criteria.
+ */
+ ap->offset = orig_offset;
+ ap->length = orig_length;
+ minlen = align = mp->m_sb.sb_rextsize;
+ goto retry;
+ }
+
+ if (!ignore_locality && ap->blkno != 0) {
+ /*
+ * If we can't allocate near a specific rt extent, try again
+ * without locality criteria.
+ */
+ ignore_locality = true;
+ goto retry;
+ }
+
+ ap->blkno = NULLFSBLOCK;
+ ap->length = 0;
return 0;
}
#endif /* CONFIG_XFS_RT */
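The reworked allocator above is an instance of retry-with-progressively-relaxed-constraints: first give up extent-size alignment, then give up locality, and only then fail. A generic sketch of that control flow (try_alloc() and its flags are illustrative, not the XFS API):

#include <linux/errno.h>

static int try_alloc(bool aligned, bool local);	/* illustrative backend */

static int alloc_with_fallback(void)
{
	bool aligned = true, local = true;
	int err;

retry:
	err = try_alloc(aligned, local);
	if (err != -ENOSPC)
		return err;	/* success, or a real error */
	if (aligned) {
		aligned = false;	/* relax alignment first */
		goto retry;
	}
	if (local) {
		local = false;		/* then relax locality */
		goto retry;
	}
	return -ENOSPC;		/* nothing left to relax */
}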
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index ac5a28eff2c8..8f1350e599eb 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -896,6 +896,18 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
+ * for_each_new_plane_in_state_reverse - iterate over all planes in an atomic
+ * update in reverse order, tracking only the new state
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * Apart from the reverse order and tracking only the new state, this is
+ * the same as for_each_oldnew_plane_in_state_reverse().
+ */
+#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \
+ for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
+ (__i) >= 0; \
+ (__i)--) \
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (new_plane_state) = (__state)->planes[__i].new_state, 1))
+
+/**
* for_each_old_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
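Hypothetical use of the new reverse iterator in a driver's commit path, walking planes from the highest index down while looking only at the committed (new) state:

#include <drm/drm_atomic.h>

static void example_commit_planes(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_state;
	int i;

	for_each_new_plane_in_state_reverse(state, plane, new_state, i) {
		/* program hardware for plane from new_state, top layer first */
	}
}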
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index e9ad4863d915..cc9de1632dd3 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,6 +35,8 @@
#include <linux/scatterlist.h>
+struct dma_buf_map;
+
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
@@ -70,4 +72,9 @@ static inline bool drm_arch_can_wc_memory(void)
#endif
}
+void drm_memcpy_init_early(void);
+
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+ const struct dma_buf_map *src,
+ unsigned long len);
#endif
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index c87a829b6498..ddb9231d0309 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -596,11 +596,11 @@ struct drm_dp_mst_topology_mgr {
/**
* @max_lane_count: maximum number of lanes the GPU can drive.
*/
- u8 max_lane_count;
+ int max_lane_count;
/**
- * @max_link_rate: maximum link rate per lane GPU can output.
+ * @max_link_rate: maximum link rate per lane GPU can output, in kHz.
*/
- u8 max_link_rate;
+ int max_link_rate;
/**
* @conn_base_id: DRM connector ID this mgr is connected to. Only used
* to build the MST connector path value.
@@ -774,7 +774,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes,
int max_payloads,
- u8 max_lane_count, u8 max_link_rate,
+ int max_lane_count, int max_link_rate,
int conn_base_id);
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 795aea1d0a25..6447e34528f8 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
+struct drm_device;
struct drm_framebuffer;
struct drm_plane_state;
@@ -14,5 +15,9 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane);
+void drm_fb_cma_sync_non_coherent(struct drm_device *drm,
+ struct drm_plane_state *old_state,
+ struct drm_plane_state *state);
+
#endif
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 156b122c0ad5..3b138d4ae67e 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -136,14 +136,6 @@ struct drm_format_info {
};
/**
- * struct drm_format_name_buf - name of a DRM format
- * @str: string buffer containing the format name
- */
-struct drm_format_name_buf {
- char str[32];
-};
-
-/**
* drm_format_info_is_yuv_packed - check that the format info matches a YUV
* format with data laid in a single plane
* @info: format info
@@ -318,6 +310,5 @@ unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane);
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width);
-const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 0a9711caa3e8..cd13508acbc1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -16,6 +16,7 @@ struct drm_mode_create_dumb;
* more than one entry but they are guaranteed to have contiguous
* DMA addresses.
* @vaddr: kernel virtual address of the backing memory
+ * @map_noncoherent: if true, the GEM object is backed by non-coherent memory
*/
struct drm_gem_cma_object {
struct drm_gem_object base;
@@ -24,6 +25,8 @@ struct drm_gem_cma_object {
/* For objects with DMA memory allocated by GEM CMA */
void *vaddr;
+
+ bool map_noncoherent;
};
#define to_drm_gem_cma_obj(gem_obj) \
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 10225a0a35d0..d18af49fd009 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -275,7 +275,7 @@ struct drm_sched_backend_ops {
* @pending_list: the list of jobs which are currently in the job queue.
* @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
- * guilty and it will be considered for scheduling further.
+ * guilty and it will no longer be considered for scheduling.
* @score: score to help loadbalancer pick a idle sched
* @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 639521880c29..f681bbdbc698 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -136,7 +136,7 @@ struct ttm_buffer_object {
* Members protected by the bo::resv::reserved lock.
*/
- struct ttm_resource mem;
+ struct ttm_resource *resource;
struct ttm_tt *ttm;
bool deleted;
@@ -525,19 +525,6 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
/**
- * ttm_bo_mmap - mmap out of the ttm device address space.
- *
- * @filp: filp as input from the mmap method.
- * @vma: vma as input from the mmap method.
- * @bdev: Pointer to the ttm_device with the address space manager.
- *
- * This function is intended to be called by the device mmap method.
- * if the device address space is to be backed by the bo manager.
- */
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_device *bdev);
-
-/**
* ttm_bo_io
*
* @bdev: Pointer to the struct ttm_device.
@@ -620,4 +607,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
+
#endif
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index dbccac957f8f..68d6069572aa 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -40,6 +40,7 @@
#include <drm/ttm/ttm_device.h>
#include "ttm_bo_api.h"
+#include "ttm_kmap_iter.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"
@@ -96,7 +97,7 @@ struct ttm_lru_bulk_move {
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx);
/**
@@ -181,15 +182,15 @@ static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
spin_lock(&bo->bdev->lru_lock);
- ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+ ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
spin_unlock(&bo->bdev->lru_lock);
}
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
- bo->mem = *new_mem;
- new_mem->mm_node = NULL;
+ WARN_ON(bo->resource);
+ bo->resource = new_mem;
}
/**
@@ -202,9 +203,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
- struct ttm_resource *old_mem = &bo->mem;
-
- WARN_ON(old_mem->mm_node != NULL);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
}
@@ -273,6 +272,23 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem);
/**
+ * ttm_bo_move_sync_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
+ * by the caller to be idle. Typically used after memcpy buffer moves.
+ */
+static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem);
+
+ WARN_ON(ret);
+}
+
+/**
* ttm_bo_pipeline_gutting.
*
* @bo: A pointer to a struct ttm_buffer_object.
@@ -306,30 +322,14 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
*/
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
-/**
- * ttm_range_man_init
- *
- * @bdev: ttm device
- * @type: memory manager type
- * @use_tt: if the memory manager uses tt
- * @p_size: size of area to be managed in pages.
- *
- * Initialise a generic range manager for the selected memory type.
- * The range manager is installed for this device in the type slot.
- */
-int ttm_range_man_init(struct ttm_device *bdev,
- unsigned type, bool use_tt,
- unsigned long p_size);
-
-/**
- * ttm_range_man_fini
- *
- * @bdev: ttm device
- * @type: memory manager type
- *
- * Remove the generic range manager from a slot and tear it down.
- */
-int ttm_range_man_fini(struct ttm_device *bdev,
- unsigned type);
+void ttm_move_memcpy(struct ttm_buffer_object *bo,
+ u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter);
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start);
#endif
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
index a0b4a49fa432..3c9dd65f5aaf 100644
--- a/include/drm/ttm/ttm_caching.h
+++ b/include/drm/ttm/ttm_caching.h
@@ -33,4 +33,6 @@ enum ttm_caching {
ttm_cached
};
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp);
+
#endif
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 7c8f87bd52d3..cd592f8e941b 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -162,21 +162,6 @@ struct ttm_device_funcs {
struct ttm_place *hop);
/**
- * struct ttm_bo_driver_member verify_access
- *
- * @bo: Pointer to a buffer object.
- * @filp: Pointer to a struct file trying to access the object.
- *
- * Called from the map / write / read methods to verify that the
- * caller is permitted to access the buffer object.
- * This member may be set to NULL, which will refuse this kind of
- * access for all buffer objects.
- * This function should return 0 if access is granted, -EPERM otherwise.
- */
- int (*verify_access)(struct ttm_buffer_object *bo,
- struct file *filp);
-
- /**
* Hook to notify driver about a resource delete.
*/
void (*delete_mem_notify)(struct ttm_buffer_object *bo);
diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h
new file mode 100644
index 000000000000..8bb00fd39d6c
--- /dev/null
+++ b/include/drm/ttm/ttm_kmap_iter.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __TTM_KMAP_ITER_H__
+#define __TTM_KMAP_ITER_H__
+
+#include <linux/types.h>
+
+struct ttm_kmap_iter;
+struct dma_buf_map;
+
+/**
+ * struct ttm_kmap_iter_ops - Ops structure for a struct
+ * ttm_kmap_iter.
+ * @maps_tt: Whether the iterator maps TT memory directly, as opposed
+ * to mapping a TT through an aperture. Both these modes have
+ * struct ttm_resource_manager::use_tt set, but the latter typically
+ * returns is_iomem == true from ttm_mem_io_reserve.
+ */
+struct ttm_kmap_iter_ops {
+ /**
+ * map_local() - Map a PAGE_SIZE part of the resource using
+ * kmap_local semantics.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct dma_buf_map holding the virtual address after
+ * the operation.
+ * @i: The location within the resource to map. PAGE_SIZE granularity.
+ */
+ void (*map_local)(struct ttm_kmap_iter *res_iter,
+ struct dma_buf_map *dmap, pgoff_t i);
+ /**
+ * unmap_local() - Unmap a PAGE_SIZE part of the resource previously
+ * mapped using kmap_local.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct dma_buf_map holding the virtual address after
+ * the operation.
+ */
+ void (*unmap_local)(struct ttm_kmap_iter *res_iter,
+ struct dma_buf_map *dmap);
+ bool maps_tt;
+};
+
+/**
+ * struct ttm_kmap_iter - Iterator for kmap_local type operations on a
+ * resource.
+ * @ops: Pointer to the operations struct.
+ *
+ * This struct is intended to be embedded in a resource-specific specialization
+ * implementing operations for the resource.
+ *
+ * Nothing stops us from extending the operations to vmap, vmap_pfn etc.,
+ * replacing some or all of the ttm_bo_util cpu-map functionality.
+ */
+struct ttm_kmap_iter {
+ const struct ttm_kmap_iter_ops *ops;
+};
+
+#endif /* __TTM_KMAP_ITER_H__ */
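A minimal, hypothetical specialization over a permanently mapped kernel buffer, to show the shape these ops expect; the real specializations follow in ttm_resource.h and ttm_tt.h:

#include <linux/dma-buf-map.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_kmap_iter.h>

struct kmap_iter_vaddr {
	struct ttm_kmap_iter base;
	void *vaddr;		/* permanently mapped backing store */
};

static void vaddr_map_local(struct ttm_kmap_iter *iter,
			    struct dma_buf_map *dmap, pgoff_t i)
{
	struct kmap_iter_vaddr *vi =
		container_of(iter, struct kmap_iter_vaddr, base);

	dma_buf_map_set_vaddr(dmap, vi->vaddr + i * PAGE_SIZE);
}

static void vaddr_unmap_local(struct ttm_kmap_iter *iter,
			      struct dma_buf_map *dmap)
{
	/* nothing to undo: the buffer stays mapped */
}

static const struct ttm_kmap_iter_ops vaddr_iter_ops = {
	.map_local	= vaddr_map_local,
	.unmap_local	= vaddr_unmap_local,
	.maps_tt	= false,
};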
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
new file mode 100644
index 000000000000..22b6fa42ac20
--- /dev/null
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef _TTM_RANGE_MANAGER_H_
+#define _TTM_RANGE_MANAGER_H_
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/drm_mm.h>
+
+/**
+ * struct ttm_range_mgr_node
+ *
+ * @base: base class we extend
+ * @mm_nodes: MM nodes, usually 1
+ *
+ * Extending the ttm_resource object to manage an address space allocation with
+ * one or more drm_mm_nodes.
+ */
+struct ttm_range_mgr_node {
+ struct ttm_resource base;
+ struct drm_mm_node mm_nodes[];
+};
+
+/**
+ * to_ttm_range_mgr_node
+ *
+ * @res: the resource to upcast
+ *
+ * Upcast the ttm_resource object into a ttm_range_mgr_node object.
+ */
+static inline struct ttm_range_mgr_node *
+to_ttm_range_mgr_node(struct ttm_resource *res)
+{
+ return container_of(res, struct ttm_range_mgr_node, base);
+}
+
+int ttm_range_man_init(struct ttm_device *bdev,
+ unsigned type, bool use_tt,
+ unsigned long p_size);
+int ttm_range_man_fini(struct ttm_device *bdev,
+ unsigned type);
+
+#endif
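Hypothetical use of the upcast helper inside a range manager's free() callback, recovering the embedding node from the base resource (locking around the drm_mm elided):

#include <linux/slab.h>
#include <drm/ttm/ttm_range_manager.h>

static void example_range_free(struct ttm_resource_manager *man,
			       struct ttm_resource *res)
{
	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);

	drm_mm_remove_node(&node->mm_nodes[0]);	/* give the range back */
	kfree(node);
}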
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 890b9d369519..140b6b9a8bbe 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -27,9 +27,11 @@
#include <linux/types.h>
#include <linux/mutex.h>
+#include <linux/dma-buf-map.h>
#include <linux/dma-fence.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
#define TTM_MAX_BO_PRIORITY 4U
@@ -38,6 +40,10 @@ struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
struct ttm_buffer_object;
+struct dma_buf_map;
+struct io_mapping;
+struct sg_table;
+struct scatterlist;
struct ttm_resource_manager_func {
/**
@@ -45,46 +51,38 @@ struct ttm_resource_manager_func {
*
* @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for.
- * @placement: Placement details.
- * @flags: Additional placement flags.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @place: Placement details.
+ * @res: Resulting pointer to the ttm_resource.
*
* This function should allocate space in the memory type managed
- * by @man. Placement details if
- * applicable are given by @placement. If successful,
- * @mem::mm_node should be set to a non-null value, and
- * @mem::start should be set to a value identifying the beginning
+ * by @man. Placement details if applicable are given by @place. If
+ * successful, a filled in ttm_resource object should be returned in
+ * @res. @res::start should be set to a value identifying the beginning
* of the range allocated, and the function should return zero.
- * If the memory region accommodate the buffer object, @mem::mm_node
- * should be set to NULL, and the function should return 0.
+ * If the manager can't fulfill the request -ENOSPC should be returned.
* If a system error occurred, preventing the request to be fulfilled,
* the function should return a negative error code.
*
- * Note that @mem::mm_node will only be dereferenced by
- * struct ttm_resource_manager functions and optionally by the driver,
- * which has knowledge of the underlying type.
- *
- * This function may not be called from within atomic context, so
- * an implementation can and must use either a mutex or a spinlock to
- * protect any data structures managing the space.
+ * This function may not be called from within atomic context and needs
+ * to take care of its own locking to protect any data structures
+ * managing the space.
*/
int (*alloc)(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem);
+ struct ttm_resource **res);
/**
* struct ttm_resource_manager_func member free
*
* @man: Pointer to a memory type manager.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @res: Pointer to a struct ttm_resource to be freed.
*
- * This function frees memory type resources previously allocated
- * and that are identified by @mem::mm_node and @mem::start. May not
- * be called from within atomic context.
+ * This function frees memory type resources previously allocated.
+ * May not be called from within atomic context.
*/
void (*free)(struct ttm_resource_manager *man,
- struct ttm_resource *mem);
+ struct ttm_resource *res);
/**
* struct ttm_resource_manager_func member debug
@@ -158,9 +156,9 @@ struct ttm_bus_placement {
/**
* struct ttm_resource
*
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
+ * @mem_type: Resource type of the allocation.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
*
@@ -168,7 +166,6 @@ struct ttm_bus_placement {
* buffer object.
*/
struct ttm_resource {
- void *mm_node;
unsigned long start;
unsigned long num_pages;
uint32_t mem_type;
@@ -177,6 +174,45 @@ struct ttm_resource {
};
/**
+ * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
+ * struct sg_table backed struct ttm_resource.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface.
+ * @iomap: struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ * @cache: Scatterlist traversal cache for fast lookups.
+ * @cache.sg: Pointer to the currently cached scatterlist segment.
+ * @cache.i: First index of @sg. PAGE_SIZE granularity.
+ * @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity.
+ * @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity.
+ */
+struct ttm_kmap_iter_iomap {
+ struct ttm_kmap_iter base;
+ struct io_mapping *iomap;
+ struct sg_table *st;
+ resource_size_t start;
+ struct {
+ struct scatterlist *sg;
+ pgoff_t i;
+ pgoff_t end;
+ pgoff_t offs;
+ } cache;
+};
+
+/**
+ * struct ttm_kmap_iter_linear_io - Iterator specialization for linear io
+ * @base: The base iterator
+ * @dmap: Points to the starting address of the region
+ * @needs_unmap: Whether we need to unmap on fini
+ */
+struct ttm_kmap_iter_linear_io {
+ struct ttm_kmap_iter base;
+ struct dma_buf_map dmap;
+ bool needs_unmap;
+};
+
+/**
* ttm_resource_manager_set_used
*
* @man: A memory manager object.
@@ -223,10 +259,13 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
man->move = NULL;
}
+void ttm_resource_init(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res);
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *res);
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
+ struct ttm_resource **res);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
@@ -237,4 +276,20 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start);
+
+struct ttm_kmap_iter_linear_io;
+
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
#endif
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 134d09ef7766..818680c6a8ed 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
struct ttm_bo_device;
struct ttm_tt;
@@ -69,6 +70,18 @@ struct ttm_tt {
enum ttm_caching caching;
};
+/**
+ * struct ttm_kmap_iter_tt - Specialization of a mapping iterator for a tt.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface
+ * @tt: Cached struct ttm_tt.
+ * @prot: Cached page protection for mapping.
+ */
+struct ttm_kmap_iter_tt {
+ struct ttm_kmap_iter base;
+ struct ttm_tt *tt;
+ pgprot_t prot;
+};
+
static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
{
return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
@@ -157,8 +170,24 @@ int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_oper
*/
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
+/**
+ * ttm_tt_mark_for_clear - Mark pages for clearing on populate.
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Marks pages for clearing so that the next time the page vector is
+ * populated, the pages will be cleared.
+ */
+static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
+{
+ ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+}
+
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
+struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+ struct ttm_tt *tt);
+
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 7f475d59a097..87d112650dfb 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -22,7 +22,7 @@
#include <linux/build_bug.h>
#define GENMASK_INPUT_CHECK(h, l) \
(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __builtin_constant_p((l) > (h)), (l) > (h), 0)))
+ __is_constexpr((l) > (h)), (l) > (h), 0)))
#else
/*
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
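The switch to __is_constexpr() (defined in const.h further down) matters because, as I read it, __builtin_constant_p() is not itself guaranteed to be an integer constant expression for every argument, whereas __is_constexpr() always is. One context where that distinction bites (MY_FIELD_MASK is made up):

  #include <linux/bits.h>

  /*
   * Enumerator initializers must be integer constant expressions, so
   * GENMASK_INPUT_CHECK() has to fold at compile time here.
   */
  enum { MY_FIELD_MASK = GENMASK(11, 4) };  /* bits 11..4 -> 0xff0 */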
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 98dd7b324c35..8855b1b702b2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -213,12 +213,11 @@ typedef struct compat_siginfo {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
compat_uptr_t _addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \
sizeof(short) : __alignof__(compat_uptr_t))
union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
/*
* used when si_code=BUS_MCEERR_AR or
* used when si_code=BUS_MCEERR_AO
@@ -236,7 +235,10 @@ typedef struct compat_siginfo {
u32 _pkey;
} _addr_pkey;
/* used when si_code=TRAP_PERF */
- compat_ulong_t _perf;
+ struct {
+ compat_ulong_t _data;
+ u32 _type;
+ } _perf;
};
} _sigfault;
diff --git a/include/linux/const.h b/include/linux/const.h
index 81b8aae5a855..435ddd72d2c4 100644
--- a/include/linux/const.h
+++ b/include/linux/const.h
@@ -3,4 +3,12 @@
#include <vdso/const.h>
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <[email protected]>
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
#endif /* _LINUX_CONST_H */
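A standalone userspace sketch (GCC/Clang, relies on the sizeof(void) == 1 extension) of why this trick works; note that x is never evaluated either way:

  #include <assert.h>

  #define __is_constexpr(x) \
          (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

  int main(void)
  {
          int v = 3;

          /*
           * Constant x: (long)(x) * 0l is a null pointer constant, the
           * conditional picks type int *, and the sizes match.
           */
          assert(__is_constexpr(5));

          /*
           * Non-constant x: the operand is merely a void *, the
           * conditional's type degrades to void *, and sizeof(void) is 1.
           */
          assert(!__is_constexpr(v));
          return 0;
  }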
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index d44a77e8a7e3..562b885cf9c3 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -78,19 +78,11 @@ struct dma_resv {
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
-/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list. Does NOT take references to
- * the fence. The obj->lock must be held.
- */
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
-{
- return rcu_dereference_protected(obj->fence,
- dma_resv_held(obj));
-}
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_shared_max(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
+#endif
/**
* dma_resv_lock - lock the reservation object
@@ -215,38 +207,29 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
*/
static inline void dma_resv_unlock(struct dma_resv *obj)
{
-#ifdef CONFIG_DEBUG_MUTEXES
- /* Test shared fence slot reservation */
- if (rcu_access_pointer(obj->fence)) {
- struct dma_resv_list *fence = dma_resv_get_list(obj);
-
- fence->shared_max = fence->shared_count;
- }
-#endif
+ dma_resv_reset_shared_max(obj);
ww_mutex_unlock(&obj->lock);
}
/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
+ * dma_resv_exclusive - return the object's exclusive fence
* @obj: the reservation object
*
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
+ * Returns the exclusive fence (if any). The caller must either hold the
+ * object locked through dma_resv_lock(), or the RCU read side lock through
+ * rcu_read_lock(), or one of the variants of each.
*
* RETURNS
* The exclusive fence or NULL
*/
static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
+dma_resv_excl_fence(struct dma_resv *obj)
{
- return rcu_dereference_protected(obj->fence_excl,
- dma_resv_held(obj));
+ return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
}
/**
- * dma_resv_get_excl_rcu - get the reservation object's
+ * dma_resv_get_excl_unlocked - get the reservation object's
* exclusive fence, without lock held.
* @obj: the reservation object
*
@@ -257,7 +240,7 @@ dma_resv_get_excl(struct dma_resv *obj)
* The exclusive fence or NULL if none
*/
static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
+dma_resv_get_excl_unlocked(struct dma_resv *obj)
{
struct dma_fence *fence;
@@ -271,23 +254,29 @@ dma_resv_get_excl_rcu(struct dma_resv *obj)
return fence;
}
+/**
+ * dma_resv_shared_list - get the reservation object's shared fence list
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list. The caller must either hold the object
+ * locked through dma_resv_lock(), or the RCU read side lock through
+ * rcu_read_lock(), or one of the variants of each.
+ */
+static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
+{
+ return rcu_dereference_check(obj->fence, dma_resv_held(obj));
+}
+
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared);
-
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
+ unsigned *pshared_count, struct dma_fence ***pshared);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
+ unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
#endif /* _LINUX_RESERVATION_H */
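A hypothetical sketch of the renamed accessors under the update-side lock (dump_resv() is made up; only dma_resv_lock(), dma_resv_shared_list(), dma_resv_excl_fence() and dma_resv_unlock() come from this header):

  static void dump_resv(struct dma_resv *obj)
  {
          struct dma_resv_list *list;

          if (dma_resv_lock(obj, NULL))
                  return;

          /* Lock held, so the rcu_dereference_check() in both helpers holds. */
          list = dma_resv_shared_list(obj);
          pr_info("shared fences: %u, exclusive fence: %s\n",
                  list ? list->shared_count : 0,
                  dma_resv_excl_fence(obj) ? "present" : "none");

          dma_resv_unlock(obj);
  }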
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a57ee75342cf..dce631e678dd 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -32,6 +32,11 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+
+#define _DPRINTK_FLAGS_INCL_ANY \
+ (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID)
+
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
#else
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 7e9660ea967d..6fc26f7bdf71 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -306,8 +306,6 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
}
#endif /* CONFIG_SYSFS */
-extern struct rw_semaphore bdev_lookup_sem;
-
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);
#ifdef CONFIG_BLOCK
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index c0f57b0c64d9..5433c08fcc68 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_MINMAX_H
#define _LINUX_MINMAX_H
+#include <linux/const.h>
+
/*
* min()/max()/clamp() macros must accomplish three things:
*
@@ -17,14 +19,6 @@
#define __typecheck(x, y) \
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
-/*
- * This returns a constant expression while determining if an argument is
- * a constant expression, most importantly without evaluating the argument.
- * Glory to Martin Uecker <[email protected]>
- */
-#define __is_constexpr(x) \
- (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
-
#define __no_side_effects(x, y) \
(__is_constexpr(x) && __is_constexpr(y))
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c20211e59a57..4c048ed5bc55 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -862,6 +862,8 @@ struct module;
* MSI-X vectors available for distribution to the VFs.
* @err_handler: See Documentation/PCI/pci-error-recovery.rst
* @groups: Sysfs attribute groups.
+ * @dev_groups: Attributes attached to the device that will be
+ * created once it is bound to the driver.
* @driver: Driver model structure.
* @dynids: List of dynamically added device IDs.
*/
@@ -879,6 +881,7 @@ struct pci_driver {
u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
const struct pci_error_handlers *err_handler;
const struct attribute_group **groups;
+ const struct attribute_group **dev_groups;
struct device_driver driver;
struct pci_dynids dynids;
};
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3f6a0fcaa10c..7f4278fa21fe 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -326,6 +326,7 @@ int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
+int force_sig_perf(void __user *addr, u32 type, u64 sig_data);
int force_sig_ptrace_errno_trap(int errno, void __user *addr);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 0dbfda8d99d0..201f88e3738b 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -40,6 +40,7 @@ enum siginfo_layout {
SIL_TIMER,
SIL_POLL,
SIL_FAULT,
+ SIL_FAULT_TRAPNO,
SIL_FAULT_MCEERR,
SIL_FAULT_BNDERR,
SIL_FAULT_PKUERR,
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
index 4441ad667c3f..6ff9c58b3e17 100644
--- a/include/linux/surface_aggregator/device.h
+++ b/include/linux/surface_aggregator/device.h
@@ -98,9 +98,9 @@ struct ssam_device_uid {
| (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0), \
.domain = d, \
.category = cat, \
- .target = ((tid) != SSAM_ANY_TID) ? (tid) : 0, \
- .instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0, \
- .function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0 \
+ .target = __builtin_choose_expr((tid) != SSAM_ANY_TID, (tid), 0), \
+ .instance = __builtin_choose_expr((iid) != SSAM_ANY_IID, (iid), 0), \
+ .function = __builtin_choose_expr((fun) != SSAM_ANY_FUN, (fun), 0)
/**
* SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 03d6f6d2c1fe..5a3c221f4c9d 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -63,9 +63,6 @@ union __sifields {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void __user *_addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
#ifdef __ia64__
int _imm; /* immediate value for "break" */
unsigned int _flags; /* see ia64 si_flags */
@@ -75,6 +72,8 @@ union __sifields {
#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
sizeof(short) : __alignof__(void *))
union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
/*
* used when si_code=BUS_MCEERR_AR or
* used when si_code=BUS_MCEERR_AO
@@ -92,7 +91,10 @@ union __sifields {
__u32 _pkey;
} _addr_pkey;
/* used when si_code=TRAP_PERF */
- unsigned long _perf;
+ struct {
+ unsigned long _data;
+ __u32 _type;
+ } _perf;
};
} _sigfault;
@@ -150,14 +152,13 @@ typedef struct siginfo {
#define si_int _sifields._rt._sigval.sival_int
#define si_ptr _sifields._rt._sigval.sival_ptr
#define si_addr _sifields._sigfault._addr
-#ifdef __ARCH_SI_TRAPNO
#define si_trapno _sifields._sigfault._trapno
-#endif
#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_lower _sifields._sigfault._addr_bnd._lower
#define si_upper _sifields._sigfault._addr_bnd._upper
#define si_pkey _sifields._sigfault._addr_pkey._pkey
-#define si_perf _sifields._sigfault._perf
+#define si_perf_data _sifields._sigfault._perf._data
+#define si_perf_type _sifields._sigfault._perf._type
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
#define si_call_addr _sifields._sigsys._call_addr
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 2063a1c10f79..91a23187902e 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -136,6 +136,10 @@ extern "C" {
* accessing it with various hw blocks
*/
#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
+/* Flag that BO will be used only in preemptible context, which does
+ * not require GTT memory accounting
+ */
+#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -753,6 +757,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VBIOS_SIZE 0x1
/* Subquery id: Query vbios image */
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
+ /* Subquery id: Query vbios info */
+ #define AMDGPU_INFO_VBIOS_INFO 0x3
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
/* Query sensor related information */
@@ -946,6 +952,15 @@ struct drm_amdgpu_info_firmware {
__u32 feature;
};
+struct drm_amdgpu_info_vbios {
+ __u8 name[64];
+ __u8 vbios_pn[64];
+ __u32 version;
+ __u32 pad;
+ __u8 vbios_ver_str[32];
+ __u8 date[32];
+};
+
#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2 2
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 67b94bc3c885..d043752a74cf 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -777,9 +777,12 @@ struct drm_get_cap {
/**
* DRM_CLIENT_CAP_STEREO_3D
*
- * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * If set to 1, the DRM core will expose the stereo 3D capabilities of the
* monitor by advertising the supported 3D layouts in the flags of struct
- * drm_mode_modeinfo.
+ * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 3.13.
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
@@ -788,6 +791,9 @@ struct drm_get_cap {
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
+ *
+ * This capability has been introduced in kernel version 3.15. Starting from
+ * kernel version 3.17, this capability is always supported for all drivers.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
@@ -797,6 +803,13 @@ struct drm_get_cap {
* If set to 1, the DRM core will expose atomic properties to userspace. This
* implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
* &DRM_CLIENT_CAP_ASPECT_RATIO.
+ *
+ * If the driver doesn't support atomic mode-setting, enabling this capability
+ * will fail with -EOPNOTSUPP.
+ *
+ * This capability has been introduced in kernel version 4.0. Starting from
+ * kernel version 4.2, this capability is always supported for atomic-capable
+ * drivers.
*/
#define DRM_CLIENT_CAP_ATOMIC 3
@@ -804,6 +817,10 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_ASPECT_RATIO
*
* If set to 1, the DRM core will provide aspect ratio information in modes.
+ * See ``DRM_MODE_FLAG_PIC_AR_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 4.18.
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
@@ -811,8 +828,11 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
*
* If set to 1, the DRM core will expose special connectors to be used for
- * writing back to memory the scene setup in the commit. Depends on client
- * also supporting DRM_CLIENT_CAP_ATOMIC
+ * writing back to memory the scene setup in the commit. The client must enable
+ * &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * This capability is always supported for atomic-capable drivers starting from
+ * kernel version 4.19.
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index f76de49c768f..f7156322aba5 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -168,6 +168,13 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 64 bpp RGB */
+#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
/*
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index ddc47bbf48b6..c2c7759b7d2e 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -62,8 +62,8 @@ extern "C" {
#define I915_ERROR_UEVENT "ERROR"
#define I915_RESET_UEVENT "RESET"
-/*
- * i915_user_extension: Base class for defining a chain of extensions
+/**
+ * struct i915_user_extension - Base class for defining a chain of extensions
*
* Many interfaces need to grow over time. In most cases we can simply
* extend the struct and have userspace pass in more data. Another option,
@@ -76,12 +76,58 @@ extern "C" {
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
* the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct i915_user_extension ext3 {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext2 {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext1 {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct i915_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ *
*/
struct i915_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct i915_user_extension, or zero if the end.
+ */
__u64 next_extension;
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the namespace for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct i915_user_extension.
+ */
__u32 name;
- __u32 flags; /* All undefined bits must be zero. */
- __u32 rsvd[4]; /* Reserved for future use; must be zero. */
+ /**
+ * @flags: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 flags;
+ /**
+ * @rsvd: MBZ
+ *
+ * Reserved for future use; must be zero.
+ */
+ __u32 rsvd[4];
};
/*
@@ -360,6 +406,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_QUERY 0x39
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
+#define DRM_I915_GEM_CREATE_EXT 0x3c
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -392,6 +439,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -1054,12 +1102,12 @@ struct drm_i915_gem_exec_fence {
__u32 flags;
};
-/**
+/*
* See drm_i915_gem_execbuffer_ext_timeline_fences.
*/
#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
-/**
+/*
* This structure describes an array of drm_syncobj and associated points for
* timeline variants of drm_syncobj. It is invalid to append this structure to
* the execbuf if I915_EXEC_FENCE_ARRAY is set.
@@ -1700,7 +1748,7 @@ struct drm_i915_gem_context_param {
__u64 value;
};
-/**
+/*
* Context SSEU programming
*
* It may be necessary for either functional or performance reason to configure
@@ -2067,7 +2115,7 @@ struct drm_i915_perf_open_param {
__u64 properties_ptr;
};
-/**
+/*
* Enable data capture for a stream that was either opened in a disabled state
* via I915_PERF_FLAG_DISABLED or was later disabled via
* I915_PERF_IOCTL_DISABLE.
@@ -2081,7 +2129,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
-/**
+/*
* Disable data capture for a stream.
*
* It is an error to try and read a stream that is disabled.
@@ -2090,7 +2138,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
-/**
+/*
* Change metrics_set captured by a stream.
*
* If the stream is bound to a specific context, the configuration change
@@ -2103,7 +2151,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
-/**
+/*
* Common to all i915 perf records
*/
struct drm_i915_perf_record_header {
@@ -2151,7 +2199,7 @@ enum drm_i915_perf_record_type {
DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
-/**
+/*
* Structure to upload perf dynamic configuration into the kernel.
*/
struct drm_i915_perf_oa_config {
@@ -2172,53 +2220,95 @@ struct drm_i915_perf_oa_config {
__u64 flex_regs_ptr;
};
+/**
+ * struct drm_i915_query_item - An individual query for the kernel to process.
+ *
+ * The behaviour is determined by the @query_id. Note that exactly what
+ * @data_ptr points to also depends on the specific @query_id.
+ */
struct drm_i915_query_item {
+ /** @query_id: The id for this query */
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
#define DRM_I915_QUERY_ENGINE_INFO 2
#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_MEMORY_REGIONS 4
/* Must be kept compact -- no holes and well documented */
- /*
+ /**
+ * @length:
+ *
* When set to zero by userspace, this is filled with the size of the
- * data to be written at the data_ptr pointer. The kernel sets this
+ * data to be written at the @data_ptr pointer. The kernel sets this
* value to a negative value to signal an error on a particular query
* item.
*/
__s32 length;
- /*
+ /**
+ * @flags:
+ *
* When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
*
* When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
- * following :
- * - DRM_I915_QUERY_PERF_CONFIG_LIST
- * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
- * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ * following:
+ *
+ * - DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
*/
__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
- /*
- * Data will be written at the location pointed by data_ptr when the
- * value of length matches the length of the data to be written by the
+ /**
+ * @data_ptr:
+ *
+ * Data will be written at the location pointed by @data_ptr when the
+ * value of @length matches the length of the data to be written by the
* kernel.
*/
__u64 data_ptr;
};
+/**
+ * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
+ * kernel to fill out.
+ *
+ * Note that this is generally a two-step process for each struct
+ * drm_i915_query_item in the array:
+ *
+ * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
+ * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
+ * kernel will then fill in the size, in bytes, which tells userspace how
+ * much memory it needs to allocate for the blob (say, for an array of
+ * properties).
+ *
+ * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
+ * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
+ * the &drm_i915_query_item.length should still be the same as what the
+ * kernel previously set. At this point the kernel can fill in the blob.
+ *
+ * Note that for some query items it can make sense for userspace to just pass
+ * in a buffer/blob equal to or larger than the required size. In this case only
+ * a single ioctl call is needed. For some smaller query items this can work
+ * quite well.
+ *
+ */
struct drm_i915_query {
+ /** @num_items: The number of elements in the @items_ptr array */
__u32 num_items;
- /*
- * Unused for now. Must be cleared to zero.
+ /**
+ * @flags: Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * This points to an array of num_items drm_i915_query_item structures.
+ /**
+ * @items_ptr:
+ *
+ * Pointer to an array of struct drm_i915_query_item. The number of
+ * array elements is @num_items.
*/
__u64 items_ptr;
};
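A hedged userspace sketch of the two-step pattern described above, reusing the existing engine-info query (query_engines() is made up; error handling elided):

  #include <stdint.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <drm/i915_drm.h>

  static struct drm_i915_query_engine_info *query_engines(int fd)
  {
          struct drm_i915_query_item item = {
                  .query_id = DRM_I915_QUERY_ENGINE_INFO,
          };
          struct drm_i915_query query = {
                  .num_items = 1,
                  .items_ptr = (uintptr_t)&item,
          };
          struct drm_i915_query_engine_info *info;

          /* Step 1: the kernel fills item.length with the required size. */
          ioctl(fd, DRM_IOCTL_I915_QUERY, &query);

          info = calloc(1, item.length);
          item.data_ptr = (uintptr_t)info;

          /* Step 2: same call again, now the kernel fills the blob. */
          ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
          return info;
  }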
@@ -2292,21 +2382,21 @@ struct drm_i915_query_topology_info {
 * Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
- /** Engine class and instance. */
+ /** @engine: Engine class and instance. */
struct i915_engine_class_instance engine;
- /** Reserved field. */
+ /** @rsvd0: Reserved field. */
__u32 rsvd0;
- /** Engine flags. */
+ /** @flags: Engine flags. */
__u64 flags;
- /** Capabilities of this engine. */
+ /** @capabilities: Capabilities of this engine. */
__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
- /** Reserved fields. */
+ /** @rsvd1: Reserved fields. */
__u64 rsvd1[4];
};
@@ -2317,13 +2407,13 @@ struct drm_i915_engine_info {
* an array of struct drm_i915_engine_info structures.
*/
struct drm_i915_query_engine_info {
- /** Number of struct drm_i915_engine_info structs following. */
+ /** @num_engines: Number of struct drm_i915_engine_info structs following. */
__u32 num_engines;
- /** MBZ */
+ /** @rsvd: MBZ */
__u32 rsvd[3];
- /** Marker for drm_i915_engine_info structures. */
+ /** @engines: Marker for drm_i915_engine_info structures. */
struct drm_i915_engine_info engines[];
};
@@ -2377,6 +2467,241 @@ struct drm_i915_query_perf_config {
__u8 data[];
};
+/**
+ * enum drm_i915_gem_memory_class - Supported memory classes
+ */
+enum drm_i915_gem_memory_class {
+ /** @I915_MEMORY_CLASS_SYSTEM: System memory */
+ I915_MEMORY_CLASS_SYSTEM = 0,
+ /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
+ I915_MEMORY_CLASS_DEVICE,
+};
+
+/**
+ * struct drm_i915_gem_memory_class_instance - Identify particular memory region
+ */
+struct drm_i915_gem_memory_class_instance {
+ /** @memory_class: See enum drm_i915_gem_memory_class */
+ __u16 memory_class;
+
+ /** @memory_instance: Which instance */
+ __u16 memory_instance;
+};
+
+/**
+ * struct drm_i915_memory_region_info - Describes one region as known to the
+ * driver.
+ *
+ * Note that we reserve some stuff here for potential future work. As an
+ * example, we might want to expose the capabilities for a given region, which
+ * could include things like whether the region is CPU mappable/accessible,
+ * what the supported mapping types are, etc.
+ *
+ * Note that to extend struct drm_i915_memory_region_info and struct
+ * drm_i915_query_memory_regions in the future the plan is to do the following:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_memory_region_info {
+ * struct drm_i915_gem_memory_class_instance region;
+ * union {
+ * __u32 rsvd0;
+ * __u32 new_thing1;
+ * };
+ * ...
+ * union {
+ * __u64 rsvd1[8];
+ * struct {
+ * __u64 new_thing2;
+ * __u64 new_thing3;
+ * ...
+ * };
+ * };
+ * };
+ *
+ * With this, things should remain source compatible between versions for
+ * userspace, even as we add new fields.
+ *
+ * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
+ * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
+ * at &drm_i915_query_item.query_id.
+ */
+struct drm_i915_memory_region_info {
+ /** @region: The class:instance pair encoding */
+ struct drm_i915_gem_memory_class_instance region;
+
+ /** @rsvd0: MBZ */
+ __u32 rsvd0;
+
+ /** @probed_size: Memory probed by the driver (-1 = unknown) */
+ __u64 probed_size;
+
+ /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
+ __u64 unallocated_size;
+
+ /** @rsvd1: MBZ */
+ __u64 rsvd1[8];
+};
+
+/**
+ * struct drm_i915_query_memory_regions
+ *
+ * The region info query enumerates all regions known to the driver by filling
+ * in an array of struct drm_i915_memory_region_info structures.
+ *
+ * Example for getting the list of supported regions:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_memory_regions *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of regions. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with all the region info.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each region in the array
+ * for (i = 0; i < info->num_regions; i++) {
+ * struct drm_i915_memory_region_info mr = info->regions[i];
+ * __u16 class = mr.region.memory_class;
+ * __u16 instance = mr.region.memory_instance;
+ *
+ * ....
+ * }
+ *
+ * free(info);
+ */
+struct drm_i915_query_memory_regions {
+ /** @num_regions: Number of supported regions */
+ __u32 num_regions;
+
+ /** @rsvd: MBZ */
+ __u32 rsvd[3];
+
+ /** @regions: Info about each supported region */
+ struct drm_i915_memory_region_info regions[];
+};
+
+/**
+ * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
+ * extension support using struct i915_user_extension.
+ *
+ * Note that in the future we want to have our buffer flags here, at least for
+ * the stuff that is immutable. Previously we would have two ioctls, one to
+ * create the object with gem_create, and another to apply various parameters;
+ * however, this creates some ambiguity for the params which are considered
+ * immutable. Also, in general we're phasing out the various SET/GET ioctls.
+ */
+struct drm_i915_gem_create_ext {
+ /**
+ * @size: Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ *
+ * Note that for some devices we might have further minimum
+ * page-size restrictions (larger than 4K), like for device local-memory.
+ * However, in general the final size here should always reflect any
+ * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
+ * extension to place the object in device local-memory.
+ */
+ __u64 size;
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+ /** @flags: MBZ */
+ __u32 flags;
+ /**
+ * @extensions: The chain of extensions to apply to this object.
+ *
+ * This will be useful in the future when we need to support several
+ * different extensions, and we need to apply more than one when
+ * creating the object. See struct i915_user_extension.
+ *
+ * If we don't supply any extensions then we get the same old gem_create
+ * behaviour.
+ *
+ * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
+ * struct drm_i915_gem_create_ext_memory_regions.
+ */
+#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+ __u64 extensions;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_memory_regions - The
+ * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
+ *
+ * Set the object with the desired set of placements/regions in priority
+ * order. Each entry must be unique and supported by the device.
+ *
+ * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
+ * an equivalent layout of class:instance pair encodings. See struct
+ * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
+ * query the supported regions for a device.
+ *
+ * As an example, on discrete devices, if we wish to set the placement as
+ * device local-memory we can do something like:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_memory_class_instance region_lmem = {
+ * .memory_class = I915_MEMORY_CLASS_DEVICE,
+ * .memory_instance = 0,
+ * };
+ * struct drm_i915_gem_create_ext_memory_regions regions = {
+ * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
+ * .regions = (uintptr_t)&region_lmem,
+ * .num_regions = 1,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = 16 * PAGE_SIZE,
+ * .extensions = (uintptr_t)&regions,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ *
+ * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
+ * along with the final object size in &drm_i915_gem_create_ext.size, which
+ * should account for any rounding up, if required.
+ */
+struct drm_i915_gem_create_ext_memory_regions {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @num_regions: Number of elements in the @regions array. */
+ __u32 num_regions;
+ /**
+ * @regions: The regions/placements array.
+ *
+ * An array of struct drm_i915_gem_memory_class_instance.
+ */
+ __u64 regions;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index ec19db1eead8..061e700dd06c 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -171,6 +171,7 @@ enum drm_panfrost_param {
DRM_PANFROST_PARAM_JS_FEATURES15,
DRM_PANFROST_PARAM_NR_CORE_GROUPS,
DRM_PANFROST_PARAM_THREAD_TLS_ALLOC,
+ DRM_PANFROST_PARAM_AFBC_FEATURES,
};
struct drm_panfrost_get_param {
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index bf8143505c49..f92880a15645 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -464,7 +464,7 @@ struct perf_event_attr {
/*
* User provided data if sigtrap=1, passed back to user via
- * siginfo_t::si_perf, e.g. to permit user to identify the event.
+ * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
*/
__u64 sig_data;
};
diff --git a/include/uapi/linux/signalfd.h b/include/uapi/linux/signalfd.h
index 7e333042c7e3..83429a05b698 100644
--- a/include/uapi/linux/signalfd.h
+++ b/include/uapi/linux/signalfd.h
@@ -39,8 +39,6 @@ struct signalfd_siginfo {
__s32 ssi_syscall;
__u64 ssi_call_addr;
__u32 ssi_arch;
- __u32 __pad3;
- __u64 ssi_perf;
/*
 * Pad structure to 128 bytes. Remember to update the
@@ -51,7 +49,7 @@ struct signalfd_siginfo {
* comes out of a read(2) and we really don't want to have
* a compat on read(2).
*/
- __u8 __pad[16];
+ __u8 __pad[28];
};
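The two dropped fields account for 12 bytes (4 for __pad3 plus 8 for ssi_perf), so the trailing pad grows from 16 to 28 bytes and the structure stays at 128. A userspace compile-time check of that invariant, assuming a libc that exposes the header:

  #include <sys/signalfd.h>

  _Static_assert(sizeof(struct signalfd_siginfo) == 128,
                 "signalfd_siginfo must stay 128 bytes across the ABI");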
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index d3e017b5f0db..6d2d34c9f375 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -239,6 +239,39 @@ enum gaudi_engine_id {
GAUDI_ENGINE_ID_SIZE
};
+/*
+ * ASIC-specific PLL index
+ *
+ * Used to retrieve frequency info of different IPs via
+ * HL_INFO_PLL_FREQUENCY under HL_IOCTL_INFO IOCTL. The enums need to be
+ * used as an index in struct hl_pll_frequency_info.
+ */
+
+enum hl_goya_pll_index {
+ HL_GOYA_CPU_PLL = 0,
+ HL_GOYA_IC_PLL,
+ HL_GOYA_MC_PLL,
+ HL_GOYA_MME_PLL,
+ HL_GOYA_PCI_PLL,
+ HL_GOYA_EMMC_PLL,
+ HL_GOYA_TPC_PLL,
+ HL_GOYA_PLL_MAX
+};
+
+enum hl_gaudi_pll_index {
+ HL_GAUDI_CPU_PLL = 0,
+ HL_GAUDI_PCI_PLL,
+ HL_GAUDI_SRAM_PLL,
+ HL_GAUDI_HBM_PLL,
+ HL_GAUDI_NIC_PLL,
+ HL_GAUDI_DMA_PLL,
+ HL_GAUDI_MESH_PLL,
+ HL_GAUDI_MME_PLL,
+ HL_GAUDI_TPC_PLL,
+ HL_GAUDI_IF_PLL,
+ HL_GAUDI_PLL_MAX
+};
+
enum hl_device_status {
HL_DEVICE_STATUS_OPERATIONAL,
HL_DEVICE_STATUS_IN_RESET,
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 8031464ed4ae..4e4e61111500 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1004,12 +1004,14 @@ static inline void __pipelined_op(struct wake_q_head *wake_q,
struct mqueue_inode_info *info,
struct ext_wait_queue *this)
{
+ struct task_struct *task;
+
list_del(&this->list);
- get_task_struct(this->task);
+ task = get_task_struct(this->task);
/* see MQ_BARRIER for purpose/pairing */
smp_store_release(&this->state, STATE_READY);
- wake_q_add_safe(wake_q, this->task);
+ wake_q_add_safe(wake_q, task);
}
/* pipelined_send() - send a message directly to the task waiting in
diff --git a/ipc/msg.c b/ipc/msg.c
index acd1bc7af55a..6e6c8e0c9380 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -251,11 +251,13 @@ static void expunge_all(struct msg_queue *msq, int res,
struct msg_receiver *msr, *t;
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
- get_task_struct(msr->r_tsk);
+ struct task_struct *r_tsk;
+
+ r_tsk = get_task_struct(msr->r_tsk);
/* see MSG_BARRIER for purpose/pairing */
smp_store_release(&msr->r_msg, ERR_PTR(res));
- wake_q_add_safe(wake_q, msr->r_tsk);
+ wake_q_add_safe(wake_q, r_tsk);
}
}
diff --git a/ipc/sem.c b/ipc/sem.c
index e0ec239680cb..bf534c74293e 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -784,12 +784,14 @@ would_block:
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
struct wake_q_head *wake_q)
{
- get_task_struct(q->sleeper);
+ struct task_struct *sleeper;
+
+ sleeper = get_task_struct(q->sleeper);
/* see SEM_BARRIER_2 for purpose/pairing */
smp_store_release(&q->status, error);
- wake_q_add_safe(wake_q, q->sleeper);
+ wake_q_add_safe(wake_q, sleeper);
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
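The three ipc hunks above (mqueue, msg, sem) share one shape: pin the task before the release store, because publishing readiness lets the waiter wake and free the structure the task pointer lives in. A hedged sketch of the pattern (struct my_waiter and MY_STATE_READY are placeholders):

  static inline void wake_prepared(struct wake_q_head *wake_q,
                                   struct my_waiter *w)
  {
          struct task_struct *task;

          /* Pin the task while 'w' is still guaranteed to be valid. */
          task = get_task_struct(w->task);

          /* After this store the woken side may free 'w' at any time... */
          smp_store_release(&w->state, MY_STATE_READY);

          /* ...so from here on only the pinned 'task' may be used. */
          wake_q_add_safe(wake_q, task);
  }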
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2e947a485898..6fee4a7e88d7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6389,8 +6389,6 @@ void perf_event_wakeup(struct perf_event *event)
static void perf_sigtrap(struct perf_event *event)
{
- struct kernel_siginfo info;
-
/*
* We'd expect this to only occur if the irq_work is delayed and either
* ctx->task or current has changed in the meantime. This can be the
@@ -6405,13 +6403,8 @@ static void perf_sigtrap(struct perf_event *event)
if (current->flags & PF_EXITING)
return;
- clear_siginfo(&info);
- info.si_signo = SIGTRAP;
- info.si_code = TRAP_PERF;
- info.si_errno = event->attr.type;
- info.si_perf = event->attr.sig_data;
- info.si_addr = (void __user *)event->pending_addr;
- force_sig_info(&info);
+ force_sig_perf((void __user *)event->pending_addr,
+ event->attr.type, event->attr.sig_data);
}
static void perf_pending_event_disable(struct perf_event *event)
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index c1dd02f3be8b..e65de172ccf7 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -266,9 +266,10 @@ static const struct file_operations debugfs_ops =
.release = single_release
};
-static void __init kcsan_debugfs_init(void)
+static int __init kcsan_debugfs_init(void)
{
debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
+ return 0;
}
late_initcall(kcsan_debugfs_init);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 48d736aa03b2..7641bd407239 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5736,7 +5736,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- trace_lock_acquired(lock, ip);
+ trace_lock_contended(lock, ip);
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
@@ -5754,7 +5754,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- trace_lock_contended(lock, ip);
+ trace_lock_acquired(lock, ip);
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index a7276aaf2abc..db9301591e3f 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
task->blocked_on = waiter;
}
-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
task->blocked_on = NULL;
- list_del_init(&waiter->list);
+ INIT_LIST_HEAD(&waiter->list);
waiter->task = NULL;
}
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 1edd3f45a4ec..53e631e1d76d 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cb6b112ce155..013e1b08a1bf 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
* Add @waiter to a given location in the lock wait_list and set the
* FLAG_WAITERS flag if it's the first waiter.
*/
-static void __sched
+static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}
+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ list_del(&waiter->list);
+ if (likely(list_empty(&lock->wait_list)))
+ __mutex_clear_flag(lock, MUTEX_FLAGS);
+
+ debug_mutex_remove_waiter(lock, waiter, current);
+}
+
/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -1061,9 +1071,7 @@ acquired:
__ww_mutex_check_waiters(lock, ww_ctx);
}
- mutex_remove_waiter(lock, &waiter, current);
- if (likely(list_empty(&lock->wait_list)))
- __mutex_clear_flag(lock, MUTEX_FLAGS);
+ __mutex_remove_waiter(lock, &waiter);
debug_mutex_free_waiter(&waiter);
@@ -1080,7 +1088,7 @@ skip_wait:
err:
__set_current_state(TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current);
+ __mutex_remove_waiter(lock, &waiter);
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 1c2287d3fa71..f0c710b1d192 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -10,12 +10,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/
-#define mutex_remove_waiter(lock, waiter, task) \
- __list_del((waiter)->list.prev, (waiter)->list.next)
-
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
#define debug_mutex_unlock(lock) do { } while (0)
#define debug_mutex_init(lock, name, key) do { } while (0)
diff --git a/kernel/module.c b/kernel/module.c
index b5dd92e35b02..7e78dfabca97 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2401,6 +2401,15 @@ static long get_offset(struct module *mod, unsigned int *size,
return ret;
}
+static bool module_init_layout_section(const char *sname)
+{
+#ifndef CONFIG_MODULE_UNLOAD
+ if (module_exit_section(sname))
+ return true;
+#endif
+ return module_init_section(sname);
+}
+
/*
* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
* might -- code, read-only data, read-write data, small data. Tally
@@ -2435,7 +2444,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || module_init_section(sname))
+ || module_init_layout_section(sname))
continue;
s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
pr_debug("\t%s\n", sname);
@@ -2468,7 +2477,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || !module_init_section(sname))
+ || !module_init_layout_section(sname))
continue;
s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
| INIT_OFFSET_MASK);
@@ -2807,11 +2816,7 @@ void * __weak module_alloc(unsigned long size)
bool __weak module_init_section(const char *name)
{
-#ifndef CONFIG_MODULE_UNLOAD
- return strstarts(name, ".init") || module_exit_section(name);
-#else
return strstarts(name, ".init");
-#endif
}
bool __weak module_exit_section(const char *name)
diff --git a/kernel/signal.c b/kernel/signal.c
index 66e88649cf74..f7c6ffcbd044 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1236,6 +1236,7 @@ static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
case SIL_TIMER:
case SIL_POLL:
case SIL_FAULT:
+ case SIL_FAULT_TRAPNO:
case SIL_FAULT_MCEERR:
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
@@ -1804,6 +1805,21 @@ int force_sig_pkuerr(void __user *addr, u32 pkey)
}
#endif
+int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
+{
+ struct kernel_siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_PERF;
+ info.si_addr = addr;
+ info.si_perf_data = sig_data;
+ info.si_perf_type = type;
+
+ return force_sig_info(&info);
+}
+
/* For the crazy architectures that include trap information in
* the errno field, instead of an actual errno value.
*/
@@ -2564,6 +2580,7 @@ static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
case SIL_FAULT:
+ case SIL_FAULT_TRAPNO:
case SIL_FAULT_MCEERR:
case SIL_FAULT_BNDERR:
case SIL_FAULT_PKUERR:
@@ -3251,6 +3268,10 @@ enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
#endif
else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
layout = SIL_PERF_EVENT;
+#ifdef __ARCH_SI_TRAPNO
+ else if (layout == SIL_FAULT)
+ layout = SIL_FAULT_TRAPNO;
+#endif
}
else if (si_code <= NSIGPOLL)
layout = SIL_POLL;
@@ -3354,35 +3375,28 @@ void copy_siginfo_to_external32(struct compat_siginfo *to,
break;
case SIL_FAULT:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
+ break;
+ case SIL_FAULT_TRAPNO:
+ to->si_addr = ptr_to_compat(from->si_addr);
to->si_trapno = from->si_trapno;
-#endif
break;
case SIL_FAULT_MCEERR:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_addr_lsb = from->si_addr_lsb;
break;
case SIL_FAULT_BNDERR:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_lower = ptr_to_compat(from->si_lower);
to->si_upper = ptr_to_compat(from->si_upper);
break;
case SIL_FAULT_PKUERR:
to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_pkey = from->si_pkey;
break;
case SIL_PERF_EVENT:
to->si_addr = ptr_to_compat(from->si_addr);
- to->si_perf = from->si_perf;
+ to->si_perf_data = from->si_perf_data;
+ to->si_perf_type = from->si_perf_type;
break;
case SIL_CHLD:
to->si_pid = from->si_pid;
@@ -3438,35 +3452,28 @@ static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
break;
case SIL_FAULT:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
+ break;
+ case SIL_FAULT_TRAPNO:
+ to->si_addr = compat_ptr(from->si_addr);
to->si_trapno = from->si_trapno;
-#endif
break;
case SIL_FAULT_MCEERR:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_addr_lsb = from->si_addr_lsb;
break;
case SIL_FAULT_BNDERR:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_lower = compat_ptr(from->si_lower);
to->si_upper = compat_ptr(from->si_upper);
break;
case SIL_FAULT_PKUERR:
to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from->si_trapno;
-#endif
to->si_pkey = from->si_pkey;
break;
case SIL_PERF_EVENT:
to->si_addr = compat_ptr(from->si_addr);
- to->si_perf = from->si_perf;
+ to->si_perf_data = from->si_perf_data;
+ to->si_perf_type = from->si_perf_type;
break;
case SIL_CHLD:
to->si_pid = from->si_pid;
@@ -4644,11 +4651,13 @@ static inline void siginfo_buildtime_checks(void)
/* sigfault */
CHECK_OFFSET(si_addr);
+ CHECK_OFFSET(si_trapno);
CHECK_OFFSET(si_addr_lsb);
CHECK_OFFSET(si_lower);
CHECK_OFFSET(si_upper);
CHECK_OFFSET(si_pkey);
- CHECK_OFFSET(si_perf);
+ CHECK_OFFSET(si_perf_data);
+ CHECK_OFFSET(si_perf_type);
/* sigpoll */
CHECK_OFFSET(si_band);
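A hedged userspace sketch of consuming the split si_perf_* fields from a SIGTRAP handler (handle_event() is hypothetical; assumes a libc that exposes the updated uapi header):

  #include <signal.h>

  extern void handle_event(unsigned int type, unsigned long data);

  static void sig_handler(int sig, siginfo_t *info, void *ucontext)
  {
          if (sig == SIGTRAP && info->si_code == TRAP_PERF)
                  /*
                   * si_perf_type carries perf_event_attr.type and
                   * si_perf_data carries perf_event_attr.sig_data.
                   */
                  handle_event(info->si_perf_type, info->si_perf_data);
  }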
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7c397907d0e9..92d3bcc5a5e0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -302,10 +302,10 @@ void touch_softlockup_watchdog_sync(void)
__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
-static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
+static int is_softlockup(unsigned long touch_ts,
+ unsigned long period_ts,
+ unsigned long now)
{
- unsigned long now = get_timestamp();
-
if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
/* Warn about unreasonable delays. */
if (time_after(now, period_ts + get_softlockup_thresh()))
@@ -353,8 +353,7 @@ static int softlockup_fn(void *data)
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
- unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
- unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
+ unsigned long touch_ts, period_ts, now;
struct pt_regs *regs = get_irq_regs();
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
@@ -377,11 +376,22 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
/*
+ * Read the current timestamp first. It might become invalid anytime
+ * when a virtual machine is stopped by the host or when the watchdog
+ * is touched from NMI.
+ */
+ now = get_timestamp();
+ /*
* If a virtual machine is stopped by the host it can look to
- * the watchdog like a soft lockup. Check to see if the host
- * stopped the vm before we process the timestamps.
+ * the watchdog like a soft lockup. This function touches the watchdog.
*/
kvm_check_and_clear_guest_paused();
+ /*
+ * The stored timestamp is comparable with @now only when not touched.
+ * It might get touched anytime from NMI. Make sure that is_softlockup()
+ * uses the same (valid) value.
+ */
+ period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));
/* Reset the interval when touched by known problematic code. */
if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
@@ -398,13 +408,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
- /* check for a softlockup
- * This is done by making sure a high priority task is
- * being scheduled. The task touches the watchdog to
- * indicate it is getting cpu time. If it hasn't then
- * this is a good indication some task is hogging the cpu
- */
- duration = is_softlockup(touch_ts, period_ts);
+ /* Check for a softlockup. */
+ touch_ts = __this_cpu_read(watchdog_touch_ts);
+ duration = is_softlockup(touch_ts, period_ts, now);
if (unlikely(duration)) {
/*
* Prevent multiple soft-lockup reports if one cpu is already
diff --git a/lib/Makefile b/lib/Makefile
index e11cfc18b6c0..2cc359ec1fdd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -348,6 +348,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
+CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 921d0a654243..641767b0dce2 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -586,13 +586,11 @@ static int remaining(int wrote)
return 0;
}
-static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+static char *__dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
{
int pos_after_tid;
int pos = 0;
- *buf = '\0';
-
if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
if (in_interrupt())
pos += snprintf(buf + pos, remaining(pos), "<intr> ");
@@ -618,11 +616,18 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
return buf;
}
+static inline char *dynamic_emit_prefix(struct _ddebug *desc, char *buf)
+{
+ if (unlikely(desc->flags & _DPRINTK_FLAGS_INCL_ANY))
+ return __dynamic_emit_prefix(desc, buf);
+ return buf;
+}
+
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
{
va_list args;
struct va_format vaf;
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
BUG_ON(!descriptor);
BUG_ON(!fmt);
@@ -655,7 +660,7 @@ void __dynamic_dev_dbg(struct _ddebug *descriptor,
if (!dev) {
printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
} else {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV",
dynamic_emit_prefix(descriptor, buf),
@@ -684,7 +689,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (dev && dev->dev.parent) {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent,
"%s%s %s %s%s: %pV",
@@ -720,7 +725,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
vaf.va = &args;
if (ibdev && ibdev->dev.parent) {
- char buf[PREFIX_SIZE];
+ char buf[PREFIX_SIZE] = "";
dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent,
"%s%s %s %s: %pV",
@@ -915,7 +920,6 @@ static const struct seq_operations ddebug_proc_seqops = {
static int ddebug_proc_open(struct inode *inode, struct file *file)
{
- vpr_info("called\n");
return seq_open_private(file, &ddebug_proc_seqops,
sizeof(struct ddebug_iter));
}
diff --git a/mm/gup.c b/mm/gup.c
index 0697134b6a12..3ded6a5f26b2 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1593,10 +1593,6 @@ struct page *get_dump_page(unsigned long addr)
FOLL_FORCE | FOLL_DUMP | FOLL_GET);
if (locked)
mmap_read_unlock(mm);
-
- if (ret == 1 && is_page_poisoned(page))
- return NULL;
-
return (ret == 1) ? page : NULL;
}
#endif /* CONFIG_ELF_CORE */
diff --git a/mm/internal.h b/mm/internal.h
index 54bd0dc2c23c..2f1182948aa6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -96,26 +96,6 @@ static inline void set_page_refcounted(struct page *page)
set_page_count(page, 1);
}
-/*
- * When kernel touch the user page, the user page may be have been marked
- * poison but still mapped in user space, if without this page, the kernel
- * can guarantee the data integrity and operation success, the kernel is
- * better to check the posion status and avoid touching it, be good not to
- * panic, coredump for process fatal signal is a sample case matching this
- * scenario. Or if kernel can't guarantee the data integrity, it's better
- * not to call this function, let kernel touch the poison page and get to
- * panic.
- */
-static inline bool is_page_poisoned(struct page *page)
-{
- if (PageHWPoison(page))
- return true;
- else if (PageHuge(page) && PageHWPoison(compound_head(page)))
- return true;
-
- return false;
-}
-
extern unsigned long highest_memmap_pfn;
/*
diff --git a/mm/shuffle.h b/mm/shuffle.h
index 71b784f0b7c3..cec62984f7d3 100644
--- a/mm/shuffle.h
+++ b/mm/shuffle.h
@@ -10,7 +10,7 @@
DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
extern void __shuffle_free_memory(pg_data_t *pgdat);
extern bool shuffle_pick_tail(void);
-static inline void shuffle_free_memory(pg_data_t *pgdat)
+static inline void __meminit shuffle_free_memory(pg_data_t *pgdat)
{
if (!static_branch_unlikely(&page_alloc_shuffle_key))
return;
@@ -18,7 +18,7 @@ static inline void shuffle_free_memory(pg_data_t *pgdat)
}
extern void __shuffle_zone(struct zone *z);
-static inline void shuffle_zone(struct zone *z)
+static inline void __meminit shuffle_zone(struct zone *z)
{
if (!static_branch_unlikely(&page_alloc_shuffle_key))
return;
diff --git a/mm/slub.c b/mm/slub.c
index 438fa8d4c970..3f96e099817a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -301,6 +301,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
if (!debug_pagealloc_enabled_static())
return get_freepointer(s, object);
+ object = kasan_reset_tag(object);
freepointer_addr = (unsigned long)object + s->offset;
copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
return freelist_ptr(s, p, freepointer_addr);
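
`get_freepointer_safe()` reads the freelist pointer via `copy_from_kernel_nofault()`, which wants the canonical address; with software tag-based KASAN the object pointer carries a tag in its top byte, so the tag must be stripped before the `+ s->offset` arithmetic. A 64-bit userspace illustration of the idea (the mask mirrors arm64 top-byte-ignore and is an assumption here, not a kernel API):

#include <stdint.h>
#include <stdio.h>

/* Illustration only: SW tag-based KASAN keeps a tag in bits 56-63. */
static inline void *reset_tag(void *ptr)
{
	return (void *)((uintptr_t)ptr & ~((uintptr_t)0xff << 56));
}

int main(void)
{
	void *tagged = (void *)(((uintptr_t)0xab << 56) | 0x1000);

	/* The raw, nofault read needs the untagged, canonical address. */
	printf("tagged   %p\n", tagged);
	printf("untagged %p\n", reset_tag(tagged));
	return 0;
}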
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index e14b3820c6a8..63a73e164d55 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -360,38 +360,38 @@ out:
* If a reservation for the page existed in the reservation
* map of a private mapping, the map was modified to indicate
* the reservation was consumed when the page was allocated.
- * We clear the PagePrivate flag now so that the global
+ * We clear the HPageRestoreReserve flag now so that the global
* reserve count will not be incremented in free_huge_page.
* The reservation map will still indicate the reservation
* was consumed and possibly prevent later page allocation.
* This is better than leaking a global reservation. If no
- * reservation existed, it is still safe to clear PagePrivate
- * as no adjustments to reservation counts were made during
- * allocation.
+ * reservation existed, it is still safe to clear
+ * HPageRestoreReserve as no adjustments to reservation counts
+ * were made during allocation.
*
* The reservation map for shared mappings indicates which
* pages have reservations. When a huge page is allocated
* for an address with a reservation, no change is made to
- * the reserve map. In this case PagePrivate will be set
- * to indicate that the global reservation count should be
+ * the reserve map. In this case HPageRestoreReserve will be
+ * set to indicate that the global reservation count should be
* incremented when the page is freed. This is the desired
* behavior. However, when a huge page is allocated for an
* address without a reservation a reservation entry is added
- * to the reservation map, and PagePrivate will not be set.
- * When the page is freed, the global reserve count will NOT
- * be incremented and it will appear as though we have leaked
- * reserved page. In this case, set PagePrivate so that the
- * global reserve count will be incremented to match the
- * reservation map entry which was created.
+ * to the reservation map, and HPageRestoreReserve will not be
+ * set. When the page is freed, the global reserve count will
+ * NOT be incremented and it will appear as though we have
+ * leaked reserved page. In this case, set HPageRestoreReserve
+ * so that the global reserve count will be incremented to
+ * match the reservation map entry which was created.
*
* Note that vm_alloc_shared is based on the flags of the vma
* for which the page was originally allocated. dst_vma could
* be different or NULL on error.
*/
if (vm_alloc_shared)
- SetPagePrivate(page);
+ SetHPageRestoreReserve(page);
else
- ClearPagePrivate(page);
+ ClearHPageRestoreReserve(page);
put_page(page);
}
BUG_ON(copied < 0);
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 9c6e95882553..94b31f2551bc 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -402,6 +402,14 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
return NULL;
}
+ smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
+ WQ_MEM_RECLAIM, name);
+ if (!smcd->event_wq) {
+ kfree(smcd->conn);
+ kfree(smcd);
+ return NULL;
+ }
+
smcd->dev.parent = parent;
smcd->dev.release = smcd_release;
device_initialize(&smcd->dev);
@@ -415,13 +423,6 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
INIT_LIST_HEAD(&smcd->vlan);
INIT_LIST_HEAD(&smcd->lgr_list);
init_waitqueue_head(&smcd->lgrs_deleted);
- smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
- WQ_MEM_RECLAIM, name);
- if (!smcd->event_wq) {
- kfree(smcd->conn);
- kfree(smcd);
- return NULL;
- }
return smcd;
}
EXPORT_SYMBOL_GPL(smcd_alloc_dev);
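
Moving the workqueue allocation earlier is about unwind rules: before `device_initialize()` a failed allocation may free the object with plain `kfree()`, but afterwards the refcount is live and errors must go through `put_device()` so the release callback does the freeing. A hedged kernel-style sketch of the ordering (the driver is invented; the driver-core APIs are real):

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Sketch of the init-ordering rule, not the SMC driver itself. */
struct foo {
	struct device dev;
	struct workqueue_struct *wq;
};

static struct foo *foo_alloc(struct device *parent, const char *name)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	/* Before device_initialize(), failures may unwind with kfree(). */
	f->wq = alloc_ordered_workqueue("foo_wq-%s", WQ_MEM_RECLAIM, name);
	if (!f->wq) {
		kfree(f);
		return NULL;
	}

	/* From here on, errors must use put_device(&f->dev) instead, so
	 * that the release() callback performs the freeing. */
	f->dev.parent = parent;
	device_initialize(&f->dev);
	return f;
}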
diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc
index f6d543725f1e..b2483149bbe5 100755
--- a/scripts/dummy-tools/gcc
+++ b/scripts/dummy-tools/gcc
@@ -76,7 +76,11 @@ fi
if arg_contain -S "$@"; then
# For scripts/gcc-x86-*-has-stack-protector.sh
if arg_contain -fstack-protector "$@"; then
- echo "%gs"
+ if arg_contain -mstack-protector-guard-reg=fs "$@"; then
+ echo "%fs"
+ else
+ echo "%gs"
+ fi
exit 0
fi
diff --git a/scripts/jobserver-exec b/scripts/jobserver-exec
index 48d141e3ec56..8762887a970c 100755
--- a/scripts/jobserver-exec
+++ b/scripts/jobserver-exec
@@ -10,7 +10,7 @@ from __future__ import print_function
import os, sys, errno
import subprocess
-# Extract and prepare jobserver file descriptors from envirnoment.
+# Extract and prepare jobserver file descriptors from environment.
claim = 0
jobs = b""
try:
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 25778765cbfe..9897bd26a438 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -38,7 +38,7 @@ config SND_OXFW
* Mackie(Loud) Onyx 1640i (former model)
* Mackie(Loud) Onyx Satellite
* Mackie(Loud) Tapco Link.Firewire
- * Mackie(Loud) d.2 pro/d.4 pro
+ * Mackie(Loud) d.4 pro
* Mackie(Loud) U.420/U.420d
* TASCAM FireOne
* Stanton Controllers & Systems 1 Deck/Mixer
@@ -84,7 +84,7 @@ config SND_BEBOB
* PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
* BridgeCo RDAudio1/Audio5
* Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
- * Mackie d.2 (FireWire Option)
+ * Mackie d.2 (FireWire Option) and d.2 Pro
* Stanton FinalScratch 2 (ScratchAmp)
* Tascam IF-FW/DM
* Behringer XENIX UFX 1204/1604
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
index 26e7cb555d3c..aa53c13b89d3 100644
--- a/sound/firewire/amdtp-stream-trace.h
+++ b/sound/firewire/amdtp-stream-trace.h
@@ -14,8 +14,8 @@
#include <linux/tracepoint.h>
TRACE_EVENT(amdtp_packet,
- TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int index),
- TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, index),
+ TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int packet_index, unsigned int index),
+ TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, packet_index, index),
TP_STRUCT__entry(
__field(unsigned int, second)
__field(unsigned int, cycle)
@@ -48,7 +48,7 @@ TRACE_EVENT(amdtp_packet,
__entry->payload_quadlets = payload_length / sizeof(__be32);
__entry->data_blocks = data_blocks;
__entry->data_block_counter = data_block_counter,
- __entry->packet_index = s->packet_index;
+ __entry->packet_index = packet_index;
__entry->irq = !!in_interrupt();
__entry->index = index;
),
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 4e2f2bb7879f..e0faa6601966 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -526,7 +526,7 @@ static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
}
trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
- data_block_counter, index);
+ data_block_counter, s->packet_index, index);
}
static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
@@ -630,21 +630,27 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
unsigned int *payload_length,
unsigned int *data_blocks,
unsigned int *data_block_counter,
- unsigned int *syt, unsigned int index)
+ unsigned int *syt, unsigned int packet_index, unsigned int index)
{
const __be32 *cip_header;
+ unsigned int cip_header_size;
int err;
*payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
- if (*payload_length > s->ctx_data.tx.ctx_header_size +
- s->ctx_data.tx.max_ctx_payload_length) {
+
+ if (!(s->flags & CIP_NO_HEADER))
+ cip_header_size = 8;
+ else
+ cip_header_size = 0;
+
+ if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
dev_err(&s->unit->device,
"Detect jumbo payload: %04x %04x\n",
- *payload_length, s->ctx_data.tx.max_ctx_payload_length);
+ *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
return -EIO;
}
- if (!(s->flags & CIP_NO_HEADER)) {
+ if (cip_header_size > 0) {
cip_header = ctx_header + 2;
err = check_cip_header(s, cip_header, *payload_length,
data_blocks, data_block_counter, syt);
@@ -662,7 +668,7 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
}
trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
- *data_block_counter, index);
+ *data_block_counter, packet_index, index);
return err;
}
@@ -701,12 +707,13 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
unsigned int packets)
{
unsigned int dbc = s->data_block_counter;
+ unsigned int packet_index = s->packet_index;
+ unsigned int queue_size = s->queue_size;
int i;
int err;
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % s->queue_size;
unsigned int cycle;
unsigned int payload_length;
unsigned int data_blocks;
@@ -715,7 +722,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
cycle = compute_cycle_count(ctx_header[1]);
err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
- &data_blocks, &dbc, &syt, i);
+ &data_blocks, &dbc, &syt, packet_index, i);
if (err < 0)
return err;
@@ -723,13 +730,15 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
desc->syt = syt;
desc->data_blocks = data_blocks;
desc->data_block_counter = dbc;
- desc->ctx_payload = s->buffer.packets[index].buffer;
+ desc->ctx_payload = s->buffer.packets[packet_index].buffer;
if (!(s->flags & CIP_DBC_IS_END_EVENT))
dbc = (dbc + desc->data_blocks) & 0xff;
ctx_header +=
s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
+
+ packet_index = (packet_index + 1) % queue_size;
}
s->data_block_counter = dbc;
@@ -1065,23 +1074,22 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
s->data_block_counter = 0;
}
- /* initialize packet buffer */
+ // initialize packet buffer.
+ max_ctx_payload_size = amdtp_stream_get_max_payload(s);
if (s->direction == AMDTP_IN_STREAM) {
dir = DMA_FROM_DEVICE;
type = FW_ISO_CONTEXT_RECEIVE;
- if (!(s->flags & CIP_NO_HEADER))
+ if (!(s->flags & CIP_NO_HEADER)) {
+ max_ctx_payload_size -= 8;
ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
- else
+ } else {
ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
-
- max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
- ctx_header_size;
+ }
} else {
dir = DMA_TO_DEVICE;
type = FW_ISO_CONTEXT_TRANSMIT;
ctx_header_size = 0; // No effect for IT context.
- max_ctx_payload_size = amdtp_stream_get_max_payload(s);
if (!(s->flags & CIP_NO_HEADER))
max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
}
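
The payload-size rework hinges on what actually travels in the isochronous payload: the 8-byte ISO packet header is delivered in the context header and never counts against the payload, while the two CIP quadlets do. The old code subtracted the full context-header size (16 bytes on CIP streams) from `max_ctx_payload_size`, shrinking the budget by 8 bytes too many. Worked numbers under those assumptions:

#include <stdio.h>

int main(void)
{
	/* Assumed sizes, following the surrounding diff. */
	unsigned int max_payload = 1024; /* amdtp_stream_get_max_payload() */
	unsigned int iso_hdr = 8;        /* lives in the context header only */
	unsigned int cip_hdr = 8;        /* two quadlets inside the payload */

	unsigned int old_budget = max_payload - (iso_hdr + cip_hdr); /* 1008 */
	unsigned int new_budget = max_payload - cip_hdr;             /* 1016 */

	printf("old budget %u, new budget %u (%u bytes reclaimed)\n",
	       old_budget, new_budget, new_budget - old_budget);
	return 0;
}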
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 2c8e3392a490..daeecfa8b9aa 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -387,7 +387,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
/* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
- /* Mackie, d.2 (Firewire Option) */
+ // Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
/* Stanton, ScratchAmp */
SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
index 0916864511d5..27c13b9cc9ef 100644
--- a/sound/firewire/dice/dice-alesis.c
+++ b/sound/firewire/dice/dice-alesis.c
@@ -16,7 +16,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
static const unsigned int
alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
{10, 10, 4}, /* Tx0 = Analog + S/PDIF. */
- {16, 8, 0}, /* Tx1 = ADAT1 + ADAT2. */
+ {16, 4, 0}, /* Tx1 = ADAT1 + ADAT2 (available at low rate). */
};
int snd_dice_detect_alesis_formats(struct snd_dice *dice)
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index af8a90ee40f3..a69ca1111b03 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -218,7 +218,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (frames_per_period > 0) {
// For double_pcm_frame quirk.
- if (rate > 96000) {
+ if (rate > 96000 && !dice->disable_double_pcm_frames) {
frames_per_period *= 2;
frames_per_buffer *= 2;
}
@@ -273,7 +273,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
mutex_lock(&dice->mutex);
// For double_pcm_frame quirk.
- if (rate > 96000) {
+ if (rate > 96000 && !dice->disable_double_pcm_frames) {
events_per_period /= 2;
events_per_buffer /= 2;
}
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index 1a14c083e8ce..c4dfe76500c2 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -181,7 +181,7 @@ static int keep_resources(struct snd_dice *dice, struct amdtp_stream *stream,
// as 'Dual Wire'.
// For this quirk, blocking mode is required and PCM buffer size should
// be aligned to SYT_INTERVAL.
- double_pcm_frames = rate > 96000;
+ double_pcm_frames = (rate > 96000 && !dice->disable_double_pcm_frames);
if (double_pcm_frames) {
rate /= 2;
pcm_chs *= 2;
diff --git a/sound/firewire/dice/dice-tcelectronic.c b/sound/firewire/dice/dice-tcelectronic.c
index a8875d24ba2a..43a3bcb15b3d 100644
--- a/sound/firewire/dice/dice-tcelectronic.c
+++ b/sound/firewire/dice/dice-tcelectronic.c
@@ -38,8 +38,8 @@ static const struct dice_tc_spec konnekt_24d = {
};
static const struct dice_tc_spec konnekt_live = {
- .tx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
- .rx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
+ .tx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
+ .rx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
.has_midi = true,
};
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 107a81691f0e..239d164b0eea 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -21,6 +21,7 @@ MODULE_LICENSE("GPL v2");
#define OUI_SSL 0x0050c2 // Actually ID reserved by IEEE.
#define OUI_PRESONUS 0x000a92
#define OUI_HARMAN 0x000fd7
+#define OUI_AVID 0x00a07e
#define DICE_CATEGORY_ID 0x04
#define WEISS_CATEGORY_ID 0x00
@@ -222,6 +223,14 @@ static int dice_probe(struct fw_unit *unit,
(snd_dice_detect_formats_t)entry->driver_data;
}
+ // The models below are compliant with IEC 61883-1/6 and have no quirk at high sampling
+ // transfer frequency.
+ // * Avid M-Box 3 Pro
+ // * M-Audio Profire 610
+ // * M-Audio Profire 2626
+ if (entry->vendor_id == OUI_MAUDIO || entry->vendor_id == OUI_AVID)
+ dice->disable_double_pcm_frames = true;
+
spin_lock_init(&dice->lock);
mutex_init(&dice->mutex);
init_completion(&dice->clock_accepted);
@@ -278,7 +287,22 @@ static void dice_bus_reset(struct fw_unit *unit)
#define DICE_INTERFACE 0x000001
+#define DICE_DEV_ENTRY_TYPICAL(vendor, model, data) \
+ { \
+ .match_flags = IEEE1394_MATCH_VENDOR_ID | \
+ IEEE1394_MATCH_MODEL_ID | \
+ IEEE1394_MATCH_SPECIFIER_ID | \
+ IEEE1394_MATCH_VERSION, \
+ .vendor_id = (vendor), \
+ .model_id = (model), \
+ .specifier_id = (vendor), \
+ .version = DICE_INTERFACE, \
+ .driver_data = (kernel_ulong_t)(data), \
+ }
+
static const struct ieee1394_device_id dice_id_table[] = {
+ // Avid M-Box 3 Pro, handled further in the probe function.
+ DICE_DEV_ENTRY_TYPICAL(OUI_AVID, 0x000004, snd_dice_detect_extension_formats),
/* M-Audio Profire 2626 has a different value in version field. */
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
diff --git a/sound/firewire/dice/dice.h b/sound/firewire/dice/dice.h
index adc6f7c84460..3c967d1b3605 100644
--- a/sound/firewire/dice/dice.h
+++ b/sound/firewire/dice/dice.h
@@ -109,7 +109,8 @@ struct snd_dice {
struct fw_iso_resources rx_resources[MAX_STREAMS];
struct amdtp_stream tx_stream[MAX_STREAMS];
struct amdtp_stream rx_stream[MAX_STREAMS];
- bool global_enabled;
+ bool global_enabled:1;
+ bool disable_double_pcm_frames:1;
struct completion clock_accepted;
unsigned int substreams_counter;
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 1f1e3236efb8..9eea25c46dc7 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -355,7 +355,6 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
* Onyx-i series (former models): 0x081216
* Mackie Onyx Satellite: 0x00200f
* Tapco LINK.firewire 4x6: 0x000460
- * d.2 pro: Unknown
* d.4 pro: Unknown
* U.420: Unknown
* U.420d: Unknown
diff --git a/sound/isa/gus/gus_main.c b/sound/isa/gus/gus_main.c
index afc088f0377c..b7518122a10d 100644
--- a/sound/isa/gus/gus_main.c
+++ b/sound/isa/gus/gus_main.c
@@ -77,17 +77,8 @@ static const struct snd_kcontrol_new snd_gus_joystick_control = {
static void snd_gus_init_control(struct snd_gus_card *gus)
{
- int ret;
-
- if (!gus->ace_flag) {
- ret =
- snd_ctl_add(gus->card,
- snd_ctl_new1(&snd_gus_joystick_control,
- gus));
- if (ret)
- snd_printk(KERN_ERR "gus: snd_ctl_add failed: %d\n",
- ret);
- }
+ if (!gus->ace_flag)
+ snd_ctl_add(gus->card, snd_ctl_new1(&snd_gus_joystick_control, gus));
}
/*
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index 38dc1fde25f3..aa4870531023 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -846,14 +846,10 @@ int snd_sb16dsp_pcm(struct snd_sb *chip, int device)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb16_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb16_capture_ops);
- if (chip->dma16 >= 0 && chip->dma8 != chip->dma16) {
- err = snd_ctl_add(card, snd_ctl_new1(
- &snd_sb16_dma_control, chip));
- if (err)
- return err;
- } else {
+ if (chip->dma16 >= 0 && chip->dma8 != chip->dma16)
+ snd_ctl_add(card, snd_ctl_new1(&snd_sb16_dma_control, chip));
+ else
pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX;
- }
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
card->dev, 64*1024, 128*1024);
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index 6c9d534ce8b6..ed3a87ebe3f4 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -93,12 +93,12 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
acard = card->private_data;
card->private_free = snd_sb8_free;
- /* block the 0x388 port to avoid PnP conflicts */
+ /*
+ * Block the 0x388 port to avoid PnP conflicts.
+ * No need to check this value after request_region,
+ * as we never do anything with it.
+ */
acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
- if (!acard->fm_res) {
- err = -EBUSY;
- goto _err;
- }
if (port[dev] != SNDRV_AUTO_PORT) {
if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6d58f24c9702..552e2cb73291 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -395,7 +395,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0282:
case 0x10ec0283:
case 0x10ec0286:
- case 0x10ec0287:
case 0x10ec0288:
case 0x10ec0285:
case 0x10ec0298:
@@ -406,6 +405,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0275:
alc_update_coef_idx(codec, 0xe, 0, 1<<0);
break;
+ case 0x10ec0287:
+ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+ alc_write_coef_idx(codec, 0x8, 0x4ab7);
+ break;
case 0x10ec0293:
alc_update_coef_idx(codec, 0xa, 1<<13, 0);
break;
@@ -6251,6 +6254,35 @@ static void alc294_fixup_gx502_hp(struct hda_codec *codec,
}
}
+static void alc294_gu502_toggle_output(struct hda_codec *codec,
+ struct hda_jack_callback *cb)
+{
+ /* Windows sets 0x10 to 0x8420 for Node 0x20, which is
+ * responsible for switching between speakers and headphones
+ */
+ if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
+ alc_write_coef_idx(codec, 0x10, 0x8420);
+ else
+ alc_write_coef_idx(codec, 0x10, 0x0a20);
+}
+
+static void alc294_fixup_gu502_hp(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (!is_jack_detectable(codec, 0x21))
+ return;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_jack_detect_enable_callback(codec, 0x21,
+ alc294_gu502_toggle_output);
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ alc294_gu502_toggle_output(codec, NULL);
+ break;
+ }
+}
+
static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -6468,6 +6500,9 @@ enum {
ALC294_FIXUP_ASUS_GX502_HP,
ALC294_FIXUP_ASUS_GX502_PINS,
ALC294_FIXUP_ASUS_GX502_VERBS,
+ ALC294_FIXUP_ASUS_GU502_HP,
+ ALC294_FIXUP_ASUS_GU502_PINS,
+ ALC294_FIXUP_ASUS_GU502_VERBS,
ALC285_FIXUP_HP_GPIO_LED,
ALC285_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_GPIO_LED,
@@ -6507,6 +6542,7 @@ enum {
ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST,
ALC295_FIXUP_ASUS_DACS,
ALC295_FIXUP_HP_OMEN,
+ ALC285_FIXUP_HP_SPECTRE_X360,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -7709,6 +7745,35 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc294_fixup_gx502_hp,
},
+ [ALC294_FIXUP_ASUS_GU502_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a11050 }, /* rear HP mic */
+ { 0x1a, 0x01a11830 }, /* rear external mic */
+ { 0x21, 0x012110f0 }, /* rear HP out */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_GU502_VERBS
+ },
+ [ALC294_FIXUP_ASUS_GU502_VERBS] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* set 0x15 to HP-OUT ctrl */
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
+ /* unmute the 0x15 amp */
+ { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
+ /* set 0x1b to HP-OUT */
+ { 0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_GU502_HP
+ },
+ [ALC294_FIXUP_ASUS_GU502_HP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc294_fixup_gu502_hp,
+ },
[ALC294_FIXUP_ASUS_COEF_1B] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -8035,6 +8100,15 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
},
+ [ALC285_FIXUP_HP_SPECTRE_X360] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x14, 0x90170110 }, /* enable top speaker */
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8195,6 +8269,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
@@ -8253,6 +8328,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
@@ -8309,12 +8385,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f5, "Clevo NH55EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x50f6, "Clevo NH55DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8332,9 +8415,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL5XNU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xc018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
@@ -8600,6 +8691,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
{.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
+ {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
{}
};
#define ALC225_STANDARD_PINS \
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 35903d1a1cbd..5b124c4ad572 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -331,6 +331,7 @@ struct ichdev {
unsigned int ali_slot; /* ALI DMA slot */
struct ac97_pcm *pcm;
int pcm_open_flag;
+ unsigned int prepared:1;
unsigned int suspended: 1;
};
@@ -691,6 +692,9 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
int status, civ, i, step;
int ack = 0;
+ if (!ichdev->prepared || ichdev->suspended)
+ return;
+
spin_lock_irqsave(&chip->reg_lock, flags);
status = igetbyte(chip, port + ichdev->roff_sr);
civ = igetbyte(chip, port + ICH_REG_OFF_CIV);
@@ -881,6 +885,7 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
+ ichdev->prepared = 0;
}
err = snd_ac97_pcm_open(ichdev->pcm, params_rate(hw_params),
params_channels(hw_params),
@@ -902,6 +907,7 @@ static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
+ ichdev->prepared = 0;
}
return 0;
}
@@ -976,6 +982,7 @@ static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
}
snd_intel8x0_setup_periods(chip, ichdev);
+ ichdev->prepared = 1;
return 0;
}
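
`snd_intel8x0_update()` runs from the interrupt handler, which can fire for a stream whose DMA state was never set up; the new `prepared` bit makes the update a no-op until `prepare` has run, and `hw_params`/`hw_free` clear it again. A generic userspace sketch of the guard pattern (names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct stream {
	bool prepared;
	bool suspended;
};

/* Interrupt-side work: bail out unless the stream state is valid. */
static void stream_update(struct stream *s)
{
	if (!s->prepared || s->suspended)
		return;	/* spurious interrupt for an unprepared stream */
	puts("advance DMA pointers");
}

int main(void)
{
	struct stream s = { .prepared = false, .suspended = false };

	stream_update(&s);	/* ignored: not yet prepared */
	s.prepared = true;
	stream_update(&s);	/* does real work */
	return 0;
}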
diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
index 80bc7c10ed75..80cd3ea0c157 100644
--- a/sound/soc/codecs/cs43130.c
+++ b/sound/soc/codecs/cs43130.c
@@ -1735,6 +1735,14 @@ static DEVICE_ATTR(hpload_dc_r, 0444, cs43130_show_dc_r, NULL);
static DEVICE_ATTR(hpload_ac_l, 0444, cs43130_show_ac_l, NULL);
static DEVICE_ATTR(hpload_ac_r, 0444, cs43130_show_ac_r, NULL);
+static struct attribute *hpload_attrs[] = {
+ &dev_attr_hpload_dc_l.attr,
+ &dev_attr_hpload_dc_r.attr,
+ &dev_attr_hpload_ac_l.attr,
+ &dev_attr_hpload_ac_r.attr,
+ NULL, /* the sysfs core walks attribute arrays until a NULL sentinel */
+};
+ATTRIBUTE_GROUPS(hpload);
+
static struct reg_sequence hp_en_cal_seq[] = {
{CS43130_INT_MASK_4, CS43130_INT_MASK_ALL},
{CS43130_HP_MEAS_LOAD_1, 0},
@@ -2302,25 +2310,15 @@ static int cs43130_probe(struct snd_soc_component *component)
cs43130->hpload_done = false;
if (cs43130->dc_meas) {
- ret = device_create_file(component->dev, &dev_attr_hpload_dc_l);
- if (ret < 0)
- return ret;
-
- ret = device_create_file(component->dev, &dev_attr_hpload_dc_r);
- if (ret < 0)
- return ret;
-
- ret = device_create_file(component->dev, &dev_attr_hpload_ac_l);
- if (ret < 0)
- return ret;
-
- ret = device_create_file(component->dev, &dev_attr_hpload_ac_r);
- if (ret < 0)
+ ret = sysfs_create_groups(&component->dev->kobj, hpload_groups);
+ if (ret)
return ret;
cs43130->wq = create_singlethread_workqueue("cs43130_hp");
- if (!cs43130->wq)
+ if (!cs43130->wq) {
+ sysfs_remove_groups(&component->dev->kobj, hpload_groups);
return -ENOMEM;
+ }
INIT_WORK(&cs43130->work, cs43130_imp_meas);
}
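
The probe-path change leans on `ATTRIBUTE_GROUPS()`: the four `device_create_file()` calls collapse into one `sysfs_create_groups()` with a matching `sysfs_remove_groups()` on the workqueue failure path. Roughly what `ATTRIBUTE_GROUPS(hpload)` expands to (simplified from <linux/sysfs.h>; the sysfs core walks `.attrs` until the NULL sentinel):

/* Roughly what ATTRIBUTE_GROUPS(hpload) expands to (simplified): */
static const struct attribute_group hpload_group = {
	.attrs = hpload_attrs,	/* walked until the NULL sentinel */
};

static const struct attribute_group *hpload_groups[] = {
	&hpload_group,
	NULL,
};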
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 9408ee63cb26..438fa18bcb55 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3388,30 +3388,44 @@ static int rt5645_probe(struct snd_soc_component *component)
{
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
+ int ret = 0;
rt5645->component = component;
switch (rt5645->codec_type) {
case CODEC_TYPE_RT5645:
- snd_soc_dapm_new_controls(dapm,
+ ret = snd_soc_dapm_new_controls(dapm,
rt5645_specific_dapm_widgets,
ARRAY_SIZE(rt5645_specific_dapm_widgets));
- snd_soc_dapm_add_routes(dapm,
+ if (ret < 0)
+ goto exit;
+
+ ret = snd_soc_dapm_add_routes(dapm,
rt5645_specific_dapm_routes,
ARRAY_SIZE(rt5645_specific_dapm_routes));
+ if (ret < 0)
+ goto exit;
+
if (rt5645->v_id < 3) {
- snd_soc_dapm_add_routes(dapm,
+ ret = snd_soc_dapm_add_routes(dapm,
rt5645_old_dapm_routes,
ARRAY_SIZE(rt5645_old_dapm_routes));
+ if (ret < 0)
+ goto exit;
}
break;
case CODEC_TYPE_RT5650:
- snd_soc_dapm_new_controls(dapm,
+ ret = snd_soc_dapm_new_controls(dapm,
rt5650_specific_dapm_widgets,
ARRAY_SIZE(rt5650_specific_dapm_widgets));
- snd_soc_dapm_add_routes(dapm,
+ if (ret < 0)
+ goto exit;
+
+ ret = snd_soc_dapm_add_routes(dapm,
rt5650_specific_dapm_routes,
ARRAY_SIZE(rt5650_specific_dapm_routes));
+ if (ret < 0)
+ goto exit;
break;
}
@@ -3419,9 +3433,17 @@ static int rt5645_probe(struct snd_soc_component *component)
/* for JD function */
if (rt5645->pdata.jd_mode) {
- snd_soc_dapm_force_enable_pin(dapm, "JD Power");
- snd_soc_dapm_force_enable_pin(dapm, "LDO2");
- snd_soc_dapm_sync(dapm);
+ ret = snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+ if (ret < 0)
+ goto exit;
+
+ ret = snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+ if (ret < 0)
+ goto exit;
+
+ ret = snd_soc_dapm_sync(dapm);
+ if (ret < 0)
+ goto exit;
}
if (rt5645->pdata.long_name)
@@ -3432,9 +3454,14 @@ static int rt5645_probe(struct snd_soc_component *component)
GFP_KERNEL);
if (!rt5645->eq_param)
- return -ENOMEM;
-
- return 0;
+ ret = -ENOMEM;
+exit:
+ /*
+ * If there was an error above, returning it here is enough: the
+ * caller will clean everything up with a later call to
+ * rt5645_remove().
+ */
+ return ret;
}
static void rt5645_remove(struct snd_soc_component *component)
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index a030dd65eb28..9602929b7de9 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -699,6 +699,10 @@ static int line6_init_cap_control(struct usb_line6 *line6)
line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
if (!line6->buffer_message)
return -ENOMEM;
+
+ ret = line6_init_midi(line6);
+ if (ret < 0)
+ return ret;
} else {
ret = line6_hwdep_init(line6);
if (ret < 0)
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index cd44cb5f1310..16e644330c4d 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -376,11 +376,6 @@ static int pod_init(struct usb_line6 *line6,
if (err < 0)
return err;
- /* initialize MIDI subsystem: */
- err = line6_init_midi(line6);
- if (err < 0)
- return err;
-
/* initialize PCM subsystem: */
err = line6_init_pcm(line6, &pod_pcm_properties);
if (err < 0)
diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
index ed158f04de80..c2245aa93b08 100644
--- a/sound/usb/line6/variax.c
+++ b/sound/usb/line6/variax.c
@@ -159,7 +159,6 @@ static int variax_init(struct usb_line6 *line6,
const struct usb_device_id *id)
{
struct usb_line6_variax *variax = line6_to_variax(line6);
- int err;
line6->process_message = line6_variax_process_message;
line6->disconnect = line6_variax_disconnect;
@@ -172,11 +171,6 @@ static int variax_init(struct usb_line6 *line6,
if (variax->buffer_activate == NULL)
return -ENOMEM;
- /* initialize MIDI subsystem: */
- err = line6_init_midi(&variax->line6);
- if (err < 0)
- return err;
-
/* initiate startup procedure: */
schedule_delayed_work(&line6->startup_work,
msecs_to_jiffies(VARIAX_STARTUP_DELAY1));
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index a10ac75969a8..2c01649c70f6 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1750,7 +1750,7 @@ static struct usb_midi_in_jack_descriptor *find_usb_in_jack_descriptor(
struct usb_midi_in_jack_descriptor *injd =
(struct usb_midi_in_jack_descriptor *)extra;
- if (injd->bLength > 4 &&
+ if (injd->bLength >= sizeof(*injd) &&
injd->bDescriptorType == USB_DT_CS_INTERFACE &&
injd->bDescriptorSubtype == UAC_MIDI_IN_JACK &&
injd->bJackID == jack_id)
@@ -1773,7 +1773,7 @@ static struct usb_midi_out_jack_descriptor *find_usb_out_jack_descriptor(
struct usb_midi_out_jack_descriptor *outjd =
(struct usb_midi_out_jack_descriptor *)extra;
- if (outjd->bLength > 4 &&
+ if (outjd->bLength >= sizeof(*outjd) &&
outjd->bDescriptorType == USB_DT_CS_INTERFACE &&
outjd->bDescriptorSubtype == UAC_MIDI_OUT_JACK &&
outjd->bJackID == jack_id)
@@ -1820,7 +1820,8 @@ static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi,
outjd = find_usb_out_jack_descriptor(hostif, jack_id);
if (outjd) {
sz = USB_DT_MIDI_OUT_SIZE(outjd->bNrInputPins);
- iJack = *(((uint8_t *) outjd) + sz - sizeof(uint8_t));
+ if (outjd->bLength >= sz)
+ iJack = *(((uint8_t *) outjd) + sz - sizeof(uint8_t));
}
} else {
/* and out jacks connect to ins */
@@ -1956,8 +1957,12 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
ms_ep = find_usb_ms_endpoint_descriptor(hostep);
if (!ms_ep)
continue;
+ if (ms_ep->bLength <= sizeof(*ms_ep))
+ continue;
if (ms_ep->bNumEmbMIDIJack > 0x10)
continue;
+ if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack)
+ continue;
if (usb_endpoint_dir_out(ep)) {
if (endpoints[epidx].out_ep) {
if (++epidx >= MIDI_MAX_ENDPOINTS) {
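
All three hunks above apply one rule to untrusted USB descriptors: verify that `bLength` covers the fixed part of the structure before reading fixed fields, and covers `sizeof(*desc) + n` before reading `n` trailing variable-length entries. A standalone sketch of that validation pattern (the descriptor layout is invented for illustration, not the actual usb_ms_endpoint_descriptor):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative descriptor: fixed header plus bNumJacks trailing bytes. */
struct ms_ep_desc {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bNumJacks;
	uint8_t baJackID[];	/* bNumJacks entries follow */
};

static const struct ms_ep_desc *validate(const uint8_t *buf, size_t len)
{
	const struct ms_ep_desc *d = (const void *)buf;

	if (len < sizeof(*d) || d->bLength < sizeof(*d))
		return NULL;	/* too short for even the fixed part */
	if (d->bLength > len)
		return NULL;	/* claims more bytes than we really have */
	if (d->bLength < sizeof(*d) + d->bNumJacks)
		return NULL;	/* trailing array would read out of bounds */
	return d;
}

int main(void)
{
	uint8_t good[] = { 5, 0x25, 2, 7, 8 };	/* bLength = 5, 2 jacks */
	uint8_t evil[] = { 4, 0x25, 9 };	/* claims more than it has */

	printf("good: %s\n", validate(good, sizeof(good)) ? "ok" : "reject");
	printf("evil: %s\n", validate(evil, sizeof(evil)) ? "ok" : "reject");
	return 0;
}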
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index cd72016c3cfa..715092fc6a23 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -51,39 +51,39 @@ subdir-obj-y :=
build-file := $(dir)/Build
-include $(build-file)
-quiet_cmd_flex = FLEX $@
-quiet_cmd_bison = BISON $@
+quiet_cmd_flex = FLEX $@
+quiet_cmd_bison = BISON $@
# Create directory unless it exists
-quiet_cmd_mkdir = MKDIR $(dir $@)
+quiet_cmd_mkdir = MKDIR $(dir $@)
cmd_mkdir = mkdir -p $(dir $@)
rule_mkdir = $(if $(wildcard $(dir $@)),,@$(call echo-cmd,mkdir) $(cmd_mkdir))
# Compile command
-quiet_cmd_cc_o_c = CC $@
+quiet_cmd_cc_o_c = CC $@
cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
-quiet_cmd_host_cc_o_c = HOSTCC $@
+quiet_cmd_host_cc_o_c = HOSTCC $@
cmd_host_cc_o_c = $(HOSTCC) $(host_c_flags) -c -o $@ $<
-quiet_cmd_cxx_o_c = CXX $@
+quiet_cmd_cxx_o_c = CXX $@
cmd_cxx_o_c = $(CXX) $(cxx_flags) -c -o $@ $<
-quiet_cmd_cpp_i_c = CPP $@
+quiet_cmd_cpp_i_c = CPP $@
cmd_cpp_i_c = $(CC) $(c_flags) -E -o $@ $<
-quiet_cmd_cc_s_c = AS $@
+quiet_cmd_cc_s_c = AS $@
cmd_cc_s_c = $(CC) $(c_flags) -S -o $@ $<
-quiet_cmd_gen = GEN $@
+quiet_cmd_gen = GEN $@
# Link agregate command
# If there's nothing to link, create empty $@ object.
-quiet_cmd_ld_multi = LD $@
+quiet_cmd_ld_multi = LD $@
cmd_ld_multi = $(if $(strip $(obj-y)),\
$(LD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@)
-quiet_cmd_host_ld_multi = HOSTLD $@
+quiet_cmd_host_ld_multi = HOSTLD $@
cmd_host_ld_multi = $(if $(strip $(obj-y)),\
$(HOSTLD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(HOSTAR) rcs $@)
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
index 7f475d59a097..87d112650dfb 100644
--- a/tools/include/linux/bits.h
+++ b/tools/include/linux/bits.h
@@ -22,7 +22,7 @@
#include <linux/build_bug.h>
#define GENMASK_INPUT_CHECK(h, l) \
(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __builtin_constant_p((l) > (h)), (l) > (h), 0)))
+ __is_constexpr((l) > (h)), (l) > (h), 0)))
#else
/*
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
diff --git a/tools/include/linux/const.h b/tools/include/linux/const.h
index 81b8aae5a855..435ddd72d2c4 100644
--- a/tools/include/linux/const.h
+++ b/tools/include/linux/const.h
@@ -3,4 +3,12 @@
#include <vdso/const.h>
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <[email protected]>
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
#endif /* _LINUX_CONST_H */
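
`__is_constexpr()` leans on C's null-pointer-constant rule: when `x` is an integer constant expression, `(long)(x) * 0l` is a constant zero, `(void *)0` is a null pointer constant, and the conditional `8 ? ... : (int *)8` therefore has type `int *`, making the `sizeof` comparison hold. When `x` is not constant, the first arm is an ordinary `void *`, the conditional's type becomes `void *`, and `sizeof(*(void *))` is 1 under GCC's extension. A quick userspace self-check under those assumptions:

#include <assert.h>

#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

int main(void)
{
	int runtime = 5;

	_Static_assert(__is_constexpr(42), "a literal is a constant expression");
	assert(!__is_constexpr(runtime));	/* needs GCC's sizeof(void) == 1 */
	return 0;
}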
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index f9271f3ea912..071312f5eb92 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -131,29 +131,29 @@ QUIET_SUBDIR1 =
ifneq ($(silent),1)
ifneq ($(V),1)
- QUIET_CC = @echo ' CC '$@;
- QUIET_CC_FPIC = @echo ' CC FPIC '$@;
- QUIET_CLANG = @echo ' CLANG '$@;
- QUIET_AR = @echo ' AR '$@;
- QUIET_LINK = @echo ' LINK '$@;
- QUIET_MKDIR = @echo ' MKDIR '$@;
- QUIET_GEN = @echo ' GEN '$@;
+ QUIET_CC = @echo ' CC '$@;
+ QUIET_CC_FPIC = @echo ' CC FPIC '$@;
+ QUIET_CLANG = @echo ' CLANG '$@;
+ QUIET_AR = @echo ' AR '$@;
+ QUIET_LINK = @echo ' LINK '$@;
+ QUIET_MKDIR = @echo ' MKDIR '$@;
+ QUIET_GEN = @echo ' GEN '$@;
QUIET_SUBDIR0 = +@subdir=
QUIET_SUBDIR1 = ;$(NO_SUBDIR) \
- echo ' SUBDIR '$$subdir; \
+ echo ' SUBDIR '$$subdir; \
$(MAKE) $(PRINT_DIR) -C $$subdir
- QUIET_FLEX = @echo ' FLEX '$@;
- QUIET_BISON = @echo ' BISON '$@;
- QUIET_GENSKEL = @echo ' GEN-SKEL '$@;
+ QUIET_FLEX = @echo ' FLEX '$@;
+ QUIET_BISON = @echo ' BISON '$@;
+ QUIET_GENSKEL = @echo ' GENSKEL '$@;
descend = \
- +@echo ' DESCEND '$(1); \
+ +@echo ' DESCEND '$(1); \
mkdir -p $(OUTPUT)$(1) && \
$(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
- QUIET_CLEAN = @printf ' CLEAN %s\n' $1;
- QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
- QUIET_UNINST = @printf ' UNINST %s\n' $1;
+ QUIET_CLEAN = @printf ' CLEAN %s\n' $1;
+ QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
+ QUIET_UNINST = @printf ' UNINST %s\n' $1;
endif
endif
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index cf69b2fcce59..dd61118df66e 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -28,8 +28,8 @@ $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
cp $< $@
chmod -x $@
$(OUTPUT)/load_address_4096: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie $< -o $@
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
$(OUTPUT)/load_address_2097152: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie $< -o $@
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
$(OUTPUT)/load_address_16777216: load_address.c
- $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie $< -o $@
+ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c
index 78ddf5e11625..8e83cf91513a 100644
--- a/tools/testing/selftests/perf_events/sigtrap_threads.c
+++ b/tools/testing/selftests/perf_events/sigtrap_threads.c
@@ -43,7 +43,7 @@ static struct {
siginfo_t first_siginfo; /* First observed siginfo_t. */
} ctx;
-/* Unique value to check si_perf is correctly set from perf_event_attr::sig_data. */
+/* Unique value to check si_perf_data is correctly set from perf_event_attr::sig_data. */
#define TEST_SIG_DATA(addr) (~(unsigned long)(addr))
static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr)
@@ -164,8 +164,8 @@ TEST_F(sigtrap_threads, enable_event)
EXPECT_EQ(ctx.signal_count, NUM_THREADS);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
- EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
/* Check enabled for parent. */
ctx.iterate_on = 0;
@@ -183,8 +183,8 @@ TEST_F(sigtrap_threads, modify_and_enable_event)
EXPECT_EQ(ctx.signal_count, NUM_THREADS);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
- EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
/* Check enabled for parent. */
ctx.iterate_on = 0;
@@ -203,8 +203,8 @@ TEST_F(sigtrap_threads, signal_stress)
EXPECT_EQ(ctx.signal_count, NUM_THREADS * ctx.iterate_on);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
- EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
}
TEST_HARNESS_MAIN
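
The selftest updates track a late uapi rename: the event type now arrives in `si_perf_type` instead of being overloaded onto `si_errno`, and the `perf_event_attr::sig_data` cookie moves from `si_perf` to `si_perf_data`. A sketch of a SIGTRAP handler reading the new fields (assuming a libc that exposes the 5.13-era siginfo members):

#include <signal.h>
#include <stdio.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;
	/* si_perf_type/si_perf_data assume 5.13-era siginfo headers. */
	printf("perf type=%u data=%lu addr=%p\n",
	       info->si_perf_type,
	       (unsigned long)info->si_perf_data,
	       info->si_addr);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = handler,
		.sa_flags = SA_SIGINFO,
	};

	return sigaction(SIGTRAP, &sa, NULL);
}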
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 98c3b647f54d..e3d5c77a8612 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1753,16 +1753,25 @@ TEST_F(TRACE_poke, getpid_runs_normally)
# define SYSCALL_RET_SET(_regs, _val) \
do { \
typeof(_val) _result = (_val); \
- /* \
- * A syscall error is signaled by CR0 SO bit \
- * and the code is stored as a positive value. \
- */ \
- if (_result < 0) { \
- SYSCALL_RET(_regs) = -_result; \
- (_regs).ccr |= 0x10000000; \
- } else { \
+ if ((_regs.trap & 0xfff0) == 0x3000) { \
+ /* \
+ * scv 0 system calls return a negative result \
+ * on error, so no adjustment is needed. \
+ */ \
SYSCALL_RET(_regs) = _result; \
- (_regs).ccr &= ~0x10000000; \
+ } else { \
+ /* \
+ * A syscall error is signaled by the \
+ * CR0 SO bit and the code is stored as \
+ * a positive value. \
+ */ \
+ if (_result < 0) { \
+ SYSCALL_RET(_regs) = -_result; \
+ (_regs).ccr |= 0x10000000; \
+ } else { \
+ SYSCALL_RET(_regs) = _result; \
+ (_regs).ccr &= ~0x10000000; \
+ } \
} \
} while (0)
# define SYSCALL_RET_SET_ON_PTRACE_EXIT
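
The powerpc hunk above accounts for the second syscall ABI: plain `sc` signals errors via the CR0 SO bit with a positive errno, while `scv 0` (added in Linux 5.9 for POWER9's ISA 3.0) returns negative errno values directly, like most other architectures. The ptrace `trap` field carries the interrupt vector, which is how the test tells them apart. A userspace sketch of the same check (vector numbers per the powerpc ISA):

#include <stdbool.h>
#include <stdio.h>

/* Same test as the macro above: the scv facility uses interrupt vector
 * 0x3000, so masking off the low bits of regs.trap identifies it. */
static bool is_scv_syscall(unsigned long trap)
{
	return (trap & 0xfff0) == 0x3000;
}

int main(void)
{
	printf("sc:  %d\n", is_scv_syscall(0x0c00));	/* system call vector */
	printf("scv: %d\n", is_scv_syscall(0x3000));
	return 0;
}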