author     Jani Nikula <jani.nikula@intel.com>  2023-10-04 18:06:27 +0300
committer  Jani Nikula <jani.nikula@intel.com>  2023-10-04 18:06:27 +0300
commit     7824a88b4286980512de2a46763646100274a5ac (patch)
tree       fc10f5509fa12f85b72ba14c9bdf62641278f303 /drivers/accel
parent     25591b66d0a4f9277241cebe1a74b4f985bc27a9 (diff)
parent     caacbdc28f545744770fb2caf347b3c4be9a6299 (diff)
Merge drm/drm-next into drm-intel-next
Backmerge to sync up with drm-intel-gt-next and drm-misc-next.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers/accel')
-rw-r--r--   drivers/accel/drm_accel.c               35
-rw-r--r--   drivers/accel/ivpu/ivpu_drv.c           65
-rw-r--r--   drivers/accel/ivpu/ivpu_drv.h           18
-rw-r--r--   drivers/accel/ivpu/ivpu_fw.c             6
-rw-r--r--   drivers/accel/ivpu/ivpu_fw.h             2
-rw-r--r--   drivers/accel/ivpu/ivpu_hw_37xx.c       75
-rw-r--r--   drivers/accel/ivpu/ivpu_hw_37xx_reg.h  187
-rw-r--r--   drivers/accel/ivpu/ivpu_hw_40xx.c        7
-rw-r--r--   drivers/accel/ivpu/ivpu_ipc.c           13
-rw-r--r--   drivers/accel/ivpu/ivpu_mmu.c          117
-rw-r--r--   drivers/accel/ivpu/ivpu_mmu_context.c   18
-rw-r--r--   drivers/accel/ivpu/ivpu_mmu_context.h    2
-rw-r--r--   drivers/accel/ivpu/ivpu_pm.c            16
-rw-r--r--   drivers/accel/ivpu/ivpu_pm.h             2
-rw-r--r--   drivers/accel/qaic/qaic.h               13
-rw-r--r--   drivers/accel/qaic/qaic_data.c         187
-rw-r--r--   drivers/accel/qaic/qaic_drv.c          119
17 files changed, 492 insertions, 390 deletions
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 4a9baf02439e..94b4ac12cf24 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -79,29 +79,30 @@ static const struct drm_info_list accel_debugfs_list[] = {
 #define ACCEL_DEBUGFS_ENTRIES ARRAY_SIZE(accel_debugfs_list)
 
 /**
- * accel_debugfs_init() - Initialize debugfs for accel minor
- * @minor: Pointer to the drm_minor instance.
- * @minor_id: The minor's id
+ * accel_debugfs_init() - Initialize debugfs for device
+ * @dev: Pointer to the device instance.
  *
- * This function initializes the drm minor's debugfs members and creates
- * a root directory for the minor in debugfs. It also creates common files
- * for accelerators and calls the driver's debugfs init callback.
+ * This function creates a root directory for the device in debugfs.
  */
-void accel_debugfs_init(struct drm_minor *minor, int minor_id)
+void accel_debugfs_init(struct drm_device *dev)
 {
-	struct drm_device *dev = minor->dev;
-	char name[64];
+	drm_debugfs_dev_init(dev, accel_debugfs_root);
+}
+
+/**
+ * accel_debugfs_register() - Register debugfs for device
+ * @dev: Pointer to the device instance.
+ *
+ * Creates common files for accelerators.
+ */
+void accel_debugfs_register(struct drm_device *dev)
+{
+	struct drm_minor *minor = dev->accel;
 
-	INIT_LIST_HEAD(&minor->debugfs_list);
-	mutex_init(&minor->debugfs_lock);
-	sprintf(name, "%d", minor_id);
-	minor->debugfs_root = debugfs_create_dir(name, accel_debugfs_root);
+	minor->debugfs_root = dev->debugfs_root;
 	drm_debugfs_create_files(accel_debugfs_list, ACCEL_DEBUGFS_ENTRIES,
-				 minor->debugfs_root, minor);
-
-	if (dev->driver->debugfs_init)
-		dev->driver->debugfs_init(minor);
+				 dev->debugfs_root, minor);
 }
 
 /**
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index ba79f397c9e8..fa0680ba9340 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -518,78 +518,52 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
 
 	ret = ivpu_pci_init(vdev);
-	if (ret) {
-		ivpu_err(vdev, "Failed to initialize PCI device: %d\n", ret);
+	if (ret)
 		goto err_xa_destroy;
-	}
 
 	ret = ivpu_irq_init(vdev);
-	if (ret) {
-		ivpu_err(vdev, "Failed to initialize IRQs: %d\n", ret);
+	if (ret)
 		goto err_xa_destroy;
-	}
 
 	/* Init basic HW info based on buttress registers which are accessible before power up */
 	ret = ivpu_hw_info_init(vdev);
-	if (ret) {
-		ivpu_err(vdev, "Failed to initialize HW info: %d\n", ret);
+	if (ret)
 		goto err_xa_destroy;
-	}
 
 	/* Power up early so the rest of init code can access VPU registers */
 	ret = ivpu_hw_power_up(vdev);
-	if (ret) {
-		ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
+	if (ret)
 		goto err_xa_destroy;
-	}
 
 	ret = ivpu_mmu_global_context_init(vdev);
-	if (ret) {
-		ivpu_err(vdev, "Failed to initialize global MMU context: %d\n", ret);
+	if (ret)
 		goto err_power_down;
-	}
 
 	ret = ivpu_mmu_init(vdev);
-	if (ret) {
-		ivpu_err(vdev, "Failed to initialize MMU device: %d\n", ret);
+	if (ret)
 		goto err_mmu_gctx_fini;
-	}
 
-	ret = ivpu_fw_init(vdev);
-	if (ret) {
-		ivpu_err(vdev, "Failed to initialize firmware: %d\n", ret);
+	ret = ivpu_mmu_reserved_context_init(vdev);
+	if (ret)
 		goto err_mmu_gctx_fini;
-	}
+
+	ret = ivpu_fw_init(vdev);
+	if (ret)
+		goto err_mmu_rctx_fini;
 
 	ret = ivpu_ipc_init(vdev);
-	if (ret) {
-		ivpu_err(vdev, "Failed to initialize IPC: %d\n", ret);
+	if (ret)
 		goto err_fw_fini;
-	}
 
-	ret = ivpu_pm_init(vdev);
-	if (ret) {
ivpu_err(vdev, "Failed to initialize PM: %d\n", ret); - goto err_ipc_fini; - } + ivpu_pm_init(vdev); ret = ivpu_job_done_thread_init(vdev); - if (ret) { - ivpu_err(vdev, "Failed to initialize job done thread: %d\n", ret); + if (ret) goto err_ipc_fini; - } - - ret = ivpu_fw_load(vdev); - if (ret) { - ivpu_err(vdev, "Failed to load firmware: %d\n", ret); - goto err_job_done_thread_fini; - } ret = ivpu_boot(vdev); - if (ret) { - ivpu_err(vdev, "Failed to boot: %d\n", ret); + if (ret) goto err_job_done_thread_fini; - } ivpu_pm_enable(vdev); @@ -601,6 +575,8 @@ err_ipc_fini: ivpu_ipc_fini(vdev); err_fw_fini: ivpu_fw_fini(vdev); +err_mmu_rctx_fini: + ivpu_mmu_reserved_context_fini(vdev); err_mmu_gctx_fini: ivpu_mmu_global_context_fini(vdev); err_power_down: @@ -624,6 +600,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev) ivpu_ipc_fini(vdev); ivpu_fw_fini(vdev); + ivpu_mmu_reserved_context_fini(vdev); ivpu_mmu_global_context_fini(vdev); drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); @@ -651,10 +628,8 @@ static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, vdev); ret = ivpu_dev_init(vdev); - if (ret) { - dev_err(&pdev->dev, "Failed to initialize VPU device: %d\n", ret); + if (ret) return ret; - } ret = drm_dev_register(&vdev->drm, 0); if (ret) { diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 9e8c075fe9ef..12a63f8a73e8 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -28,12 +28,13 @@ #define IVPU_HW_37XX 37 #define IVPU_HW_40XX 40 -#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0 -/* SSID 1 is used by the VPU to represent invalid context */ -#define IVPU_USER_CONTEXT_MIN_SSID 2 -#define IVPU_USER_CONTEXT_MAX_SSID (IVPU_USER_CONTEXT_MIN_SSID + 63) +#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0 +/* SSID 1 is used by the VPU to represent reserved context */ +#define IVPU_RESERVED_CONTEXT_MMU_SSID 1 +#define IVPU_USER_CONTEXT_MIN_SSID 2 +#define IVPU_USER_CONTEXT_MAX_SSID (IVPU_USER_CONTEXT_MIN_SSID + 63) -#define IVPU_NUM_ENGINES 2 +#define IVPU_NUM_ENGINES 2 #define IVPU_PLATFORM_SILICON 0 #define IVPU_PLATFORM_SIMICS 2 @@ -75,6 +76,11 @@ #define IVPU_WA(wa_name) (vdev->wa.wa_name) +#define IVPU_PRINT_WA(wa_name) do { \ + if (IVPU_WA(wa_name)) \ + ivpu_dbg(vdev, MISC, "Using WA: " #wa_name "\n"); \ +} while (0) + struct ivpu_wa_table { bool punit_disabled; bool clear_runtime_mem; @@ -104,6 +110,7 @@ struct ivpu_device { struct ivpu_pm_info *pm; struct ivpu_mmu_context gctx; + struct ivpu_mmu_context rctx; struct xarray context_xa; struct xa_limit context_xa_limit; @@ -117,6 +124,7 @@ struct ivpu_device { int jsm; int tdr; int reschedule_suspend; + int autosuspend; } timeout; }; diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 9827ea4d7b83..d57e103aae1c 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -301,6 +301,8 @@ int ivpu_fw_init(struct ivpu_device *vdev) if (ret) goto err_fw_release; + ivpu_fw_load(vdev); + return 0; err_fw_release: @@ -314,7 +316,7 @@ void ivpu_fw_fini(struct ivpu_device *vdev) ivpu_fw_release(vdev); } -int ivpu_fw_load(struct ivpu_device *vdev) +void ivpu_fw_load(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; u64 image_end_offset = fw->image_load_offset + fw->image_size; @@ -331,8 +333,6 @@ int ivpu_fw_load(struct ivpu_device *vdev) } wmb(); /* Flush WC buffers after writing fw->mem */ - - return 0; } static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct 
 static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 8567fdf925fe..10ae2847f0ef 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -31,7 +31,7 @@ struct ivpu_fw_info {
 
 int ivpu_fw_init(struct ivpu_device *vdev);
 void ivpu_fw_fini(struct ivpu_device *vdev);
-int ivpu_fw_load(struct ivpu_device *vdev);
+void ivpu_fw_load(struct ivpu_device *vdev);
 void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp);
 
 static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 9eae1c241bc0..edd4d860f135 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -104,6 +104,11 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
 
 	if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4)
 		vdev->wa.interrupt_clear_with_0 = true;
+
+	IVPU_PRINT_WA(punit_disabled);
+	IVPU_PRINT_WA(clear_runtime_mem);
+	IVPU_PRINT_WA(d3hot_after_power_off);
+	IVPU_PRINT_WA(interrupt_clear_with_0);
 }
 
 static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
@@ -113,11 +118,13 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
 		vdev->timeout.jsm = 50000;
 		vdev->timeout.tdr = 2000000;
 		vdev->timeout.reschedule_suspend = 1000;
+		vdev->timeout.autosuspend = -1;
 	} else {
 		vdev->timeout.boot = 1000;
 		vdev->timeout.jsm = 500;
 		vdev->timeout.tdr = 2000;
 		vdev->timeout.reschedule_suspend = 10;
+		vdev->timeout.autosuspend = 10;
 	}
 }
 
@@ -345,10 +352,10 @@ static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
 
 static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
 {
-	u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
+	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
 
-	if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
-	    !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
 		return -EIO;
 
 	return 0;
@@ -356,10 +363,10 @@ static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
 
 static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
 {
-	u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QACCEPTN);
+	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);
 
-	if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
-	    !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
 		return -EIO;
 
 	return 0;
@@ -367,10 +374,10 @@ static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_va
 
 static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
 {
-	u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QDENY);
+	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);
 
-	if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
-	    !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+	if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+	    !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
 		return -EIO;
 
 	return 0;
@@ -423,15 +430,15 @@ static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable
 	int ret;
 	u32 val;
 
-	val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
+	val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
 	if (enable) {
-		val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
-		val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
+		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
 	} else {
-		val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
-		val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
+		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
 	}
-	REGV_WR32(MTL_VPU_TOP_NOC_QREQN, val);
+	REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);
 
 	ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
 	if (ret) {
@@ -563,17 +570,17 @@ static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
 {
 	u32 val;
 
-	val = REGV_RD32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
-	val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
+	val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
+	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
 
-	val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
-	REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
+	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
 
-	val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
-	REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
 
-	val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
-	REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
 
 	val = vdev->fw->entry_point >> 9;
 	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
@@ -777,17 +784,17 @@ static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
 	u32 val;
 
 	/* Enable writing and set non-zero WDT value */
-	REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
-	REGV_WR32(MTL_VPU_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+	REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
 
 	/* Enable writing and disable watchdog timer */
-	REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
-	REGV_WR32(MTL_VPU_CPU_SS_TIM_WDOG_EN, 0);
+	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+	REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);
 
 	/* Now clear the timeout interrupt */
-	val = REGV_RD32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG);
-	val = REG_CLR_FLD(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
-	REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
+	val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
+	val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+	REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
 }
 
 static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
@@ -834,10 +841,10 @@ static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
 
 static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
 {
-	u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
-	u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
+	u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;
+	u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET);
 
-	REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
+	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
 }
 
 static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
@@ -854,7 +861,7 @@ static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
 
 static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
 {
-	REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
+	REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
 }
 
 static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
index 6e4e915948f9..4083beb5e9db 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
@@ -3,70 +3,70 @@
  * Copyright (C) 2020-2023 Intel Corporation
  */
 
-#ifndef __IVPU_HW_MTL_REG_H__
-#define __IVPU_HW_MTL_REG_H__
+#ifndef __IVPU_HW_37XX_REG_H__
+#define __IVPU_HW_37XX_REG_H__
 
 #include <linux/bits.h>
 
-#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE 0x00000000u
+#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE		0x00000000u
 
-#define VPU_37XX_BUTTRESS_INTERRUPT_STAT 0x00000004u
-#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT		0x00000004u
+#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK	BIT_MASK(0)
 #define VPU_37XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK	BIT_MASK(1)
 #define VPU_37XX_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK	BIT_MASK(2)
 
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0		0x00000008u
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK	GENMASK(15, 0)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK	GENMASK(31, 16)
 
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1		0x0000000cu
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK	GENMASK(15, 0)
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK	GENMASK(31, 16)
 
-#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
+#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2		0x00000010u
 #define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK	GENMASK(15, 0)
 
-#define VPU_37XX_BUTTRESS_WP_REQ_CMD 0x00000014u
+#define VPU_37XX_BUTTRESS_WP_REQ_CMD			0x00000014u
 #define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK		BIT_MASK(0)
 
 #define VPU_37XX_BUTTRESS_WP_DOWNLOAD			0x00000018u
 #define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK	GENMASK(15, 0)
 
 #define VPU_37XX_BUTTRESS_CURRENT_PLL			0x0000001cu
-#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
+#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK	GENMASK(15, 0)
 
-#define VPU_37XX_BUTTRESS_PLL_ENABLE 0x00000020u
+#define VPU_37XX_BUTTRESS_PLL_ENABLE			0x00000020u
 
-#define VPU_37XX_BUTTRESS_FMIN_FUSE 0x00000024u
-#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
-#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+#define VPU_37XX_BUTTRESS_FMIN_FUSE			0x00000024u
+#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK	GENMASK(7, 0)
+#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK	GENMASK(15, 8)
 
-#define VPU_37XX_BUTTRESS_FMAX_FUSE 0x00000028u
-#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+#define VPU_37XX_BUTTRESS_FMAX_FUSE			0x00000028u
+#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK	GENMASK(7, 0)
 
-#define VPU_37XX_BUTTRESS_TILE_FUSE 0x0000002cu
+#define VPU_37XX_BUTTRESS_TILE_FUSE			0x0000002cu
 #define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK		BIT_MASK(0)
-#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
+#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK		GENMASK(3, 2)
 
-#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK 0x00000030u
-#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
+#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK		0x00000030u
+#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK		0x00000034u
 
-#define VPU_37XX_BUTTRESS_PLL_STATUS 0x00000040u
+#define VPU_37XX_BUTTRESS_PLL_STATUS			0x00000040u
 #define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK		BIT_MASK(1)
 
-#define VPU_37XX_BUTTRESS_VPU_STATUS 0x00000044u
+#define VPU_37XX_BUTTRESS_VPU_STATUS			0x00000044u
 #define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK		BIT_MASK(0)
 #define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK		BIT_MASK(1)
 
-#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
-#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
-#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL		0x00000060u
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK	BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK	BIT_MASK(2)
 
 #define VPU_37XX_BUTTRESS_VPU_IP_RESET			0x00000050u
-#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK	BIT_MASK(0)
 
 #define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET		0x00000080u
-#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
+#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE		0x00000084u
 #define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE		0x00000088u
 
 #define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0			0x000000a0u
@@ -74,9 +74,9 @@
 #define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR			0x000000a8u
 
 #define VPU_37XX_BUTTRESS_UFI_ERR_LOG			0x000000b0u
-#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
-#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
-#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK	GENMASK(11, 0)
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK	GENMASK(19, 12)
+#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK	GENMASK(24, 20)
 
 #define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR			0x000000b4u
 
@@ -113,17 +113,17 @@
 #define VPU_37XX_HOST_SS_NOC_QDENY			0x0000015cu
 #define VPU_37XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK	BIT_MASK(0)
 
-#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
-#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+#define VPU_37XX_TOP_NOC_QREQN				0x00000160u
+#define VPU_37XX_TOP_NOC_QREQN_CPU_CTRL_MASK		BIT_MASK(0)
+#define VPU_37XX_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK	BIT_MASK(1)
 
-#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
-#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+#define VPU_37XX_TOP_NOC_QACCEPTN			0x00000164u
+#define VPU_37XX_TOP_NOC_QACCEPTN_CPU_CTRL_MASK		BIT_MASK(0)
+#define VPU_37XX_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK	BIT_MASK(1)
 
-#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
-#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
-#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+#define VPU_37XX_TOP_NOC_QDENY				0x00000168u
+#define VPU_37XX_TOP_NOC_QDENY_CPU_CTRL_MASK		BIT_MASK(0)
+#define VPU_37XX_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK	BIT_MASK(1)
 
 #define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN			0x00000170u
 #define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK	BIT_MASK(0)
@@ -140,9 +140,9 @@
 #define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK	BIT_MASK(2)
 #define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK	BIT_MASK(3)
 #define VPU_37XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK	BIT_MASK(4)
-#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
-#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
-#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK	BIT_MASK(5)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK	BIT_MASK(6)
+#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK	BIT_MASK(7)
 #define VPU_37XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK	BIT_MASK(8)
 #define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK	BIT_MASK(30)
 #define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK	BIT_MASK(31)
@@ -164,14 +164,14 @@
 #define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK	GENMASK(23, 16)
 #define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK	GENMASK(31, 24)
 
-#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
+#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0		0x00030020u
 #define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK	BIT_MASK(3)
 
 #define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0		0x00030024u
-#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK	BIT_MASK(3)
 
 #define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0	0x00030028u
-#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
+#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK	BIT_MASK(3)
 
 #define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0		0x0003002cu
 #define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK	BIT_MASK(3)
@@ -187,47 +187,14 @@
 #define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK	GENMASK(2, 1)
 #define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK	GENMASK(31, 3)
 
-#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
+#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR	0x00082020u
 #define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK	GENMASK(15, 0)
 #define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK	GENMASK(31, 16)
 
-#define VPU_37XX_HOST_MMU_IDR0 0x00200000u
-#define VPU_37XX_HOST_MMU_IDR1 0x00200004u
-#define VPU_37XX_HOST_MMU_IDR3 0x0020000cu
-#define VPU_37XX_HOST_MMU_IDR5 0x00200014u
-#define VPU_37XX_HOST_MMU_CR0 0x00200020u
-#define VPU_37XX_HOST_MMU_CR0ACK 0x00200024u
-#define VPU_37XX_HOST_MMU_CR1 0x00200028u
-#define VPU_37XX_HOST_MMU_CR2 0x0020002cu
-#define VPU_37XX_HOST_MMU_IRQ_CTRL 0x00200050u
-#define VPU_37XX_HOST_MMU_IRQ_CTRLACK 0x00200054u
-
-#define VPU_37XX_HOST_MMU_GERROR 0x00200060u
-#define VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
-#define VPU_37XX_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
-#define VPU_37XX_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
-#define VPU_37XX_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
-#define VPU_37XX_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
-#define VPU_37XX_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
-#define VPU_37XX_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
-
-#define VPU_37XX_HOST_MMU_GERRORN 0x00200064u
-
-#define VPU_37XX_HOST_MMU_STRTAB_BASE 0x00200080u
-#define VPU_37XX_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
-#define VPU_37XX_HOST_MMU_CMDQ_BASE 0x00200090u
-#define VPU_37XX_HOST_MMU_CMDQ_PROD 0x00200098u
-#define VPU_37XX_HOST_MMU_CMDQ_CONS 0x0020009cu
-#define VPU_37XX_HOST_MMU_EVTQ_BASE 0x002000a0u
-#define VPU_37XX_HOST_MMU_EVTQ_PROD 0x002000a8u
-#define VPU_37XX_HOST_MMU_EVTQ_CONS 0x002000acu
-#define VPU_37XX_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
-#define VPU_37XX_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
-
 #define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES		0x00360000u
 #define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK	BIT_MASK(0)
-#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
-#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK	BIT_MASK(1)
+#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK	BIT_MASK(2)
 #define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK	BIT_MASK(3)
 #define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK	BIT_MASK(4)
 #define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK	BIT_MASK(5)
@@ -246,36 +213,36 @@
 #define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK	BIT_MASK(8)
 #define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK	BIT_MASK(9)
 
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
-#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
+#define VPU_37XX_CPU_SS_DSU_LEON_RT_BASE		0x04000000u
+#define VPU_37XX_CPU_SS_DSU_LEON_RT_DSU_CTRL		0x04000000u
+#define VPU_37XX_CPU_SS_DSU_LEON_RT_PC_REG		0x04400010u
+#define VPU_37XX_CPU_SS_DSU_LEON_RT_NPC_REG		0x04400014u
+#define VPU_37XX_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG	0x04400020u
 
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_CLK_SET		0x06010004u
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK	BIT_MASK(1)
 
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_RST_CLR		0x06010018u
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK	BIT_MASK(1)
 
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
-#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC		0x06010040u
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK	BIT_MASK(0)
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK	BIT_MASK(1)
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK	BIT_MASK(2)
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK	BIT_MASK(3)
+#define VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK	GENMASK(31, 4)
 
-#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
-#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
-#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
-#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
+#define VPU_37XX_CPU_SS_TIM_WATCHDOG			0x0602009cu
+#define VPU_37XX_CPU_SS_TIM_WDOG_EN			0x060200a4u
+#define VPU_37XX_CPU_SS_TIM_SAFE			0x060200a8u
+#define VPU_37XX_CPU_SS_TIM_IPC_FIFO			0x060200f0u
 
-#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
-#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+#define VPU_37XX_CPU_SS_TIM_GEN_CONFIG			0x06021008u
+#define VPU_37XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK	BIT_MASK(9)
 
-#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
-#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
+#define VPU_37XX_CPU_SS_DOORBELL_0			0x06300000u
+#define VPU_37XX_CPU_SS_DOORBELL_0_SET_MASK		BIT_MASK(0)
 
-#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
+#define VPU_37XX_CPU_SS_DOORBELL_1			0x06301000u
 
-#endif /* __IVPU_HW_MTL_REG_H__ */
+#endif /* __IVPU_HW_37XX_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index 34626d66fa10..1c2528549635 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -126,6 +126,10 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
 
 	if (ivpu_hw_gen(vdev) == IVPU_HW_40XX)
 		vdev->wa.disable_clock_relinquish = true;
+
+	IVPU_PRINT_WA(punit_disabled);
+	IVPU_PRINT_WA(clear_runtime_mem);
+	IVPU_PRINT_WA(disable_clock_relinquish);
 }
 
 static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
@@ -135,16 +139,19 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
 		vdev->timeout.jsm = 50000;
 		vdev->timeout.tdr = 2000000;
 		vdev->timeout.reschedule_suspend = 1000;
+		vdev->timeout.autosuspend = -1;
 	} else if (ivpu_is_simics(vdev)) {
 		vdev->timeout.boot = 50;
 		vdev->timeout.jsm = 500;
 		vdev->timeout.tdr = 10000;
 		vdev->timeout.reschedule_suspend = 10;
+		vdev->timeout.autosuspend = -1;
 	} else {
 		vdev->timeout.boot = 1000;
 		vdev->timeout.jsm = 500;
 		vdev->timeout.tdr = 2000;
 		vdev->timeout.reschedule_suspend = 10;
+		vdev->timeout.autosuspend = 10;
 	}
 }
 
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index fa0af59e39ab..6b2e9dbb284a 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -426,15 +426,20 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
 int ivpu_ipc_init(struct ivpu_device *vdev)
 {
 	struct ivpu_ipc_info *ipc = vdev->ipc;
-	int ret = -ENOMEM;
+	int ret;
 
 	ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
-	if (!ipc->mem_tx)
-		return ret;
+	if (!ipc->mem_tx) {
+		ivpu_err(vdev, "Failed to allocate mem_tx\n");
+		return -ENOMEM;
+	}
 
 	ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
-	if (!ipc->mem_rx)
+	if (!ipc->mem_rx) {
+		ivpu_err(vdev, "Failed to allocate mem_rx\n");
+		ret = -ENOMEM;
 		goto err_free_tx;
+	}
 
 	ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
 					  -1, "TX_IPC_JSM");
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index baefaf7bb3cb..473e1fc686a7 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -7,12 +7,45 @@
 #include <linux/highmem.h>
 
 #include "ivpu_drv.h"
-#include "ivpu_hw_37xx_reg.h"
 #include "ivpu_hw_reg_io.h"
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
 #include "ivpu_pm.h"
 
+#define IVPU_MMU_REG_IDR0		      0x00200000u
+#define IVPU_MMU_REG_IDR1		      0x00200004u
+#define IVPU_MMU_REG_IDR3		      0x0020000cu
+#define IVPU_MMU_REG_IDR5		      0x00200014u
+#define IVPU_MMU_REG_CR0		      0x00200020u
+#define IVPU_MMU_REG_CR0ACK		      0x00200024u
+#define IVPU_MMU_REG_CR1		      0x00200028u
+#define IVPU_MMU_REG_CR2		      0x0020002cu
+#define IVPU_MMU_REG_IRQ_CTRL		      0x00200050u
+#define IVPU_MMU_REG_IRQ_CTRLACK	      0x00200054u
+
+#define IVPU_MMU_REG_GERROR		      0x00200060u
+#define IVPU_MMU_REG_GERROR_CMDQ_MASK	      BIT_MASK(0)
+#define IVPU_MMU_REG_GERROR_EVTQ_ABT_MASK     BIT_MASK(2)
+#define IVPU_MMU_REG_GERROR_PRIQ_ABT_MASK     BIT_MASK(3)
+#define IVPU_MMU_REG_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
+#define IVPU_MMU_REG_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
+#define IVPU_MMU_REG_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
+#define IVPU_MMU_REG_GERROR_MSI_ABT_MASK      BIT_MASK(7)
+
+#define IVPU_MMU_REG_GERRORN		      0x00200064u
+
+#define IVPU_MMU_REG_STRTAB_BASE	      0x00200080u
+#define IVPU_MMU_REG_STRTAB_BASE_CFG	      0x00200088u
+#define IVPU_MMU_REG_CMDQ_BASE		      0x00200090u
+#define IVPU_MMU_REG_CMDQ_PROD		      0x00200098u
+#define IVPU_MMU_REG_CMDQ_CONS		      0x0020009cu
+#define IVPU_MMU_REG_EVTQ_BASE		      0x002000a0u
+#define IVPU_MMU_REG_EVTQ_PROD		      0x002000a8u
+#define IVPU_MMU_REG_EVTQ_CONS		      0x002000acu
+#define IVPU_MMU_REG_EVTQ_PROD_SEC	      (0x002000a8u + SZ_64K)
+#define IVPU_MMU_REG_EVTQ_CONS_SEC	      (0x002000acu + SZ_64K)
+#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK	      GENMASK(30, 24)
+
 #define IVPU_MMU_IDR0_REF		0x080f3e0f
 #define IVPU_MMU_IDR0_REF_SIMICS	0x080f3e1f
 #define IVPU_MMU_IDR1_REF		0x0e739d18
@@ -186,13 +219,13 @@
 #define IVPU_MMU_REG_TIMEOUT_US		(10 * USEC_PER_MSEC)
 #define IVPU_MMU_QUEUE_TIMEOUT_US	(100 * USEC_PER_MSEC)
 
-#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
-				  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
-				  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
-				  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
-				  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
-				  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
-				  (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
+#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
+				  (REG_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT)) | \
+				  (REG_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT)) | \
+				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT)) | \
+				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT)) | \
+				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
+				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))
 
 static char *ivpu_mmu_event_to_str(u32 cmd)
 {
@@ -250,15 +283,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
 	else
 		val_ref = IVPU_MMU_IDR0_REF;
 
-	val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
+	val = REGV_RD32(IVPU_MMU_REG_IDR0);
 	if (val != val_ref)
 		ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
 
-	val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
+	val = REGV_RD32(IVPU_MMU_REG_IDR1);
 	if (val != IVPU_MMU_IDR1_REF)
 		ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
 
-	val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
+	val = REGV_RD32(IVPU_MMU_REG_IDR3);
 	if (val != IVPU_MMU_IDR3_REF)
 		ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
 
@@ -269,7 +302,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
 	else
 		val_ref = IVPU_MMU_IDR5_REF;
 
-	val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
+	val = REGV_RD32(IVPU_MMU_REG_IDR5);
 	if (val != val_ref)
 		ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
 }
@@ -396,18 +429,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
 	u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
 	int ret;
 
-	ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
+	ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, 0);
 	if (ret)
 		return ret;
 
-	return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
+	return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_IRQ_CTRL, irq_ctrl);
 }
 
 static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
 {
 	struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
 
-	return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
+	return REGV_POLL(IVPU_MMU_REG_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
 			 IVPU_MMU_QUEUE_TIMEOUT_US);
 }
 
@@ -447,7 +480,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
 		return ret;
 
 	clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
-	REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
+	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);
 
 	ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
 	if (ret)
@@ -495,7 +528,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
 	mmu->evtq.prod = 0;
 	mmu->evtq.cons = 0;
 
-	ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
+	ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, 0);
 	if (ret)
 		return ret;
 
@@ -505,17 +538,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
 	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
 	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
 	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
-	REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
+	REGV_WR32(IVPU_MMU_REG_CR1, val);
 
-	REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
-	REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
+	REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
+	REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
 
-	REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
-	REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
-	REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
+	REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
+	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
+	REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
 
 	val = IVPU_MMU_CR0_CMDQEN;
-	ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
+	ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
 	if (ret)
 		return ret;
 
@@ -531,17 +564,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
 	if (ret)
 		return ret;
 
-	REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
-	REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
-	REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
+	REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
+	REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
+	REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
 
 	val |= IVPU_MMU_CR0_EVTQEN;
-	ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
+	ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
 	if (ret)
 		return ret;
 
 	val |= IVPU_MMU_CR0_ATSCHK;
-	ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
+	ret = ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
 	if (ret)
 		return ret;
 
@@ -550,7 +583,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
 		return ret;
 
 	val |= IVPU_MMU_CR0_SMMUEN;
-	return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
+	return ivpu_mmu_reg_write(vdev, IVPU_MMU_REG_CR0, val);
 }
 
 static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
@@ -801,14 +834,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
 	u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
 	u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
 
-	evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
+	evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
 	if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
 		return NULL;
 
 	clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
 
 	evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
-	REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
+	REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
 
 	return evt;
 }
@@ -841,35 +874,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
 
 	ivpu_dbg(vdev, IRQ, "MMU error\n");
 
-	gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
-	gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
+	gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
+	gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN);
 
 	active = gerror_val ^ gerrorn_val;
 	if (!(active & IVPU_MMU_GERROR_ERR_MASK))
 		return;
 
-	if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
+	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_ABT, active))
 		ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
 
-	if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
+	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT, active))
 		ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
 
-	if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
+	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT, active))
 		ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
 
-	if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
+	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT, active))
 		ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
 
-	if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
+	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT, active))
 		ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
 
-	if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
+	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT, active))
 		ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
 
-	if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
+	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
 		ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
 
-	REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
+	REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
 }
 
 int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 1d2e554e2c4a..0c8c65351919 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -427,8 +427,10 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
 	INIT_LIST_HEAD(&ctx->bo_list);
 
 	ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
-	if (ret)
+	if (ret) {
+		ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
 		return ret;
+	}
 
 	if (!context_id) {
 		start = vdev->hw->ranges.global.start;
@@ -467,6 +469,16 @@ void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
 	return ivpu_mmu_context_fini(vdev, &vdev->gctx);
 }
 
+int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
+{
+	return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
+}
+
+void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
+{
+	return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
+}
+
 void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
 {
 	struct ivpu_file_priv *file_priv;
@@ -488,13 +500,13 @@ int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context
 
 	ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
 	if (ret) {
-		ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
+		ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
 		return ret;
 	}
 
 	ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
 	if (ret) {
-		ivpu_err(vdev, "Failed to set page table: %d\n", ret);
+		ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
 		goto err_context_fini;
 	}
 
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index 961a0d6a6c7f..f15d8c630d8a 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -32,6 +32,8 @@ struct ivpu_mmu_context {
 
 int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
 void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
+int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
+void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);
 
 int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
 void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index e6f27daf5560..28a0d1111e12 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -282,10 +282,11 @@ void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
 	pm_runtime_put_autosuspend(vdev->drm.dev);
 }
 
-int ivpu_pm_init(struct ivpu_device *vdev)
+void ivpu_pm_init(struct ivpu_device *vdev)
 {
 	struct device *dev = vdev->drm.dev;
 	struct ivpu_pm_info *pm = vdev->pm;
+	int delay;
 
 	pm->vdev = vdev;
 	pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
@@ -293,16 +294,15 @@ int ivpu_pm_init(struct ivpu_device *vdev)
 	atomic_set(&pm->in_reset, 0);
 	INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
 
-	pm_runtime_use_autosuspend(dev);
-
 	if (ivpu_disable_recovery)
-		pm_runtime_set_autosuspend_delay(dev, -1);
-	else if (ivpu_is_silicon(vdev))
-		pm_runtime_set_autosuspend_delay(dev, 100);
+		delay = -1;
 	else
-		pm_runtime_set_autosuspend_delay(dev, 60000);
+		delay = vdev->timeout.autosuspend;
 
-	return 0;
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, delay);
+
+	ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
 }
 
 void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index fd4eada1290f..f41c30a14a40 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -19,7 +19,7 @@ struct ivpu_pm_info {
 	u32 suspend_reschedule_counter;
 };
 
-int ivpu_pm_init(struct ivpu_device *vdev);
+void ivpu_pm_init(struct ivpu_device *vdev);
 void ivpu_pm_enable(struct ivpu_device *vdev);
 void ivpu_pm_disable(struct ivpu_device *vdev);
 void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index f2bd637a0d4e..e3f4c30f3ffd 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -27,6 +27,9 @@
 #define QAIC_DBC_OFF(i)		((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
 
 #define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
+#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
+#define to_drm(qddev) (&(qddev)->drm)
+#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
 
 extern bool datapath_polling;
 
@@ -137,6 +140,8 @@ struct qaic_device {
 };
 
 struct qaic_drm_device {
+	/* The drm device struct of this drm device */
+	struct drm_device drm;
 	/* Pointer to the root device struct driven by this driver */
 	struct qaic_device *qdev;
 	/*
@@ -146,8 +151,6 @@ struct qaic_drm_device {
 	 * device is the actual physical device
 	 */
 	s32 partition_id;
-	/* Pointer to the drm device struct of this drm device */
-	struct drm_device *ddev;
 	/* Head in list of users who have opened this drm device */
 	struct list_head users;
 	/* Synchronizes access to users list */
@@ -158,8 +161,6 @@ struct qaic_bo {
 	struct drm_gem_object base;
 	/* Scatter/gather table for allocate/imported BO */
 	struct sg_table *sgt;
-	/* BO size requested by user. GEM object might be bigger in size. */
-	u64 size;
 	/* Head in list of slices of this BO */
 	struct list_head slices;
 	/* Total nents, for all slices of this BO */
@@ -221,7 +222,8 @@ struct qaic_bo {
 		 */
 		u32 queue_level_before;
 	} perf_stats;
-
+	/* Synchronizes BO operations */
+	struct mutex lock;
 };
 
 struct bo_slice {
@@ -277,6 +279,7 @@ int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *f
 int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 void irq_polling_work(struct work_struct *work);
 
 #endif /* _QAIC_H_ */
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index f4b06792c6f1..4a8e43a7a6a4 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -154,6 +154,7 @@ static void free_slice(struct kref *kref)
 {
 	struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);
 
+	slice->bo->total_slice_nents -= slice->nents;
 	list_del(&slice->slice);
 	drm_gem_object_put(&slice->bo->base);
 	sg_free_table(slice->sgt);
@@ -579,7 +580,7 @@ static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
 {
 	struct qaic_bo *bo = to_qaic_bo(obj);
 
-	drm_printf_indent(p, indent, "user requested size=%llu\n", bo->size);
+	drm_printf_indent(p, indent, "BO DMA direction %d\n", bo->dir);
 }
 
 static const struct vm_operations_struct drm_vm_ops = {
@@ -623,6 +624,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
 		qaic_free_sgt(bo->sgt);
 	}
 
+	mutex_destroy(&bo->lock);
 	drm_gem_object_release(obj);
 	kfree(bo);
 }
@@ -634,6 +636,19 @@ static const struct drm_gem_object_funcs qaic_gem_funcs = {
 	.vm_ops = &drm_vm_ops,
 };
 
+static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
+{
+	if (reinit) {
+		bo->sliced = false;
+		reinit_completion(&bo->xfer_done);
+	} else {
+		mutex_init(&bo->lock);
+		init_completion(&bo->xfer_done);
+	}
+	complete_all(&bo->xfer_done);
+	INIT_LIST_HEAD(&bo->slices);
+}
+
 static struct qaic_bo *qaic_alloc_init_bo(void)
 {
 	struct qaic_bo *bo;
@@ -642,9 +657,7 @@ static struct qaic_bo *qaic_alloc_init_bo(void)
 	if (!bo)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&bo->slices);
-	init_completion(&bo->xfer_done);
-	complete_all(&bo->xfer_done);
+	qaic_init_bo(bo, false);
 
 	return bo;
 }
@@ -695,8 +708,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	if (ret)
 		goto free_bo;
 
-	bo->size = args->size;
-
 	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 	if (ret)
 		goto free_sgt;
@@ -828,7 +839,6 @@ static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_h
 	}
 
 	bo->sgt = sgt;
-	bo->size = hdr->size;
 
 	return 0;
 }
@@ -838,7 +848,7 @@ static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
 {
 	int ret;
 
-	if (bo->size != hdr->size)
+	if (bo->base.size < hdr->size)
 		return -EINVAL;
 
 	ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
@@ -857,9 +867,9 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
 		ret = qaic_prepare_import_bo(bo, hdr);
 	else
 		ret = qaic_prepare_export_bo(qdev, bo, hdr);
-
-	if (ret == 0)
-		bo->dir = hdr->dir;
+	bo->dir = hdr->dir;
+	bo->dbc = &qdev->dbc[hdr->dbc_id];
+	bo->nr_slice = hdr->count;
 
 	return ret;
 }
@@ -868,7 +878,6 @@ static void qaic_unprepare_import_bo(struct qaic_bo *bo)
 {
 	dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
 	bo->sgt = NULL;
-	bo->size = 0;
 }
 
 static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
@@ -884,6 +893,8 @@ static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
 		qaic_unprepare_export_bo(qdev, bo);
 
 	bo->dir = 0;
+	bo->dbc = NULL;
+	bo->nr_slice = 0;
 }
 
 static void qaic_free_slices_bo(struct qaic_bo *bo)
@@ -892,6 +903,9 @@ static void qaic_free_slices_bo(struct qaic_bo *bo)
 
 	list_for_each_entry_safe(slice, temp, &bo->slices, slice)
 		kref_put(&slice->ref_count, free_slice);
+	if (WARN_ON_ONCE(bo->total_slice_nents != 0))
+		bo->total_slice_nents = 0;
+	bo->nr_slice = 0;
 }
 
 static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
@@ -908,15 +922,11 @@ static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
 		}
 	}
 
-	if (bo->total_slice_nents > qdev->dbc[hdr->dbc_id].nelem) {
+	if (bo->total_slice_nents > bo->dbc->nelem) {
 		qaic_free_slices_bo(bo);
 		return -ENOSPC;
 	}
 
-	bo->sliced = true;
-	bo->nr_slice = hdr->count;
-	list_add_tail(&bo->bo_list, &qdev->dbc[hdr->dbc_id].bo_lists);
-
 	return 0;
 }
 
@@ -994,10 +1004,13 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 	}
 
 	bo = to_qaic_bo(obj);
+	ret = mutex_lock_interruptible(&bo->lock);
+	if (ret)
+		goto put_bo;
 
 	if (bo->sliced) {
 		ret = -EINVAL;
-		goto put_bo;
+		goto unlock_bo;
 	}
 
 	dbc = &qdev->dbc[args->hdr.dbc_id];
@@ -1018,9 +1031,10 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 	if (args->hdr.dir == DMA_TO_DEVICE)
 		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
 
-	bo->dbc = dbc;
+	bo->sliced = true;
+	list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
-	drm_gem_object_put(obj);
+	mutex_unlock(&bo->lock);
 	kfree(slice_ent);
 	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
 	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
@@ -1031,6 +1045,8 @@ unprepare_bo:
 	qaic_unprepare_bo(qdev, bo);
 unlock_ch_srcu:
 	srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_bo:
+	mutex_unlock(&bo->lock);
 put_bo:
 	drm_gem_object_put(obj);
 free_slice_ent:
@@ -1185,15 +1201,18 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 		}
 
 		bo = to_qaic_bo(obj);
+		ret = mutex_lock_interruptible(&bo->lock);
+		if (ret)
+			goto failed_to_send_bo;
 
 		if (!bo->sliced) {
 			ret = -EINVAL;
-			goto failed_to_send_bo;
+			goto unlock_bo;
 		}
 
-		if (is_partial && pexec[i].resize > bo->size) {
+		if (is_partial && pexec[i].resize > bo->base.size) {
 			ret = -EINVAL;
-			goto failed_to_send_bo;
+			goto unlock_bo;
 		}
 
 		spin_lock_irqsave(&dbc->xfer_lock, flags);
@@ -1202,7 +1221,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 		if (queued) {
 			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 			ret = -EINVAL;
-			goto failed_to_send_bo;
+			goto unlock_bo;
 		}
 
 		bo->req_id = dbc->next_req_id++;
@@ -1233,17 +1252,20 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 			if (ret) {
 				bo->queued = false;
 				spin_unlock_irqrestore(&dbc->xfer_lock, flags);
-				goto failed_to_send_bo;
+				goto unlock_bo;
 			}
 		}
 		reinit_completion(&bo->xfer_done);
 		list_add_tail(&bo->xfer_list, &dbc->xfer_list);
 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 		dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
+		mutex_unlock(&bo->lock);
 	}
 
 	return 0;
 
+unlock_bo:
+	mutex_unlock(&bo->lock);
 failed_to_send_bo:
 	if (likely(obj))
 		drm_gem_object_put(obj);
@@ -1799,6 +1821,91 @@ unlock_usr_srcu:
 	return ret;
 }
 
+static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
+{
+	qaic_free_slices_bo(bo);
+	qaic_unprepare_bo(qdev, bo);
+	qaic_init_bo(bo, true);
+	list_del(&bo->bo_list);
+	drm_gem_object_put(&bo->base);
+}
+
+int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct qaic_detach_slice *args = data;
+	int rcu_id, usr_rcu_id, qdev_rcu_id;
+	struct dma_bridge_chan *dbc;
+	struct drm_gem_object *obj;
+	struct qaic_device *qdev;
+	struct qaic_user *usr;
+	unsigned long flags;
+	struct qaic_bo *bo;
+	int ret;
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	usr = file_priv->driver_priv;
+	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+	if (!usr->qddev) {
+		ret = -ENODEV;
+		goto unlock_usr_srcu;
+	}
+
+	qdev = usr->qddev->qdev;
+	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+	if (qdev->in_reset) {
+		ret = -ENODEV;
+		goto unlock_dev_srcu;
+	}
+
+	obj = drm_gem_object_lookup(file_priv, args->handle);
+	if (!obj) {
+		ret = -ENOENT;
+		goto unlock_dev_srcu;
+	}
+
+	bo = to_qaic_bo(obj);
+	ret = mutex_lock_interruptible(&bo->lock);
+	if (ret)
+		goto put_bo;
+
+	if (!bo->sliced) {
+		ret = -EINVAL;
+		goto unlock_bo;
+	}
+
+	dbc = bo->dbc;
+	rcu_id = srcu_read_lock(&dbc->ch_lock);
+	if (dbc->usr != usr) {
+		ret = -EINVAL;
+		goto unlock_ch_srcu;
+	}
+
+	/* Check if BO is committed to H/W for DMA */
+	spin_lock_irqsave(&dbc->xfer_lock, flags);
+	if (bo->queued) {
+		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+		ret = -EBUSY;
+		goto unlock_ch_srcu;
+	}
+	spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+
+	detach_slice_bo(qdev, bo);
+
+unlock_ch_srcu:
+	srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_bo:
+	mutex_unlock(&bo->lock);
+put_bo:
+	drm_gem_object_put(obj);
+unlock_dev_srcu:
+	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+	return ret;
+}
+
 static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
 {
 	unsigned long flags;
@@ -1810,6 +1917,12 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
 		bo->queued = false;
 		list_del(&bo->xfer_list);
 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+		bo->nr_slice_xfer_done = 0;
+		bo->req_id = 0;
+		bo->perf_stats.req_received_ts = 0;
+		bo->perf_stats.req_submit_ts = 0;
+		bo->perf_stats.req_processed_ts = 0;
+		bo->perf_stats.queue_level_before = 0;
 		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
 		complete_all(&bo->xfer_done);
 		drm_gem_object_put(&bo->base);
@@ -1857,7 +1970,6 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
 
 void release_dbc(struct qaic_device *qdev, u32 dbc_id)
 {
-	struct bo_slice *slice, *slice_temp;
 	struct qaic_bo *bo, *bo_temp;
 	struct dma_bridge_chan *dbc;
 
@@ -1875,24 +1987,11 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id)
 	dbc->usr = NULL;
 
 	list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
-		list_for_each_entry_safe(slice, slice_temp, &bo->slices, slice)
-			kref_put(&slice->ref_count, free_slice);
-		bo->sliced = false;
-		INIT_LIST_HEAD(&bo->slices);
-		bo->total_slice_nents = 0;
-		bo->dir = 0;
-		bo->dbc = NULL;
-		bo->nr_slice = 0;
-		bo->nr_slice_xfer_done = 0;
-		bo->queued = false;
-		bo->req_id = 0;
-		init_completion(&bo->xfer_done);
-		complete_all(&bo->xfer_done);
-		list_del(&bo->bo_list);
-		bo->perf_stats.req_received_ts = 0;
-		bo->perf_stats.req_submit_ts = 0;
-		bo->perf_stats.req_processed_ts = 0;
-		bo->perf_stats.queue_level_before = 0;
+		drm_gem_object_get(&bo->base);
+		mutex_lock(&bo->lock);
detach_slice_bo(qdev, bo); + mutex_unlock(&bo->lock); + drm_gem_object_put(&bo->base); } dbc->in_use = false; diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index b5de82e6eb4d..6f58095767df 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -22,6 +22,7 @@ #include <drm/drm_file.h> #include <drm/drm_gem.h> #include <drm/drm_ioctl.h> +#include <drm/drm_managed.h> #include <uapi/drm/qaic_accel.h> #include "mhi_controller.h" @@ -55,7 +56,7 @@ static void free_usr(struct kref *kref) static int qaic_open(struct drm_device *dev, struct drm_file *file) { - struct qaic_drm_device *qddev = dev->dev_private; + struct qaic_drm_device *qddev = to_qaic_drm_device(dev); struct qaic_device *qdev = qddev->qdev; struct qaic_user *usr; int rcu_id; @@ -150,6 +151,7 @@ static const struct drm_ioctl_desc qaic_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0), DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0), DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0), + DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0), }; static const struct drm_driver qaic_accel_driver = { @@ -170,64 +172,39 @@ static const struct drm_driver qaic_accel_driver = { static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id) { - struct qaic_drm_device *qddev; - struct drm_device *ddev; - struct device *pdev; + struct qaic_drm_device *qddev = qdev->qddev; + struct drm_device *drm = to_drm(qddev); int ret; /* Hold off implementing partitions until the uapi is determined */ if (partition_id != QAIC_NO_PARTITION) return -EINVAL; - pdev = &qdev->pdev->dev; - - qddev = kzalloc(sizeof(*qddev), GFP_KERNEL); - if (!qddev) - return -ENOMEM; - - ddev = drm_dev_alloc(&qaic_accel_driver, pdev); - if (IS_ERR(ddev)) { - ret = PTR_ERR(ddev); - goto ddev_fail; - } - - ddev->dev_private = qddev; - qddev->ddev = ddev; - - qddev->qdev = qdev; qddev->partition_id = partition_id; - INIT_LIST_HEAD(&qddev->users); - mutex_init(&qddev->users_mutex); - - qdev->qddev = qddev; - - ret = drm_dev_register(ddev, 0); - if (ret) { - pci_dbg(qdev->pdev, "%s: drm_dev_register failed %d\n", __func__, ret); - goto drm_reg_fail; - } - return 0; + /* + * drm_dev_unregister() sets the driver data to NULL and + * drm_dev_register() does not update the driver data. During a SOC + * reset drm dev is unregistered and registered again leaving the + * driver data to NULL. + */ + dev_set_drvdata(to_accel_kdev(qddev), drm->accel); + ret = drm_dev_register(drm, 0); + if (ret) + pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret); -drm_reg_fail: - mutex_destroy(&qddev->users_mutex); - qdev->qddev = NULL; - drm_dev_put(ddev); -ddev_fail: - kfree(qddev); return ret; } static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id) { - struct qaic_drm_device *qddev; + struct qaic_drm_device *qddev = qdev->qddev; + struct drm_device *drm = to_drm(qddev); struct qaic_user *usr; - qddev = qdev->qddev; - qdev->qddev = NULL; - if (!qddev) - return; - + drm_dev_get(drm); + drm_dev_unregister(drm); + qddev->partition_id = 0; /* * Existing users get unresolvable errors till they close FDs. * Need to sync carefully with users calling close(). 
The @@ -254,13 +231,7 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id) mutex_lock(&qddev->users_mutex); } mutex_unlock(&qddev->users_mutex); - - if (qddev->ddev) { - drm_dev_unregister(qddev->ddev); - drm_dev_put(qddev->ddev); - } - - kfree(qddev); + drm_dev_put(drm); } static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) @@ -344,8 +315,20 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset) qdev->in_reset = false; } +static void cleanup_qdev(struct qaic_device *qdev) +{ + int i; + + for (i = 0; i < qdev->num_dbc; ++i) + cleanup_srcu_struct(&qdev->dbc[i].ch_lock); + cleanup_srcu_struct(&qdev->dev_lock); + pci_set_drvdata(qdev->pdev, NULL); + destroy_workqueue(qdev->cntl_wq); +} + static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id) { + struct qaic_drm_device *qddev; struct qaic_device *qdev; int i; @@ -381,18 +364,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de INIT_LIST_HEAD(&qdev->dbc[i].bo_lists); } - return qdev; -} + qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm); + if (IS_ERR(qddev)) { + cleanup_qdev(qdev); + return NULL; + } -static void cleanup_qdev(struct qaic_device *qdev) -{ - int i; + drmm_mutex_init(to_drm(qddev), &qddev->users_mutex); + INIT_LIST_HEAD(&qddev->users); + qddev->qdev = qdev; + qdev->qddev = qddev; - for (i = 0; i < qdev->num_dbc; ++i) - cleanup_srcu_struct(&qdev->dbc[i].ch_lock); - cleanup_srcu_struct(&qdev->dev_lock); - pci_set_drvdata(qdev->pdev, NULL); - destroy_workqueue(qdev->cntl_wq); + return qdev; } static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev) @@ -591,22 +574,22 @@ static int __init qaic_init(void) { int ret; - ret = mhi_driver_register(&qaic_mhi_driver); + ret = pci_register_driver(&qaic_pci_driver); if (ret) { - pr_debug("qaic: mhi_driver_register failed %d\n", ret); + pr_debug("qaic: pci_register_driver failed %d\n", ret); return ret; } - ret = pci_register_driver(&qaic_pci_driver); + ret = mhi_driver_register(&qaic_mhi_driver); if (ret) { - pr_debug("qaic: pci_register_driver failed %d\n", ret); - goto free_mhi; + pr_debug("qaic: mhi_driver_register failed %d\n", ret); + goto free_pci; } return 0; -free_mhi: - mhi_driver_unregister(&qaic_mhi_driver); +free_pci: + pci_unregister_driver(&qaic_pci_driver); return ret; } @@ -628,8 +611,8 @@ static void __exit qaic_exit(void) * reinitializing the link_up state after the cleanup is done. */ link_up = true; - pci_unregister_driver(&qaic_pci_driver); mhi_driver_unregister(&qaic_mhi_driver); + pci_unregister_driver(&qaic_pci_driver); } module_init(qaic_init); |
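
For context, a minimal userspace sketch of the detach uapi added by this series (not part of the patch above). It assumes the uapi header from this series (drm/qaic_accel.h), an accel node at /dev/accel/accel0, and a GEM handle that was previously sliced via DRM_IOCTL_QAIC_ATTACH_SLICE_BO; the node path and handle value are illustrative assumptions.

/*
 * Hypothetical caller of DRM_IOCTL_QAIC_DETACH_SLICE_BO. The handle is
 * assumed to refer to a BO whose slicing was attached earlier.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/qaic_accel.h> /* struct qaic_detach_slice, ioctl number */

int main(void)
{
	struct qaic_detach_slice detach = { .handle = 1, .pad = 0 };
	int fd = open("/dev/accel/accel0", O_RDWR); /* placeholder node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * pad must be zero or the kernel returns -EINVAL; a BO still queued
	 * to hardware is rejected with -EBUSY, matching the xfer_lock check
	 * in qaic_detach_slice_bo_ioctl() above.
	 */
	if (ioctl(fd, DRM_IOCTL_QAIC_DETACH_SLICE_BO, &detach))
		perror("DRM_IOCTL_QAIC_DETACH_SLICE_BO");

	close(fd);
	return 0;
}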