73 files changed, 1589 insertions, 400 deletions
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.txt b/Documentation/devicetree/bindings/display/msm/gmu.txt index 90af5b0a56a9..bf9c7a2a495c 100644 --- a/Documentation/devicetree/bindings/display/msm/gmu.txt +++ b/Documentation/devicetree/bindings/display/msm/gmu.txt @@ -31,6 +31,10 @@ Required properties: - iommus: phandle to the adreno iommu - operating-points-v2: phandle to the OPP operating points +Optional properties: +- sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon + SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml. + Example: / { @@ -63,3 +67,50 @@ Example: operating-points-v2 = <&gmu_opp_table>; }; }; + +a3xx example with OCMEM support: + +/ { + ... + + gpu: adreno@fdb00000 { + compatible = "qcom,adreno-330.2", + "qcom,adreno"; + reg = <0xfdb00000 0x10000>; + reg-names = "kgsl_3d0_reg_memory"; + interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "kgsl_3d0_irq"; + clock-names = "core", + "iface", + "mem_iface"; + clocks = <&mmcc OXILI_GFX3D_CLK>, + <&mmcc OXILICX_AHB_CLK>, + <&mmcc OXILICX_AXI_CLK>; + sram = <&gmu_sram>; + power-domains = <&mmcc OXILICX_GDSC>; + operating-points-v2 = <&gpu_opp_table>; + iommus = <&gpu_iommu 0>; + }; + + ocmem@fdd00000 { + compatible = "qcom,msm8974-ocmem"; + + reg = <0xfdd00000 0x2000>, + <0xfec00000 0x180000>; + reg-names = "ctrl", + "mem"; + + clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, + <&mmcc OCMEMCX_OCMEMNOC_CLK>; + clock-names = "core", + "iface"; + + #address-cells = <1>; + #size-cells = <1>; + + gmu_sram: gmu-sram@0 { + reg = <0x0 0x100000>; + ranges = <0 0 0xfec00000 0x100000>; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/display/msm/mdp5.txt b/Documentation/devicetree/bindings/display/msm/mdp5.txt index 4e11338548aa..43d11279c925 100644 --- a/Documentation/devicetree/bindings/display/msm/mdp5.txt +++ b/Documentation/devicetree/bindings/display/msm/mdp5.txt @@ -76,6 +76,8 @@ Required properties: Optional properties: - clock-names: the following clocks are optional: * "lut" + * "tbu" + * "tbu_rt" Example: diff --git a/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml b/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml new file mode 100644 index 000000000000..222990f9923c --- /dev/null +++ b/Documentation/devicetree/bindings/sram/qcom,ocmem.yaml @@ -0,0 +1,96 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sram/qcom,ocmem.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: On Chip Memory (OCMEM) that is present on some Qualcomm Snapdragon SoCs. + +maintainers: + - Brian Masney <[email protected]> + +description: | + The On Chip Memory (OCMEM) is typically used by the GPU, camera/video, and + audio components on some Snapdragon SoCs. + +properties: + compatible: + const: qcom,msm8974-ocmem + + reg: + items: + - description: Control registers + - description: OCMEM address range + + reg-names: + items: + - const: ctrl + - const: mem + + clocks: + items: + - description: Core clock + - description: Interface clock + + clock-names: + items: + - const: core + - const: iface + + '#address-cells': + const: 1 + + '#size-cells': + const: 1 + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + - '#address-cells' + - '#size-cells' + +patternProperties: + "^.+-sram$": + type: object + description: A region of reserved memory. 
+ + properties: + reg: + maxItems: 1 + + ranges: + maxItems: 1 + + required: + - reg + - ranges + +examples: + - | + #include <dt-bindings/clock/qcom,rpmcc.h> + #include <dt-bindings/clock/qcom,mmcc-msm8974.h> + + ocmem: ocmem@fdd00000 { + compatible = "qcom,msm8974-ocmem"; + + reg = <0xfdd00000 0x2000>, + <0xfec00000 0x180000>; + reg-names = "ctrl", + "mem"; + + clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, + <&mmcc OCMEMCX_OCMEMNOC_CLK>; + clock-names = "core", + "iface"; + + #address-cells = <1>; + #size-cells = <1>; + + gmu-sram@0 { + reg = <0x0 0x100000>; + ranges = <0 0 0xfec00000 0x100000>; + }; + }; diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index 215061c581e1..5d90b7f5ab5a 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c @@ -442,6 +442,41 @@ int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req, req, req_cnt * sizeof(*req), resp, sizeof(*resp)); } +int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size, + u32 mode) +{ + struct ocmem_tz_lock { + __le32 id; + __le32 offset; + __le32 size; + __le32 mode; + } request; + + request.id = cpu_to_le32(id); + request.offset = cpu_to_le32(offset); + request.size = cpu_to_le32(size); + request.mode = cpu_to_le32(mode); + + return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD, + &request, sizeof(request), NULL, 0); +} + +int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size) +{ + struct ocmem_tz_unlock { + __le32 id; + __le32 offset; + __le32 size; + } request; + + request.id = cpu_to_le32(id); + request.offset = cpu_to_le32(offset); + request.size = cpu_to_le32(size); + + return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD, + &request, sizeof(request), NULL, 0); +} + void __qcom_scm_init(void) { } @@ -582,7 +617,22 @@ int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare) { - return -ENODEV; + struct msm_scm_sec_cfg { + __le32 id; + __le32 ctx_bank_num; + } cfg; + int ret, scm_ret = 0; + + cfg.id = cpu_to_le32(device_id); + cfg.ctx_bank_num = cpu_to_le32(spare); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG, + &cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret)); + + if (ret || scm_ret) + return ret ? 
ret : -EINVAL; + + return 0; } int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index 91d5ad7cf58b..c3a3d9874def 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -241,6 +241,18 @@ int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req, return ret; } +int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset, + uint32_t size, uint32_t mode) +{ + return -ENOTSUPP; +} + +int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset, + uint32_t size) +{ + return -ENOTSUPP; +} + void __qcom_scm_init(void) { u64 cmd; diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 4802ab170fe5..27c1d98a34e6 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -192,6 +192,46 @@ bool qcom_scm_pas_supported(u32 peripheral) EXPORT_SYMBOL(qcom_scm_pas_supported); /** + * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available + */ +bool qcom_scm_ocmem_lock_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC, + QCOM_SCM_OCMEM_LOCK_CMD); +} +EXPORT_SYMBOL(qcom_scm_ocmem_lock_available); + +/** + * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM + * region to the specified initiator + * + * @id: tz initiator id + * @offset: OCMEM offset + * @size: OCMEM size + * @mode: access mode (WIDE/NARROW) + */ +int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, + u32 mode) +{ + return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode); +} +EXPORT_SYMBOL(qcom_scm_ocmem_lock); + +/** + * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM + * region from the specified initiator + * + * @id: tz initiator id + * @offset: OCMEM offset + * @size: OCMEM size + */ +int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) +{ + return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size); +} +EXPORT_SYMBOL(qcom_scm_ocmem_unlock); + +/** * qcom_scm_pas_init_image() - Initialize peripheral authentication service * state machine for a given peripheral, using the * metadata @@ -327,6 +367,19 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = { .deassert = qcom_scm_pas_reset_deassert, }; +/** + * qcom_scm_restore_sec_cfg_available() - Check if secure environment + * supports restore security config interface. + * + * Return true if restore-cfg interface is supported, false if not. 
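+ *
+ * For example (editor's sketch, not part of the patch), a caller would
+ * typically pair the availability check with the call itself:
+ *
+ *	if (qcom_scm_restore_sec_cfg_available())
+ *		ret = qcom_scm_restore_sec_cfg(device_id, spare);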
+ */ +bool qcom_scm_restore_sec_cfg_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, + QCOM_SCM_RESTORE_SEC_CFG); +} +EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available); + int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare); diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index 99506bd873c0..ef293ee67ec1 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -42,6 +42,15 @@ extern int __qcom_scm_hdcp_req(struct device *dev, extern void __qcom_scm_init(void); +#define QCOM_SCM_OCMEM_SVC 0xf +#define QCOM_SCM_OCMEM_LOCK_CMD 0x1 +#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x2 + +extern int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, + u32 size, u32 mode); +extern int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, + u32 size); + #define QCOM_SCM_SVC_PIL 0x2 #define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1 #define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2 diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile index 1799537a3228..c280b6ae38eb 100644 --- a/drivers/gpu/drm/i915/Kconfig.profile +++ b/drivers/gpu/drm/i915/Kconfig.profile @@ -25,7 +25,7 @@ config DRM_I915_HEARTBEAT_INTERVAL config DRM_I915_PREEMPT_TIMEOUT int "Preempt timeout (ms, jiffy granularity)" - default 100 # milliseconds + default 640 # milliseconds help How long to wait (in milliseconds) for a preemption event to occur when submitting a new context via execlists. If the current context diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 0caef2592a7e..ed8c7ce62119 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1273,7 +1273,9 @@ static u8 icl_calc_voltage_level(int cdclk) static u8 ehl_calc_voltage_level(int cdclk) { - if (cdclk > 312000) + if (cdclk > 326400) + return 3; + else if (cdclk > 312000) return 2; else if (cdclk > 180000) return 1; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 0d6e494b4508..c7c2b349858d 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -593,7 +593,7 @@ struct tgl_dkl_phy_ddi_buf_trans { u32 dkl_de_emphasis_control; }; -static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = { +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */ @@ -607,6 +607,20 @@ static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = { { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ }; +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = { + /* HDMI Preset VS Pre-emph */ + { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */ + { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */ + { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */ + { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */ + { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */ + { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */ + { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */ + { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */ + { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */ + { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */ +}; + static const struct ddi_buf_trans * bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { @@ -898,7 +912,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por 
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); else - n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); default_entry = n_entries - 1; } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) @@ -2371,7 +2385,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) icl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); else - n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(dev_priv, encoder->type, @@ -2823,8 +2837,13 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations; u32 n_entries, val, ln, dpcnt_mask, dpcnt_val; - n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); - ddi_translations = tgl_dkl_phy_ddi_translations; + if (encoder->type == INTEL_OUTPUT_HDMI) { + n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); + ddi_translations = tgl_dkl_phy_hdmi_ddi_trans; + } else { + n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); + ddi_translations = tgl_dkl_phy_dp_ddi_trans; + } if (level >= n_entries) level = n_entries - 1; diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index ee9d2bcd2c13..ef7bc41ffffa 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -310,10 +310,23 @@ int intel_context_prepare_remote_request(struct intel_context *ce, GEM_BUG_ON(rq->hw_context == ce); if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */ - err = mutex_lock_interruptible_nested(&tl->mutex, - SINGLE_DEPTH_NESTING); - if (err) - return err; + /* + * Ideally, we just want to insert our foreign fence as + * a barrier into the remote context, such that this operation + * occurs after all current operations in that context, and + * all future operations must occur after this. + * + * Currently, the timeline->last_request tracking is guarded + * by its mutex and so we must obtain that to atomically + * insert our barrier. However, since we already hold our + * timeline->mutex, we must be careful against potential + * inversion if we are the kernel_context as the remote context + * will itself poke at the kernel_context when it needs to + * unpin. Ergo, if already locked, we drop both locks and + * try again (through the magic of userspace retrying on EAGAIN). + */ + if (!mutex_trylock(&tl->mutex)) + return -EAGAIN; /* Queue this switch after current activity by this context. 
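 *
 * (Editor's aside: the trylock-or-EAGAIN above is the standard pattern for
 * breaking a mutex inversion between two timelines. Userspace is expected
 * to restart the operation on -EAGAIN, e.g.:
 *
 *	do
 *		ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *	while (ret == -1 && errno == EAGAIN);
 *
 * so the kernel side never has to block on the second mutex.)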
*/ err = i915_active_fence_set(&tl->last_request, rq); diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index bc3b72bfa9e3..01765a7ec18f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -100,9 +100,7 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists) static inline struct i915_request * execlists_active(const struct intel_engine_execlists *execlists) { - GEM_BUG_ON(execlists->active - execlists->inflight > - execlists_num_ports(execlists)); - return READ_ONCE(*execlists->active); + return *READ_ONCE(execlists->active); } static inline void diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 5ca3ec911e50..813bd3a610d2 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -28,13 +28,13 @@ #include "i915_drv.h" -#include "gt/intel_gt.h" - +#include "intel_context.h" #include "intel_engine.h" #include "intel_engine_pm.h" #include "intel_engine_pool.h" #include "intel_engine_user.h" -#include "intel_context.h" +#include "intel_gt.h" +#include "intel_gt_requests.h" #include "intel_lrc.h" #include "intel_reset.h" #include "intel_ring.h" @@ -616,6 +616,7 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) intel_engine_init_execlists(engine); intel_engine_init_cmd_parser(engine); intel_engine_init__pm(engine); + intel_engine_init_retire(engine); intel_engine_pool_init(&engine->pool); @@ -838,6 +839,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) cleanup_status_page(engine); + intel_engine_fini_retire(engine); intel_engine_pool_fini(&engine->pool); intel_engine_fini_breadcrumbs(engine); intel_engine_cleanup_cmd_parser(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 3c0f490ff2c7..0e1ad4a4bd97 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -73,8 +73,42 @@ static inline void __timeline_mark_unlock(struct intel_context *ce, #endif /* !IS_ENABLED(CONFIG_LOCKDEP) */ +static void +__queue_and_release_pm(struct i915_request *rq, + struct intel_timeline *tl, + struct intel_engine_cs *engine) +{ + struct intel_gt_timelines *timelines = &engine->gt->timelines; + + GEM_TRACE("%s\n", engine->name); + + /* + * We have to serialise all potential retirement paths with our + * submission, as we don't want to underflow either the + * engine->wakeref.counter or our timeline->active_count. + * + * Equally, we cannot allow a new submission to start until + * after we finish queueing, nor could we allow that submitter + * to retire us before we are ready! 
+ */ + spin_lock(&timelines->lock); + + /* Let intel_gt_retire_requests() retire us (acquired under lock) */ + if (!atomic_fetch_inc(&tl->active_count)) + list_add_tail(&tl->link, &timelines->active_list); + + /* Hand the request over to HW and so on to engine_retire() */ + __i915_request_queue(rq, NULL); + + /* Let new submissions commence (and maybe retire this timeline) */ + __intel_wakeref_defer_park(&engine->wakeref); + + spin_unlock(&timelines->lock); +} + static bool switch_to_kernel_context(struct intel_engine_cs *engine) { + struct intel_context *ce = engine->kernel_context; struct i915_request *rq; unsigned long flags; bool result = true; @@ -98,16 +132,31 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) * This should hold true as we can only park the engine after * retiring the last request, thus all rings should be empty and * all timelines idle. + * + * For unlocking, there are two other parties and the GPU who have a + * stake here. + * + * A new gpu user will be waiting on the engine-pm to start their + * engine_unpark. New waiters are predicated on engine->wakeref.count + * and so intel_wakeref_defer_park() acts like a mutex_unlock of the + * engine->wakeref. + * + * The other party is intel_gt_retire_requests(), which is walking the + * list of active timelines looking for completions. Meanwhile, as soon + * as we call __i915_request_queue(), the GPU may complete our request. + * Ergo, if we put ourselves on the timelines.active_list + * (see intel_timeline_enter()) before we increment the + * engine->wakeref.count, we may see the request completion and retire + * it causing an underflow of the engine->wakeref. */ - flags = __timeline_mark_lock(engine->kernel_context); + flags = __timeline_mark_lock(ce); + GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0); - rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT); + rq = __i915_request_create(ce, GFP_NOWAIT); if (IS_ERR(rq)) /* Context switch failed, hope for the best! Maybe reset? */ goto out_unlock; - intel_timeline_enter(i915_request_timeline(rq)); - /* Check again on the next retirement. 
*/ engine->wakeref_serial = engine->serial + 1; i915_request_add_active_barriers(rq); @@ -116,13 +165,12 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) rq->sched.attr.priority = I915_PRIORITY_BARRIER; __i915_request_commit(rq); - /* Release our exclusive hold on the engine */ - __intel_wakeref_defer_park(&engine->wakeref); - __i915_request_queue(rq, NULL); + /* Expose ourselves to the world */ + __queue_and_release_pm(rq, ce->timeline, engine); result = false; out_unlock: - __timeline_mark_unlock(engine->kernel_context, flags); + __timeline_mark_unlock(ce, flags); return result; } @@ -177,7 +225,8 @@ static int __engine_park(struct intel_wakeref *wf) engine->execlists.no_priolist = false; - intel_gt_pm_put(engine->gt); + /* While gt calls i915_vma_parked(), we have to break the lock cycle */ + intel_gt_pm_put_async(engine->gt); return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index 739c50fefcef..24e20344dc22 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -31,6 +31,16 @@ static inline void intel_engine_pm_put(struct intel_engine_cs *engine) intel_wakeref_put(&engine->wakeref); } +static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine) +{ + intel_wakeref_put_async(&engine->wakeref); +} + +static inline void intel_engine_pm_flush(struct intel_engine_cs *engine) +{ + intel_wakeref_unlock_wait(&engine->wakeref); +} + void intel_engine_init__pm(struct intel_engine_cs *engine); #endif /* INTEL_ENGINE_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 758f0e8ec672..17f1f1441efc 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -451,6 +451,14 @@ struct intel_engine_cs { struct intel_engine_execlists execlists; + /* + * Keep track of completed timelines on this engine for early + * retirement with the goal of quickly enabling powersaving as + * soon as the engine is idle. 
+ */ + struct intel_timeline *retire; + struct work_struct retire_work; + /* status_notifier: list of callbacks for context-switch changes */ struct atomic_notifier_head context_status_notifier; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 6187cdd06646..a459a42ad5c2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -105,7 +105,6 @@ static int __gt_park(struct intel_wakeref *wf) static const struct intel_wakeref_ops wf_ops = { .get = __gt_unpark, .put = __gt_park, - .flags = INTEL_WAKEREF_PUT_ASYNC, }; void intel_gt_pm_init_early(struct intel_gt *gt) @@ -272,7 +271,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt) static suspend_state_t pm_suspend_target(void) { -#if IS_ENABLED(CONFIG_PM_SLEEP) +#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP) return pm_suspend_target_state; #else return PM_SUSPEND_TO_IDLE; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index b3e17399be9b..990efc27a4e4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -32,6 +32,11 @@ static inline void intel_gt_pm_put(struct intel_gt *gt) intel_wakeref_put(&gt->wakeref); } +static inline void intel_gt_pm_put_async(struct intel_gt *gt) +{ + intel_wakeref_put_async(&gt->wakeref); +} + static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) { return intel_wakeref_wait_for_idle(&gt->wakeref); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 353809ac2754..3dc13ecf41bf 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -4,6 +4,8 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/workqueue.h> + #include "i915_drv.h" /* for_each_engine() */ #include "i915_request.h" #include "intel_gt.h" @@ -29,6 +31,79 @@ static void flush_submission(struct intel_gt *gt) intel_engine_flush_submission(engine); } +static void engine_retire(struct work_struct *work) +{ + struct intel_engine_cs *engine = + container_of(work, typeof(*engine), retire_work); + struct intel_timeline *tl = xchg(&engine->retire, NULL); + + do { + struct intel_timeline *next = xchg(&tl->retire, NULL); + + /* + * Our goal here is to retire _idle_ timelines as soon as + * possible (as they are idle, we do not expect userspace + * to be cleaning up anytime soon). + * + * If the timeline is currently locked, either it is being + * retired elsewhere or about to be! + */ + if (mutex_trylock(&tl->mutex)) { + retire_requests(tl); + mutex_unlock(&tl->mutex); + } + intel_timeline_put(tl); + + GEM_BUG_ON(!next); + tl = ptr_mask_bits(next, 1); + } while (tl); +} + +static bool add_retire(struct intel_engine_cs *engine, + struct intel_timeline *tl) +{ + struct intel_timeline *first; + + /* + * We open-code a llist here to include the additional tag [BIT(0)] + * so that we know when the timeline is already on a + * retirement queue: either this engine or another. + * + * However, we rely on the fact that a timeline can only be active on a single + * engine at any one time and that add_retire() is called before the + * engine releases the timeline and it is transferred to another to retire. 
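+ *
+ * Editor's illustration of the tag encoding (BIT(0) marks 'queued'):
+ *
+ *	engine->retire == NULL                    nothing queued
+ *	tl->retire == ptr_pack_bits(next, 1, 1)   tl queued, next element below
+ *	tl->retire == ptr_pack_bits(NULL, 1, 1)   tl is the end of the chain
+ *
+ * so READ_ONCE(tl->retire) != NULL doubles as the 'already queued' test
+ * below, and engine_retire() strips the tag with ptr_mask_bits(next, 1).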
+ */ + + if (READ_ONCE(tl->retire)) /* already queued */ + return false; + + intel_timeline_get(tl); + first = READ_ONCE(engine->retire); + do + tl->retire = ptr_pack_bits(first, 1, 1); + while (!try_cmpxchg(&engine->retire, &first, tl)); + + return !first; +} + +void intel_engine_add_retire(struct intel_engine_cs *engine, + struct intel_timeline *tl) +{ + if (add_retire(engine, tl)) + schedule_work(&engine->retire_work); +} + +void intel_engine_init_retire(struct intel_engine_cs *engine) +{ + INIT_WORK(&engine->retire_work, engine_retire); +} + +void intel_engine_fini_retire(struct intel_engine_cs *engine) +{ + flush_work(&engine->retire_work); + GEM_BUG_ON(engine->retire); +} + long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) { struct intel_gt_timelines *timelines = &gt->timelines; @@ -52,8 +127,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) } intel_timeline_get(tl); - GEM_BUG_ON(!tl->active_count); - tl->active_count++; /* pin the list element */ + GEM_BUG_ON(!atomic_read(&tl->active_count)); + atomic_inc(&tl->active_count); /* pin the list element */ spin_unlock_irqrestore(&timelines->lock, flags); if (timeout > 0) { @@ -74,7 +149,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) /* Resume iteration after dropping lock */ list_safe_reset_next(tl, tn, link); - if (!--tl->active_count) + if (atomic_dec_and_test(&tl->active_count)) list_del(&tl->link); else active_count += !!rcu_access_pointer(tl->last_request.fence); @@ -83,7 +158,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) /* Defer the final release to after the spinlock */ if (refcount_dec_and_test(&tl->kref.refcount)) { - GEM_BUG_ON(tl->active_count); + GEM_BUG_ON(atomic_read(&tl->active_count)); list_add(&tl->link, &free); } } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h index bd31cbce47e0..d626fb115386 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h @@ -7,7 +7,9 @@ #ifndef INTEL_GT_REQUESTS_H #define INTEL_GT_REQUESTS_H +struct intel_engine_cs; struct intel_gt; +struct intel_timeline; long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout); static inline void intel_gt_retire_requests(struct intel_gt *gt) @@ -15,6 +17,11 @@ static inline void intel_gt_retire_requests(struct intel_gt *gt) intel_gt_retire_requests_timeout(gt, 0); } +void intel_engine_init_retire(struct intel_engine_cs *engine); +void intel_engine_add_retire(struct intel_engine_cs *engine, + struct intel_timeline *tl); +void intel_engine_fini_retire(struct intel_engine_cs *engine); + int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout); void intel_gt_init_requests(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 0ac3b26674ad..9fdefbdc3546 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -142,6 +142,7 @@ #include "intel_engine_pm.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_gt_requests.h" #include "intel_lrc_reg.h" #include "intel_mocs.h" #include "intel_reset.h" @@ -1115,9 +1116,17 @@ __execlists_schedule_out(struct i915_request *rq, * refrain from doing non-trivial work here. */ + /* + * If we have just completed this context, the engine may now be + * idle and we want to re-enter powersaving. 
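+ *
+ * (Editor's note: 'just completed' is the pair of tests below, i.e.
+ * list_is_last() on ce->timeline->requests plus i915_request_completed(),
+ * meaning rq was the final outstanding request on that timeline.)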
+ */ + if (list_is_last(&rq->link, &ce->timeline->requests) && + i915_request_completed(rq)) + intel_engine_add_retire(engine, ce->timeline); + intel_engine_context_out(engine); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); - intel_gt_pm_put(engine->gt); + intel_gt_pm_put_async(engine->gt); /* * If this is part of a virtual engine, its next request may @@ -1937,16 +1946,17 @@ skip_submit: static void cancel_port_requests(struct intel_engine_execlists * const execlists) { - struct i915_request * const *port, *rq; + struct i915_request * const *port; - for (port = execlists->pending; (rq = *port); port++) - execlists_schedule_out(rq); + for (port = execlists->pending; *port; port++) + execlists_schedule_out(*port); memset(execlists->pending, 0, sizeof(execlists->pending)); - for (port = execlists->active; (rq = *port); port++) - execlists_schedule_out(rq); - execlists->active = - memset(execlists->inflight, 0, sizeof(execlists->inflight)); + /* Mark the end of active before we overwrite *active */ + for (port = xchg(&execlists->active, execlists->pending); *port; port++) + execlists_schedule_out(*port); + WRITE_ONCE(execlists->active, + memset(execlists->inflight, 0, sizeof(execlists->inflight))); } static inline void @@ -2099,23 +2109,27 @@ static void process_csb(struct intel_engine_cs *engine) else promote = gen8_csb_parse(execlists, buf + 2 * head); if (promote) { + struct i915_request * const *old = execlists->active; + + /* Point active to the new ELSP; prevent overwriting */ + WRITE_ONCE(execlists->active, execlists->pending); + set_timeslice(engine); + if (!inject_preempt_hang(execlists)) ring_set_paused(engine, 0); /* cancel old inflight, prepare for switch */ - trace_ports(execlists, "preempted", execlists->active); - while (*execlists->active) - execlists_schedule_out(*execlists->active++); + trace_ports(execlists, "preempted", old); + while (*old) + execlists_schedule_out(*old++); /* switch pending to inflight */ GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); - execlists->active = - memcpy(execlists->inflight, - execlists->pending, - execlists_num_ports(execlists) * - sizeof(*execlists->pending)); - - set_timeslice(engine); + WRITE_ONCE(execlists->active, + memcpy(execlists->inflight, + execlists->pending, + execlists_num_ports(execlists) * + sizeof(*execlists->pending))); WRITE_ONCE(execlists->pending[0], NULL); } else { diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index f03e000051c1..c97423a76642 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1114,7 +1114,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) out: intel_engine_cancel_stop_cs(engine); reset_finish_engine(engine); - intel_engine_pm_put(engine); + intel_engine_pm_put_async(engine); return ret; } diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index ece20504d240..374b28f13ca0 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -57,9 +57,10 @@ int intel_ring_pin(struct intel_ring *ring) i915_vma_make_unshrinkable(vma); - GEM_BUG_ON(ring->vaddr); - ring->vaddr = addr; + /* Discard any unused bytes beyond that submitted to hw. */ + intel_ring_reset(ring, ring->emit); + ring->vaddr = addr; return 0; err_ring: @@ -85,20 +86,14 @@ void intel_ring_unpin(struct intel_ring *ring) if (!atomic_dec_and_test(&ring->pin_count)) return; - /* Discard any unused bytes beyond that submitted to hw. 
*/ - intel_ring_reset(ring, ring->emit); - i915_vma_unset_ggtt_write(vma); if (i915_vma_is_map_and_fenceable(vma)) i915_vma_unpin_iomap(vma); else i915_gem_object_unpin_map(vma->obj); - GEM_BUG_ON(!ring->vaddr); - ring->vaddr = NULL; - - i915_vma_unpin(vma); i915_vma_make_purgeable(vma); + i915_vma_unpin(vma); } static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 14ad10acd548..649798c184fb 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -282,6 +282,7 @@ void intel_timeline_fini(struct intel_timeline *timeline) { GEM_BUG_ON(atomic_read(&timeline->pin_count)); GEM_BUG_ON(!list_empty(&timeline->requests)); + GEM_BUG_ON(timeline->retire); if (timeline->hwsp_cacheline) cacheline_free(timeline->hwsp_cacheline); @@ -339,15 +340,33 @@ void intel_timeline_enter(struct intel_timeline *tl) struct intel_gt_timelines *timelines = &tl->gt->timelines; unsigned long flags; + /* + * Pretend we are serialised by the timeline->mutex. + * + * While generally true, there are a few exceptions to the rule + * for the engine->kernel_context being used to manage power + * transitions. As engine_park() may be called from under any + * timeline, it uses the power mutex as a global serialisation + * lock to prevent any other request entering its timeline. + * + * The rule is generally tl->mutex, otherwise engine->wakeref.mutex. + * + * However, intel_gt_retire_requests() does not know along which + * engine it is retiring and so cannot partake in the engine-pm + * barrier, and there we use the tl->active_count as a means to + * pin the timeline in the active_list while the locks are dropped. + * Ergo, as that is outside of the engine-pm barrier, we need to + * use atomics to manipulate tl->active_count. + */ lockdep_assert_held(&tl->mutex); - GEM_BUG_ON(!atomic_read(&tl->pin_count)); - if (tl->active_count++) + + if (atomic_add_unless(&tl->active_count, 1, 0)) return; - GEM_BUG_ON(!tl->active_count); /* overflow? */ spin_lock_irqsave(&timelines->lock, flags); - list_add(&tl->link, &timelines->active_list); + if (!atomic_fetch_inc(&tl->active_count)) + list_add_tail(&tl->link, &timelines->active_list); spin_unlock_irqrestore(&timelines->lock, flags); } @@ -356,14 +375,16 @@ void intel_timeline_exit(struct intel_timeline *tl) struct intel_gt_timelines *timelines = &tl->gt->timelines; unsigned long flags; + /* See intel_timeline_enter() */ lockdep_assert_held(&tl->mutex); - GEM_BUG_ON(!tl->active_count); - if (--tl->active_count) + GEM_BUG_ON(!atomic_read(&tl->active_count)); + if (atomic_add_unless(&tl->active_count, -1, 1)) return; spin_lock_irqsave(&timelines->lock, flags); - list_del(&tl->link); + if (atomic_dec_and_test(&tl->active_count)) + list_del(&tl->link); spin_unlock_irqrestore(&timelines->lock, flags); /* diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h index 98d9ee166379..aaf15cbe1ce1 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h @@ -42,7 +42,7 @@ struct intel_timeline { * from the intel_context caller plus internal atomicity. */ atomic_t pin_count; - unsigned int active_count; + atomic_t active_count; const u32 *hwsp_seqno; struct i915_vma *hwsp_ggtt; @@ -66,6 +66,9 @@ struct intel_timeline { */ struct i915_active_fence last_request; + /** A chain of completed timelines ready for early retirement. 
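+ * The chain is singly linked through intel_timeline.retire, with BIT(0)
+ * doubling as the on-queue tag (see add_retire() in intel_gt_requests.c);
+ * it is drained by engine_retire() from the engine's retire_work worker.
+ * (Editor's note.)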
*/ + struct intel_timeline *retire; + /** * We track the most recent seqno that we wait on in every context so * that we only have to emit a new await and dependency on a more diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 20b9c83f43ad..cbf6b0735272 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -51,11 +51,12 @@ static int live_engine_pm(void *arg) pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n", engine->name, p->name); else - intel_engine_pm_put(engine); - intel_engine_pm_put(engine); + intel_engine_pm_put_async(engine); + intel_engine_pm_put_async(engine); p->critical_section_end(); - /* engine wakeref is sync (instant) */ + intel_engine_pm_flush(engine); + if (intel_engine_pm_is_awake(engine)) { pr_err("%s is still awake after flushing pm\n", engine->name); diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 5448f37c8102..dca15ace88f6 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -672,12 +672,13 @@ void i915_active_acquire_barrier(struct i915_active *ref) * populated by i915_request_add_active_barriers() to point to the * request that will eventually release them. */ - spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING); llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { struct active_node *node = barrier_from_ll(pos); struct intel_engine_cs *engine = barrier_to_engine(node); struct rb_node **p, *parent; + spin_lock_irqsave_nested(&ref->tree_lock, flags, + SINGLE_DEPTH_NESTING); parent = NULL; p = &ref->tree.rb_node; while (*p) { @@ -693,12 +694,12 @@ void i915_active_acquire_barrier(struct i915_active *ref) } rb_link_node(&node->node, parent, p); rb_insert_color(&node->node, &ref->tree); + spin_unlock_irqrestore(&ref->tree_lock, flags); GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); llist_add(barrier_to_ll(node), &engine->barrier_tasks); intel_engine_pm_put(engine); } - spin_unlock_irqrestore(&ref->tree_lock, flags); } void i915_request_add_active_barriers(struct i915_request *rq) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 0d40dccd1409..2814218c5ba1 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -190,7 +190,7 @@ static u64 get_rc6(struct intel_gt *gt) val = 0; if (intel_gt_pm_get_if_awake(gt)) { val = __get_rc6(gt); - intel_gt_pm_put(gt); + intel_gt_pm_put_async(gt); } spin_lock_irqsave(&pmu->lock, flags); @@ -343,7 +343,7 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns) skip: spin_unlock_irqrestore(&engine->uncore->lock, flags); - intel_engine_pm_put(engine); + intel_engine_pm_put_async(engine); } } @@ -368,7 +368,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns) if (intel_gt_pm_get_if_awake(gt)) { val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1); val = intel_get_cagf(rps, val); - intel_gt_pm_put(gt); + intel_gt_pm_put_async(gt); } add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index c27cfef9281c..ef25ce6e395e 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -103,15 +103,18 @@ query_engine_info(struct drm_i915_private *i915, struct drm_i915_engine_info __user *info_ptr; struct drm_i915_query_engine_info query; struct drm_i915_engine_info info = { }; + unsigned int num_uabi_engines = 0; 
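+	/*
+	 * Editor's note: the reply is now sized by walking the uabi engine
+	 * list rather than RUNTIME_INFO()->num_engines, so only engines
+	 * actually exposed to userspace are counted.
+	 */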
struct intel_engine_cs *engine; int len, ret; if (query_item->flags) return -EINVAL; + for_each_uabi_engine(engine, i915) + num_uabi_engines++; + len = sizeof(struct drm_i915_query_engine_info) + - RUNTIME_INFO(i915)->num_engines * - sizeof(struct drm_i915_engine_info); + num_uabi_engines * sizeof(struct drm_i915_engine_info); ret = copy_query_item(&query, sizeof(query), len, query_item); if (ret != 0) diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 868cc78048d0..59aa1b6f1827 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -54,7 +54,8 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf) static void ____intel_wakeref_put_last(struct intel_wakeref *wf) { - if (!atomic_dec_and_test(&wf->count)) + INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); + if (unlikely(!atomic_dec_and_test(&wf->count))) goto unlock; /* ops->put() must reschedule its own release on error/deferral */ @@ -67,13 +68,12 @@ unlock: mutex_unlock(&wf->mutex); } -void __intel_wakeref_put_last(struct intel_wakeref *wf) +void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags) { INTEL_WAKEREF_BUG_ON(work_pending(&wf->work)); /* Assume we are not in process context and so cannot sleep. */ - if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC || - !mutex_trylock(&wf->mutex)) { + if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) { schedule_work(&wf->work); return; } @@ -109,8 +109,17 @@ void __intel_wakeref_init(struct intel_wakeref *wf, int intel_wakeref_wait_for_idle(struct intel_wakeref *wf) { - return wait_var_event_killable(&wf->wakeref, - !intel_wakeref_is_active(wf)); + int err; + + might_sleep(); + + err = wait_var_event_killable(&wf->wakeref, + !intel_wakeref_is_active(wf)); + if (err) + return err; + + intel_wakeref_unlock_wait(wf); + return 0; } static void wakeref_auto_timeout(struct timer_list *t) diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 5f0c972a80fb..da6e8fd506e6 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -9,6 +9,7 @@ #include <linux/atomic.h> #include <linux/bits.h> +#include <linux/lockdep.h> #include <linux/mutex.h> #include <linux/refcount.h> #include <linux/stackdepot.h> @@ -29,9 +30,6 @@ typedef depot_stack_handle_t intel_wakeref_t; struct intel_wakeref_ops { int (*get)(struct intel_wakeref *wf); int (*put)(struct intel_wakeref *wf); - - unsigned long flags; -#define INTEL_WAKEREF_PUT_ASYNC BIT(0) }; struct intel_wakeref { @@ -57,7 +55,7 @@ void __intel_wakeref_init(struct intel_wakeref *wf, } while (0) int __intel_wakeref_get_first(struct intel_wakeref *wf); -void __intel_wakeref_put_last(struct intel_wakeref *wf); +void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags); /** * intel_wakeref_get: Acquire the wakeref @@ -100,10 +98,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf) } /** - * intel_wakeref_put: Release the wakeref - * @i915: the drm_i915_private device + * intel_wakeref_put_flags: Release the wakeref * @wf: the wakeref - * @fn: callback for releasing the wakeref, called only on final release. + * @flags: control flags * * Release our hold on the wakeref. When there are no more users, * the runtime pm wakeref will be released after the @fn callback is called @@ -116,11 +113,25 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf) * code otherwise. 
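 *
 * For example (editor's sketch), the two wrappers defined below choose the
 * calling context:
 *
 *	intel_wakeref_put(wf);		process context, may sleep on the mutex
 *	intel_wakeref_put_async(wf);	safe in atomic context; the final put
 *					is deferred to wf->work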
*/ static inline void -intel_wakeref_put(struct intel_wakeref *wf) +__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags) +#define INTEL_WAKEREF_PUT_ASYNC BIT(0) { INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); if (unlikely(!atomic_add_unless(&wf->count, -1, 1))) - __intel_wakeref_put_last(wf); + __intel_wakeref_put_last(wf, flags); +} + +static inline void +intel_wakeref_put(struct intel_wakeref *wf) +{ + might_sleep(); + __intel_wakeref_put(wf, 0); +} + +static inline void +intel_wakeref_put_async(struct intel_wakeref *wf) +{ + __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC); } /** @@ -152,6 +163,21 @@ intel_wakeref_unlock(struct intel_wakeref *wf) } /** + * intel_wakeref_unlock_wait: Wait until the active callback is complete + * @wf: the wakeref + * + * Waits for the active callback (under the @wf->mutex or on another CPU) + * to complete. + */ +static inline void +intel_wakeref_unlock_wait(struct intel_wakeref *wf) +{ + mutex_lock(&wf->mutex); + mutex_unlock(&wf->mutex); + flush_work(&wf->work); +} + +/** * intel_wakeref_is_active: Query whether the wakeref is currently held * @wf: the wakeref * @@ -170,6 +196,7 @@ intel_wakeref_is_active(const struct intel_wakeref *wf) static inline void __intel_wakeref_defer_park(struct intel_wakeref *wf) { + lockdep_assert_held(&wf->mutex); INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count)); atomic_set_release(&wf->count, 1); } diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index e9160ce39cbb..6deaa7d01654 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -7,6 +7,7 @@ config DRM_MSM depends on OF && COMMON_CLK depends on MMU depends on INTERCONNECT || !INTERCONNECT + depends on QCOM_OCMEM || QCOM_OCMEM=n select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 5f7e98028eaf..7ad14937fcdf 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -6,10 +6,6 @@ * Copyright (c) 2014 The Linux Foundation. All rights reserved. 
*/ -#ifdef CONFIG_MSM_OCMEM -# include <mach/ocmem.h> -#endif - #include "a3xx_gpu.h" #define A3XX_INT0_MASK \ @@ -195,9 +191,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000); /* Set the OCMEM base address for A330, etc */ - if (a3xx_gpu->ocmem_hdl) { + if (a3xx_gpu->ocmem.hdl) { gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, - (unsigned int)(a3xx_gpu->ocmem_base >> 14)); + (unsigned int)(a3xx_gpu->ocmem.base >> 14)); } /* Turn on performance counters: */ @@ -318,10 +314,7 @@ static void a3xx_destroy(struct msm_gpu *gpu) adreno_gpu_cleanup(adreno_gpu); -#ifdef CONFIG_MSM_OCMEM - if (a3xx_gpu->ocmem_base) - ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl); -#endif + adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem); kfree(a3xx_gpu); } @@ -494,17 +487,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) /* if needed, allocate gmem: */ if (adreno_is_a330(adreno_gpu)) { -#ifdef CONFIG_MSM_OCMEM - /* TODO this is different/missing upstream: */ - struct ocmem_buf *ocmem_hdl = - ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem); - - a3xx_gpu->ocmem_hdl = ocmem_hdl; - a3xx_gpu->ocmem_base = ocmem_hdl->addr; - adreno_gpu->gmem = ocmem_hdl->len; - DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024, - a3xx_gpu->ocmem_base); -#endif + ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev, + adreno_gpu, &a3xx_gpu->ocmem); + if (ret) + goto fail; } if (!gpu->aspace) { diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h index 5dc33e5ea53b..c555fb13e0d7 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h @@ -19,8 +19,7 @@ struct a3xx_gpu { struct adreno_gpu base; /* if OCMEM is used for GMEM: */ - uint32_t ocmem_base; - void *ocmem_hdl; + struct adreno_ocmem ocmem; }; #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index ab2b752566d8..b01388a9e89e 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -2,9 +2,6 @@ /* Copyright (c) 2014 The Linux Foundation. All rights reserved. 
*/ #include "a4xx_gpu.h" -#ifdef CONFIG_MSM_OCMEM -# include <soc/qcom/ocmem.h> -#endif #define A4XX_INT0_MASK \ (A4XX_INT0_RBBM_AHB_ERROR | \ @@ -188,7 +185,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu) (1 << 30) | 0xFFFF); gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR, - (unsigned int)(a4xx_gpu->ocmem_base >> 14)); + (unsigned int)(a4xx_gpu->ocmem.base >> 14)); /* Turn on performance counters: */ gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01); @@ -318,10 +315,7 @@ static void a4xx_destroy(struct msm_gpu *gpu) adreno_gpu_cleanup(adreno_gpu); -#ifdef CONFIG_MSM_OCMEM - if (a4xx_gpu->ocmem_base) - ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl); -#endif + adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem); kfree(a4xx_gpu); } @@ -578,17 +572,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) /* if needed, allocate gmem: */ if (adreno_is_a4xx(adreno_gpu)) { -#ifdef CONFIG_MSM_OCMEM - /* TODO this is different/missing upstream: */ - struct ocmem_buf *ocmem_hdl = - ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem); - - a4xx_gpu->ocmem_hdl = ocmem_hdl; - a4xx_gpu->ocmem_base = ocmem_hdl->addr; - adreno_gpu->gmem = ocmem_hdl->len; - DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024, - a4xx_gpu->ocmem_base); -#endif + ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu, + &a4xx_gpu->ocmem); + if (ret) + goto fail; } if (!gpu->aspace) { diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h index d506311ee240..a01448cba2ea 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h @@ -16,8 +16,7 @@ struct a4xx_gpu { struct adreno_gpu base; /* if OCMEM is used for GMEM: */ - uint32_t ocmem_base; - void *ocmem_hdl; + struct adreno_ocmem ocmem; }; #define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index e9c55d1d6c04..b02e2042547f 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -353,6 +353,9 @@ static int a5xx_me_init(struct msm_gpu *gpu) * 2D mode 3 draw */ OUT_RING(ring, 0x0000000B); + } else if (adreno_is_a510(adreno_gpu)) { + /* Workaround for token and syncs */ + OUT_RING(ring, 0x00000001); } else { /* No workarounds enabled */ OUT_RING(ring, 0x00000000); @@ -568,15 +571,24 @@ static int a5xx_hw_init(struct msm_gpu *gpu) 0x00100000 + adreno_gpu->gmem - 1); gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); - gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); - if (adreno_is_a530(adreno_gpu)) - gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); - if (adreno_is_a540(adreno_gpu)) - gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); - gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); - gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); - - gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); + if (adreno_is_a510(adreno_gpu)) { + gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20); + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A); + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x200 << 11 | 0x200 << 22)); + } else { + gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); + if (adreno_is_a530(adreno_gpu)) + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); + if (adreno_is_a540(adreno_gpu)) + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); + gpu_write(gpu, 
REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x400 << 11 | 0x300 << 22)); + } if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); @@ -589,6 +601,19 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* Enable ME/PFP split notification */ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); + /* + * In A5x, the CCU can send the context_done event of a particular + * context to the UCHE, which ultimately reaches the CP, even while a + * valid transaction of that context is still inside the CCU. This can + * let the CP program config registers, which makes the "valid + * transaction" inside the CCU be interpreted differently and can cause + * a GPU fault. This bug is fixed in the latest A510 revision. To + * enable the fix, bit[11] of RB_DBG_ECO_CNTL needs to be set to 0 (the + * default is 1, i.e. disabled). On older A510 revisions this bit is + * unused. + */ + if (adreno_is_a510(adreno_gpu)) + gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0); + /* Enable HWCG */ a5xx_set_hwcg(gpu, true); @@ -635,7 +660,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* UCHE */ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16)); - if (adreno_is_a530(adreno_gpu)) + if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu)) gpu_write(gpu, REG_A5XX_CP_PROTECT(17), ADRENO_PROTECT_RW(0x10000, 0x8000)); @@ -679,7 +704,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) a5xx_preempt_hw_init(gpu); - a5xx_gpmu_ucode_init(gpu); + if (!adreno_is_a510(adreno_gpu)) + a5xx_gpmu_ucode_init(gpu); ret = a5xx_ucode_init(gpu); if (ret) @@ -712,7 +738,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) } /* - * Try to load a zap shader into the secure world. If successful + * If the chip that we are using does support loading one, then + * try to load a zap shader into the secure world. If successful * we can use the CP to switch out of secure mode. If not then we * have no recourse but to try to switch ourselves out manually. 
If we * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will @@ -1066,6 +1093,7 @@ static void a5xx_dump(struct msm_gpu *gpu) static int a5xx_pm_resume(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); int ret; /* Turn on the core power */ @@ -1073,6 +1101,15 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) if (ret) return ret; + if (adreno_is_a510(adreno_gpu)) { + /* Halt the sp_input_clk at HM level */ + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055); + a5xx_set_hwcg(gpu, true); + /* Turn on sp_input_clk at HM level */ + gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0); + return 0; + } + /* Turn the RBCCU domain first to limit the chances of voltage droop */ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000); @@ -1101,9 +1138,17 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) static int a5xx_pm_suspend(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + u32 mask = 0xf; + + /* A510 has 3 XIN ports in VBIF */ + if (adreno_is_a510(adreno_gpu)) + mask = 0x7; + /* Clear the VBIF pipe before shutting down */ - gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF); - spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF); + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask); + spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & + mask) == mask); gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); @@ -1289,7 +1334,7 @@ static void a5xx_gpu_state_destroy(struct kref *kref) kfree(a5xx_state); } -int a5xx_gpu_state_put(struct msm_gpu_state *state) +static int a5xx_gpu_state_put(struct msm_gpu_state *state) { if (IS_ERR_OR_NULL(state)) return 1; @@ -1299,8 +1344,8 @@ int a5xx_gpu_state_put(struct msm_gpu_state *state) #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) -void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, - struct drm_printer *p) +static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, + struct drm_printer *p) { int i, j; u32 pos = 0; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index a3a06db675ba..321a8061fd32 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -297,6 +297,10 @@ int a5xx_power_init(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); int ret; + /* Not all A5xx chips have a GPMU */ + if (adreno_is_a510(adreno_gpu)) + return 0; + /* Set up the limits management */ if (adreno_is_a530(adreno_gpu)) a530_lm_setup(gpu); @@ -326,6 +330,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) unsigned int *data, *ptr, *cmds; unsigned int cmds_size; + if (adreno_is_a510(adreno_gpu)) + return; + if (a5xx_gpu->gpmu_bo) return; diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 0888e0df660d..fbbdf86504f5 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -115,6 +115,21 @@ static const struct adreno_info gpulist[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a4xx_gpu_init, }, { + .rev = ADRENO_REV(5, 1, 0, ANY_ID), + .revn = 510, + .name = "A510", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = SZ_256K, + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .init = a5xx_gpu_init, + }, { .rev = ADRENO_REV(5, 3, 0, 2), .revn = 530, .name = "A530", diff --git 
a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 048c8be426f3..0783e4b5486a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -14,6 +14,7 @@ #include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/soc/qcom/mdt_loader.h> +#include <soc/qcom/ocmem.h> #include "adreno_gpu.h" #include "msm_gem.h" #include "msm_mmu.h" @@ -893,6 +894,45 @@ static int adreno_get_pwrlevels(struct device *dev, return 0; } +int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu, + struct adreno_ocmem *adreno_ocmem) +{ + struct ocmem_buf *ocmem_hdl; + struct ocmem *ocmem; + + ocmem = of_get_ocmem(dev); + if (IS_ERR(ocmem)) { + if (PTR_ERR(ocmem) == -ENODEV) { + /* + * Return success since either the ocmem property was + * not specified in device tree, or ocmem support is + * not compiled into the kernel. + */ + return 0; + } + + return PTR_ERR(ocmem); + } + + ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem); + if (IS_ERR(ocmem_hdl)) + return PTR_ERR(ocmem_hdl); + + adreno_ocmem->ocmem = ocmem; + adreno_ocmem->base = ocmem_hdl->addr; + adreno_ocmem->hdl = ocmem_hdl; + adreno_gpu->gmem = ocmem_hdl->len; + + return 0; +} + +void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem) +{ + if (adreno_ocmem && adreno_ocmem->base) + ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS, + adreno_ocmem->hdl); +} + int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs, int nr_rings) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index c7441fb8313e..e71a7570ef72 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -126,6 +126,12 @@ struct adreno_gpu { }; #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) +struct adreno_ocmem { + struct ocmem *ocmem; + unsigned long base; + void *hdl; +}; + /* platform config data (ie. 
from DT, or pdata) */ struct adreno_platform_config { struct adreno_rev rev; @@ -206,6 +212,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu) return gpu->revn == 430; } +static inline int adreno_is_a510(struct adreno_gpu *gpu) +{ + return gpu->revn == 510; +} + static inline int adreno_is_a530(struct adreno_gpu *gpu) { return gpu->revn == 530; @@ -236,6 +247,10 @@ void adreno_dump(struct msm_gpu *gpu); void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords); struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu); +int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu, + struct adreno_ocmem *ocmem); +void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem); + int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, int nr_rings); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c index cdbea38b8697..f1bc6a1af7a7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c @@ -55,8 +55,7 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx) int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms, enum dpu_intr_type intr_type, u32 instance_idx) { - if (!dpu_kms || !dpu_kms->hw_intr || - !dpu_kms->hw_intr->ops.irq_idx_lookup) + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup) return -EINVAL; return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type, @@ -73,7 +72,7 @@ static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx) unsigned long irq_flags; int ret = 0, enable_count; - if (!dpu_kms || !dpu_kms->hw_intr || + if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts || !dpu_kms->irq_obj.irq_counts) { DPU_ERROR("invalid params\n"); @@ -114,7 +113,7 @@ int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count) { int i, ret = 0, counts; - if (!dpu_kms || !irq_idxs || !irq_count) { + if (!irq_idxs || !irq_count) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -138,7 +137,7 @@ static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx) { int ret = 0, enable_count; - if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) { + if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -169,7 +168,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count) { int i, ret = 0, counts; - if (!dpu_kms || !irq_idxs || !irq_count) { + if (!irq_idxs || !irq_count) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -186,7 +185,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count) u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear) { - if (!dpu_kms || !dpu_kms->hw_intr || + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.get_interrupt_status) return 0; @@ -205,7 +204,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx, { unsigned long irq_flags; - if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) { + if (!dpu_kms->irq_obj.irq_cb_tbl) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -240,7 +239,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx, { unsigned long irq_flags; - if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) { + if (!dpu_kms->irq_obj.irq_cb_tbl) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -274,8 +273,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx, static 
void dpu_clear_all_irqs(struct dpu_kms *dpu_kms) { - if (!dpu_kms || !dpu_kms->hw_intr || - !dpu_kms->hw_intr->ops.clear_all_irqs) + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs) return; dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr); @@ -283,8 +281,7 @@ static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms) static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) { - if (!dpu_kms || !dpu_kms->hw_intr || - !dpu_kms->hw_intr->ops.disable_all_irqs) + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs) return; dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr); @@ -343,18 +340,8 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) { - struct msm_drm_private *priv; int i; - if (!dpu_kms->dev) { - DPU_ERROR("invalid drm device\n"); - return; - } else if (!dpu_kms->dev->dev_private) { - DPU_ERROR("invalid device private\n"); - return; - } - priv = dpu_kms->dev->dev_private; - pm_runtime_get_sync(&dpu_kms->pdev->dev); dpu_clear_all_irqs(dpu_kms); dpu_disable_all_irqs(dpu_kms); @@ -379,18 +366,8 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms) { - struct msm_drm_private *priv; int i; - if (!dpu_kms->dev) { - DPU_ERROR("invalid drm device\n"); - return; - } else if (!dpu_kms->dev->dev_private) { - DPU_ERROR("invalid device private\n"); - return; - } - priv = dpu_kms->dev->dev_private; - pm_runtime_get_sync(&dpu_kms->pdev->dev); for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) || diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index 09a49b59bb5b..11f2bebe3869 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -32,18 +32,7 @@ enum dpu_perf_mode { static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) { struct msm_drm_private *priv; - - if (!crtc->dev || !crtc->dev->dev_private) { - DPU_ERROR("invalid device\n"); - return NULL; - } - priv = crtc->dev->dev_private; - if (!priv || !priv->kms) { - DPU_ERROR("invalid kms\n"); - return NULL; - } - return to_dpu_kms(priv->kms); } @@ -116,7 +105,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, } kms = _dpu_crtc_get_kms(crtc); - if (!kms || !kms->catalog) { + if (!kms->catalog) { DPU_ERROR("invalid parameters\n"); return 0; } @@ -215,7 +204,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc) { struct dpu_crtc *dpu_crtc; - struct dpu_crtc_state *dpu_cstate; struct dpu_kms *kms; if (!crtc) { @@ -224,13 +212,12 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc) } kms = _dpu_crtc_get_kms(crtc); - if (!kms || !kms->catalog) { + if (!kms->catalog) { DPU_ERROR("invalid kms\n"); return; } dpu_crtc = to_dpu_crtc(crtc); - dpu_cstate = to_dpu_crtc_state(crtc->state); if (atomic_dec_return(&kms->bandwidth_ref) > 0) return; @@ -287,7 +274,6 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, u64 clk_rate = 0; struct dpu_crtc *dpu_crtc; struct dpu_crtc_state *dpu_cstate; - struct msm_drm_private *priv; struct dpu_kms *kms; int ret; @@ -297,11 +283,10 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, } kms = _dpu_crtc_get_kms(crtc); - if (!kms || !kms->catalog) { + if (!kms->catalog) { DPU_ERROR("invalid kms\n"); return -EINVAL; } - priv = kms->dev->dev_private; dpu_crtc = to_dpu_crtc(crtc); dpu_cstate = 
to_dpu_crtc_state(crtc->state); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index ce59adff06aa..f197dce54576 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -266,11 +266,20 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) { struct drm_encoder *encoder; - if (!crtc || !crtc->dev) { + if (!crtc) { DPU_ERROR("invalid crtc\n"); return INTF_MODE_NONE; } + /* + * TODO: This function is called from dpu debugfs and as part of atomic + * check. When called from debugfs, the crtc->mutex must be held to + * read crtc->state. However reading crtc->state from atomic check isn't + * allowed (unless you have a good reason, a big comment, and a deep + * understanding of how the atomic/modeset locks work (<- and this is + * probably not possible)). So we'll keep the WARN_ON here for now, but + * really we need to figure out a better way to track our operating mode + */ WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); /* TODO: Returns the first INTF_MODE, could there be multiple values? */ @@ -694,7 +703,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, unsigned long flags; bool release_bandwidth = false; - if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) { + if (!crtc || !crtc->state) { DPU_ERROR("invalid crtc\n"); return; } @@ -766,7 +775,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, struct msm_drm_private *priv; bool request_bandwidth; - if (!crtc || !crtc->dev || !crtc->dev->dev_private) { + if (!crtc) { DPU_ERROR("invalid crtc\n"); return; } @@ -1288,13 +1297,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, { struct drm_crtc *crtc = NULL; struct dpu_crtc *dpu_crtc = NULL; - struct msm_drm_private *priv = NULL; - struct dpu_kms *kms = NULL; int i; - priv = dev->dev_private; - kms = to_dpu_kms(priv->kms); - dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL); if (!dpu_crtc) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index d82ea994063f..f96e142c4361 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -645,11 +645,6 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc, priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); - if (!dpu_kms) { - DPU_ERROR("invalid dpu_kms\n"); - return; - } - hw_mdptop = dpu_kms->hw_mdp; if (!hw_mdptop) { DPU_ERROR("invalid mdptop\n"); @@ -735,8 +730,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, struct msm_drm_private *priv; bool is_vid_mode = false; - if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private || - !drm_enc->crtc) { + if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) { DPU_ERROR("invalid parameters\n"); return -EINVAL; } @@ -1092,17 +1086,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) struct msm_drm_private *priv; struct dpu_kms *dpu_kms; - if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) { + if (!drm_enc || !drm_enc->dev) { DPU_ERROR("invalid parameters\n"); return; } priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); - if (!dpu_kms) { - DPU_ERROR("invalid dpu_kms\n"); - return; - } dpu_enc = to_dpu_encoder_virt(drm_enc); if (!dpu_enc || !dpu_enc->cur_master) { @@ -1184,7 +1174,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) struct dpu_encoder_virt *dpu_enc = NULL; struct 
msm_drm_private *priv; struct dpu_kms *dpu_kms; - struct drm_display_mode *mode; int i = 0; if (!drm_enc) { @@ -1193,9 +1182,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) } else if (!drm_enc->dev) { DPU_ERROR("invalid dev\n"); return; - } else if (!drm_enc->dev->dev_private) { - DPU_ERROR("invalid dev_private\n"); - return; } dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -1204,8 +1190,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) mutex_lock(&dpu_enc->enc_lock); dpu_enc->enabled = false; - mode = &drm_enc->crtc->state->adjusted_mode; - priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); @@ -1734,8 +1718,7 @@ static void dpu_encoder_vsync_event_handler(struct timer_list *t) struct msm_drm_private *priv; struct msm_drm_thread *event_thread; - if (!drm_enc->dev || !drm_enc->dev->dev_private || - !drm_enc->crtc) { + if (!drm_enc->dev || !drm_enc->crtc) { DPU_ERROR("invalid parameters\n"); return; } @@ -1914,8 +1897,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode, static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); - struct msm_drm_private *priv; - struct dpu_kms *dpu_kms; int i; static const struct file_operations debugfs_status_fops = { @@ -1927,14 +1908,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) char name[DPU_NAME_SIZE]; - if (!drm_enc->dev || !drm_enc->dev->dev_private) { + if (!drm_enc->dev) { DPU_ERROR("invalid encoder or kms\n"); return -EINVAL; } - priv = drm_enc->dev->dev_private; - dpu_kms = to_dpu_kms(priv->kms); - snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id); /* create overall sub-directory for the encoder */ @@ -2042,9 +2020,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, enum dpu_intf_type intf_type; struct dpu_enc_phys_init_params phys_params; - if (!dpu_enc || !dpu_kms) { - DPU_ERROR("invalid arg(s), enc %d kms %d\n", - dpu_enc != 0, dpu_kms != 0); + if (!dpu_enc) { + DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0); return -EINVAL; } @@ -2133,14 +2110,12 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t) struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, frame_done_timer); struct drm_encoder *drm_enc = &dpu_enc->base; - struct msm_drm_private *priv; u32 event; - if (!drm_enc->dev || !drm_enc->dev->dev_private) { + if (!drm_enc->dev) { DPU_ERROR("invalid parameters\n"); return; } - priv = drm_enc->dev->dev_private; if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) { DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 2923b63d95fe..047960949fbb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -124,13 +124,11 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx) static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx) { struct dpu_encoder_phys *phys_enc = arg; - struct dpu_encoder_phys_cmd *cmd_enc; if (!phys_enc || !phys_enc->hw_ctl) return; DPU_ATRACE_BEGIN("ctl_start_irq"); - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0); @@ -316,13 +314,9 @@ end: static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc, bool enable) { - struct dpu_encoder_phys_cmd *cmd_enc; - if (!phys_enc) 
return; - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); - trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0, enable, atomic_read(&phys_enc->vblank_refcount)); @@ -355,7 +349,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config( struct drm_display_mode *mode; bool tc_enable = true; u32 vsync_hz; - struct msm_drm_private *priv; struct dpu_kms *dpu_kms; if (!phys_enc || !phys_enc->hw_pp) { @@ -373,11 +366,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config( } dpu_kms = phys_enc->dpu_kms; - if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) { - DPU_ERROR("invalid device\n"); - return; - } - priv = dpu_kms->dev->dev_private; /* * TE default: dsi byte clock calculated base on 70 fps; @@ -650,13 +638,10 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete( struct dpu_encoder_phys *phys_enc) { int rc; - struct dpu_encoder_phys_cmd *cmd_enc; if (!phys_enc) return -EINVAL; - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); - rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc); if (rc) { DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n", diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index b9c84fb4d4a1..3123ef873cdf 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -374,7 +374,7 @@ static void dpu_encoder_phys_vid_mode_set( struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { - if (!phys_enc || !phys_enc->dpu_kms) { + if (!phys_enc) { DPU_ERROR("invalid encoder/kms\n"); return; } @@ -566,16 +566,13 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff( static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) { - struct msm_drm_private *priv; unsigned long lock_flags; int ret; - if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev || - !phys_enc->parent->dev->dev_private) { + if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) { DPU_ERROR("invalid encoder/device\n"); return; } - priv = phys_enc->parent->dev->dev_private; if (!phys_enc->hw_intf || !phys_enc->hw_ctl) { DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 58b0485dc375..6c92f0fbeac9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -30,10 +30,6 @@ #define CREATE_TRACE_POINTS #include "dpu_trace.h" -static const char * const iommu_ports[] = { - "mdp_0", -}; - /* * To enable overall DRM driver logging * # echo 0x2 > /sys/module/drm/parameters/debug @@ -68,16 +64,14 @@ static int _dpu_danger_signal_status(struct seq_file *s, bool danger_status) { struct dpu_kms *kms = (struct dpu_kms *)s->private; - struct msm_drm_private *priv; struct dpu_danger_safe_status status; int i; - if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) { + if (!kms->hw_mdp) { DPU_ERROR("invalid arg(s)\n"); return 0; } - priv = kms->dev->dev_private; memset(&status, 0, sizeof(struct dpu_danger_safe_status)); pm_runtime_get_sync(&kms->pdev->dev); @@ -153,13 +147,7 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data) return 0; dev = dpu_kms->dev; - if (!dev) - return 0; - priv = dev->dev_private; - if (!priv) - return 0; - base = dpu_kms->mmio + regset->offset; /* insert padding spaces, if needed */ @@ -280,7 +268,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct dpu_kms *dpu_kms; - struct 
msm_drm_private *priv; struct drm_device *dev; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; @@ -292,10 +279,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms, dpu_kms = to_dpu_kms(kms); dev = dpu_kms->dev; - if (!dev || !dev->dev_private) - return; - priv = dev->dev_private; - /* Call prepare_commit for all affected encoders */ for_each_new_crtc_in_state(state, crtc, crtc_state, i) { drm_for_each_encoder_mask(encoder, crtc->dev, @@ -333,7 +316,6 @@ void dpu_kms_encoder_enable(struct drm_encoder *encoder) if (funcs && funcs->commit) funcs->commit(encoder); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); drm_for_each_crtc(crtc, dev) { if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder))) continue; @@ -464,16 +446,6 @@ static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) struct msm_drm_private *priv; int i; - if (!dpu_kms) { - DPU_ERROR("invalid dpu_kms\n"); - return; - } else if (!dpu_kms->dev) { - DPU_ERROR("invalid dev\n"); - return; - } else if (!dpu_kms->dev->dev_private) { - DPU_ERROR("invalid dev_private\n"); - return; - } priv = dpu_kms->dev->dev_private; for (i = 0; i < priv->num_crtcs; i++) @@ -505,7 +477,6 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret; int max_crtc_count; - dev = dpu_kms->dev; priv = dev->dev_private; catalog = dpu_kms->catalog; @@ -585,8 +556,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) int i; dev = dpu_kms->dev; - if (!dev) - return; if (dpu_kms->hw_intr) dpu_hw_intr_destroy(dpu_kms->hw_intr); @@ -725,8 +694,7 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) mmu = dpu_kms->base.aspace->mmu; - mmu->funcs->detach(mmu, (const char **)iommu_ports, - ARRAY_SIZE(iommu_ports)); + mmu->funcs->detach(mmu); msm_gem_address_space_put(dpu_kms->base.aspace); dpu_kms->base.aspace = NULL; @@ -752,8 +720,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) return PTR_ERR(aspace); } - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); + ret = aspace->mmu->funcs->attach(aspace->mmu); if (ret) { DPU_ERROR("failed to attach iommu %d\n", ret); msm_gem_address_space_put(aspace); @@ -803,16 +770,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_kms = to_dpu_kms(kms); dev = dpu_kms->dev; - if (!dev) { - DPU_ERROR("invalid device\n"); - return rc; - } - priv = dev->dev_private; - if (!priv) { - DPU_ERROR("invalid private data\n"); - return rc; - } atomic_set(&dpu_kms->bandwidth_ref, 0); @@ -974,7 +932,7 @@ struct msm_kms *dpu_kms_init(struct drm_device *dev) struct dpu_kms *dpu_kms; int irq; - if (!dev || !dev->dev_private) { + if (!dev) { DPU_ERROR("drm device node invalid\n"); return ERR_PTR(-EINVAL); } @@ -1064,11 +1022,6 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev) struct dss_module_power *mp = &dpu_kms->mp; ddev = dpu_kms->dev; - if (!ddev) { - DPU_ERROR("invalid drm_device\n"); - return rc; - } - rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); if (rc) DPU_ERROR("clock disable failed rc:%d\n", rc); @@ -1086,11 +1039,6 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev) struct dss_module_power *mp = &dpu_kms->mp; ddev = dpu_kms->dev; - if (!ddev) { - DPU_ERROR("invalid drm_device\n"); - return rc; - } - rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); if (rc) { DPU_ERROR("clock enable failed rc:%d\n", rc); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 
959d03e007fa..c6169e7df19d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -139,10 +139,6 @@ struct vsync_info { #define to_dpu_kms(x) container_of(x, struct dpu_kms, base) -/* get struct msm_kms * from drm_device * */ -#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \ - ((struct msm_drm_private *)((D)->dev_private))->kms : NULL) - /** * Debugfs functions - extra helper functions for debugfs support * diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 8d24b79fd400..991f4c8f8a12 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -154,10 +154,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, u32 ot_lim; int ret, i; - if (!dpu_kms) { - DPU_ERROR("invalid arguments\n"); - return; - } mdp = dpu_kms->hw_mdp; for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { @@ -214,7 +210,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, const struct dpu_vbif_qos_tbl *qos_tbl; int i; - if (!dpu_kms || !params || !dpu_kms->hw_mdp) { + if (!params || !dpu_kms->hw_mdp) { DPU_ERROR("invalid arguments\n"); return; } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index 50711ccc8691..dda05436f716 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -157,10 +157,6 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, } } -static const char * const iommu_ports[] = { - "mdp_port0_cb0", "mdp_port1_cb0", -}; - static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); @@ -172,8 +168,7 @@ static void mdp4_destroy(struct msm_kms *kms) drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo); if (aspace) { - aspace->mmu->funcs->detach(aspace->mmu, - iommu_ports, ARRAY_SIZE(iommu_ports)); + aspace->mmu->funcs->detach(aspace->mmu); msm_gem_address_space_put(aspace); } @@ -524,8 +519,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) kms->aspace = aspace; - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); + ret = aspace->mmu->funcs->attach(aspace->mmu); if (ret) goto fail; } else { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index f6e71ff539ca..1f48f64539a2 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -14,7 +14,7 @@ struct mdp5_cfg_handler { /* mdp5_cfg must be exposed (used in mdp5.xml.h) */ const struct mdp5_cfg_hw *mdp5_cfg = NULL; -const struct mdp5_cfg_hw msm8x74v1_config = { +static const struct mdp5_cfg_hw msm8x74v1_config = { .name = "msm8x74v1", .mdp = { .count = 1, @@ -98,7 +98,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = { .max_clk = 200000000, }; -const struct mdp5_cfg_hw msm8x74v2_config = { +static const struct mdp5_cfg_hw msm8x74v2_config = { .name = "msm8x74", .mdp = { .count = 1, @@ -180,7 +180,7 @@ const struct mdp5_cfg_hw msm8x74v2_config = { .max_clk = 200000000, }; -const struct mdp5_cfg_hw apq8084_config = { +static const struct mdp5_cfg_hw apq8084_config = { .name = "apq8084", .mdp = { .count = 1, @@ -275,7 +275,7 @@ const struct mdp5_cfg_hw apq8084_config = { .max_clk = 320000000, }; -const struct mdp5_cfg_hw msm8x16_config = { +static const struct mdp5_cfg_hw msm8x16_config = { .name = "msm8x16", .mdp = { .count = 1, @@ -342,7 +342,7 @@ const struct mdp5_cfg_hw msm8x16_config = { .max_clk = 320000000, }; 
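
The mdp5_cfg_hw tables in this file are only ever referenced through the cfg_handlers tables at the bottom (extended later in this diff with revision 11 for msm8x76), which is why this series can mark them all static. At probe time the driver clocks the block up, reads the hardware revision (see the "figure out hw revision" comment in mdp5_kms.c further down), and picks the matching config. A minimal sketch of that revision-keyed lookup, assuming one { revision, hw config } pair per entry; find_hw_cfg() is an illustrative helper, not the driver's actual function:

/*
 * Illustrative lookup over a cfg_handlers_v1[]-style table. The entry
 * layout is an assumption for this sketch; the real driver resolves
 * the config from the minor revision it reads out of the MDP5 block.
 */
struct cfg_handler_sketch {
	int revision;
	const struct mdp5_cfg_hw *hw;
};

static const struct mdp5_cfg_hw *
find_hw_cfg(const struct cfg_handler_sketch *tbl, int count, int minor)
{
	int i;

	for (i = 0; i < count; i++)
		if (tbl[i].revision == minor)
			return tbl[i].hw;	/* e.g. 11 -> msm8x76_config */

	return NULL;	/* unknown revision, fail the probe */
}
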
-const struct mdp5_cfg_hw msm8x94_config = { +static const struct mdp5_cfg_hw msm8x94_config = { .name = "msm8x94", .mdp = { .count = 1, @@ -437,7 +437,7 @@ const struct mdp5_cfg_hw msm8x94_config = { .max_clk = 400000000, }; -const struct mdp5_cfg_hw msm8x96_config = { +static const struct mdp5_cfg_hw msm8x96_config = { .name = "msm8x96", .mdp = { .count = 1, @@ -545,7 +545,104 @@ const struct mdp5_cfg_hw msm8x96_config = { .max_clk = 412500000, }; -const struct mdp5_cfg_hw msm8917_config = { +const struct mdp5_cfg_hw msm8x76_config = { + .name = "msm8x76", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + MDP_CAP_DSC | + MDP_CAP_SRC_SPLIT | + 0, + }, + .ctl = { + .count = 3, + .base = { 0x01000, 0x01200, 0x01400 }, + .flush_hw_mask = 0xffffffff, + }, + .smp = { + .mmb_count = 10, + .mmb_size = 10240, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 9, + [SSPP_DMA0] = 4, + [SSPP_RGB0] = 7, [SSPP_RGB1] = 8, + }, + }, + .pipe_vig = { + .count = 2, + .base = { 0x04000, 0x06000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 1, + .base = { 0x440DC }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + .lm = { + .count = 2, + .base = { 0x44000, 0x45000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + + }, + .pp = { + .count = 3, + .base = { 0x70000, 0x70800, 0x72000 }, + }, + .dsc = { + .count = 2, + .base = { 0x80000, 0x80400 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + }, + }, + .max_clk = 360000000, +}; + +static const struct mdp5_cfg_hw msm8917_config = { .name = "msm8917", .mdp = { .count = 1, @@ -630,7 +727,7 @@ const struct mdp5_cfg_hw msm8917_config = { .max_clk = 320000000, }; -const struct mdp5_cfg_hw msm8998_config = { +static const struct mdp5_cfg_hw msm8998_config = { .name = "msm8998", .mdp = { .count = 1, @@ -745,6 +842,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = { { .revision = 6, .config = { .hw = &msm8x16_config } }, { .revision = 9, .config = { .hw = &msm8x94_config } }, { .revision = 7, .config = { .hw = &msm8x96_config } }, + { .revision = 11, .config = { .hw = &msm8x76_config } }, { .revision = 15, .config = { .hw = &msm8917_config } }, }; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index eb0b4b7dc7cc..05cc04f729d6 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -214,7 +214,6 @@ static void blend_setup(struct drm_crtc *crtc) struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; - const struct mdp5_cfg_hw *hw_cfg; struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; const struct mdp_format *format; struct mdp5_hw_mixer 
*mixer = pipeline->mixer; @@ -232,8 +231,6 @@ static void blend_setup(struct drm_crtc *crtc) u32 val; #define blender(stage) ((stage) - STAGE0) - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); /* ctl could be released already when we are shutting down: */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 91cd76a2bab1..e43ecd4be10a 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -19,10 +19,6 @@ #include "msm_mmu.h" #include "mdp5_kms.h" -static const char *iommu_ports[] = { - "mdp_0", -}; - static int mdp5_hw_init(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); @@ -233,8 +229,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms) mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); if (aspace) { - aspace->mmu->funcs->detach(aspace->mmu, - iommu_ports, ARRAY_SIZE(iommu_ports)); + aspace->mmu->funcs->detach(aspace->mmu); msm_gem_address_space_put(aspace); } } @@ -314,6 +309,10 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms) mdp5_kms->enable_count--; WARN_ON(mdp5_kms->enable_count < 0); + if (mdp5_kms->tbu_rt_clk) + clk_disable_unprepare(mdp5_kms->tbu_rt_clk); + if (mdp5_kms->tbu_clk) + clk_disable_unprepare(mdp5_kms->tbu_clk); clk_disable_unprepare(mdp5_kms->ahb_clk); clk_disable_unprepare(mdp5_kms->axi_clk); clk_disable_unprepare(mdp5_kms->core_clk); @@ -334,6 +333,10 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) clk_prepare_enable(mdp5_kms->core_clk); if (mdp5_kms->lut_clk) clk_prepare_enable(mdp5_kms->lut_clk); + if (mdp5_kms->tbu_clk) + clk_prepare_enable(mdp5_kms->tbu_clk); + if (mdp5_kms->tbu_rt_clk) + clk_prepare_enable(mdp5_kms->tbu_rt_clk); return 0; } @@ -466,14 +469,11 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) { struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; - const struct mdp5_cfg_hw *hw_cfg; unsigned int num_crtcs; int i, ret, pi = 0, ci = 0; struct drm_plane *primary[MAX_BASES] = { NULL }; struct drm_plane *cursor[MAX_BASES] = { NULL }; - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - /* * Construct encoders and modeset initialize connector devices * for each external display interface. @@ -737,8 +737,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) kms->aspace = aspace; - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); + ret = aspace->mmu->funcs->attach(aspace->mmu); if (ret) { DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n", ret); @@ -974,6 +973,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) /* optional clocks: */ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); + get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false); + get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false); /* we need to set a default rate before enabling. 
Set a safe * rate first, then figure out hw revision, and then set a diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h index d1bf4fdfc815..128866742593 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h @@ -53,6 +53,8 @@ struct mdp5_kms { struct clk *ahb_clk; struct clk *core_clk; struct clk *lut_clk; + struct clk *tbu_clk; + struct clk *tbu_rt_clk; struct clk *vsync_clk; /* diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c index b31cfb554fa2..d7fa2c49e741 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c @@ -121,7 +121,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, struct mdp5_kms *mdp5_kms = get_kms(smp); int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); int i, hsub, nplanes, nlines; - u32 fmt = format->base.pixel_format; uint32_t blkcfg = 0; nplanes = info->num_planes; @@ -135,7 +134,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, * them together, writes to SMP using a single client. */ if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { - fmt = DRM_FORMAT_NV24; nplanes = 2; /* if decimation is enabled, HW decimates less on the diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index b7b7c1a9164a..86ad3fdf207d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -66,6 +66,26 @@ static const struct msm_dsi_config msm8916_dsi_cfg = { .num_dsi = 1, }; +static const char * const dsi_8976_bus_clk_names[] = { + "mdp_core", "iface", "bus", +}; + +static const struct msm_dsi_config msm8976_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 3, + .regs = { + {"gdsc", -1, -1}, + {"vdda", 100000, 100}, /* 1.2 V */ + {"vddio", 100000, 100}, /* 1.8 V */ + }, + }, + .bus_clk_names = dsi_8976_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names), + .io_start = { 0x1a94000, 0x1a96000 }, + .num_dsi = 2, +}; + static const struct msm_dsi_config msm8994_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { @@ -147,7 +167,7 @@ static const struct msm_dsi_config sdm845_dsi_cfg = { .num_dsi = 2, }; -const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { +static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { .link_clk_enable = dsi_link_clk_enable_v2, .link_clk_disable = dsi_link_clk_disable_v2, .clk_init_ver = dsi_clk_init_v2, @@ -158,7 +178,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { .calc_clk_rate = dsi_calc_clk_rate_v2, }; -const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { +static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { .link_clk_enable = dsi_link_clk_enable_6g, .link_clk_disable = dsi_link_clk_disable_6g, .clk_init_ver = NULL, @@ -169,7 +189,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { .calc_clk_rate = dsi_calc_clk_rate_6g, }; -const static struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { +static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { .link_clk_enable = dsi_link_clk_enable_6g, .link_clk_disable = dsi_link_clk_disable_6g, .clk_init_ver = dsi_clk_init_6g_v2, @@ -197,6 +217,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &msm8916_dsi_cfg, &msm_dsi_6g_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2, + &msm8976_dsi_cfg, &msm_dsi_6g_host_ops}, 
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0, &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index e2b7a7dfbe49..50a37ceb6a25 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -17,6 +17,7 @@ #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 #define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 +#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002 #define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 #define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 1e7b1be25bb0..458cec82ae13 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1293,14 +1293,13 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host, u8 *buf, int rx_byte, int pkt_size) { - u32 *lp, *temp, data; + u32 *temp, data; int i, j = 0, cnt; u32 read_cnt; u8 reg[16]; int repeated_bytes = 0; int buf_offset = buf - msm_host->rx_buf; - lp = (u32 *)buf; temp = (u32 *)reg; cnt = (rx_byte + 3) >> 2; if (cnt > 4) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 3522863a4984..b0cfa67d2a57 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -145,7 +145,7 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, { const unsigned long bit_rate = clk_req->bitclk_rate; const unsigned long esc_rate = clk_req->escclk_rate; - s32 ui, ui_x8, lpx; + s32 ui, ui_x8; s32 tmax, tmin; s32 pcnt0 = 50; s32 pcnt1 = 50; @@ -175,7 +175,6 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); ui_x8 = ui << 3; - lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8); tmin = max_t(s32, temp, 0); @@ -262,7 +261,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, { const unsigned long bit_rate = clk_req->bitclk_rate; const unsigned long esc_rate = clk_req->escclk_rate; - s32 ui, ui_x8, lpx; + s32 ui, ui_x8; s32 tmax, tmin; s32 pcnt0 = 50; s32 pcnt1 = 50; @@ -284,7 +283,6 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); ui_x8 = ui << 3; - lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); tmin = max_t(s32, temp, 0); @@ -485,6 +483,8 @@ static const struct of_device_id dsi_phy_dt_match[] = { #ifdef CONFIG_DRM_MSM_DSI_28NM_PHY { .compatible = "qcom,dsi-phy-28nm-hpm", .data = &dsi_phy_28nm_hpm_cfgs }, + { .compatible = "qcom,dsi-phy-28nm-hpm-fam-b", + .data = &dsi_phy_28nm_hpm_famb_cfgs }, { .compatible = "qcom,dsi-phy-28nm-lp", .data = &dsi_phy_28nm_lp_cfgs }, #endif diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index c4069ce6afe6..24b294ed3059 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -40,6 +40,7 @@ struct msm_dsi_phy_cfg { }; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg 
dsi_phy_28nm_8960_cfgs; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index b3f678f6c2aa..c3c580cfd8b1 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -39,15 +39,10 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy, DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); } -static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy) { void __iomem *base = phy->reg_base; - if (!enable) { - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); - return; - } - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1); dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0); @@ -56,6 +51,39 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9); dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7); dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); +} + +static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->reg_base; + + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); + + if (phy->cfg->type == MSM_DSI_PHY_28NM_LP) + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05); + else + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d); +} + +static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +{ + if (!enable) { + dsi_phy_write(phy->reg_base + + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + return; + } + + if (phy->regulator_ldo_mode) + dsi_28nm_phy_regulator_enable_ldo(phy); + else + dsi_28nm_phy_regulator_enable_dcdc(phy); } static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, @@ -77,8 +105,6 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, dsi_28nm_phy_regulator_ctrl(phy, true); - dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); - dsi_28nm_dphy_set_timing(phy, timing); dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00); @@ -142,6 +168,24 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { .num_dsi_phy = 2, }; +const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = { + .type = MSM_DSI_PHY_28NM_HPM, + .src_pll_truthtable = { {true, true}, {false, true} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vddio", 100000, 100}, + }, + }, + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + .init = msm_dsi_phy_init_common, + }, + .io_start = { 0x1a94400, 0x1a96400 }, + .num_dsi_phy = 2, +}; + const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { .type = MSM_DSI_PHY_28NM_LP, .src_pll_truthtable = { {true, true}, {true, true} }, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c index 1697e61f9c2f..8a38d4b95102 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -29,8 +29,12 @@ static int 
msm_hdmi_phy_resource_init(struct hdmi_phy *phy) reg = devm_regulator_get(dev, cfg->reg_names[i]); if (IS_ERR(reg)) { ret = PTR_ERR(reg); - DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n", - cfg->reg_names[i], ret); + if (ret != -EPROBE_DEFER) { + DRM_DEV_ERROR(dev, + "failed to get phy regulator: %s (%d)\n", + cfg->reg_names[i], ret); + } + return ret; } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index a052364a5d74..18f3a5c53ffb 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -16,6 +16,7 @@ #include <linux/pm_opp.h> #include <linux/devfreq.h> #include <linux/devcoredump.h> +#include <linux/sched/task.h> /* * Power Management: @@ -838,7 +839,7 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev, return ERR_CAST(aspace); } - ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0); + ret = aspace->mmu->funcs->attach(aspace->mmu); if (ret) { msm_gem_address_space_put(aspace); return ERR_PTR(ret); @@ -995,8 +996,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false); if (!IS_ERR_OR_NULL(gpu->aspace)) { - gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, - NULL, 0); + gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); msm_gem_address_space_put(gpu->aspace); } } diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c index 34f643a0c28a..34980d8eb7ad 100644 --- a/drivers/gpu/drm/msm/msm_gpummu.c +++ b/drivers/gpu/drm/msm/msm_gpummu.c @@ -21,14 +21,12 @@ struct msm_gpummu { #define GPUMMU_PAGE_SIZE SZ_4K #define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE) -static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names, - int cnt) +static int msm_gpummu_attach(struct msm_mmu *mmu) { return 0; } -static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names, - int cnt) +static void msm_gpummu_detach(struct msm_mmu *mmu) { } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 8c95c31e2b12..ad58cfe5998e 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -23,16 +23,14 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, return 0; } -static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names, - int cnt) +static int msm_iommu_attach(struct msm_mmu *mmu) { struct msm_iommu *iommu = to_msm_iommu(mmu); return iommu_attach_device(iommu->domain, mmu->dev); } -static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, - int cnt) +static void msm_iommu_detach(struct msm_mmu *mmu) { struct msm_iommu *iommu = to_msm_iommu(mmu); diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 871d56303697..67a623f14319 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -10,8 +10,8 @@ #include <linux/iommu.h> struct msm_mmu_funcs { - int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt); - void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt); + int (*attach)(struct msm_mmu *mmu); + void (*detach)(struct msm_mmu *mmu); int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len, int prot); int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len); diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index c7832a951039..af7ceb246c7c 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -298,7 +298,7 @@ void 
msm_rd_debugfs_cleanup(struct msm_drm_private *priv) static void snapshot_buf(struct msm_rd_state *rd, struct msm_gem_submit *submit, int idx, - uint64_t iova, uint32_t size) + uint64_t iova, uint32_t size, bool full) { struct msm_gem_object *obj = submit->bos[idx].obj; unsigned offset = 0; @@ -318,6 +318,9 @@ static void snapshot_buf(struct msm_rd_state *rd, rd_write_section(rd, RD_GPUADDR, (uint32_t[3]){ iova, size, iova >> 32 }, 12); + if (!full) + return; + /* But only dump the contents of buffers marked READ */ if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ)) return; @@ -381,18 +384,21 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); for (i = 0; i < submit->nr_bos; i++) - if (should_dump(submit, i)) - snapshot_buf(rd, submit, i, 0, 0); + snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i)); for (i = 0; i < submit->nr_cmds; i++) { - uint64_t iova = submit->cmd[i].iova; uint32_t szd = submit->cmd[i].size; /* in dwords */ /* snapshot cmdstream bo's (if we haven't already): */ if (!should_dump(submit, i)) { snapshot_buf(rd, submit, submit->cmd[i].idx, - submit->cmd[i].iova, szd * 4); + submit->cmd[i].iova, szd * 4, true); } + } + + for (i = 0; i < submit->nr_cmds; i++) { + uint64_t iova = submit->cmd[i].iova; + uint32_t szd = submit->cmd[i].size; /* in dwords */ switch (submit->cmd[i].type) { case MSM_SUBMIT_CMD_IB_TARGET_BUF: diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 661e47acc354..adbf3bfae805 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -74,6 +74,16 @@ config QCOM_MDT_LOADER tristate select QCOM_SCM +config QCOM_OCMEM + tristate "Qualcomm On Chip Memory (OCMEM) driver" + depends on ARCH_QCOM + select QCOM_SCM + help + The On Chip Memory (OCMEM) allocator allows various clients to + allocate memory from OCMEM based on performance, latency and power + requirements. This is typically used by the GPU, camera/video, and + audio components on some Snapdragon SoCs. + config QCOM_PM bool "Qualcomm Power Management" depends on ARCH_QCOM && !ARM64 diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 162788701a77..aab333720bfd 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o +obj-$(CONFIG_QCOM_OCMEM) += ocmem.o obj-$(CONFIG_QCOM_PM) += spm.o obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o qmi_helpers-y += qmi_encdec.o qmi_interface.o diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c new file mode 100644 index 000000000000..7f9e9944d1ea --- /dev/null +++ b/drivers/soc/qcom/ocmem.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The On Chip Memory (OCMEM) allocator allows various clients to allocate + * memory from OCMEM based on performance, latency and power requirements. + * This is typically used by the GPU, camera/video, and audio components on + * some Snapdragon SoCs. + * + * Copyright (C) 2019 Brian Masney <[email protected]> + * Copyright (C) 2015 Red Hat. 
Author: Rob Clark <[email protected]> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/qcom_scm.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <soc/qcom/ocmem.h> + +enum region_mode { + WIDE_MODE = 0x0, + THIN_MODE, + MODE_DEFAULT = WIDE_MODE, +}; + +enum ocmem_macro_state { + PASSTHROUGH = 0, + PERI_ON = 1, + CORE_ON = 2, + CLK_OFF = 4, +}; + +struct ocmem_region { + bool interleaved; + enum region_mode mode; + unsigned int num_macros; + enum ocmem_macro_state macro_state[4]; + unsigned long macro_size; + unsigned long region_size; +}; + +struct ocmem_config { + uint8_t num_regions; + unsigned long macro_size; +}; + +struct ocmem { + struct device *dev; + const struct ocmem_config *config; + struct resource *memory; + void __iomem *mmio; + unsigned int num_ports; + unsigned int num_macros; + bool interleaved; + struct ocmem_region *regions; + unsigned long active_allocations; +}; + +#define OCMEM_MIN_ALIGN SZ_64K +#define OCMEM_MIN_ALLOC SZ_64K + +#define OCMEM_REG_HW_VERSION 0x00000000 +#define OCMEM_REG_HW_PROFILE 0x00000004 + +#define OCMEM_REG_REGION_MODE_CTL 0x00001000 +#define OCMEM_REGION_MODE_CTL_REG0_THIN 0x00000001 +#define OCMEM_REGION_MODE_CTL_REG1_THIN 0x00000002 +#define OCMEM_REGION_MODE_CTL_REG2_THIN 0x00000004 +#define OCMEM_REGION_MODE_CTL_REG3_THIN 0x00000008 + +#define OCMEM_REG_GFX_MPU_START 0x00001004 +#define OCMEM_REG_GFX_MPU_END 0x00001008 + +#define OCMEM_HW_PROFILE_NUM_PORTS(val) FIELD_GET(0x0000000f, (val)) +#define OCMEM_HW_PROFILE_NUM_MACROS(val) FIELD_GET(0x00003f00, (val)) + +#define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE 0x00010000 +#define OCMEM_HW_PROFILE_INTERLEAVING 0x00020000 +#define OCMEM_REG_GEN_STATUS 0x0000000c + +#define OCMEM_REG_PSGSC_STATUS 0x00000038 +#define OCMEM_REG_PSGSC_CTL(i0) (0x0000003c + 0x1*(i0)) + +#define OCMEM_PSGSC_CTL_MACRO0_MODE(val) FIELD_PREP(0x00000007, (val)) +#define OCMEM_PSGSC_CTL_MACRO1_MODE(val) FIELD_PREP(0x00000070, (val)) +#define OCMEM_PSGSC_CTL_MACRO2_MODE(val) FIELD_PREP(0x00000700, (val)) +#define OCMEM_PSGSC_CTL_MACRO3_MODE(val) FIELD_PREP(0x00007000, (val)) + +#define OCMEM_CLK_CORE_IDX 0 +static struct clk_bulk_data ocmem_clks[] = { + { + .id = "core", + }, + { + .id = "iface", + }, +}; + +static inline void ocmem_write(struct ocmem *ocmem, u32 reg, u32 data) +{ + writel(data, ocmem->mmio + reg); +} + +static inline u32 ocmem_read(struct ocmem *ocmem, u32 reg) +{ + return readl(ocmem->mmio + reg); +} + +static void update_ocmem(struct ocmem *ocmem) +{ + uint32_t region_mode_ctrl = 0x0; + int i; + + if (!qcom_scm_ocmem_lock_available()) { + for (i = 0; i < ocmem->config->num_regions; i++) { + struct ocmem_region *region = &ocmem->regions[i]; + + if (region->mode == THIN_MODE) + region_mode_ctrl |= BIT(i); + } + + dev_dbg(ocmem->dev, "ocmem_region_mode_control %x\n", + region_mode_ctrl); + ocmem_write(ocmem, OCMEM_REG_REGION_MODE_CTL, region_mode_ctrl); + } + + for (i = 0; i < ocmem->config->num_regions; i++) { + struct ocmem_region *region = &ocmem->regions[i]; + u32 data; + + data = OCMEM_PSGSC_CTL_MACRO0_MODE(region->macro_state[0]) | + OCMEM_PSGSC_CTL_MACRO1_MODE(region->macro_state[1]) | + OCMEM_PSGSC_CTL_MACRO2_MODE(region->macro_state[2]) | + OCMEM_PSGSC_CTL_MACRO3_MODE(region->macro_state[3]); + + ocmem_write(ocmem, OCMEM_REG_PSGSC_CTL(i), data); + } +} + +static unsigned long 
phys_to_offset(struct ocmem *ocmem, + unsigned long addr) +{ + if (addr < ocmem->memory->start || addr >= ocmem->memory->end) + return 0; + + return addr - ocmem->memory->start; +} + +static unsigned long device_address(struct ocmem *ocmem, + enum ocmem_client client, + unsigned long addr) +{ + WARN_ON(client != OCMEM_GRAPHICS); + + /* TODO: gpu uses phys_to_offset, but others do not.. */ + return phys_to_offset(ocmem, addr); +} + +static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf, + enum ocmem_macro_state mstate, enum region_mode rmode) +{ + unsigned long offset = 0; + int i, j; + + for (i = 0; i < ocmem->config->num_regions; i++) { + struct ocmem_region *region = &ocmem->regions[i]; + + if (buf->offset <= offset && offset < buf->offset + buf->len) + region->mode = rmode; + + for (j = 0; j < region->num_macros; j++) { + if (buf->offset <= offset && + offset < buf->offset + buf->len) + region->macro_state[j] = mstate; + + offset += region->macro_size; + } + } + + update_ocmem(ocmem); +} + +struct ocmem *of_get_ocmem(struct device *dev) +{ + struct platform_device *pdev; + struct device_node *devnode; + + devnode = of_parse_phandle(dev->of_node, "sram", 0); + if (!devnode || !devnode->parent) { + dev_err(dev, "Cannot look up sram phandle\n"); + return ERR_PTR(-ENODEV); + } + + pdev = of_find_device_by_node(devnode->parent); + if (!pdev) { + dev_err(dev, "Cannot find device node %s\n", devnode->name); + return ERR_PTR(-EPROBE_DEFER); + } + + return platform_get_drvdata(pdev); +} +EXPORT_SYMBOL(of_get_ocmem); + +struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, + unsigned long size) +{ + struct ocmem_buf *buf; + int ret; + + /* TODO: add support for other clients... */ + if (WARN_ON(client != OCMEM_GRAPHICS)) + return ERR_PTR(-ENODEV); + + if (size < OCMEM_MIN_ALLOC || !IS_ALIGNED(size, OCMEM_MIN_ALIGN)) + return ERR_PTR(-EINVAL); + + if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations)) + return ERR_PTR(-EBUSY); + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_unlock; + } + + buf->offset = 0; + buf->addr = device_address(ocmem, client, buf->offset); + buf->len = size; + + update_range(ocmem, buf, CORE_ON, WIDE_MODE); + + if (qcom_scm_ocmem_lock_available()) { + ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, + buf->offset, buf->len, WIDE_MODE); + if (ret) { + dev_err(ocmem->dev, "could not lock: %d\n", ret); + ret = -EINVAL; + goto err_kfree; + } + } else { + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset); + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, + buf->offset + buf->len); + } + + dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n", + size / 1024, buf->addr, client); + + return buf; + +err_kfree: + kfree(buf); +err_unlock: + clear_bit_unlock(BIT(client), &ocmem->active_allocations); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL(ocmem_allocate); + +void ocmem_free(struct ocmem *ocmem, enum ocmem_client client, + struct ocmem_buf *buf) +{ + /* TODO: add support for other clients... 
*/ + if (WARN_ON(client != OCMEM_GRAPHICS)) + return; + + update_range(ocmem, buf, CLK_OFF, MODE_DEFAULT); + + if (qcom_scm_ocmem_lock_available()) { + int ret; + + ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, + buf->offset, buf->len); + if (ret) + dev_err(ocmem->dev, "could not unlock: %d\n", ret); + } else { + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, 0x0); + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 0x0); + } + + kfree(buf); + + clear_bit_unlock(BIT(client), &ocmem->active_allocations); +} +EXPORT_SYMBOL(ocmem_free); + +static int ocmem_dev_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned long reg, region_size; + int i, j, ret, num_banks; + struct resource *res; + struct ocmem *ocmem; + + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + + ocmem = devm_kzalloc(dev, sizeof(*ocmem), GFP_KERNEL); + if (!ocmem) + return -ENOMEM; + + ocmem->dev = dev; + ocmem->config = device_get_match_data(dev); + + ret = devm_clk_bulk_get(dev, ARRAY_SIZE(ocmem_clks), ocmem_clks); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "Unable to get clocks\n"); + + return ret; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); + ocmem->mmio = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ocmem->mmio)) { + dev_err(&pdev->dev, "Failed to ioremap ocmem_ctrl resource\n"); + return PTR_ERR(ocmem->mmio); + } + + ocmem->memory = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "mem"); + if (!ocmem->memory) { + dev_err(dev, "Could not get mem region\n"); + return -ENXIO; + } + + /* The core clock is synchronous with graphics */ + WARN_ON(clk_set_rate(ocmem_clks[OCMEM_CLK_CORE_IDX].clk, 1000) < 0); + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(ocmem_clks), ocmem_clks); + if (ret) { + dev_info(ocmem->dev, "Failed to enable clocks\n"); + return ret; + } + + if (qcom_scm_restore_sec_cfg_available()) { + dev_dbg(dev, "configuring scm\n"); + ret = qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID, 0); + if (ret) { + dev_err(dev, "Could not enable secure configuration\n"); + goto err_clk_disable; + } + } + + reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE); + ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg); + ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg); + ocmem->interleaved = !!(reg & OCMEM_HW_PROFILE_INTERLEAVING); + + num_banks = ocmem->num_ports / 2; + region_size = ocmem->config->macro_size * num_banks; + + dev_info(dev, "%u ports, %u regions, %u macros, %sinterleaved\n", + ocmem->num_ports, ocmem->config->num_regions, + ocmem->num_macros, ocmem->interleaved ? 
"" : "not "); + + ocmem->regions = devm_kcalloc(dev, ocmem->config->num_regions, + sizeof(struct ocmem_region), GFP_KERNEL); + if (!ocmem->regions) { + ret = -ENOMEM; + goto err_clk_disable; + } + + for (i = 0; i < ocmem->config->num_regions; i++) { + struct ocmem_region *region = &ocmem->regions[i]; + + if (WARN_ON(num_banks > ARRAY_SIZE(region->macro_state))) { + ret = -EINVAL; + goto err_clk_disable; + } + + region->mode = MODE_DEFAULT; + region->num_macros = num_banks; + + if (i == (ocmem->config->num_regions - 1) && + reg & OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE) { + region->macro_size = ocmem->config->macro_size / 2; + region->region_size = region_size / 2; + } else { + region->macro_size = ocmem->config->macro_size; + region->region_size = region_size; + } + + for (j = 0; j < ARRAY_SIZE(region->macro_state); j++) + region->macro_state[j] = CLK_OFF; + } + + platform_set_drvdata(pdev, ocmem); + + return 0; + +err_clk_disable: + clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks); + return ret; +} + +static int ocmem_dev_remove(struct platform_device *pdev) +{ + clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks); + + return 0; +} + +static const struct ocmem_config ocmem_8974_config = { + .num_regions = 3, + .macro_size = SZ_128K, +}; + +static const struct of_device_id ocmem_of_match[] = { + { .compatible = "qcom,msm8974-ocmem", .data = &ocmem_8974_config }, + { } +}; + +MODULE_DEVICE_TABLE(of, ocmem_of_match); + +static struct platform_driver ocmem_driver = { + .probe = ocmem_dev_probe, + .remove = ocmem_dev_remove, + .driver = { + .name = "ocmem", + .of_match_table = ocmem_of_match, + }, +}; + +module_platform_driver(ocmem_driver); + +MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 2d5eff506e13..04382e1798e4 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -24,6 +24,26 @@ struct qcom_scm_vmperm { int perm; }; +enum qcom_scm_ocmem_client { + QCOM_SCM_OCMEM_UNUSED_ID = 0x0, + QCOM_SCM_OCMEM_GRAPHICS_ID, + QCOM_SCM_OCMEM_VIDEO_ID, + QCOM_SCM_OCMEM_LP_AUDIO_ID, + QCOM_SCM_OCMEM_SENSORS_ID, + QCOM_SCM_OCMEM_OTHER_OS_ID, + QCOM_SCM_OCMEM_DEBUG_ID, +}; + +enum qcom_scm_sec_dev_id { + QCOM_SCM_MDSS_DEV_ID = 1, + QCOM_SCM_OCMEM_DEV_ID = 5, + QCOM_SCM_PCIE0_DEV_ID = 11, + QCOM_SCM_PCIE1_DEV_ID = 12, + QCOM_SCM_GFX_DEV_ID = 18, + QCOM_SCM_UFS_DEV_ID = 19, + QCOM_SCM_ICE_DEV_ID = 20, +}; + #define QCOM_SCM_VMID_HLOS 0x3 #define QCOM_SCM_VMID_MSS_MSA 0xF #define QCOM_SCM_VMID_WLAN 0x18 @@ -41,6 +61,11 @@ extern bool qcom_scm_is_available(void); extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); +extern bool qcom_scm_ocmem_lock_available(void); +extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, + u32 size, u32 mode); +extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, + u32 size); extern bool qcom_scm_pas_supported(u32 peripheral); extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size); @@ -55,6 +80,7 @@ extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, extern void qcom_scm_cpu_power_down(u32 flags); extern u32 qcom_scm_get_version(void); extern int qcom_scm_set_remote_state(u32 state, u32 id); +extern bool qcom_scm_restore_sec_cfg_available(void); extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); extern int 
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 2d5eff506e13..04382e1798e4 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -24,6 +24,26 @@ struct qcom_scm_vmperm {
 	int perm;
 };
 
+enum qcom_scm_ocmem_client {
+	QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
+	QCOM_SCM_OCMEM_GRAPHICS_ID,
+	QCOM_SCM_OCMEM_VIDEO_ID,
+	QCOM_SCM_OCMEM_LP_AUDIO_ID,
+	QCOM_SCM_OCMEM_SENSORS_ID,
+	QCOM_SCM_OCMEM_OTHER_OS_ID,
+	QCOM_SCM_OCMEM_DEBUG_ID,
+};
+
+enum qcom_scm_sec_dev_id {
+	QCOM_SCM_MDSS_DEV_ID	= 1,
+	QCOM_SCM_OCMEM_DEV_ID	= 5,
+	QCOM_SCM_PCIE0_DEV_ID	= 11,
+	QCOM_SCM_PCIE1_DEV_ID	= 12,
+	QCOM_SCM_GFX_DEV_ID	= 18,
+	QCOM_SCM_UFS_DEV_ID	= 19,
+	QCOM_SCM_ICE_DEV_ID	= 20,
+};
+
 #define QCOM_SCM_VMID_HLOS	0x3
 #define QCOM_SCM_VMID_MSS_MSA	0xF
 #define QCOM_SCM_VMID_WLAN	0x18
@@ -41,6 +61,11 @@ extern bool qcom_scm_is_available(void);
 extern bool qcom_scm_hdcp_available(void);
 extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
 			     u32 *resp);
+extern bool qcom_scm_ocmem_lock_available(void);
+extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
+			       u32 size, u32 mode);
+extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
+				 u32 size);
 extern bool qcom_scm_pas_supported(u32 peripheral);
 extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
 				   size_t size);
@@ -55,6 +80,7 @@ extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 extern void qcom_scm_cpu_power_down(u32 flags);
 extern u32 qcom_scm_get_version(void);
 extern int qcom_scm_set_remote_state(u32 state, u32 id);
+extern bool qcom_scm_restore_sec_cfg_available(void);
 extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
 extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
 extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
diff --git a/include/soc/qcom/ocmem.h b/include/soc/qcom/ocmem.h
new file mode 100644
index 000000000000..02a8bc2677b1
--- /dev/null
+++ b/include/soc/qcom/ocmem.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * The On Chip Memory (OCMEM) allocator allows various clients to allocate
+ * memory from OCMEM based on performance, latency and power requirements.
+ * This is typically used by the GPU, camera/video, and audio components on
+ * some Snapdragon SoCs.
+ *
+ * Copyright (C) 2019 Brian Masney <[email protected]>
+ * Copyright (C) 2015 Red Hat. Author: Rob Clark <[email protected]>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+
+#ifndef __OCMEM_H__
+#define __OCMEM_H__
+
+enum ocmem_client {
+	/* GMEM clients */
+	OCMEM_GRAPHICS = 0x0,
+	/*
+	 * TODO add more once ocmem_allocate() is clever enough to
+	 * deal with multiple clients.
+	 */
+	OCMEM_CLIENT_MAX,
+};
+
+struct ocmem;
+
+struct ocmem_buf {
+	unsigned long offset;
+	unsigned long addr;
+	unsigned long len;
+};
+
+#if IS_ENABLED(CONFIG_QCOM_OCMEM)
+
+struct ocmem *of_get_ocmem(struct device *dev);
+struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
+				 unsigned long size);
+void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
+		struct ocmem_buf *buf);
+
+#else /* IS_ENABLED(CONFIG_QCOM_OCMEM) */
+
+static inline struct ocmem *of_get_ocmem(struct device *dev)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem,
+					       enum ocmem_client client,
+					       unsigned long size)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
+			      struct ocmem_buf *buf)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_QCOM_OCMEM) */
+
+#endif /* __OCMEM_H__ */
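
Taken together, a client driver would consume this header roughly as in the
minimal sketch below. The my_client names are illustrative and not part of
this patch; only the GPU path is supported by this version of the allocator.

#include <soc/qcom/ocmem.h>

/* Hypothetical consumer; all "my_" names are illustrative only. */
struct my_client {
	struct device *dev;
	struct ocmem *ocmem;
	struct ocmem_buf *buf;
};

static int my_client_acquire_ocmem(struct my_client *c, unsigned long size)
{
	/* Resolves the "sram" phandle; may return -EPROBE_DEFER. */
	c->ocmem = of_get_ocmem(c->dev);
	if (IS_ERR(c->ocmem))
		return PTR_ERR(c->ocmem);

	/* Only OCMEM_GRAPHICS is accepted by this version of the driver. */
	c->buf = ocmem_allocate(c->ocmem, OCMEM_GRAPHICS, size);
	if (IS_ERR(c->buf))
		return PTR_ERR(c->buf);

	/* c->buf->addr is the device address to program into the client. */
	return 0;
}

static void my_client_release_ocmem(struct my_client *c)
{
	if (!IS_ERR_OR_NULL(c->buf))
		ocmem_free(c->ocmem, OCMEM_GRAPHICS, c->buf);
}

Note that ocmem_allocate() rejects sizes below OCMEM_MIN_ALLOC or not aligned
to OCMEM_MIN_ALIGN (constants internal to the driver), and that only one
allocation per client can be live at a time; a second call before ocmem_free()
returns -EBUSY.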