Diffstat (limited to 'drivers/gpu/drm/amd/display')
475 files changed, 19247 insertions, 6306 deletions
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index af17ab8027df..9a5bcafbf730 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -30,6 +30,10 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/clk_mgr subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dpp subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO deleted file mode 100644 index a8a6c106e8c7..000000000000 --- a/drivers/gpu/drm/amd/display/TODO +++ /dev/null @@ -1,110 +0,0 @@ -=============================================================================== -TODOs -=============================================================================== - -1. Base this on drm-next - WIP - - -2. Cleanup commit history - - -3. WIP - Drop page flip helper and use DRM's version - - -4. DONE - Flatten all DC objects - * dc_stream/core_stream/stream should just be dc_stream - * Same for other DC objects - - "Is there any major reason to keep all those abstractions? - - Could you collapse everything into struct dc_stream? - - I haven't looked recently but I didn't get the impression there was a - lot of design around what was public/protected, more whatever needed - to be used by someone else was in public." - ~ Dave Airlie - - -5. DONE - Rename DC objects to align more with DRM - * dc_surface -> dc_plane_state - * dc_stream -> dc_stream_state - - -6. DONE - Per-plane and per-stream validation - - -7. WIP - Per-plane and per-stream commit - - -8. WIP - Split pipe_ctx into plane and stream resource structs - - -9. Attach plane and stream reources to state object instead of validate_context - - -10. Remove dc_edid_caps and drm_helpers_parse_edid_caps - * Use drm_display_info instead - * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed) - - "Making sure you use the sink-specific helper libraries and kernel - subsystems, since there's really no good reason to have 2nd - implementation of those in the kernel. Looks likes that's done for mst - and edid parsing. There's still a bit a midlayer feeling to the edid - parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I - think it'd be much better if you convert that over to reading stuff - from drm_display_info and if needed, push stuff into the core). Also, - I can't come up with a good reason why DC needs all this (except to - reimplement half of our edid quirk table, which really isn't a good - idea). Might be good if you put this onto the list of things to fix - long-term, but imo not a blocker. Definitely make sure new stuff - doesn't slip in (i.e. if you start adding edid quirks to DC instead of - the drm core, refactoring to use the core edid stuff was pointless)." - ~ Daniel Vetter - - -11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an -overy complicated HW programming function for sendind and receiving i2c/aux -commands. We can greatly simplify that and move it into dc/dceXYZ like other -HW blocks. - -12. 
drm_modeset_lock in MST should no longer be needed in recent kernels - * Adopt appropriate locking scheme - -13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out -a few indirections, and consider removing entirely and using the -drm_atomic_helper_best_encoder default behaviour. - -14. core/dc_debug.c, consider switching to the atomic state debug helpers and -moving all your driver state printing into the various atomic_print_state -callbacks. There's also plans to expose this stuff in a standard way across all -drivers, to make debugging userspace compositors easier across different hw. - -15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See -dal_ddc_service_i2c_query_dp_dual_mode_adaptor. - -16. Move to core SCDC helpers (I think those are new since initial DC review). - -17. There's still a pretty massive layer cake around dp aux and DPCD handling, -with like 3 levels of abstraction and using your own structures instead of the -stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2 -incompatible styles, just means more reasons not to add a third (or well third -one gets to do the cleanup refactor). - -18. There's a pile of sink handling code, both for DP and HDMI where I didn't -immediately recognize the standard. I think long term it'd be best for the drm -subsystem if we try to move as much of that into helpers/core as possible, and -share it with drivers. But that's a very long term goal, and by far not just an -issue with DC - other drivers, especially around DP sink handling, are equally -guilty. - -19. DONE - The DC logger is still a rather sore thing, but I know that the -DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out -something that integrates better with DRM and linux debug printing, while not -being useless with filtering output. dynamic debug printing might be an option. - -20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI -retimer that we need to program to pass PHY compliance. Currently that's -bypassing the i2c device and goes directly to HW. This should be changed. - -21. Remove vector.c from dc/basics. It's used in DDC code which can probably -be simplified enough to no longer need a vector implementation. 
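As an aside on TODO item 10 above, which asks for dc_edid_caps and dm_helpers_parse_edid_caps to be replaced with drm_display_info: a minimal sketch of that direction might query sink capabilities straight from the DRM core once the EDID property has been updated. This is not code from this series; the helper name below is illustrative.

    #include <drm/drm_connector.h>
    #include <drm/drm_edid.h>

    /* Illustrative only: read sink caps from connector->display_info,
     * which the DRM core fills while parsing the EDID, instead of a
     * driver-private dc_edid_caps structure. */
    static bool sink_supports_ycbcr444(struct drm_connector *connector,
                                       const struct edid *edid)
    {
            drm_connector_update_edid_property(connector, edid);
            return connector->display_info.color_formats &
                   DRM_COLOR_FORMAT_YCBCR444;
    }

Nothing in the patch itself implements this; the deleted TODO merely records it as a long-term cleanup.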
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 8bf94920d23e..ab2a97e354da 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -25,22 +25,25 @@ +ifneq ($(CONFIG_DRM_AMD_DC),) AMDGPUDM = \ amdgpu_dm.o \ amdgpu_dm_plane.o \ amdgpu_dm_crtc.o \ amdgpu_dm_irq.o \ amdgpu_dm_mst_types.o \ - amdgpu_dm_color.o + amdgpu_dm_color.o \ + amdgpu_dm_services.o \ + amdgpu_dm_helpers.o \ + amdgpu_dm_pp_smu.o \ + amdgpu_dm_psr.o \ + amdgpu_dm_replay.o \ + amdgpu_dm_wb.o ifdef CONFIG_DRM_AMD_DC_FP AMDGPUDM += dc_fpu.o endif -ifneq ($(CONFIG_DRM_AMD_DC),) -AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o -endif - AMDGPUDM += amdgpu_dm_hdcp.o ifneq ($(CONFIG_DEBUG_FS),) @@ -52,3 +55,4 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM)) AMD_DISPLAY_FILES += $(AMDGPU_DM) +endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6f99f6754c11..f1d67c6f4b98 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -37,6 +37,7 @@ #include "dc/dc_dmub_srv.h" #include "dc/dc_edid_parser.h" #include "dc/dc_stat.h" +#include "dc/dc_state.h" #include "amdgpu_dm_trace.h" #include "dpcd_defs.h" #include "link/protocols/link_dpcd.h" @@ -54,6 +55,7 @@ #include "amdgpu_dm_crtc.h" #include "amdgpu_dm_hdcp.h" #include <drm/display/drm_hdcp_helper.h> +#include "amdgpu_dm_wb.h" #include "amdgpu_pm.h" #include "amdgpu_atombios.h" @@ -85,12 +87,13 @@ #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> +#include <drm/drm_fixed.h> #include <drm/drm_fourcc.h> #include <drm/drm_edid.h> +#include <drm/drm_eld.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> #include <drm/drm_gem_atomic_helper.h> -#include <drm/drm_plane_helper.h> #include <acpi/video.h> @@ -145,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); +#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); + /* Number of bytes in PSP header for firmware. */ #define PSP_HEADER_BYTES 0x100 @@ -268,8 +274,9 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, u32 *vbl, u32 *position) { - u32 v_blank_start, v_blank_end, h_position, v_position; + u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0; struct amdgpu_crtc *acrtc = NULL; + struct dc *dc = adev->dm.dc; if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) return -EINVAL; @@ -282,6 +289,9 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, return 0; } + if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dc, false); + /* * TODO rework base driver to use values directly. 
* for now parse it back into reg-format @@ -575,6 +585,7 @@ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; + struct drm_writeback_job *job; struct amdgpu_crtc *acrtc; unsigned long flags; int vrr_active; @@ -583,6 +594,33 @@ static void dm_crtc_high_irq(void *interrupt_params) if (!acrtc) return; + if (acrtc->wb_pending) { + if (acrtc->wb_conn) { + spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags); + job = list_first_entry_or_null(&acrtc->wb_conn->job_queue, + struct drm_writeback_job, + list_entry); + spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags); + + if (job) { + unsigned int v_total, refresh_hz; + struct dc_stream_state *stream = acrtc->dm_irq_params.stream; + + v_total = stream->adjust.v_total_max ? + stream->adjust.v_total_max : stream->timing.v_total; + refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz * + 100LL, (v_total * stream->timing.h_total)); + mdelay(1000 / refresh_hz); + + drm_writeback_signal_completion(acrtc->wb_conn, 0); + dc_stream_fc_disable_writeback(adev->dm.dc, + acrtc->dm_irq_params.stream, 0); + } + } else + DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__); + acrtc->wb_pending = false; + } + vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); drm_dbg_vbl(adev_to_drm(adev), @@ -725,6 +763,10 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (link && aconnector->dc_link == link) { if (notify->type == DMUB_NOTIFICATION_HPD) @@ -806,7 +848,7 @@ static void dm_handle_hpd_work(struct work_struct *work) */ static void dm_dmub_outbox1_low_irq(void *interrupt_params) { - struct dmub_notification notify; + struct dmub_notification notify = {0}; struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; struct amdgpu_display_manager *dm = &adev->dm; @@ -894,8 +936,7 @@ static int dm_early_init(void *handle); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dm_compressor_info *compressor = &adev->dm.compressor; struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); struct drm_display_mode *mode; @@ -949,6 +990,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->audio_inst != port) continue; @@ -989,8 +1034,7 @@ static int amdgpu_dm_audio_component_bind(struct device *kdev, static void amdgpu_dm_audio_component_unbind(struct device *kdev, struct device *hda_kdev, void *data) { - struct drm_device *dev = dev_get_drvdata(kdev); - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev)); struct drm_audio_component *acomp = data; acomp->ops = NULL; @@ -1178,6 +1222,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): case IP_VERSION(3, 5, 0): + case 
IP_VERSION(3, 5, 1): hw_params.dpia_supported = true; hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; break; @@ -1185,6 +1230,15 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) break; } + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + hw_params.ips_sequential_ono = adev->external_rev_id > 0x10; + break; + default: + break; + } + status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error initializing DMUB HW: %d\n", status); @@ -1258,7 +1312,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ /* AGP aperture is disabled */ if (agp_bot > agp_top) { logical_addr_low = adev->gmc.fb_start >> 18; - if (adev->apu_flags & AMD_APU_IS_RAVEN2) + if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | + AMD_APU_IS_RENOIR | + AMD_APU_IS_GREEN_SARDINE)) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the @@ -1270,7 +1326,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ logical_addr_high = adev->gmc.fb_end >> 18; } else { logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; - if (adev->apu_flags & AMD_APU_IS_RAVEN2) + if (adev->apu_flags & (AMD_APU_IS_RAVEN2 | + AMD_APU_IS_RENOIR | + AMD_APU_IS_GREEN_SARDINE)) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the @@ -1675,6 +1733,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0]; + if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) + init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; + else + init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + + init_data.flags.disable_ips_in_vpb = 0; + + /* Enable DWB for tested platforms only */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) + init_data.num_virtual_links = 1; + INIT_LIST_HEAD(&adev->dm.da_list); retrieve_dmi_info(&adev->dm); @@ -1712,28 +1781,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) adev->dm.dc->debug.force_subvp_mclk_switch = true; + if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) + adev->dm.dc->debug.using_dml2 = true; + adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ adev->dm.dc->debug.ignore_cable_id = true; - /* TODO: There is a new drm mst change where the freedom of - * vc_next_start_slot update is revoked/moved into drm, instead of in - * driver. This forces us to make sure to get vc_next_start_slot updated - * in drm function each time without considering if mst_state is active - * or not. Otherwise, next time hotplug will give wrong start_slot - * number. We are implementing a temporary solution to even notify drm - * mst deallocation when link is no longer of MST type when uncommitting - * the stream so we will have more time to work on a proper solution. - * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we - * should notify drm to do a complete "reset" of its states and stop - * calling further drm mst functions when link is no longer of an MST - * type. This could happen when we unplug an MST hubs/displays. When - * uncommit stream comes later after unplug, we should just reset - * hardware states only. 
- */ - adev->dm.dc->debug.temp_mst_deallocation_sequence = true; - if (adev->dm.dc->caps.dp_hdmi21_pcon_support) DRM_INFO("DP-HDMI FRL PCON supported\n"); @@ -1807,21 +1862,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu: fail to register dmub aux callback"); goto error; } - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); - goto error; - } - if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { - DRM_ERROR("amdgpu: fail to register dmub hpd callback"); - goto error; - } - } - - /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. - * It is expected that DMUB will resend any pending notifications at this point, for - * example HPD from DPIA. - */ - if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. + * It is expected that DMUB will resend any pending notifications at this point. Note + * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to + * align legacy interface initialization sequence. Connection status will be proactivly + * detected once in the amdgpu_dm_initialize_drm_device. + */ dc_enable_dmub_outbox(adev->dm.dc); /* DPIA trace goes to dmesg logs only if outbox is enabled */ @@ -1902,17 +1948,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) adev->dm.hdcp_workqueue = NULL; } - if (adev->dm.dc) + if (adev->dm.dc) { dc_deinit_callbacks(adev->dm.dc); - - if (adev->dm.dc) dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); - - if (dc_enable_dmub_notifications(adev->dm.dc)) { - kfree(adev->dm.dmub_notify); - adev->dm.dmub_notify = NULL; - destroy_workqueue(adev->dm.delayed_hpd_wq); - adev->dm.delayed_hpd_wq = NULL; + if (dc_enable_dmub_notifications(adev->dm.dc)) { + kfree(adev->dm.dmub_notify); + adev->dm.dmub_notify = NULL; + destroy_workqueue(adev->dm.delayed_hpd_wq); + adev->dm.delayed_hpd_wq = NULL; + } } if (adev->dm.dmub_bo) @@ -1920,7 +1964,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); - if (adev->dm.hpd_rx_offload_wq) { + if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { for (i = 0; i < adev->dm.dc->caps.max_links; i++) { if (adev->dm.hpd_rx_offload_wq[i].wq) { destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); @@ -2014,6 +2058,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): return 0; default: break; @@ -2079,12 +2124,23 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) struct dmub_srv_create_params create_params; struct dmub_srv_region_params region_params; struct dmub_srv_region_info region_info; - struct dmub_srv_fb_params fb_params; + struct dmub_srv_memory_params memory_params; struct dmub_srv_fb_info *fb_info; struct dmub_srv *dmub_srv; const struct dmcub_firmware_header_v1_0 *hdr; enum dmub_asic dmub_asic; enum dmub_status status; + static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = { + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF + 
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE + }; int r; switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { @@ -2123,6 +2179,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) dmub_asic = DMUB_ASIC_DCN321; break; case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): dmub_asic = DMUB_ASIC_DCN35; break; default: @@ -2182,6 +2239,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) adev->dm.dmub_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + PSP_HEADER_BYTES; + region_params.window_memory_type = window_memory_type; status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, ®ion_info); @@ -2205,10 +2263,11 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return r; /* Rebase the regions on the framebuffer address. */ - memset(&fb_params, 0, sizeof(fb_params)); - fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; - fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; - fb_params.region_info = ®ion_info; + memset(&memory_params, 0, sizeof(memory_params)); + memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr; + memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr; + memory_params.region_info = ®ion_info; + memory_params.window_memory_type = window_memory_type; adev->dm.dmub_fb_info = kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); @@ -2220,7 +2279,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) return -ENOMEM; } - status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); + status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error calculating DMUB FB info: %d\n", status); return -EINVAL; @@ -2250,6 +2309,7 @@ static int dm_sw_fini(void *handle) if (adev->dm.dmub_srv) { dmub_srv_destroy(adev->dm.dmub_srv); + kfree(adev->dm.dmub_srv); adev->dm.dmub_srv = NULL; } @@ -2268,6 +2328,10 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { @@ -2396,6 +2460,10 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_root) @@ -2572,15 +2640,14 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) int i; struct dc_stream_state *del_streams[MAX_PIPES]; int del_streams_count = 0; + struct dc_commit_streams_params params = {}; memset(del_streams, 0, sizeof(del_streams)); - context = dc_create_state(dc); + context = dc_state_create_current_copy(dc); if (context == NULL) goto context_alloc_fail; - dc_resource_state_copy_construct_current(dc, context); - /* First remove from context all streams */ for (i = 0; i < context->stream_count; i++) { struct dc_stream_state *stream = context->streams[i]; @@ -2590,20 +2657,22 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) /* Remove all planes for removed streams and then remove the streams */ for (i = 0; i < del_streams_count; i++) { - if (!dc_rem_all_planes_for_stream(dc, 
del_streams[i], context)) { + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { res = DC_FAIL_DETACH_SURFACES; goto fail; } - res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); + res = dc_state_remove_stream(dc, context, del_streams[i]); if (res != DC_OK) goto fail; } - res = dc_commit_streams(dc, context->streams, context->stream_count); + params.streams = context->streams; + params.stream_count = context->stream_count; + res = dc_commit_streams(dc, ¶ms); fail: - dc_release_state(context); + dc_state_release(context); context_alloc_fail: return res; @@ -2630,7 +2699,7 @@ static int dm_suspend(void *handle) dc_allow_idle_optimizations(adev->dm.dc, false); - dm->cached_dc_state = dc_copy_state(dm->dc->current_state); + dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); @@ -2655,11 +2724,12 @@ static int dm_suspend(void *handle) hpd_rx_irq_work_suspend(dm); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); return 0; } -struct amdgpu_dm_connector * +struct drm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc) { @@ -2672,7 +2742,7 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, crtc_from_state = new_con_state->crtc; if (crtc_from_state == crtc) - return to_amdgpu_dm_connector(connector); + return connector; } return NULL; @@ -2821,9 +2891,10 @@ static int dm_resume(void *handle) struct dc_state *dc_state; int i, r, j, ret; bool need_hotplug = false; + struct dc_commit_streams_params commit_params = {}; if (dm->dc->caps.ips_support) { - dc_dmub_srv_exit_low_power_state(dm->dc); + dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); } if (amdgpu_in_reset(adev)) { @@ -2850,6 +2921,7 @@ static int dm_resume(void *handle) if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); dc_resume(dm->dc); @@ -2869,13 +2941,15 @@ static int dm_resume(void *handle) dc_enable_dmub_outbox(adev->dm.dc); } - WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); + commit_params.streams = dc_state->streams; + commit_params.stream_count = dc_state->stream_count; + WARN_ON(!dc_commit_streams(dm->dc, &commit_params)); dm_gpureset_commit_state(dm->cached_dc_state, dm); dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); - dc_release_state(dm->cached_dc_state); + dc_state_release(dm->cached_dc_state); dm->cached_dc_state = NULL; amdgpu_dm_irq_resume_late(adev); @@ -2885,10 +2959,9 @@ static int dm_resume(void *handle) return 0; } /* Recreate dc_state - DC invalidates it when setting power state to S3. */ - dc_release_state(dm_state->context); - dm_state->context = dc_create_state(dm->dc); + dc_state_release(dm_state->context); + dm_state->context = dc_state_create(dm->dc, NULL); /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ - dc_resource_state_construct(dm->dc, dm_state->context); /* Before powering on DC we need to re-initialize DMUB. 
*/ dm_dmub_hw_resume(adev); @@ -2900,6 +2973,7 @@ static int dm_resume(void *handle) } /* power on hardware */ + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); /* program HPD filter */ @@ -2917,6 +2991,10 @@ static int dm_resume(void *handle) /* Do detection*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->dc_link) @@ -2968,6 +3046,7 @@ static int dm_resume(void *handle) dc_stream_release(dm_new_crtc_state->stream); dm_new_crtc_state->stream = NULL; } + dm_new_crtc_state->base.color_mgmt_changed = true; } for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { @@ -2986,6 +3065,10 @@ static int dm_resume(void *handle) /* Do mst topology probing after resuming cached state*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_root) @@ -3038,6 +3121,8 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .soft_reset = dm_soft_reset, .set_clockgating_state = dm_set_clockgating_state, .set_powergating_state = dm_set_powergating_state, + .dump_ip_state = NULL, + .print_ip_state = NULL, }; const struct amdgpu_ip_block_version dm_ip_block = { @@ -3487,9 +3572,20 @@ static void register_hpd_handlers(struct amdgpu_device *adev) int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + } + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); dc_link = aconnector->dc_link; @@ -3512,10 +3608,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev) handle_hpd_rx_irq, (void *) aconnector); } - - if (adev->dm.hpd_rx_offload_wq) - adev->dm.hpd_rx_offload_wq[connector->index].aconnector = - aconnector; } } @@ -3956,7 +4048,7 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj) old_state = to_dm_atomic_state(obj->state); if (old_state && old_state->context) - new_state->context = dc_copy_state(old_state->context); + new_state->context = dc_state_create_copy(old_state->context); if (!new_state->context) { kfree(new_state); @@ -3972,7 +4064,7 @@ static void dm_atomic_destroy_state(struct drm_private_obj *obj, struct dm_atomic_state *dm_state = to_dm_atomic_state(state); if (dm_state && dm_state->context) - dc_release_state(dm_state->context); + dc_state_release(dm_state->context); kfree(dm_state); } @@ -4008,14 +4100,12 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) if (!state) return -ENOMEM; - state->context = dc_create_state(adev->dm.dc); + state->context = dc_state_create_current_copy(adev->dm.dc); if (!state->context) { kfree(state); return -ENOMEM; } - 
dc_resource_state_copy_construct_current(adev->dm.dc, state->context); - drm_atomic_private_obj_init(adev_to_drm(adev), &adev->dm.atomic_obj, &state->base, @@ -4023,14 +4113,19 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) r = amdgpu_display_modeset_create_props(adev); if (r) { - dc_release_state(state->context); + dc_state_release(state->context); kfree(state); return r; } +#ifdef AMD_PRIVATE_COLOR + if (amdgpu_dm_create_color_properties(adev)) + return -ENOMEM; +#endif + r = amdgpu_dm_audio_init(adev); if (r) { - dc_release_state(state->context); + dc_state_release(state->context); kfree(state); return r; } @@ -4427,6 +4522,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): case IP_VERSION(2, 1, 0): case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): if (register_outbox_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -4448,6 +4544,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): psr_feature_enabled = true; break; default: @@ -4456,20 +4553,27 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } } + /* Determine whether to enable Replay support by default. */ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { - switch (adev->ip_versions[DCE_HWIP][0]) { - case IP_VERSION(3, 1, 4): - case IP_VERSION(3, 1, 5): - case IP_VERSION(3, 1, 6): - case IP_VERSION(3, 2, 0): - case IP_VERSION(3, 2, 1): - replay_feature_enabled = true; - break; + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { +/* + * Disabled by default due to https://gitlab.freedesktop.org/drm/amd/-/issues/3344 + * case IP_VERSION(3, 1, 4): + * case IP_VERSION(3, 1, 5): + * case IP_VERSION(3, 1, 6): + * case IP_VERSION(3, 2, 0): + * case IP_VERSION(3, 2, 1): + * case IP_VERSION(3, 5, 0): + * case IP_VERSION(3, 5, 1): + * replay_feature_enabled = true; + * break; + */ default: replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; break; } } + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; @@ -4481,6 +4585,28 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) continue; } + link = dc_get_link_at_index(dm->dc, i); + + if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { + struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL); + + if (!wbcon) { + DRM_ERROR("KMS: Failed to allocate writeback connector\n"); + continue; + } + + if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { + DRM_ERROR("KMS: Failed to initialize writeback connector\n"); + kfree(wbcon); + continue; + } + + link->psr_settings.psr_feature_enabled = false; + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + + continue; + } + aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) goto fail; @@ -4499,7 +4625,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } - link = dc_get_link_at_index(dm->dc, i); + if (dm->hpd_rx_offload_wq) + dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = + aconnector; if (!dc_link_detect_connection_type(link, &new_connection_type)) DRM_ERROR("KMS: Failed to detect connector\n"); @@ -4518,11 +4646,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) amdgpu_dm_update_connector_after_detect(aconnector); setup_backlight_device(dm, aconnector); - /* - * Disable psr if replay can be 
enabled - */ - if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector)) - psr_feature_enabled = false; + /* Disable PSR if Replay can be enabled */ + if (replay_feature_enabled) + if (amdgpu_dm_set_replay_caps(link, aconnector)) + psr_feature_enabled = false; if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); @@ -4591,6 +4718,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -4724,6 +4852,9 @@ static int dm_init_microcode(struct amdgpu_device *adev) case IP_VERSION(3, 5, 0): fw_name_dmub = FIRMWARE_DCN_35_DMUB; break; + case IP_VERSION(3, 5, 1): + fw_name_dmub = FIRMWARE_DCN_351_DMUB; + break; default: /* ASIC doesn't support DMUB. */ return 0; @@ -4847,6 +4978,7 @@ static int dm_early_init(void *handle) case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; @@ -5105,7 +5237,9 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, * Always set input transfer function, since plane state is refreshed * every time. */ - ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state); + ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, + plane_state, + dc_plane_state); if (ret) return ret; @@ -5145,6 +5279,10 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane, * @new_plane_state: New state of @plane * @crtc_state: New state of CRTC connected to the @plane * @flip_addrs: DC flip tracking struct, which also tracts dirty rects + * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled. + * If PSR SU is enabled and damage clips are available, only the regions of the screen + * that have changed will be updated. If PSR SU is not enabled, + * or if damage clips are not available, the entire screen will be updated. 
* @dirty_regions_changed: dirty regions changed * * For PSR SU, DC informs the DMUB uController of dirty rectangle regions @@ -5163,6 +5301,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, struct drm_plane_state *new_plane_state, struct drm_crtc_state *crtc_state, struct dc_flip_addrs *flip_addrs, + bool is_psr_su, bool *dirty_regions_changed) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); @@ -5181,9 +5320,16 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, if (plane->type == DRM_PLANE_TYPE_CURSOR) return; + if (new_plane_state->rotation != DRM_MODE_ROTATE_0) + goto ffu; + num_clips = drm_plane_get_damage_clips_count(new_plane_state); clips = drm_plane_get_damage_clips(new_plane_state); + if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 && + is_psr_su))) + goto ffu; + if (!dm_crtc_state->mpo_requested) { if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS) goto ffu; @@ -5507,10 +5653,13 @@ static void fill_stream_properties_from_drm_display_mode( { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector = NULL; struct hdmi_vendor_infoframe hv_frame; struct hdmi_avi_infoframe avi_frame; + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + aconnector = to_amdgpu_dm_connector(connector); + memset(&hv_frame, 0, sizeof(hv_frame)); memset(&avi_frame, 0, sizeof(avi_frame)); @@ -5523,6 +5672,7 @@ static void fill_stream_properties_from_drm_display_mode( && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if (drm_mode_is_420_also(info, mode_in) + && aconnector && aconnector->force_yuv420_output) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) @@ -5558,7 +5708,7 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->hdmi_vic = hv_frame.vic; } - if (is_freesync_video_mode(mode_in, aconnector)) { + if (aconnector && is_freesync_video_mode(mode_in, aconnector)) { timing_out->h_addressable = mode_in->hdisplay; timing_out->h_total = mode_in->htotal; timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; @@ -5582,8 +5732,8 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->aspect_ratio = get_aspect_ratio(mode_in); - stream->out_transfer_func->type = TF_TYPE_PREDEFINED; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + stream->out_transfer_func.type = TF_TYPE_PREDEFINED; + stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { if (!adjust_colour_depth_from_display_info(timing_out, info) && drm_mode_is_420_also(info, mode_in) && @@ -5679,13 +5829,13 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, } static struct dc_sink * -create_fake_sink(struct amdgpu_dm_connector *aconnector) +create_fake_sink(struct dc_link *link) { struct dc_sink_init_data sink_init_data = { 0 }; struct dc_sink *sink = NULL; - sink_init_data.link = aconnector->dc_link; - sink_init_data.sink_signal = aconnector->dc_link->connector_signal; + sink_init_data.link = link; + sink_init_data.sink_signal = link->connector_signal; sink = dc_sink_create(&sink_init_data); if (!sink) { @@ -5803,6 +5953,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, &aconnector->base.probed_modes : 
&aconnector->base.modes; + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return NULL; + if (aconnector->freesync_vid_base.clock != 0) return &aconnector->freesync_vid_base; @@ -6035,14 +6188,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, } static struct dc_stream_state * -create_stream_for_sink(struct amdgpu_dm_connector *aconnector, +create_stream_for_sink(struct drm_connector *connector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream, int requested_bpc) { + struct amdgpu_dm_connector *aconnector = NULL; struct drm_display_mode *preferred_mode = NULL; - struct drm_connector *drm_connector; const struct drm_connector_state *con_state = &dm_state->base; struct dc_stream_state *stream = NULL; struct drm_display_mode mode; @@ -6056,22 +6209,35 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; struct dsc_dec_dpcd_caps dsc_caps; + struct dc_link *link = NULL; struct dc_sink *sink = NULL; drm_mode_init(&mode, drm_mode); memset(&saved_mode, 0, sizeof(saved_mode)); - if (aconnector == NULL) { - DRM_ERROR("aconnector is NULL!\n"); + if (connector == NULL) { + DRM_ERROR("connector is NULL!\n"); return stream; } - drm_connector = &aconnector->base; + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { + aconnector = NULL; + aconnector = to_amdgpu_dm_connector(connector); + link = aconnector->dc_link; + } else { + struct drm_writeback_connector *wbcon = NULL; + struct amdgpu_dm_wb_connector *dm_wbcon = NULL; - if (!aconnector->dc_sink) { - sink = create_fake_sink(aconnector); + wbcon = drm_connector_to_writeback(connector); + dm_wbcon = to_amdgpu_dm_wb_connector(wbcon); + link = dm_wbcon->link; + } + + if (!aconnector || !aconnector->dc_sink) { + sink = create_fake_sink(link); if (!sink) return stream; + } else { sink = aconnector->dc_sink; dc_sink_retain(sink); @@ -6084,12 +6250,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, goto finish; } + /* We leave this NULL for writeback connectors */ stream->dm_stream_context = aconnector; stream->timing.flags.LTE_340MCSC_SCRAMBLE = - drm_connector->display_info.hdmi.scdc.scrambling.low_rates; + connector->display_info.hdmi.scdc.scrambling.low_rates; - list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { + list_for_each_entry(preferred_mode, &connector->modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { native_mode_found = true; @@ -6098,7 +6265,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, } if (!native_mode_found) preferred_mode = list_first_entry_or_null( - &aconnector->base.modes, + &connector->modes, struct drm_display_mode, head); @@ -6112,12 +6279,15 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, * and the modelist may not be filled in time. 
*/ DRM_DEBUG_DRIVER("No preferred mode found\n"); - } else { - recalculate_timing = is_freesync_video_mode(&mode, aconnector); + } else if (aconnector) { + recalculate_timing = amdgpu_freesync_vid_mode && + is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); + saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; drm_mode_copy(&mode, freesync_mode); + mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; } else { decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, scale); @@ -6135,13 +6305,17 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ if (!scale || mode_refresh != preferred_refresh) fill_stream_properties_from_drm_display_mode( - stream, &mode, &aconnector->base, con_state, NULL, + stream, &mode, connector, con_state, NULL, requested_bpc); else fill_stream_properties_from_drm_display_mode( - stream, &mode, &aconnector->base, con_state, old_stream, + stream, &mode, connector, con_state, old_stream, requested_bpc); + /* The rest isn't needed for writeback connectors */ + if (!aconnector) + goto finish; + if (aconnector->timing_changed) { drm_dbg(aconnector->base.dev, "overriding timing for automated test, bpc %d, changing to %d\n", @@ -6159,7 +6333,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, fill_audio_info( &stream->audio_info, - drm_connector, + connector, sink); update_stream_signal(stream, sink); @@ -6167,20 +6341,17 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) { + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + stream->signal == SIGNAL_TYPE_EDP) { // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // - stream->use_vsc_sdp_for_colorimetry = false; - if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - stream->use_vsc_sdp_for_colorimetry = - aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; - } else { - if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) - stream->use_vsc_sdp_for_colorimetry = true; - } - if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) + stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; + + if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; @@ -6265,9 +6436,6 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { dm_new_state->underscan_enable = val; ret = 0; - } else if (property == adev->mode_info.abm_level_property) { - dm_new_state->abm_level = val; - ret = 0; } return ret; @@ -6310,18 +6478,87 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { *val = dm_state->underscan_enable; ret = 0; - } else if (property == adev->mode_info.abm_level_property) { - *val = dm_state->abm_level; - ret = 0; } return ret; } 
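A large share of the hunks above are a mechanical migration of the DC state API: dc_create_state() plus dc_resource_state_copy_construct_current() collapse into dc_state_create_current_copy(), dc_copy_state()/dc_release_state() become dc_state_create_copy()/dc_state_release(), and dc_commit_streams() now takes a struct dc_commit_streams_params. A condensed before/after sketch of the pattern, with names taken from the hunks above and NULL checks omitted:

    /* Old pattern, as removed above. */
    static enum dc_status commit_current_old(struct dc *dc)
    {
            struct dc_state *ctx = dc_create_state(dc);
            enum dc_status res;

            dc_resource_state_copy_construct_current(dc, ctx);
            res = dc_commit_streams(dc, ctx->streams, ctx->stream_count);
            dc_release_state(ctx);
            return res;
    }

    /* New pattern, as added above: creation and current-copy fold into
     * one call, and the stream list travels in a parameter struct. */
    static enum dc_status commit_current_new(struct dc *dc)
    {
            struct dc_state *ctx = dc_state_create_current_copy(dc);
            struct dc_commit_streams_params params = {
                    .streams = ctx->streams,
                    .stream_count = ctx->stream_count,
            };
            enum dc_status res = dc_commit_streams(dc, &params);

            dc_state_release(ctx);
            return res;
    }

The same substitution recurs in amdgpu_dm_commit_zero_streams(), dm_suspend()/dm_resume(), and the atomic private-object duplicate/destroy hooks above.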
+/** + * DOC: panel power savings + * + * The display manager allows you to set your desired **panel power savings** + * level (between 0-4, with 0 representing off), e.g. using the following:: + * + * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings + * + * Modifying this value can have implications on color accuracy, so tread + * carefully. + */ + +static ssize_t panel_power_savings_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct drm_connector *connector = dev_get_drvdata(device); + struct drm_device *dev = connector->dev; + u8 val; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + val = to_dm_connector_state(connector->state)->abm_level == + ABM_LEVEL_IMMEDIATE_DISABLE ? 0 : + to_dm_connector_state(connector->state)->abm_level; + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return sysfs_emit(buf, "%u\n", val); +} + +static ssize_t panel_power_savings_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_connector *connector = dev_get_drvdata(device); + struct drm_device *dev = connector->dev; + long val; + int ret; + + ret = kstrtol(buf, 0, &val); + + if (ret) + return ret; + + if (val < 0 || val > 4) + return -EINVAL; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + to_dm_connector_state(connector->state)->abm_level = val ?: + ABM_LEVEL_IMMEDIATE_DISABLE; + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + drm_kms_helper_hotplug_event(dev); + + return count; +} + +static DEVICE_ATTR_RW(panel_power_savings); + +static struct attribute *amdgpu_attrs[] = { + &dev_attr_panel_power_savings.attr, + NULL +}; + +static const struct attribute_group amdgpu_group = { + .name = "amdgpu", + .attrs = amdgpu_attrs +}; + static void amdgpu_dm_connector_unregister(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && + amdgpu_dm_abm_level < 0) + sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group); + drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); } @@ -6383,8 +6620,12 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->vcpi_slots = 0; state->pbn = 0; - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) - state->abm_level = amdgpu_dm_abm_level; + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + if (amdgpu_dm_abm_level <= 0) + state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; + else + state->abm_level = amdgpu_dm_abm_level; + } __drm_atomic_helper_connector_reset(connector, &state->base); } @@ -6422,6 +6663,14 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) to_amdgpu_dm_connector(connector); int r; + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && + amdgpu_dm_abm_level < 0) { + r = sysfs_create_group(&connector->kdev->kobj, + &amdgpu_group); + if (r) + return r; + } + amdgpu_dm_register_backlight_device(amdgpu_dm_connector); if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || @@ -6442,10 +6691,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; struct edid 
*edid; + struct i2c_adapter *ddc; + + if (dc_link && dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; /* * Note: drm_get_edid gets edid in the following order: @@ -6453,7 +6707,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) * 2) firmware EDID if set via edid_firmware module parameter * 3) regular DDC read. */ - edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc); + edid = drm_get_edid(connector, ddc); if (!edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; @@ -6494,12 +6748,18 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base); + struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; struct edid *edid; + struct i2c_adapter *ddc; + + if (dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; /* * Note: drm_get_edid gets edid in the following order: @@ -6507,7 +6767,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) * 2) firmware EDID if set via edid_firmware module parameter * 3) regular DDC read. */ - edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc); + edid = drm_get_edid(connector, ddc); if (!edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); return; @@ -6562,7 +6822,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, if (!dc_plane_state) goto cleanup; - dc_state = dc_create_state(dc); + dc_state = dc_state_create(dc, NULL); if (!dc_state) goto cleanup; @@ -6589,9 +6849,9 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, dc_result = dc_validate_plane(dc, dc_plane_state); if (dc_result == DC_OK) - dc_result = dc_add_stream_to_ctx(dc, dc_state, stream); + dc_result = dc_state_add_stream(dc, dc_state, stream); - if (dc_result == DC_OK && !dc_add_plane_to_context( + if (dc_result == DC_OK && !dc_state_add_plane( dc, stream, dc_plane_state, @@ -6603,7 +6863,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, cleanup: if (dc_state) - dc_release_state(dc_state); + dc_state_release(dc_state); if (dc_plane_state) dc_plane_state_release(dc_plane_state); @@ -6625,7 +6885,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, enum dc_status dc_result = DC_OK; do { - stream = create_stream_for_sink(aconnector, drm_mode, + stream = create_stream_for_sink(connector, drm_mode, dm_state, old_stream, requested_bpc); if (stream == NULL) { @@ -6633,6 +6893,9 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, break; } + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return stream; + dc_result = dc_validate_stream(adev->dm.dc, stream); if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); @@ -6908,8 +7171,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, if (IS_ERR(mst_state)) return PTR_ERR(mst_state); - if (!mst_state->pbn_div) - mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); + mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link)); if (!state->duplicated) { int 
max_bpc = conn_state->max_requested_bpc; @@ -6921,7 +7183,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, max_bpc); bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; clock = adjusted_mode->clock; - dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); + dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4); } dm_new_connector_state->vcpi_slots = @@ -6949,10 +7211,13 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; int i, j, ret; - int vcpi, pbn_div, pbn, slot_num = 0; + int vcpi, pbn_div, pbn = 0, slot_num = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->mst_output_port) @@ -7322,7 +7587,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!edid) + if (!(amdgpu_freesync_vid_mode && edid)) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -7439,12 +7704,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, aconnector->base.state->max_bpc = 16; aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; - if (connector_type == DRM_MODE_CONNECTOR_eDP && - (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) { - drm_object_attach_property(&aconnector->base.base, - adev->mode_info.abm_level_property, 0); - } - if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { /* Content Type is currently only implemented for HDMI. 
*/ drm_connector_attach_content_type_property(&aconnector->base); @@ -7481,6 +7740,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, int i; int result = -EIO; + if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) + return result; + cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); if (!cmd.payloads) @@ -7529,7 +7791,6 @@ create_i2c(struct ddc_service *ddc_service, if (!i2c) return NULL; i2c->base.owner = THIS_MODULE; - i2c->base.class = I2C_CLASS_DDC; i2c->base.dev.parent = &adev->pdev->dev; i2c->base.algo = &amdgpu_dm_i2c_algo; snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); @@ -7555,6 +7816,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct dc_link *link = dc_get_link_at_index(dc, link_index); struct amdgpu_i2c_adapter *i2c; + /* Not needed for writeback connector */ link->priv = aconnector; @@ -8162,9 +8424,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { - bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; - bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; + bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; + bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; + bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; + bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; + bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; } amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, @@ -8201,6 +8467,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, new_crtc_state, &bundle->flip_addrs[planes_count], + acrtc_state->stream->link->psr_settings.psr_version == + DC_PSR_VERSION_SU_1, &dirty_rects_changed); /* @@ -8375,7 +8643,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.output_csc_transform = &acrtc_state->stream->csc_color_matrix; bundle->stream_update.out_transfer_func = - acrtc_state->stream->out_transfer_func; + &acrtc_state->stream->out_transfer_func; + bundle->stream_update.lut3d_func = + (struct dc_3dlut *) acrtc_state->stream->lut3d_func; + bundle->stream_update.func_shaper = + (struct dc_transfer_func *) acrtc_state->stream->func_shaper; } acrtc_state->stream->abm_level = acrtc_state->abm_level; @@ -8425,10 +8697,22 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dm_update_pflip_irq_state(drm_to_adev(dev), acrtc_attach); - if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && - acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && - !acrtc_state->stream->link->psr_settings.psr_feature_enabled) - amdgpu_dm_link_setup_psr(acrtc_state->stream); + if (acrtc_state->update_type > UPDATE_TYPE_FAST) { + if (acrtc_state->stream->link->replay_settings.config.replay_supported && + !acrtc_state->stream->link->replay_settings.replay_feature_enabled) { + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; + amdgpu_dm_link_setup_replay(acrtc_state->stream->link, 
aconn); + } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && + !acrtc_state->stream->link->psr_settings.psr_feature_enabled) { + + struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *) + acrtc_state->stream->dm_stream_context; + + if (!aconn->disallow_edp_enter_psr) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + } + } /* Decrement skip count when PSR is enabled and we're doing fast updates. */ if (acrtc_state->update_type == UPDATE_TYPE_FAST && @@ -8455,6 +8739,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && #endif !acrtc_state->stream->link->psr_settings.psr_allow_active && + !aconn->disallow_edp_enter_psr && (timestamp_ns - acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > 500000000) @@ -8510,6 +8795,9 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev, continue; notify: + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); @@ -8542,6 +8830,9 @@ notify: if (!status) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); @@ -8567,6 +8858,12 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } +static void dm_clear_writeback(struct amdgpu_display_manager *dm, + struct dm_crtc_state *crtc_state) +{ + dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0); +} + static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct dc_state *dc_state) { @@ -8576,8 +8873,38 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + struct drm_connector_state *old_con_state; + struct drm_connector *connector; bool mode_set_reset_required = false; u32 i; + struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + + /* Disable writeback */ + for_each_old_connector_in_state(state, connector, old_con_state, i) { + struct dm_connector_state *dm_old_con_state; + struct amdgpu_crtc *acrtc; + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + old_crtc_state = NULL; + + dm_old_con_state = to_dm_connector_state(old_con_state); + if (!dm_old_con_state->base.crtc) + continue; + + acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc); + if (acrtc) + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + + if (!acrtc->wb_enabled) + continue; + + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + dm_clear_writeback(dm, dm_old_crtc_state); + acrtc->wb_enabled = false; + } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -8676,17 +9003,18 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, } } /* for_each_crtc_in_state() */ - /* if there mode set or reset, disable eDP PSR */ + /* if there mode set or reset, disable eDP PSR, Replay */ if (mode_set_reset_required) { if (dm->vblank_control_workqueue) flush_workqueue(dm->vblank_control_workqueue); + amdgpu_dm_replay_disable_all(dm); amdgpu_dm_psr_disable_all(dm); } dm_enable_per_frame_crtc_master_sync(dc_state); mutex_lock(&dm->dc_lock); - 
WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); + WARN_ON(!dc_commit_streams(dm->dc, &params)); /* Allow idle optimization when vblank count is 0 for display off */ if (dm->active_vblank_irq_count == 0) @@ -8703,7 +9031,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, dc_stream_get_status(dm_new_crtc_state->stream); if (!status) - status = dc_stream_get_status_from_state(dc_state, + status = dc_state_get_stream_status(dc_state, dm_new_crtc_state->stream); if (!status) drm_err(dev, @@ -8715,6 +9043,105 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, } } +static void dm_set_writeback(struct amdgpu_display_manager *dm, + struct dm_crtc_state *crtc_state, + struct drm_connector *connector, + struct drm_connector_state *new_con_state) +{ + struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector); + struct amdgpu_device *adev = dm->adev; + struct amdgpu_crtc *acrtc; + struct dc_writeback_info *wb_info; + struct pipe_ctx *pipe = NULL; + struct amdgpu_framebuffer *afb; + int i = 0; + + wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL); + if (!wb_info) { + DRM_ERROR("Failed to allocate wb_info\n"); + return; + } + + acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc); + if (!acrtc) { + DRM_ERROR("no amdgpu_crtc found\n"); + kfree(wb_info); + return; + } + + afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb); + if (!afb) { + DRM_ERROR("No amdgpu_framebuffer found\n"); + kfree(wb_info); + return; + } + + for (i = 0; i < MAX_PIPES; i++) { + if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) { + pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i]; + break; + } + } + + /* fill in wb_info */ + wb_info->wb_enabled = true; + + wb_info->dwb_pipe_inst = 0; + wb_info->dwb_params.dwbscl_black_color = 0; + wb_info->dwb_params.hdr_mult = 0x1F000; + wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS; + wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13; + wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC; + wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC; + + /* width & height from crtc */ + wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay; + wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay; + wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay; + wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay; + + wb_info->dwb_params.cnv_params.crop_en = false; + wb_info->dwb_params.stereo_params.stereo_enabled = false; + + wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits + wb_info->dwb_params.cnv_params.out_min_pix_val = 0; + wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB; + wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS; + + wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444; + + wb_info->dwb_params.capture_rate = dwb_capture_rate_0; + + wb_info->dwb_params.scaler_taps.h_taps = 4; + wb_info->dwb_params.scaler_taps.v_taps = 4; + wb_info->dwb_params.scaler_taps.h_taps_c = 2; + wb_info->dwb_params.scaler_taps.v_taps_c = 2; + wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING; + + wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0]; + wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1]; + + for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) { + wb_info->mcif_buf_params.luma_address[i] = afb->address; +
wb_info->mcif_buf_params.chroma_address[i] = 0; + } + + wb_info->mcif_buf_params.p_vmid = 1; + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) { + wb_info->mcif_warmup_params.start_address.quad_part = afb->address; + wb_info->mcif_warmup_params.region_size = + wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height; + } + wb_info->mcif_warmup_params.p_vmid = 1; + wb_info->writeback_source_plane = pipe->plane_state; + + dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info); + + acrtc->wb_pending = true; + acrtc->wb_conn = wb_conn; + drm_writeback_queue_job(wb_conn, new_con_state); +} + /** * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. * @state: The atomic state to commit @@ -8742,16 +9169,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) trace_amdgpu_dm_atomic_commit_tail_begin(state); - if (dm->dc->caps.ips_support) { - for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { - if (new_con_state->crtc && - new_con_state->crtc->state->active && - drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) { - dc_dmub_srv_exit_low_power_state(dm->dc); - break; - } - } - } + if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dm->dc, false); drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_dp_mst_atomic_wait_for_dependencies(state); @@ -8765,7 +9184,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + aconnector = to_amdgpu_dm_connector(connector); if (!adev->dm.hdcp_workqueue) continue; @@ -8949,6 +9373,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * To fix this, DC should permit updating only stream properties. */ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); + if (!dummy_updates) { + DRM_ERROR("Failed to allocate memory for dummy_updates.\n"); + continue; + } for (j = 0; j < status->plane_count; j++) dummy_updates[j].surface = status->plane_states[0]; @@ -9042,6 +9470,31 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); } + /* Enable writeback */ + for_each_new_connector_in_state(state, connector, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + if (!new_con_state->writeback_job) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + + if (!new_crtc_state) + continue; + + if (acrtc->wb_enabled) + continue; + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); + acrtc->wb_enabled = true; + } + /* Update audio instances for each connector. 
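dm_set_writeback() above only queues the job and flags wb_pending on the CRTC; the capture still has to be completed back to userspace once DWB has written the frame. A minimal sketch of that completion step using the core drm_writeback helper (dm_finish_writeback() is a hypothetical name; the fields are the ones set above):

/* Hypothetical completion path, e.g. run from the CRTC's vblank handler. */
static void dm_finish_writeback(struct amdgpu_crtc *acrtc)
{
	if (!acrtc->wb_pending)
		return;

	acrtc->wb_pending = false;
	/* Signals the out-fence and retires the queued drm_writeback_job. */
	drm_writeback_signal_completion(acrtc->wb_conn, 0);
}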
*/ amdgpu_dm_commit_audio(dev, state); @@ -9159,10 +9612,15 @@ out: void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector) { - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector; struct amdgpu_crtc *disconnected_acrtc; struct dm_crtc_state *acrtc_state; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return; + + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->dc_sink || !connector->state || !connector->encoder) return; @@ -9239,12 +9697,16 @@ static void get_freesync_config_for_crtc( struct dm_connector_state *new_con_state) { struct mod_freesync_config config = {0}; - struct amdgpu_dm_connector *aconnector = - to_amdgpu_dm_connector(new_con_state->base.connector); + struct amdgpu_dm_connector *aconnector; struct drm_display_mode *mode = &new_crtc_state->base.mode; int vrefresh = drm_mode_vrefresh(mode); bool fs_vid_mode = false; + if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return; + + aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); + new_crtc_state->vrr_supported = new_con_state->freesync_capable && vrefresh >= aconnector->min_vfreq && vrefresh <= aconnector->max_vfreq; @@ -9344,6 +9806,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * update changed items */ struct amdgpu_crtc *acrtc = NULL; + struct drm_connector *connector = NULL; struct amdgpu_dm_connector *aconnector = NULL; struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; @@ -9353,15 +9816,17 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); acrtc = to_amdgpu_crtc(crtc); - aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); + connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); + if (connector) + aconnector = to_amdgpu_dm_connector(connector); /* TODO This hack should go away */ - if (aconnector && enable) { + if (connector && enable) { /* Make sure fake sink is created in plug-in scenario */ drm_new_conn_state = drm_atomic_get_new_connector_state(state, - &aconnector->base); + connector); drm_old_conn_state = drm_atomic_get_old_connector_state(state, - &aconnector->base); + connector); if (IS_ERR(drm_new_conn_state)) { ret = PTR_ERR_OR_ZERO(drm_new_conn_state); @@ -9416,7 +9881,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. 
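The amdgpu_freesync_vid_mode checks reinstated in the hunks around here hang off a module parameter. A sketch of the declaration, assuming amdgpu's usual module_param_named() convention in amdgpu_drv.c (the exact description string is an assumption):

int amdgpu_freesync_vid_mode;	/* 0 = off (default), 1 = on */
module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
MODULE_PARM_DESC(freesync_video,
		 "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");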
*/ - if (dm_new_crtc_state->stream && + if (amdgpu_freesync_vid_mode && + dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) goto skip_modeset; @@ -9456,7 +9922,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, } /* Now check if we should set freesync video mode */ - if (dm_new_crtc_state->stream && + if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && is_timing_unchanged_for_freesync(new_crtc_state, @@ -9469,7 +9935,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, set_freesync_fixed_config(dm_new_crtc_state); goto skip_modeset; - } else if (aconnector && + } else if (amdgpu_freesync_vid_mode && aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { struct drm_display_mode *high_mode; @@ -9487,7 +9953,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, crtc->base.id); /* i.e. reset mode */ - if (dc_remove_stream_from_ctx( + if (dc_state_remove_stream( dm->dc, dm_state->context, dm_old_crtc_state->stream) != DC_OK) { @@ -9508,7 +9974,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * added MST connectors not found in existing crtc_state in the chained mode * TODO: need to dig out the root cause of that */ - if (!aconnector) + if (!connector) goto skip_modeset; if (modereset_required(new_crtc_state)) @@ -9530,7 +9996,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", crtc->base.id); - if (dc_add_stream_to_ctx( + if (dc_state_add_stream( dm->dc, dm_state->context, dm_new_crtc_state->stream) != DC_OK) { @@ -9551,7 +10017,7 @@ skip_modeset: * We want to do dc stream updates that do not require a * full modeset below. */ - if (!(enable && aconnector && new_crtc_state->active)) + if (!(enable && connector && new_crtc_state->active)) return 0; /* * Given above conditions, the dc state cannot be NULL because: @@ -9577,6 +10043,7 @@ skip_modeset: * when a modeset is needed, to ensure it gets reprogrammed. */ if (dm_new_crtc_state->base.color_mgmt_changed || + dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); if (ret) @@ -9603,14 +10070,15 @@ static bool should_reset_plane(struct drm_atomic_state *state, struct drm_plane *other; struct drm_plane_state *old_other_state, *new_other_state; struct drm_crtc_state *new_crtc_state; + struct amdgpu_device *adev = drm_to_adev(plane->dev); int i; /* - * TODO: Remove this hack once the checks below are sufficient - * enough to determine when we need to reset all the planes on - * the stream. + * TODO: Remove this hack for all asics once it proves that the + * fast updates works fine on DCN3.2+. */ - if (state->allow_modeset) + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) && + state->allow_modeset) return true; /* Exit early if we know that we're adding or removing the plane. 
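should_reset_plane() above now keys the allow_modeset fast path off the DCN generation. The amdgpu_ip_version()/IP_VERSION() comparison works because the version is packed into a single integer; a sketch, assuming the three-part packing amdgpu used at this point:

/* Sketch of the packing behind IP_VERSION()/amdgpu_ip_version(). */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

/* DCN 3.0.1 -> 0x030001, DCN 3.2.0 -> 0x030200, so a plain integer
 * compare orders generations correctly and pre-DCN3.2 parts keep the
 * "reset all planes on allow_modeset" behaviour.
 */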
*/ @@ -9644,6 +10112,10 @@ static bool should_reset_plane(struct drm_atomic_state *state, */ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { struct amdgpu_framebuffer *old_afb, *new_afb; + struct dm_plane_state *dm_new_other_state, *dm_old_other_state; + + dm_new_other_state = to_dm_plane_state(new_other_state); + dm_old_other_state = to_dm_plane_state(old_other_state); if (other->type == DRM_PLANE_TYPE_CURSOR) continue; @@ -9680,6 +10152,18 @@ static bool should_reset_plane(struct drm_atomic_state *state, old_other_state->color_encoding != new_other_state->color_encoding) return true; + /* HDR/Transfer Function changes. */ + if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf || + dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut || + dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult || + dm_old_other_state->ctm != dm_new_other_state->ctm || + dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut || + dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf || + dm_old_other_state->lut3d != dm_new_other_state->lut3d || + dm_old_other_state->blend_lut != dm_new_other_state->blend_lut || + dm_old_other_state->blend_tf != dm_new_other_state->blend_tf) + return true; + /* Framebuffer checks fall at the end. */ if (!old_other_state->fb || !new_other_state->fb) continue; @@ -9834,7 +10318,7 @@ static int dm_update_plane_state(struct dc *dc, if (ret) return ret; - if (!dc_remove_plane_from_context( + if (!dc_state_remove_plane( dc, dm_old_crtc_state->stream, dm_old_plane_state->dc_state, @@ -9912,7 +10396,7 @@ static int dm_update_plane_state(struct dc *dc, * state. It'll be released when the atomic state is * cleaned. */ - if (!dc_add_plane_to_context( + if (!dc_state_add_plane( dc, dm_new_crtc_state->stream, dc_new_plane_state, @@ -10074,6 +10558,9 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm if (conn_state->crtc != crtc) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->mst_output_port || !aconnector->mst_root) aconnector = NULL; @@ -10131,7 +10618,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; struct drm_dp_mst_topology_mgr *mgr; struct drm_dp_mst_topology_state *mst_state; - struct dsc_mst_fairness_vars vars[MAX_PIPES]; + struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0}; trace_amdgpu_dm_atomic_check_begin(state); @@ -10431,11 +10918,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } - ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); - if (ret) { - DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); - ret = -EINVAL; - goto fail; + if (dc_resource_is_dsc_encoding_supported(dc)) { + ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); + if (ret) { + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; + goto fail; + } } ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); @@ -10593,7 +11082,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, input->cea_total_length = total_length; memcpy(input->payload, data, length); - res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); + res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); if (!res) { DRM_ERROR("EDID CEA 
parser failed\n"); return false; @@ -10784,8 +11273,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; bool freesync_capable = false; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; @@ -10817,18 +11305,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; - if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT - || sink->sink_signal == SIGNAL_TYPE_EDP) { + if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || + sink->sink_signal == SIGNAL_TYPE_EDP)) { bool edid_check_required = false; - if (edid) { - edid_check_required = is_dp_capable_without_timing_msa( - adev->dm.dc, - amdgpu_dm_connector); + if (is_dp_capable_without_timing_msa(adev->dm.dc, + amdgpu_dm_connector)) { + if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { + freesync_capable = true; + amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + } else { + edid_check_required = edid->version > 1 || + (edid->version == 1 && + edid->revision > 1); + } } - if (edid_check_required == true && (edid->version > 1 || - (edid->version == 1 && edid->revision > 1))) { + if (edid_check_required) { for (i = 0; i < 4; i++) { timing = &edid->detailed_timings[i]; @@ -10848,14 +11342,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (range->flags != 1) continue; - amdgpu_dm_connector->min_vfreq = range->min_vfreq; - amdgpu_dm_connector->max_vfreq = range->max_vfreq; - amdgpu_dm_connector->pixel_clock_mhz = - range->pixel_clock_mhz * 10; - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; connector->display_info.monitor_range.max_vfreq = range->max_vfreq; + if (edid->revision >= 4) { + if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ) + connector->display_info.monitor_range.min_vfreq += 255; + if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ) + connector->display_info.monitor_range.max_vfreq += 255; + } + + amdgpu_dm_connector->min_vfreq = + connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = + connector->display_info.monitor_range.max_vfreq; + amdgpu_dm_connector->pixel_clock_mhz = + range->pixel_clock_mhz * 10; + break; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 3d480be802cb..09519b7abf67 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -32,6 +32,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_plane.h> #include "link_service_types.h" +#include <drm/drm_writeback.h> /* * This file contains the definition for amdgpu_display_manager @@ -54,6 +55,9 @@ #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3 + +#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL) + /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -689,6 +693,7 @@ struct amdgpu_dm_connector { struct drm_display_mode freesync_vid_base; int psr_skip_count; + bool disallow_edp_enter_psr; /* Record progress status of mst*/ uint8_t 
mst_status; @@ -714,11 +719,107 @@ static inline void amdgpu_dm_set_mst_status(uint8_t *status, #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) +struct amdgpu_dm_wb_connector { + struct drm_writeback_connector base; + struct dc_link *link; +}; + +#define to_amdgpu_dm_wb_connector(x) container_of(x, struct amdgpu_dm_wb_connector, base) + extern const struct amdgpu_ip_block_version dm_ip_block; +/* enum amdgpu_transfer_function: pre-defined transfer function supported by AMD. + * + * It includes standardized transfer functions and pure power functions. The + * transfer function coefficients are available at modules/color/color_gamma.c + */ +enum amdgpu_transfer_function { + AMDGPU_TRANSFER_FUNCTION_DEFAULT, + AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF, + AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF, + AMDGPU_TRANSFER_FUNCTION_PQ_EOTF, + AMDGPU_TRANSFER_FUNCTION_IDENTITY, + AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF, + AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_BT709_OETF, + AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_COUNT +}; + struct dm_plane_state { struct drm_plane_state base; struct dc_plane_state *dc_state; + + /* Plane color mgmt */ + /** + * @degamma_lut: + * + * 1D LUT for mapping framebuffer/plane pixel data before sampling or + * blending operations. It's usually applied to linearize input space. + * The blob (if not NULL) is an array of &struct drm_color_lut. + */ + struct drm_property_blob *degamma_lut; + /** + * @degamma_tf: + * + * Predefined transfer function to tell DC driver the input space to + * linearize. + */ + enum amdgpu_transfer_function degamma_tf; + /** + * @hdr_mult: + * + * Multiplier to 'gain' the plane. When PQ is decoded using the fixed + * func transfer function to the internal FP16 fb, 1.0 -> 80 nits (on + * AMD at least). When sRGB is decoded, 1.0 -> 1.0, obviously. + * Therefore, 1.0 multiplier = 80 nits for SDR content. So if you + * want 203 nits for SDR content, pass in (203.0 / 80.0). Format is + * S31.32 sign-magnitude. + * + * The HDR multiplier can range well beyond [0.0, 1.0]. This means that PQ + * TF is needed for any subsequent linear-to-non-linear transforms. + */ + __u64 hdr_mult; + /** + * @ctm: + * + * Color transformation matrix. The blob (if not NULL) is a &struct + * drm_color_ctm_3x4. + */ + struct drm_property_blob *ctm; + /** + * @shaper_lut: shaper lookup table blob. The blob (if not NULL) is an + * array of &struct drm_color_lut. + */ + struct drm_property_blob *shaper_lut; + /** + * @shaper_tf: + * + * Predefined transfer function to delinearize color space. + */ + enum amdgpu_transfer_function shaper_tf; + /** + * @lut3d: 3D lookup table blob. The blob (if not NULL) is an array of + * &struct drm_color_lut. + */ + struct drm_property_blob *lut3d; + /** + * @blend_lut: blend lut lookup table blob. The blob (if not NULL) is an + * array of &struct drm_color_lut. + */ + struct drm_property_blob *blend_lut; + /** + * @blend_tf: + * + * Pre-defined transfer function for converting plane pixel data before + * applying blend LUT.
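A worked example of the @hdr_mult encoding documented above: S31.32 sign-magnitude, with 1.0 equal to AMDGPU_HDR_MULT_DEFAULT (0x100000000LL). The target nit value is an assumption for illustration:

/* 203-nit SDR white on the 80-nit scale: 203.0 / 80.0 = 2.5375 */
__u64 hdr_mult = ((__u64)203 << 32) / 80;	/* 0x289999999 */

/* sanity: 1.0 is (1ULL << 32), i.e. AMDGPU_HDR_MULT_DEFAULT */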
+ */ + enum amdgpu_transfer_function blend_tf; }; struct dm_crtc_state { @@ -743,6 +844,14 @@ struct dm_crtc_state { struct dc_info_packet vrr_infopacket; int abm_level; + + /** + * @regamma_tf: + * + * Pre-defined transfer function for converting internal FB -> wire + * encoding. + */ + enum amdgpu_transfer_function regamma_tf; }; #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) @@ -804,14 +913,22 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, void amdgpu_dm_trigger_timing_sync(struct drm_device *dev); +/* 3D LUT max size is 17x17x17 (4913 entries) */ +#define MAX_COLOR_3DLUT_SIZE 17 +#define MAX_COLOR_3DLUT_BITDEPTH 12 +int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev, + struct drm_plane_state *plane_state); +/* 1D LUT size */ #define MAX_COLOR_LUT_ENTRIES 4096 /* Legacy gamma LUT users such as X don't like large LUT sizes */ #define MAX_COLOR_LEGACY_LUT_ENTRIES 256 void amdgpu_dm_init_color_mod(void); +int amdgpu_dm_create_color_properties(struct amdgpu_device *adev); int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state); int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc); int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, + struct drm_plane_state *plane_state, struct dc_plane_state *dc_plane_state); void amdgpu_dm_update_connector_after_detect( @@ -834,7 +951,7 @@ struct dc_stream_state * int dm_atomic_get_state(struct drm_atomic_state *state, struct dm_atomic_state **dm_state); -struct amdgpu_dm_connector * +struct drm_connector * amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a4cb23d059bd..ebabfe3a512f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -72,6 +72,7 @@ */ #define MAX_DRM_LUT_VALUE 0xFFFF +#define SDR_WHITE_LEVEL_INIT_VALUE 80 /** * amdgpu_dm_init_color_mod - Initialize the color module. * @@ -84,6 +85,247 @@ void amdgpu_dm_init_color_mod(void) setup_x_points_distribution(); } +static inline struct fixed31_32 amdgpu_dm_fixpt_from_s3132(__u64 x) +{ + struct fixed31_32 val; + + /* If negative, convert to 2's complement. */ + if (x & (1ULL << 63)) + x = -(x & ~(1ULL << 63)); + + val.value = x; + return val; +} + +#ifdef AMD_PRIVATE_COLOR +/* Pre-defined Transfer Functions (TF) + * + * AMD driver supports pre-defined mathematical functions for transferring + * between encoded values and optical/linear space. Depending on HW color caps, + * ROMs and curves built by the AMD color module support these transforms. + * + * The driver-specific color implementation exposes properties for pre-blending + * degamma TF, shaper TF (before 3D LUT), and blend(dpp.ogam) TF and + * post-blending regamma (mpc.ogam) TF. However, only pre-blending degamma + * supports ROM curves. AMD color module uses pre-defined coefficients to build + * curves for the other blocks. What can be done by each color block is + * described by struct dpp_color_caps and struct mpc_color_caps.
+ *
+ * AMD driver-specific color API exposes the following pre-defined transfer
+ * functions:
+ *
+ * - Identity: linear/identity relationship between pixel value and
+ *   luminance value;
+ * - Gamma 2.2, Gamma 2.4, Gamma 2.6: pure power functions;
+ * - sRGB: the piece-wise transfer function from IEC 61966-2-1:1999, built
+ *   around a 2.4 power segment;
+ * - BT.709: has a linear segment in the bottom part and then a power function
+ *   with a 0.45 (~1/2.22) gamma for the rest of the range; standardized by
+ *   ITU-R BT.709-6;
+ * - PQ (Perceptual Quantizer): used for HDR display, allows luminance range
+ *   capability of 0 to 10,000 nits; standardized by SMPTE ST 2084.
+ *
+ * The AMD color model is designed with an assumption that SDR (sRGB, BT.709,
+ * Gamma 2.2, etc.) peak white maps (normalized to 1.0 FP) to 80 nits in the PQ
+ * system. This has the implication that PQ EOTF (non-linear to linear) maps to
+ * [0.0..125.0] where 125.0 = 10,000 nits / 80 nits.
+ *
+ * Non-linear and linear forms are described in the table below:
+ *
+ * ┌───────────┬─────────────────────┬──────────────────────┐
+ * │           │ Non-linear          │ Linear               │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ sRGB      │ UNORM or [0.0, 1.0] │ [0.0, 1.0]           │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ BT709     │ UNORM or [0.0, 1.0] │ [0.0, 1.0]           │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ Gamma 2.x │ UNORM or [0.0, 1.0] │ [0.0, 1.0]           │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ PQ        │ UNORM or FP16 CCCS* │ [0.0, 125.0]         │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ Identity  │ UNORM or FP16 CCCS* │ [0.0, 1.0] or CCCS** │
+ * └───────────┴─────────────────────┴──────────────────────┘
+ * *  CCCS: Windows canonical composition color space
+ * ** [0.0, 1.0] for UNORM input, CCCS for FP16 input, respectively
+ *
+ * In the driver-specific API, the color block names attached to TF properties
+ * indicate the intended non-linear encoding of pixel luminance values. As some
+ * newer encodings don't use a gamma curve, we make encoding and decoding
+ * explicit by defining an enum list of transfer functions supported in terms
+ * of EOTF and inverse EOTF, where:
+ *
+ * - EOTF (electro-optical transfer function): the transfer function to go
+ *   from the encoded value to an optical (linear) value. De-gamma functions
+ *   traditionally do this.
+ * - Inverse EOTF (simply the inverse of the EOTF): usually intended to go
+ *   from an optical/linear space (which might have been used for blending)
+ *   back to the encoded values. Gamma functions traditionally do this.
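The enum properties created below are driver-private, so userspace has to resolve them by name at runtime. A sketch using libdrm (set_plane_prop() is a hypothetical helper; assumes an open DRM fd with universal planes enabled and a kernel built with AMD_PRIVATE_COLOR):

#include <errno.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Look up a plane property by name and set it. */
static int set_plane_prop(int fd, uint32_t plane_id,
			  const char *name, uint64_t value)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	int ret = -ENOENT;

	for (uint32_t i = 0; props && i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, name))
			ret = drmModeObjectSetProperty(fd, plane_id,
						       DRM_MODE_OBJECT_PLANE,
						       prop->prop_id, value);
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return ret;
}

/* e.g. set_plane_prop(fd, plane_id, "AMD_PLANE_DEGAMMA_TF", tf_enum_value); */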
+ */ +static const char * const +amdgpu_transfer_function_names[] = { + [AMDGPU_TRANSFER_FUNCTION_DEFAULT] = "Default", + [AMDGPU_TRANSFER_FUNCTION_IDENTITY] = "Identity", + [AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF] = "sRGB EOTF", + [AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF] = "BT.709 inv_OETF", + [AMDGPU_TRANSFER_FUNCTION_PQ_EOTF] = "PQ EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF] = "Gamma 2.2 EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF] = "Gamma 2.4 EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF] = "Gamma 2.6 EOTF", + [AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF] = "sRGB inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_BT709_OETF] = "BT.709 OETF", + [AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF] = "PQ inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF] = "Gamma 2.2 inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF] = "Gamma 2.4 inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF] = "Gamma 2.6 inv_EOTF", +}; + +static const u32 amdgpu_eotf = + BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF) | + BIT(AMDGPU_TRANSFER_FUNCTION_PQ_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF); + +static const u32 amdgpu_inv_eotf = + BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_BT709_OETF) | + BIT(AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF); + +static struct drm_property * +amdgpu_create_tf_property(struct drm_device *dev, + const char *name, + u32 supported_tf) +{ + u32 transfer_functions = supported_tf | + BIT(AMDGPU_TRANSFER_FUNCTION_DEFAULT) | + BIT(AMDGPU_TRANSFER_FUNCTION_IDENTITY); + struct drm_prop_enum_list enum_list[AMDGPU_TRANSFER_FUNCTION_COUNT]; + int i, len; + + len = 0; + for (i = 0; i < AMDGPU_TRANSFER_FUNCTION_COUNT; i++) { + if ((transfer_functions & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = amdgpu_transfer_function_names[i]; + len++; + } + + return drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, + name, enum_list, len); +} + +int +amdgpu_dm_create_color_properties(struct amdgpu_device *adev) +{ + struct drm_property *prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_DEGAMMA_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_DEGAMMA_LUT_SIZE", + 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_DEGAMMA_TF", + amdgpu_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_tf_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + 0, "AMD_PLANE_HDR_MULT", 0, U64_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_hdr_mult_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_CTM", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_ctm_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_SHAPER_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_SHAPER_LUT_SIZE", 0, 
UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_SHAPER_TF", + amdgpu_inv_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_tf_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_LUT3D", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_lut3d_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_LUT3D_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_lut3d_size_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_BLEND_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_BLEND_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_BLEND_TF", + amdgpu_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_tf_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_CRTC_REGAMMA_TF", + amdgpu_inv_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.regamma_tf_property = prop; + + return 0; +} +#endif + /** * __extract_blob_lut - Extracts the DRM lut and lut size from a blob. * @blob: DRM color mgmt property blob @@ -182,7 +424,6 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, struct fixed31_32 *matrix) { - int64_t val; int i; /* @@ -201,12 +442,29 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, } /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */ - val = ctm->matrix[i - (i / 4)]; - /* If negative, convert to 2's complement. */ - if (val & (1ULL << 63)) - val = -(val & ~(1ULL << 63)); + matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i - (i / 4)]); + } +} + +/** + * __drm_ctm_3x4_to_dc_matrix - converts a DRM CTM 3x4 to a DC CSC float matrix + * @ctm: DRM color transformation matrix with 3x4 dimensions + * @matrix: DC CSC float matrix + * + * The matrix needs to be a 3x4 (12 entry) matrix. + */ +static void __drm_ctm_3x4_to_dc_matrix(const struct drm_color_ctm_3x4 *ctm, + struct fixed31_32 *matrix) +{ + int i; - matrix[i].value = val; + /* The format provided is S31.32, using signed-magnitude representation. + * Our fixed31_32 is also S31.32, but is using 2's complement. We have + * to convert from signed-magnitude to 2's complement. 
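A worked example of the sign-magnitude handling in amdgpu_dm_fixpt_from_s3132() defined earlier (the input value is an assumption for illustration):

/* -0.5 in DRM's S31.32 sign-magnitude: sign bit 63 plus a 0.5 magnitude. */
__u64 s3132 = (1ULL << 63) | (1ULL << 31);
struct fixed31_32 v = amdgpu_dm_fixpt_from_s3132(s3132);
/* v.value is now -(1LL << 31): -0.5 in two's-complement S31.32,
 * which is what DC's fixed31_32 type expects.
 */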
+ */ + for (i = 0; i < 12; i++) { + /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */ + matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i]); } } @@ -268,16 +526,18 @@ static int __set_output_tf(struct dc_transfer_func *func, struct calculate_buffer cal_buffer = {0}; bool res; - ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); - cal_buffer.buffer_index = -1; - gamma = dc_create_gamma(); - if (!gamma) - return -ENOMEM; + if (lut_size) { + ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); - gamma->num_entries = lut_size; - __drm_lut_to_dc_gamma(lut, gamma, false); + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->num_entries = lut_size; + __drm_lut_to_dc_gamma(lut, gamma, false); + } if (func->tf == TRANSFER_FUNCTION_LINEAR) { /* @@ -285,27 +545,68 @@ static int __set_output_tf(struct dc_transfer_func *func, * on top of a linear input. But degamma params can be used * instead to simulate this. */ - gamma->type = GAMMA_CUSTOM; + if (gamma) + gamma->type = GAMMA_CUSTOM; res = mod_color_calculate_degamma_params(NULL, func, - gamma, true); + gamma, gamma != NULL); } else { /* * Assume sRGB. The actual mapping will depend on whether the * input was legacy or not. */ - gamma->type = GAMMA_CS_TFM_1D; - res = mod_color_calculate_regamma_params(func, gamma, false, + if (gamma) + gamma->type = GAMMA_CS_TFM_1D; + res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL, has_rom, NULL, &cal_buffer); } - dc_gamma_release(&gamma); + if (gamma) + dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } +static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream, + const struct drm_color_lut *regamma_lut, + uint32_t regamma_size, bool has_rom, + enum dc_transfer_func_predefined tf) +{ + struct dc_transfer_func *out_tf = &stream->out_transfer_func; + int ret = 0; + + if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * CRTC RGM goes into RGM LUT. + * + * Note: there is no implicit sRGB regamma here. We are using + * degamma calculation from color module to calculate the curve + * from a linear base if gamma TF is not set. However, if gamma + * TF (!= Linear) and LUT are set at the same time, we will use + * regamma calculation, and the color module will combine the + * pre-defined TF and the custom LUT values into the LUT that's + * actually programmed. + */ + out_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + out_tf->tf = tf; + out_tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_output_tf(out_tf, regamma_lut, regamma_size, has_rom); + } else { + /* + * No CRTC RGM means we can just put the block into bypass + * since we don't have any plane level adjustments using it. + */ + out_tf->type = TF_TYPE_BYPASS; + out_tf->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + /** * __set_input_tf - calculates the input transfer function based on expected * input space. + * @caps: dc color capabilities * @func: transfer function * @lut: lookup table that defines the color space * @lut_size: size of respective lut. @@ -313,27 +614,240 @@ static int __set_output_tf(struct dc_transfer_func *func, * Returns: * 0 in case of success. -ENOMEM if fails. 
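For reference, an identity matrix in the drm_color_ctm_3x4 layout consumed by __drm_ctm_3x4_to_dc_matrix() above (a sketch, assuming the row-major [3x3 | offset] ordering of the 12 S31.32 sign-magnitude entries):

/* Rows are [r g b | offset], so the diagonal sits at indices 0, 5 and 10. */
struct drm_color_ctm_3x4 ctm = { 0 };

ctm.matrix[0] = 1ULL << 32;	/* 1.0 */
ctm.matrix[5] = 1ULL << 32;
ctm.matrix[10] = 1ULL << 32;
/* matrix[3], [7] and [11] stay 0: no per-channel offset */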
*/ -static int __set_input_tf(struct dc_transfer_func *func, +static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size) { struct dc_gamma *gamma = NULL; bool res; - gamma = dc_create_gamma(); - if (!gamma) - return -ENOMEM; + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; - gamma->type = GAMMA_CUSTOM; - gamma->num_entries = lut_size; + gamma->type = GAMMA_CUSTOM; + gamma->num_entries = lut_size; - __drm_lut_to_dc_gamma(lut, gamma, false); + __drm_lut_to_dc_gamma(lut, gamma, false); + } - res = mod_color_calculate_degamma_params(NULL, func, gamma, true); - dc_gamma_release(&gamma); + res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL); + + if (gamma) + dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } +static enum dc_transfer_func_predefined +amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) +{ + switch (tf) { + default: + case AMDGPU_TRANSFER_FUNCTION_DEFAULT: + case AMDGPU_TRANSFER_FUNCTION_IDENTITY: + return TRANSFER_FUNCTION_LINEAR; + case AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF: + case AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF: + return TRANSFER_FUNCTION_SRGB; + case AMDGPU_TRANSFER_FUNCTION_BT709_OETF: + case AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF: + return TRANSFER_FUNCTION_BT709; + case AMDGPU_TRANSFER_FUNCTION_PQ_EOTF: + case AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF: + return TRANSFER_FUNCTION_PQ; + case AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA22; + case AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA24; + case AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA26; + } +} + +static void __to_dc_lut3d_color(struct dc_rgb *rgb, + const struct drm_color_lut lut, + int bit_precision) +{ + rgb->red = drm_color_lut_extract(lut.red, bit_precision); + rgb->green = drm_color_lut_extract(lut.green, bit_precision); + rgb->blue = drm_color_lut_extract(lut.blue, bit_precision); +} + +static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut, + uint32_t lut3d_size, + struct tetrahedral_params *params, + bool use_tetrahedral_9, + int bit_depth) +{ + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_i, i; + + + if (use_tetrahedral_9) { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + } else { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + } + + for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) { + /* + * We should consider the 3D LUT RGB values are distributed + * along four arrays lut0-3 where the first sizes 1229 and the + * other 1228. The bit depth supported for 3dlut channel is + * 12-bit, but DC also supports 10-bit. + * + * TODO: improve color pipeline API to enable the userspace set + * bit depth and 3D LUT size/stride, as specified by VA-API. 
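The distribution described above works out as follows: entry i of the flat 3D LUT lands in array i % 4 at slot i / 4, and 17^3 = 4913 = 1229 + 3 * 1228, so lut0 carries one more point (lut_size/4 + 1) than lut1..lut3. That is why the loop below stops four entries short and the tail entry is written to lut0 separately.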
+ */ + __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); + __to_dc_lut3d_color(&lut1[lut_i], lut[i + 1], bit_depth); + __to_dc_lut3d_color(&lut2[lut_i], lut[i + 2], bit_depth); + __to_dc_lut3d_color(&lut3[lut_i], lut[i + 3], bit_depth); + } + /* lut0 has 1229 points (lut_size/4 + 1) */ + __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); +} + +/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream + * @drm_lut3d: user 3D LUT + * @drm_lut3d_size: size of 3D LUT + * @lut3d: DC 3D LUT + * + * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it + * on DCN accordingly. + */ +static void amdgpu_dm_atomic_lut3d(const struct drm_color_lut *drm_lut3d, + uint32_t drm_lut3d_size, + struct dc_3dlut *lut) +{ + if (!drm_lut3d_size) { + lut->state.bits.initialized = 0; + } else { + /* Stride and bit depth are not programmable by API yet. + * Therefore, only a 17x17x17 3D LUT (12-bit) is supported. + */ + lut->lut_3d.use_tetrahedral_9 = false; + lut->lut_3d.use_12bits = true; + lut->state.bits.initialized = 1; + __drm_3dlut_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d, + lut->lut_3d.use_tetrahedral_9, + MAX_COLOR_3DLUT_BITDEPTH); + } +} + +static int amdgpu_dm_atomic_shaper_lut(const struct drm_color_lut *shaper_lut, + bool has_rom, + enum dc_transfer_func_predefined tf, + uint32_t shaper_size, + struct dc_transfer_func *func_shaper) +{ + int ret = 0; + + if (shaper_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * If user shaper LUT is set, we assume a linear color space + * (linearized by degamma 1D LUT or not). + */ + func_shaper->type = TF_TYPE_DISTRIBUTED_POINTS; + func_shaper->tf = tf; + func_shaper->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_output_tf(func_shaper, shaper_lut, shaper_size, has_rom); + } else { + func_shaper->type = TF_TYPE_BYPASS; + func_shaper->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + +static int amdgpu_dm_atomic_blend_lut(const struct drm_color_lut *blend_lut, + bool has_rom, + enum dc_transfer_func_predefined tf, + uint32_t blend_size, + struct dc_transfer_func *func_blend) +{ + int ret = 0; + + if (blend_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * DRM plane gamma LUT or TF means we are linearizing color + * space before blending (similar to degamma programming). As + * we don't have hardcoded curve support, we use the AMD color + * module to fill the parameters that will be translated to HW + * points. + */ + func_blend->type = TF_TYPE_DISTRIBUTED_POINTS; + func_blend->tf = tf; + func_blend->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_input_tf(NULL, func_blend, blend_lut, blend_size); + } else { + func_blend->type = TF_TYPE_BYPASS; + func_blend->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + +/** + * amdgpu_dm_verify_lut3d_size - verifies if 3D LUT is supported and if user + * shaper and 3D LUTs match the hw supported size + * @adev: amdgpu device + * @plane_state: the DRM plane state + * + * Verifies if pre-blending (DPP) 3D LUT is supported by the HW (DCN 2.0 or + * newer) and if the user shaper and 3D LUTs match the supported size. + * + * Returns: + * 0 on success. -EINVAL if LUT sizes are invalid.
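On the userspace side, a LUT that passes these checks is a blob of drm_color_lut entries sized exactly dim^3. A libdrm sketch (hypothetical fd and fill values):

#include <stdlib.h>
#include <xf86drmMode.h>

#define DIM 17				/* MAX_COLOR_3DLUT_SIZE */
#define LUT3D_ENTRIES (DIM * DIM * DIM)	/* 4913 */

static uint32_t create_lut3d_blob(int fd)
{
	struct drm_color_lut *lut3d = calloc(LUT3D_ENTRIES, sizeof(*lut3d));
	uint32_t blob_id = 0;

	if (!lut3d)
		return 0;
	/* fill lut3d[i].red/.green/.blue here; 0xffff is full scale */
	drmModeCreatePropertyBlob(fd, lut3d, LUT3D_ENTRIES * sizeof(*lut3d),
				  &blob_id);
	free(lut3d);	/* the kernel copies the blob contents */
	return blob_id;
}

The returned blob_id would then be handed to the AMD_PLANE_LUT3D property (see the set_plane_prop() sketch earlier).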
+ */ +int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev, + struct drm_plane_state *plane_state) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + const struct drm_color_lut *shaper = NULL, *lut3d = NULL; + uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE; + bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut; + + /* shaper LUT is only available if 3D LUT color caps */ + exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0; + shaper = __extract_blob_lut(dm_plane_state->shaper_lut, &size); + + if (shaper && size != exp_size) { + drm_dbg(&adev->ddev, + "Invalid Shaper LUT size. Should be %u but got %u.\n", + exp_size, size); + return -EINVAL; + } + + /* The number of 3D LUT entries is the dimension size cubed */ + exp_size = has_3dlut ? dim_size * dim_size * dim_size : 0; + lut3d = __extract_blob_lut(dm_plane_state->lut3d, &size); + + if (lut3d && size != exp_size) { + drm_dbg(&adev->ddev, + "Invalid 3D LUT size. Should be %u but got %u.\n", + exp_size, size); + return -EINVAL; + } + + return 0; +} + /** * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes * @crtc_state: the DRM CRTC state @@ -401,9 +915,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) const struct drm_color_lut *degamma_lut, *regamma_lut; uint32_t degamma_size, regamma_size; bool has_regamma, has_degamma; + enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_LINEAR; bool is_legacy; int r; + tf = amdgpu_tf_to_dc_tf(crtc->regamma_tf); + r = amdgpu_dm_verify_lut_sizes(&crtc->base); if (r) return r; @@ -437,29 +954,25 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * inverse color ramp in legacy userspace. */ crtc->cm_is_degamma_srgb = true; - stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - - r = __set_legacy_tf(stream->out_transfer_func, regamma_lut, + stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; + stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; + /* + * Note: although we pass has_rom as parameter here, we never + * actually use ROM because the color module only takes the ROM + * path if transfer_func->type == PREDEFINED. + * + * See more in mod_color_calculate_regamma_params() + */ + r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut, regamma_size, has_rom); if (r) return r; - } else if (has_regamma) { - /* If atomic regamma, CRTC RGM goes into RGM LUT. */ - stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; - - r = __set_output_tf(stream->out_transfer_func, regamma_lut, - regamma_size, has_rom); + } else { + regamma_size = has_regamma ? regamma_size : 0; + r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut, + regamma_size, has_rom, tf); if (r) return r; - } else { - /* - * No CRTC RGM means we can just put the block into bypass - * since we don't have any plane level adjustments using it. - */ - stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; } /* @@ -495,20 +1008,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) return 0; } -/** - * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. - * @crtc: amdgpu_dm crtc state - * @dc_plane_state: target DC surface - * - * Update the underlying dc_stream_state's input transfer function (ITF) in - * preparation for hardware commit. 
The transfer function used depends on - * the preparation done on the stream for color management. - * - * Returns: - * 0 on success. -ENOMEM if mem allocation fails. - */ -int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, - struct dc_plane_state *dc_plane_state) +static int +map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc, + struct dc_plane_state *dc_plane_state, + struct dc_color_caps *caps) { const struct drm_color_lut *degamma_lut; enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; @@ -531,8 +1034,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, &degamma_size); ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); - dc_plane_state->in_transfer_func->type = - TF_TYPE_DISTRIBUTED_POINTS; + dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; /* * This case isn't fully correct, but also fairly @@ -559,32 +1061,227 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, * map these to the atomic one instead. */ if (crtc->cm_is_degamma_srgb) - dc_plane_state->in_transfer_func->tf = tf; + dc_plane_state->in_transfer_func.tf = tf; else - dc_plane_state->in_transfer_func->tf = + dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR; - r = __set_input_tf(dc_plane_state->in_transfer_func, + r = __set_input_tf(caps, &dc_plane_state->in_transfer_func, degamma_lut, degamma_size); if (r) return r; - } else if (crtc->cm_is_degamma_srgb) { + } else { /* * For legacy gamma support we need the regamma input * in linear space. Assume that the input is sRGB. */ - dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED; - dc_plane_state->in_transfer_func->tf = tf; + dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED; + dc_plane_state->in_transfer_func.tf = tf; if (tf != TRANSFER_FUNCTION_SRGB && - !mod_color_calculate_degamma_params(NULL, - dc_plane_state->in_transfer_func, NULL, false)) + !mod_color_calculate_degamma_params(caps, + &dc_plane_state->in_transfer_func, + NULL, false)) + return -ENOMEM; + } + + return 0; +} + +static int +__set_dm_plane_degamma(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct dc_color_caps *color_caps) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + const struct drm_color_lut *degamma_lut; + enum amdgpu_transfer_function tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + uint32_t degamma_size; + bool has_degamma_lut; + int ret; + + degamma_lut = __extract_blob_lut(dm_plane_state->degamma_lut, + &degamma_size); + + has_degamma_lut = degamma_lut && + !__is_lut_linear(degamma_lut, degamma_size); + + tf = dm_plane_state->degamma_tf; + + /* If we don't have plane degamma LUT nor TF to set on DC, we have + * nothing to do here, return. + */ + if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT) + return -EINVAL; + + dc_plane_state->in_transfer_func.tf = amdgpu_tf_to_dc_tf(tf); + + if (has_degamma_lut) { + ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); + + dc_plane_state->in_transfer_func.type = + TF_TYPE_DISTRIBUTED_POINTS; + + ret = __set_input_tf(color_caps, &dc_plane_state->in_transfer_func, + degamma_lut, degamma_size); + if (ret) + return ret; + } else { + dc_plane_state->in_transfer_func.type = + TF_TYPE_PREDEFINED; + + if (!mod_color_calculate_degamma_params(color_caps, + &dc_plane_state->in_transfer_func, NULL, false)) return -ENOMEM; - } else { - /* ...Otherwise we can just bypass the DGM block.
*/ - dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; + } + return 0; +} + +static int +amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + enum amdgpu_transfer_function shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + enum amdgpu_transfer_function blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + const struct drm_color_lut *shaper_lut, *lut3d, *blend_lut; + uint32_t shaper_size, lut3d_size, blend_size; + int ret; + + dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(dm_plane_state->hdr_mult); + + shaper_lut = __extract_blob_lut(dm_plane_state->shaper_lut, &shaper_size); + shaper_size = shaper_lut != NULL ? shaper_size : 0; + shaper_tf = dm_plane_state->shaper_tf; + lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size); + lut3d_size = lut3d != NULL ? lut3d_size : 0; + + amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, &dc_plane_state->lut3d_func); + ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false, + amdgpu_tf_to_dc_tf(shaper_tf), + shaper_size, + &dc_plane_state->in_shaper_func); + if (ret) { + drm_dbg_kms(plane_state->plane->dev, + "setting plane %d shaper LUT failed.\n", + plane_state->plane->index); + + return ret; + } + + blend_tf = dm_plane_state->blend_tf; + blend_lut = __extract_blob_lut(dm_plane_state->blend_lut, &blend_size); + blend_size = blend_lut != NULL ? blend_size : 0; + + ret = amdgpu_dm_atomic_blend_lut(blend_lut, false, + amdgpu_tf_to_dc_tf(blend_tf), + blend_size, &dc_plane_state->blend_tf); + if (ret) { + drm_dbg_kms(plane_state->plane->dev, + "setting plane %d gamma lut failed.\n", + plane_state->plane->index); + + return ret; } return 0; } + +/** + * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. + * @crtc: amdgpu_dm crtc state + * @plane_state: DRM plane state + * @dc_plane_state: target DC surface + * + * Update the underlying dc_stream_state's input transfer function (ITF) in + * preparation for hardware commit. The transfer function used depends on + * the preparation done on the stream for color management. + * + * Returns: + * 0 on success. -ENOMEM if mem allocation fails. + */ +int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, + struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev); + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + struct drm_color_ctm_3x4 *ctm = NULL; + struct dc_color_caps *color_caps = NULL; + bool has_crtc_cm_degamma; + int ret; + + ret = amdgpu_dm_verify_lut3d_size(adev, plane_state); + if (ret) { + drm_dbg_driver(&adev->ddev, "amdgpu_dm_verify_lut3d_size() failed\n"); + return ret; + } + + if (dc_plane_state->ctx && dc_plane_state->ctx->dc) + color_caps = &dc_plane_state->ctx->dc->caps.color; + + /* Initially, we can just bypass the DGM block. 
*/ + dc_plane_state->in_transfer_func.type = TF_TYPE_BYPASS; + dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR; + + /* From now on, we update values according to the color properties */ + has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb); + + ret = __set_dm_plane_degamma(plane_state, dc_plane_state, color_caps); + if (ret == -ENOMEM) + return ret; + + /* We only have one degamma block available (pre-blending) for the + * whole color correction pipeline, so we can't perform plane and CRTC + * degamma at the same time. Explicitly reject atomic updates when + * userspace sets both plane and CRTC degamma properties. + */ + if (has_crtc_cm_degamma && ret != -EINVAL) { + drm_dbg_kms(crtc->base.crtc->dev, + "doesn't support plane and CRTC degamma at the same time\n"); + return -EINVAL; + } + + /* If we are here, it means we don't have plane degamma settings; check + * whether CRTC degamma is waiting to be mapped to the pre-blending + * degamma block + */ + if (has_crtc_cm_degamma) { + /* + * AMD HW doesn't have post-blending degamma caps. When DRM + * CRTC atomic degamma is set, we map it to the DPP degamma + * block (pre-blending) or, on legacy gamma, we use DPP degamma + * to linearize (implicit degamma) from sRGB/BT709 according to + * the input space. + */ + ret = map_crtc_degamma_to_dc_plane(crtc, dc_plane_state, color_caps); + if (ret) + return ret; + } + + /* Setup plane CTM. */ + if (dm_plane_state->ctm) { + ctm = (struct drm_color_ctm_3x4 *)dm_plane_state->ctm->data; + /* + * DCN2 and older don't support both pre-blending and + * post-blending gamut remap. For this HW family, if we have + * the plane and CRTC CTMs simultaneously, CRTC CTM takes + * priority, and we discard plane CTM, as implemented in + * dcn10_program_gamut_remap(). However, DCN3+ has DPP + * (pre-blending) and MPC (post-blending) `gamut remap` blocks; + * therefore, we can program plane and CRTC CTMs together by + * mapping CRTC CTM to MPC and keeping plane CTM setup at DPP, + * as it's done by dcn30_program_gamut_remap(). + */ + __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix); + + dc_plane_state->gamut_remap_matrix.enable_remap = true; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } else { + /* Bypass CTM.
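 * A drm_color_ctm_3x4 blob carries 12 S31.32 sign-magnitude values in
 * row-major order: a 3x3 matrix plus a per-channel offset column
 * (entries 3, 7 and 11). __drm_ctm_3x4_to_dc_matrix() is assumed here
 * to do the equivalent of an entry-wise fixed-point conversion, e.g.:
 *
 *   for (i = 0; i < 12; i++)
 *       matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i]);
 *
 * so an identity CTM with no offsets has entries 0, 5 and 10 set to
 * 1.0 (1ULL << 32) and everything else zero.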
*/ + dc_plane_state->gamut_remap_matrix.enable_remap = false; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } + + return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 52ecfa746b54..f936a35fa9eb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -326,6 +326,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) if (!connector->state || connector->state->crtc != crtc) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconn = to_amdgpu_dm_connector(connector); break; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index cb0b48bb2a7d..e23a0a276e33 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -96,6 +96,61 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } +/** + * amdgpu_dm_crtc_set_panel_sr_feature() - Manage panel self-refresh features. + * + * @vblank_work: pointer to a struct vblank_control_work object. + * @vblank_enabled: indicates whether the DRM vblank counter is currently + * enabled (true) or disabled (false). + * @allow_sr_entry: represents whether entry into the self-refresh mode is + * allowed (true) or not allowed (false). + * + * The DRM vblank counter enable/disable action is used as the trigger to enable + * or disable various panel self-refresh features: + * + * Panel Replay and PSR SU + * - Enable when: + * - vblank counter is disabled + * - entry is allowed: usermode demonstrates an adequate number of fast + * commits + * - CRC capture window isn't active + * - Keep enabled even when vblank counter gets enabled + * + * PSR1 + * - Enable condition same as above + * - Disable when vblank counter is enabled + */ +static void amdgpu_dm_crtc_set_panel_sr_feature( + struct vblank_control_work *vblank_work, + bool vblank_enabled, bool allow_sr_entry) +{ + struct dc_link *link = vblank_work->stream->link; + bool is_sr_active = (link->replay_settings.replay_allow_active || + link->psr_settings.psr_allow_active); + bool is_crc_window_active = false; + +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + is_crc_window_active = + amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base); +#endif + + if (link->replay_settings.replay_feature_enabled && + allow_sr_entry && !is_sr_active && !is_crc_window_active) { + amdgpu_dm_replay_enable(vblank_work->stream, true); + } else if (vblank_enabled) { + if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active) + amdgpu_dm_psr_disable(vblank_work->stream); + } else if (link->psr_settings.psr_feature_enabled && + allow_sr_entry && !is_sr_active && !is_crc_window_active) { + + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context; + + if (!aconn->disallow_edp_enter_psr) + amdgpu_dm_psr_enable(vblank_work->stream); + } +} + static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) { struct vblank_control_work *vblank_work = @@ -124,24 +179,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) * fill_dc_dirty_rects().
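 * An illustrative end-to-end flow of the helper above (the entry-delay
 * constant is AMDGPU_DM_PSR_ENTRY_DELAY from amdgpu_dm_psr.h; the exact
 * bookkeeping of allow_psr_entry lives in the commit path):
 *   1. userspace stops flipping and DRM disables the vblank counter;
 *   2. the vblank control worker runs with enable == false;
 *   3. allow_sr_entry is true once usermode has shown enough fast
 *      commits, so the helper arms Panel Replay when the link supports
 *      it, otherwise PSR;
 *   4. a new flip re-enables the vblank counter: PSR1 is torn down,
 *      while PSR-SU and Panel Replay are left enabled.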
*/ if (vblank_work->stream && vblank_work->stream->link) { - /* - * Prioritize replay, instead of psr - */ - if (vblank_work->stream->link->replay_settings.replay_feature_enabled) - amdgpu_dm_replay_enable(vblank_work->stream, false); - else if (vblank_work->enable) { - if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && - vblank_work->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(vblank_work->stream); - } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && - !vblank_work->stream->link->psr_settings.psr_allow_active && -#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY - !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) && -#endif - vblank_work->stream->link->panel_config.psr.disallow_replay && - vblank_work->acrtc->dm_irq_params.allow_psr_entry) { - amdgpu_dm_psr_enable(vblank_work->stream); - } + amdgpu_dm_crtc_set_panel_sr_feature( + vblank_work, vblank_work->enable, + vblank_work->acrtc->dm_irq_params.allow_psr_entry || + vblank_work->stream->link->replay_settings.replay_feature_enabled); } mutex_unlock(&dm->dc_lock); @@ -260,6 +301,7 @@ static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *cr state->freesync_config = cur->freesync_config; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; + state->regamma_tf = cur->regamma_tf; state->crc_skip_count = cur->crc_skip_count; state->mpo_requested = cur->mpo_requested; /* TODO: duplicate dc_stream after the stream object is flattened */ @@ -296,6 +338,70 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) } #endif +#ifdef AMD_PRIVATE_COLOR +/** + * dm_crtc_additional_color_mgmt - enable additional color properties + * @crtc: DRM CRTC + * + * This function lets the driver enable the post-blending CRTC regamma + * transfer function property in addition to the DRM CRTC gamma LUT. The + * default value means a linear transfer function, which is the default CRTC + * gamma LUT behaviour without this property.
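 * A minimal usage sketch from userspace, assuming the property id was
 * resolved by name (both the name and the inverse-EOTF enum value below
 * are assumptions based on the rest of this series, not shown here):
 *
 *   drmModeAtomicAddProperty(req, crtc_id, regamma_tf_prop,
 *                            AMDGPU_TRANSFER_FUNCTION_INV_PQ_EOTF);
 *   drmModeAtomicCommit(fd, req, 0, NULL);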
+ */ +static void +dm_crtc_additional_color_mgmt(struct drm_crtc *crtc) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + + if (adev->dm.dc->caps.color.mpc.ogam_ram) + drm_object_attach_property(&crtc->base, + adev->mode_info.regamma_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); +} + +static int +amdgpu_dm_atomic_crtc_set_property(struct drm_crtc *crtc, + struct drm_crtc_state *state, + struct drm_property *property, + uint64_t val) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state); + + if (property == adev->mode_info.regamma_tf_property) { + if (acrtc_state->regamma_tf != val) { + acrtc_state->regamma_tf = val; + acrtc_state->base.color_mgmt_changed |= 1; + } + } else { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n", + crtc->base.id, crtc->name, + property->base.id, property->name); + return -EINVAL; + } + + return 0; +} + +static int +amdgpu_dm_atomic_crtc_get_property(struct drm_crtc *crtc, + const struct drm_crtc_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state); + + if (property == adev->mode_info.regamma_tf_property) + *val = acrtc_state->regamma_tf; + else + return -EINVAL; + + return 0; +} +#endif + /* Implemented only the options currently available for the driver */ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .reset = amdgpu_dm_crtc_reset_state, @@ -314,6 +420,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { #if defined(CONFIG_DEBUG_FS) .late_register = amdgpu_dm_crtc_late_register, #endif +#ifdef AMD_PRIVATE_COLOR + .atomic_set_property = amdgpu_dm_atomic_crtc_set_property, + .atomic_get_property = amdgpu_dm_atomic_crtc_get_property, +#endif }; static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc) @@ -489,6 +599,9 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); +#ifdef AMD_PRIVATE_COLOR + dm_crtc_additional_color_mgmt(&acrtc->base); +#endif return 0; fail: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 13a177d34376..4d7a5d470b1e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1249,7 +1249,7 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b size_t size, loff_t *pos) { int r; - uint8_t data[36]; + uint8_t data[36] = {0}; struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dm_crtc_state *acrtc_state; uint32_t write_size = 36; @@ -1483,7 +1483,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, const uint32_t rd_buf_size = 10; struct pipe_ctx *pipe_ctx; ssize_t result = 0; - int i, r, str_len = 30; + int i, r, str_len = 10; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); @@ -1495,7 +1495,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1596,7 +1598,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f,
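/*
 * (Aside on the dp_dsc_clock_en_read() hunk above: rd_buf is allocated
 * with rd_buf_size = 10, so shrinking str_len from 30 to the same 10
 * keeps the string formatting bounded by the actual allocation; this is
 * presumably the motivation for the change.)
 */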
const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1681,7 +1685,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1780,7 +1786,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1865,7 +1873,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1964,7 +1974,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2045,7 +2057,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2141,7 +2155,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2220,7 +2236,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2276,7 +2294,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == 
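/*
 * The repeated hunks in this file all make the same change: on MST,
 * several connectors can share one dc_link, so matching a pipe by link
 * alone may pick another connector's stream. Also requiring the
 * stream's sink to equal this connector's dc_sink pins the lookup to
 * the right pipe before the DSC registers are read or written.
 */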
aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2347,7 +2367,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2418,7 +2440,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2936,7 +2960,7 @@ static int psr_read_residency(void *data, u64 *val) { struct amdgpu_dm_connector *connector = data; struct dc_link *link = connector->dc_link; - u32 residency; + u32 residency = 0; link->dc->link_srv->edp_get_psr_residency(link, &residency); @@ -2971,6 +2995,132 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val) return 0; } +/* check whether the kernel disallows eDP entering the psr state + * cat /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr + * 0: allow edp enter psr; 1: disallow + */ +static int disallow_edp_enter_psr_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *aconnector = data; + + *val = (u64) aconnector->disallow_edp_enter_psr; + return 0; +} + +/* set whether the kernel disallows eDP entering the psr state + * echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr + * 0: allow edp enter psr; 1: disallow + * + * usage: a test app reads crc from the PSR eDP rx. + * + * during kernel boot up, the kernel writes dpcd 0x170 = 5. + * this notifies the eDP rx that psr is enabled and lets the rx check crc. + * rx fw will start checking crc for rx internal logic. + * the crc read count within dpcd 0x246 is not updated and + * its value is 0. when the eDP tx driver wants to read the rx crc + * from dpcd 0x246, 0x270, a read count of 0 leads to a tx driver + * timeout. + * + * to avoid this, we add this debugfs to let the test app disable + * rx crc checking for rx internal logic. then the test app can read + * a non-zero crc read count. + * + * the expected app sequence is as below: + * 1. disable eDP PHY and notify eDP rx with dpcd 0x600 = 2. + * 2. echo 0x1 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr + * 3. enable eDP PHY and notify eDP rx with dpcd 0x600 = 1 but + * without dpcd 0x170 = 5. + * 4. read crc from rx dpcd 0x270, 0x246, etc. + * 5. echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr. + * this will bring eDP back to normal with psr setup dpcd 0x170 = 5. + */ +static int disallow_edp_enter_psr_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *aconnector = data; + + aconnector->disallow_edp_enter_psr = val ?
true : false; + return 0; +} + +static int dmub_trace_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub; + enum dmub_gpint_command cmd; + u64 mask = 0xffff; + u8 shift = 0; + u32 res; + int i; + + if (!srv->fw_version) + return -EINVAL; + + for (i = 0; i < 4; i++) { + res = (val & mask) >> shift; + + switch (i) { + case 0: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0; + break; + case 1: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1; + break; + case 2: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2; + break; + case 3: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3; + break; + } + + if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL, DM_DMUB_WAIT_TYPE_WAIT)) + return -EIO; + + usleep_range(100, 1000); + + mask <<= 16; + shift += 16; + } + + return 0; +} + +static int dmub_trace_mask_show(void *data, u64 *val) +{ + enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0; + struct amdgpu_device *adev = data; + struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub; + u8 shift = 0; + u64 raw = 0; + u64 res = 0; + int i = 0; + + if (!srv->fw_version) + return -EINVAL; + + while (i < 4) { + uint32_t response; + + if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return -EIO; + + raw = response; + usleep_range(100, 1000); + + cmd++; + res |= (raw << shift); + shift += 16; + i++; + } + + *val = res; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(dmub_trace_mask_fops, dmub_trace_mask_show, + dmub_trace_mask_set, "0x%llx\n"); + /* * Set dmcub trace event IRQ enable or disable. * Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en @@ -3013,6 +3163,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops, allow_edp_hotplug_detection_get, allow_edp_hotplug_detection_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(disallow_edp_enter_psr_fops, + disallow_edp_enter_psr_get, + disallow_edp_enter_psr_set, "%llu\n"); + DEFINE_SHOW_ATTRIBUTE(current_backlight); DEFINE_SHOW_ATTRIBUTE(target_backlight); @@ -3186,6 +3340,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) &edp_ilr_debugfs_fops); debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector, &allow_edp_hotplug_detection_fops); + debugfs_create_file("disallow_edp_enter_psr", 0644, dir, connector, + &disallow_edp_enter_psr_fops); } for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { @@ -3647,12 +3803,16 @@ static int capabilities_show(struct seq_file *m, void *unused) bool mall_supported = dc->caps.mall_size_total; bool subvp_supported = dc->caps.subvp_fw_processing_delay_us; unsigned int mall_in_use = false; - unsigned int subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state); + unsigned int subvp_in_use = false; + struct hubbub *hubbub = dc->res_pool->hubbub; if (hubbub->funcs->get_mall_en) hubbub->funcs->get_mall_en(hubbub, &mall_in_use); + if (dc->cap_funcs.get_subvp_en) + subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state); + seq_printf(m, "mall supported: %s, enabled: %s\n", mall_supported ? "yes" : "no", mall_in_use ? 
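/*
 * Worked example for the dmub_trace_mask plumbing above: writing
 * 0x0001000200030004 to amdgpu_dm_dmub_trace_mask sends four GPINT
 * commands, WORD0 = 0x0004, WORD1 = 0x0003, WORD2 = 0x0002 and
 * WORD3 = 0x0001, since (val & mask) >> shift walks the 64-bit value in
 * 16-bit steps with mask and shift advancing by 16 each pass;
 * dmub_trace_mask_show() reassembles the words in the same order by
 * shifting each reply back into place.
 */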
"yes" : "no"); seq_printf(m, "sub-viewport supported: %s, enabled: %s\n", @@ -3880,6 +4040,9 @@ void dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root, adev, &force_timing_sync_ops); + debugfs_create_file_unsafe("amdgpu_dm_dmub_trace_mask", 0644, root, + adev, &dmub_trace_mask_fops); + debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root, adev, &dmcub_trace_event_state_fops); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index b54d646a7c73..e339c7a8d541 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -741,6 +741,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, dc->ctx->dce_version == DCN_VERSION_3_14 || dc->ctx->dce_version == DCN_VERSION_3_15 || dc->ctx->dce_version == DCN_VERSION_3_5 || + dc->ctx->dce_version == DCN_VERSION_3_51 || dc->ctx->dce_version == DCN_VERSION_3_16) hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 9cd83b780102..c27063305a13 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -31,6 +31,7 @@ #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <drm/drm_edid.h> +#include <drm/drm_fixed.h> #include "dm_services.h" #include "amdgpu.h" @@ -63,6 +64,14 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.disable_fams = true; break; + /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */ + case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB): + case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B): + case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A): + case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1): + DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); + edid_caps->panel_patch.remove_sink_ext_caps = true; + break; default: return; } @@ -113,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->edid_hdmi = connector->display_info.is_hdmi; + apply_edid_quirks(edid_buf, edid_caps); + sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) return result; @@ -139,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps( else edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION; - apply_edid_quirks(edid_buf, edid_caps); - kfree(sads); kfree(sadb); @@ -210,7 +219,7 @@ static void dm_helpers_construct_old_payload( struct drm_dp_mst_atomic_payload *old_payload) { struct drm_dp_mst_atomic_payload *pos; - int pbn_per_slot = mst_state->pbn_div; + int pbn_per_slot = dfixed_trunc(mst_state->pbn_div); u8 next_payload_vc_start = mgr->next_start_slot; u8 payload_vc_start = new_payload->vc_start_slot; u8 allocated_time_slots; @@ -333,15 +342,14 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( return ACT_SUCCESS; } -bool dm_helpers_dp_mst_send_payload_allocation( +void dm_helpers_dp_mst_send_payload_allocation( struct dc_context *ctx, - const struct dc_stream_state *stream, - bool enable) + const struct dc_stream_state *stream) { 
struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_atomic_payload *new_payload, old_payload; + struct drm_dp_mst_atomic_payload *new_payload; enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; int ret = 0; @@ -349,25 +357,13 @@ bool dm_helpers_dp_mst_send_payload_allocation( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_root) - return false; + return; mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); - new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); - if (!enable) { - set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; - clr_flag = MST_ALLOCATE_NEW_PAYLOAD; - } - - if (enable) { - ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload); - } else { - dm_helpers_construct_old_payload(mst_mgr, mst_state, - new_payload, &old_payload); - drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); - } + ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload); if (ret) { amdgpu_dm_set_mst_status(&aconnector->mst_status, @@ -378,10 +374,36 @@ bool dm_helpers_dp_mst_send_payload_allocation( amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false); } - - return true; } +void dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + struct dc_context *ctx, + const struct dc_stream_state *stream) +{ + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_atomic_payload *new_payload, old_payload; + enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; + enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->mst_root) + return; + + mst_mgr = &aconnector->mst_root->mst_mgr; + mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); + new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); + dm_helpers_construct_old_payload(mst_mgr, mst_state, + new_payload, &old_payload); + + drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); + + amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true); + amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false); + } + void dm_dtn_log_begin(struct dc_context *ctx, struct dc_log_buffer_ctx *log_ctx) { @@ -536,11 +558,8 @@ bool dm_helpers_dp_read_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; - if (!aconnector) { - drm_dbg_dp(aconnector->base.dev, - "Failed to find connector for link!\n"); + if (!aconnector) return false; - } return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data, size) == size; @@ -962,6 +981,11 @@ int dm_helper_dmub_aux_transfer_sync( struct aux_payload *payload, enum aux_return_code_type *operation_result) { + if (!link->hpd_status) { + *operation_result = AUX_RET_ERROR_HPD_DISCON; + return -1; + } + return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, operation_result); } @@ -1216,6 +1240,9 @@ bool dm_helpers_dp_handle_test_pattern_request( } + pipe_ctx->stream->test_pattern.type = test_pattern; + pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space; + dc_link_dp_set_test_pattern( (struct dc_link *) link, test_pattern, 
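/*
 * (Summary of the payload rework above: the old boolean 'enable'
 * parameter is gone. dm_helpers_dp_mst_send_payload_allocation() now
 * only performs the allocation step via drm_dp_add_payload_part2(),
 * while the new dm_helpers_dp_mst_update_mst_mgr_for_deallocation()
 * rebuilds the old payload and hands it to
 * drm_dp_remove_payload_part2(), so DC can call each half at the right
 * point of stream enable and disable.)
 */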
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 51467f132c26..3390f0d8420a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -711,7 +711,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev, { bool st; enum dc_irq_source irq_source; - + struct dc *dc = adev->dm.dc; struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id]; if (!acrtc) { @@ -729,6 +729,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev, st = (state == AMDGPU_IRQ_STATE_ENABLE); + if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dc, false); + dc_interrupt_set(adev->dm.dc, irq_source, st); return 0; } @@ -894,10 +897,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_dm_connector *amdgpu_dm_connector = - to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *amdgpu_dm_connector; + const struct dc_link *dc_link; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + + dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, @@ -930,9 +938,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_dm_connector *amdgpu_dm_connector = - to_amdgpu_dm_connector(connector); - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; + struct amdgpu_dm_connector *amdgpu_dm_connector; + const struct dc_link *dc_link; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index d3b13d362eda..35733d5c5193 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -27,6 +27,8 @@ #include <drm/display/drm_dp_mst_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fixed.h> +#include <drm/drm_edid.h> #include "dm_services.h" #include "amdgpu.h" #include "amdgpu_dm.h" @@ -44,7 +46,7 @@ #include "amdgpu_dm_debugfs.h" #endif -#include "dc/dcn20/dcn20_resource.h" +#include "dc/resource/dcn20/dcn20_resource.h" #define PEAK_FACTOR_X1000 1006 @@ -424,8 +426,7 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector, { struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, connector); - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); return &adev->dm.mst_encoders[acrtc->crtc_id].base; @@ -790,25 +791,12 @@ struct dsc_mst_fairness_params { struct amdgpu_dm_connector *aconnector; }; -static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) -{ - u8 link_coding_cap; - uint16_t fec_overhead_multiplier_x1000 
= PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; - - link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); - if (link_coding_cap == DP_128b_132b_ENCODING) - fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; - - return fec_overhead_multiplier_x1000; -} - -static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) +static int kbps_to_peak_pbn(int kbps) { u64 peak_kbps = kbps; peak_kbps *= 1006; - peak_kbps *= fec_overhead_multiplier_x1000; - peak_kbps = div_u64(peak_kbps, 1000 * 1000); + peak_kbps = div_u64(peak_kbps, 1000); return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); } @@ -909,12 +897,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state, int link_timeslots_used; int fair_pbn_alloc; int ret = 0; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = - kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; + kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -941,10 +928,10 @@ static int increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div)); fair_pbn_alloc = - (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; + (63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div); if (initial_slack[next_index] > fair_pbn_alloc) { vars[next_index].pbn += fair_pbn_alloc; @@ -1010,7 +997,6 @@ static int try_disable_dsc(struct drm_atomic_state *state, int next_index; int remaining_to_try = 0; int ret; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled @@ -1040,7 +1026,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, if (next_index == -1) break; - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1053,7 +1039,8 @@ static int try_disable_dsc(struct drm_atomic_state *state, vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000); + vars[next_index].pbn = kbps_to_peak_pbn( + params[next_index].bw_range.max_kbps); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1082,7 +1069,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int count = 0; int i, k, ret; bool debugfs_overwrite = false; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); memset(params, 0, sizeof(params)); @@ -1147,7 +1133,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, /* Try no compression */ for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; 
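/*
 * Worked example for the simplified kbps_to_peak_pbn() above (the FEC
 * overhead multiplier is dropped; only the 1.006 margin remains): for
 * stream_kbps = 8,100,000, peak_kbps = 8,100,000 * 1006 / 1000 =
 * 8,148,600, and the result is DIV64_U64_ROUND_UP(8,148,600 * 64,
 * 54 * 8 * 1000) = ceil(521,510,400 / 432,000) = 1208 PBN, matching the
 * definition of one PBN as 54/64 MBps of payload bandwidth.
 */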
vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, @@ -1166,7 +1152,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, /* Try max compression */ for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1174,7 +1160,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (ret < 0) return ret; } else { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1218,8 +1204,10 @@ static bool is_dsc_need_re_compute( if (dc_link->type != dc_connection_mst_branch) return false; - if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || - dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) + /* add a check for older MST DSC with no virtual DPCDs */ + if (needs_dsc_aux_workaround(dc_link) && + (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || + dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))) return false; for (i = 0; i < MAX_PIPES; i++) @@ -1239,7 +1227,15 @@ static bool is_dsc_need_re_compute( continue; aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; - if (!aconnector) + if (!aconnector || !aconnector->dsc_aux) + continue; + + /* + * check if cached virtual MST DSC caps are available and DSC is supported + * as per specifications in their Virtual DPCD registers. 
+ */ + if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported || + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) continue; stream_on_link[new_stream_on_link_num] = aconnector; @@ -1500,14 +1496,16 @@ int pre_validate_dsc(struct drm_atomic_state *state, int ind = find_crtc_index_in_state_by_stream(state, stream); if (ind >= 0) { + struct drm_connector *connector; struct amdgpu_dm_connector *aconnector; struct drm_connector_state *drm_new_conn_state; struct dm_connector_state *dm_new_conn_state; struct dm_crtc_state *dm_old_crtc_state; - aconnector = + connector = amdgpu_dm_find_first_crtc_matching_connector(state, state->crtcs[ind].ptr); + aconnector = to_amdgpu_dm_connector(connector); drm_new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); @@ -1598,54 +1596,84 @@ enum dc_status dm_dp_mst_is_port_support_mode( struct amdgpu_dm_connector *aconnector, struct dc_stream_state *stream) { - int bpp, pbn, branch_max_throughput_mps = 0; + int pbn, branch_max_throughput_mps = 0; struct dc_link_settings cur_link_settings; unsigned int end_to_end_bw_in_kbps = 0; unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; - unsigned int max_compressed_bw_in_kbps = 0; struct dc_dsc_bw_range bw_range = {0}; - struct drm_dp_mst_topology_mgr *mst_mgr; + struct dc_dsc_config_options dsc_options = {0}; /* - * check if the mode could be supported if DSC pass-through is supported - * AND check if there enough bandwidth available to support the mode - * with DSC enabled. + * Consider the case where the depth of the mst topology tree is equal to or less than 2 + * A. When dsc bitstream can be transmitted along the entire path + * 1. dsc is possible between source and branch/leaf device (common dsc params are possible), AND + * 2. dsc passthrough supported at MST branch, or + * 3. dsc decoding supported at leaf MST device + * Use maximum dsc compression as bw constraint + * B.
When dsc bitstream cannot be transmitted along the entire path + * Use native bw as bw constraint */ if (is_dsc_common_config_possible(stream, &bw_range) && - aconnector->mst_output_port->passthrough_aux) { - mst_mgr = aconnector->mst_output_port->mgr; - mutex_lock(&mst_mgr->lock); - + (aconnector->mst_output_port->passthrough_aux || + aconnector->dsc_aux == &aconnector->mst_output_port->aux)) { cur_link_settings = stream->link->verified_link_cap; - - upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, - &cur_link_settings - ); + upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); - /* pick the bottleneck */ - end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, - down_link_bw_in_kbps); + /* pick the end to end bw bottleneck */ + end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps); - mutex_unlock(&mst_mgr->lock); + if (end_to_end_bw_in_kbps < bw_range.min_kbps) { + DRM_DEBUG_DRIVER("maximum dsc compression cannot fit into end-to-end bandwidth\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } - /* - * use the maximum dsc compression bandwidth as the required - * bandwidth for the mode + if (end_to_end_bw_in_kbps < bw_range.stream_kbps) { + dc_dsc_get_default_config_option(stream->link->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16; + if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0], + &stream->sink->dsc_caps.dsc_dec_caps, + &dsc_options, + end_to_end_bw_in_kbps, + &stream->timing, + dc_link_get_highest_encoding_format(stream->link), + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc and dsc config found\n"); + } else { + DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc but dsc config not found\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } + } + } else { + /* Check if mode could be supported within max slot + * number of current mst link and full_pbn of mst links. 
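 * Illustrative numbers for the check below: if
 * kbps_to_peak_pbn(stream_kbps) yields 1000 PBN and
 * dm_mst_get_pbn_divider() reports 20 PBN per time slot, the mode needs
 * DIV_ROUND_UP(1000, 20) = 50 slots, which fits the 63 usable slots of
 * an 8b/10b link (64 on 128b/132b) but must also stay within full_pbn
 * on every link of the path.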
*/ - max_compressed_bw_in_kbps = bw_range.min_kbps; - - if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) { - DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n"); + int pbn_div, slot_num, max_slot_num; + enum dc_link_encoding_format link_encoding; + uint32_t stream_kbps = + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(stream->link)); + + pbn = kbps_to_peak_pbn(stream_kbps); + pbn_div = dm_mst_get_pbn_divider(stream->link); + slot_num = DIV_ROUND_UP(pbn, pbn_div); + + link_encoding = dc_link_get_highest_encoding_format(stream->link); + if (link_encoding == DC_LINK_ENCODING_DP_8b_10b) + max_slot_num = 63; + else if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) + max_slot_num = 64; + else { + DRM_DEBUG_DRIVER("Invalid link encoding format\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } - } else { - /* check if mode could be supported within full_pbn */ - bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; - pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false); - if (pbn > aconnector->mst_output_port->full_pbn) + if (slot_num > max_slot_num || + pbn > aconnector->mst_output_port->full_pbn) { + DRM_DEBUG_DRIVER("Mode can not be supported within mst links!"); return DC_FAIL_BANDWIDTH_VALIDATE; + } } /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 37c820ab0fdb..fa84d34b7373 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -46,9 +46,6 @@ #define SYNAPTICS_CASCADED_HUB_ID 0x5A #define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 
1 : 0) -#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031 -#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000 - enum mst_msg_ready_type { NONE_MSG_RDY_EVENT = 0, DOWN_REP_MSG_RDY_EVENT = 1, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 116121e647ca..8a4c40b4c27e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -1337,8 +1337,14 @@ static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane) amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); WARN_ON(amdgpu_state == NULL); - if (amdgpu_state) - __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); + if (!amdgpu_state) + return; + + __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); + amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT; + amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; } static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane) @@ -1357,6 +1363,27 @@ static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct dc_plane_state_retain(dm_plane_state->dc_state); } + if (old_dm_plane_state->degamma_lut) + dm_plane_state->degamma_lut = + drm_property_blob_get(old_dm_plane_state->degamma_lut); + if (old_dm_plane_state->ctm) + dm_plane_state->ctm = + drm_property_blob_get(old_dm_plane_state->ctm); + if (old_dm_plane_state->shaper_lut) + dm_plane_state->shaper_lut = + drm_property_blob_get(old_dm_plane_state->shaper_lut); + if (old_dm_plane_state->lut3d) + dm_plane_state->lut3d = + drm_property_blob_get(old_dm_plane_state->lut3d); + if (old_dm_plane_state->blend_lut) + dm_plane_state->blend_lut = + drm_property_blob_get(old_dm_plane_state->blend_lut); + + dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf; + dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult; + dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf; + dm_plane_state->blend_tf = old_dm_plane_state->blend_tf; + return &dm_plane_state->base; } @@ -1424,12 +1451,206 @@ static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, { struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + if (dm_plane_state->degamma_lut) + drm_property_blob_put(dm_plane_state->degamma_lut); + if (dm_plane_state->ctm) + drm_property_blob_put(dm_plane_state->ctm); + if (dm_plane_state->lut3d) + drm_property_blob_put(dm_plane_state->lut3d); + if (dm_plane_state->shaper_lut) + drm_property_blob_put(dm_plane_state->shaper_lut); + if (dm_plane_state->blend_lut) + drm_property_blob_put(dm_plane_state->blend_lut); + if (dm_plane_state->dc_state) dc_plane_state_release(dm_plane_state->dc_state); drm_atomic_helper_plane_destroy_state(plane, state); } +#ifdef AMD_PRIVATE_COLOR +static void +dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm, + struct drm_plane *plane) +{ + struct amdgpu_mode_info mode_info = dm->adev->mode_info; + struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp; + + /* Check HW color pipeline capabilities on DPP block (pre-blending) + * before exposing related properties. 
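 * As wired below, the mapping is: dgam_ram/gamma_corr gate the plane
 * degamma LUT/TF, mpc.gamut_remap gates the plane CTM, hw_3d_lut gates
 * the shaper LUT/TF plus the 3D LUT, and ogam_ram gates the blend
 * LUT/TF. Userspace is expected to discover whichever subset got
 * exposed via drmModeObjectGetProperties() on the plane and match
 * properties by name (e.g. "AMD_PLANE_DEGAMMA_LUT"; the names are
 * assumed from the property creation elsewhere in this series).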
+ */ + if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) { + drm_object_attach_property(&plane->base, + mode_info.plane_degamma_lut_property, + 0); + drm_object_attach_property(&plane->base, + mode_info.plane_degamma_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, + dm->adev->mode_info.plane_degamma_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); + } + /* HDR MULT is always available */ + drm_object_attach_property(&plane->base, + dm->adev->mode_info.plane_hdr_mult_property, + AMDGPU_HDR_MULT_DEFAULT); + + /* Only enable plane CTM if both DPP and MPC gamut remap are available. */ + if (dm->dc->caps.color.mpc.gamut_remap) + drm_object_attach_property(&plane->base, + dm->adev->mode_info.plane_ctm_property, 0); + + if (dpp_color_caps.hw_3d_lut) { + drm_object_attach_property(&plane->base, + mode_info.plane_shaper_lut_property, 0); + drm_object_attach_property(&plane->base, + mode_info.plane_shaper_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, + mode_info.plane_shaper_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); + drm_object_attach_property(&plane->base, + mode_info.plane_lut3d_property, 0); + drm_object_attach_property(&plane->base, + mode_info.plane_lut3d_size_property, + MAX_COLOR_3DLUT_SIZE); + } + + if (dpp_color_caps.ogam_ram) { + drm_object_attach_property(&plane->base, + mode_info.plane_blend_lut_property, 0); + drm_object_attach_property(&plane->base, + mode_info.plane_blend_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, + mode_info.plane_blend_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); + } +} + +static int +dm_atomic_plane_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + struct amdgpu_device *adev = drm_to_adev(plane->dev); + bool replaced = false; + int ret; + + if (property == adev->mode_info.plane_degamma_lut_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->degamma_lut, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_degamma_tf_property) { + if (dm_plane_state->degamma_tf != val) { + dm_plane_state->degamma_tf = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else if (property == adev->mode_info.plane_hdr_mult_property) { + if (dm_plane_state->hdr_mult != val) { + dm_plane_state->hdr_mult = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else if (property == adev->mode_info.plane_ctm_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->ctm, + val, + sizeof(struct drm_color_ctm_3x4), -1, + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_shaper_lut_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->shaper_lut, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_shaper_tf_property) { + if (dm_plane_state->shaper_tf != val) { + dm_plane_state->shaper_tf = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else if (property == adev->mode_info.plane_lut3d_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->lut3d, + val,
-1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_blend_lut_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->blend_lut, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_blend_tf_property) { + if (dm_plane_state->blend_tf != val) { + dm_plane_state->blend_tf = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n", + plane->base.id, plane->name, + property->base.id, property->name); + return -EINVAL; + } + + return 0; +} + +static int +dm_atomic_plane_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + struct amdgpu_device *adev = drm_to_adev(plane->dev); + + if (property == adev->mode_info.plane_degamma_lut_property) { + *val = (dm_plane_state->degamma_lut) ? + dm_plane_state->degamma_lut->base.id : 0; + } else if (property == adev->mode_info.plane_degamma_tf_property) { + *val = dm_plane_state->degamma_tf; + } else if (property == adev->mode_info.plane_hdr_mult_property) { + *val = dm_plane_state->hdr_mult; + } else if (property == adev->mode_info.plane_ctm_property) { + *val = (dm_plane_state->ctm) ? + dm_plane_state->ctm->base.id : 0; + } else if (property == adev->mode_info.plane_shaper_lut_property) { + *val = (dm_plane_state->shaper_lut) ? + dm_plane_state->shaper_lut->base.id : 0; + } else if (property == adev->mode_info.plane_shaper_tf_property) { + *val = dm_plane_state->shaper_tf; + } else if (property == adev->mode_info.plane_lut3d_property) { + *val = (dm_plane_state->lut3d) ? + dm_plane_state->lut3d->base.id : 0; + } else if (property == adev->mode_info.plane_blend_lut_property) { + *val = (dm_plane_state->blend_lut) ?
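/*
 * Note on drm_property_replace_blob_from_id() as used throughout
 * dm_atomic_plane_set_property(): it resolves the blob id passed in
 * 'val', validates the blob against the expected total size or the
 * expected element size (whichever of the two arguments is not -1),
 * swaps the reference into the plane state, and reports through
 * 'replaced' whether the blob actually changed, which is what feeds
 * color_mgmt_changed.
 */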
+ dm_plane_state->blend_lut->base.id : 0; + } else if (property == adev->mode_info.plane_blend_tf_property) { + *val = dm_plane_state->blend_tf; + + } else { + return -EINVAL; + } + + return 0; +} +#endif + static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -1438,6 +1659,10 @@ static const struct drm_plane_funcs dm_plane_funcs = { .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, .format_mod_supported = amdgpu_dm_plane_format_mod_supported, +#ifdef AMD_PRIVATE_COLOR + .atomic_set_property = dm_atomic_plane_set_property, + .atomic_get_property = dm_atomic_plane_get_property, +#endif }; int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, @@ -1517,6 +1742,9 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, drm_plane_helper_add(plane, &dm_plane_helper_funcs); +#ifdef AMD_PRIVATE_COLOR + dm_atomic_plane_attach_color_mgmt_properties(dm, plane); +#endif /* Create (reset) the plane state */ if (plane->funcs->reset) plane->funcs->reset(plane); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 08ce3bb8f640..bfa090432ce2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -51,6 +51,9 @@ static bool link_supports_psrsu(struct dc_link *link) !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap) return false; + if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU) + return false; + return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub); } @@ -138,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) * amdgpu_dm_psr_enable() - enable psr f/w * @stream: stream state * - * Return: true if success */ -bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +void amdgpu_dm_psr_enable(struct dc_stream_state *stream) { struct dc_link *link = stream->link; unsigned int vsync_rate_hz = 0; @@ -187,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1) power_opt |= psr_power_opt_z10_static_screen; - return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); + dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); + + if (link->ctx->dc->caps.ips_support) + dc_allow_idle_optimizations(link->ctx->dc, true); } /* @@ -207,7 +212,7 @@ bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) } /* - * amdgpu_dm_psr_disable() - disable psr f/w + * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams * if psr is enabled on any stream * * Return: true if success diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h index 6806b3c9c84b..1fdfd183c0d9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h @@ -32,7 +32,7 @@ #define AMDGPU_DM_PSR_ENTRY_DELAY 5 void amdgpu_dm_set_psr_caps(struct dc_link *link); -bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); +void amdgpu_dm_psr_enable(struct dc_stream_state *stream); bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c index 5ce542b1f860..738a58eebba7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -60,21 +60,26 @@ static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connecto if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT) return false; + // Sink shall populate line deviation information + if (dpcd_caps->pr_info.pixel_deviation_per_line == 0 || + dpcd_caps->pr_info.max_deviation_line == 0) + return false; + return true; } /* - * amdgpu_dm_setup_replay() - setup replay configuration + * amdgpu_dm_set_replay_caps() - setup Replay capabilities * @link: link * @aconnector: aconnector * */ -bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector) { - struct replay_config pr_config; + struct replay_config pr_config = { 0 }; union replay_debug_flags *debug_flags = NULL; - // For eDP, if Replay is supported, return true to skip checks + // If Replay is already set to support, return true to skip checks if (link->replay_settings.config.replay_supported) return true; @@ -87,27 +92,50 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac if (!link_supports_replay(link, aconnector)) return false; - // Mark Replay is supported in link and update related attributes + // Mark Replay is supported in pr_config pr_config.replay_supported = true; - pr_config.replay_power_opt_supported = 0; - pr_config.replay_enable_option |= pr_enable_option_static_screen; - pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq; - - if (!pr_config.replay_timing_sync_supported) - pr_config.replay_enable_option &= ~pr_enable_option_general_ui; debug_flags = (union replay_debug_flags *)&pr_config.debug_flags; debug_flags->u32All = 0; debug_flags->bitfields.visual_confirm = link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY; - link->replay_settings.replay_feature_enabled = true; - init_replay_config(link, &pr_config); return true; } +/* + * amdgpu_dm_link_setup_replay() - configure replay link + * @link: link + * @aconnector: aconnector + * + */ +bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct replay_config *pr_config; + + if (link == NULL || aconnector == NULL) + return false; + + pr_config = &link->replay_settings.config; + + if (!pr_config->replay_supported) + return false; + + pr_config->replay_power_opt_supported = 0x11; + pr_config->replay_smu_opt_supported = false; + pr_config->replay_enable_option |= pr_enable_option_static_screen; + pr_config->replay_support_fast_resync_in_ultra_sleep_mode = aconnector->max_vfreq >= 2 * aconnector->min_vfreq; + pr_config->replay_timing_sync_supported = false; + + if (!pr_config->replay_timing_sync_supported) + pr_config->replay_enable_option &= ~pr_enable_option_general_ui; + + link->replay_settings.replay_feature_enabled = true; + + return true; +} /* * amdgpu_dm_replay_enable() - enable replay f/w @@ -117,51 +145,23 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac */ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) { - uint64_t state; - unsigned int retry_count; bool replay_active = true; - const unsigned int max_retry = 1000; - bool force_static = true; struct dc_link *link = NULL; - if (stream == NULL) 
return false; link = stream->link; - if (link == NULL) - return false; - - link->dc->link_srv->edp_setup_replay(link, stream); - - link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL); - - link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL); - - if (wait == true) { - - for (retry_count = 0; retry_count <= max_retry; retry_count++) { - dc_link_get_replay_state(link, &state); - if (replay_active) { - if (state != REPLAY_STATE_0 && - (!force_static || state == REPLAY_STATE_3)) - break; - } else { - if (state == REPLAY_STATE_0) - break; - } - udelay(500); - } - - /* assert if max retry hit */ - if (retry_count >= max_retry) - ASSERT(0); - } else { - /* To-do: Add trace log */ + if (link) { + link->dc->link_srv->edp_setup_replay(link, stream); + link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total); + DRM_DEBUG_DRIVER("Enabling replay...\n"); + link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL); + return true; } - return true; + return false; } /* @@ -172,12 +172,31 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) */ bool amdgpu_dm_replay_disable(struct dc_stream_state *stream) { + bool replay_active = false; + struct dc_link *link = NULL; - if (stream->link) { + if (stream == NULL) + return false; + + link = stream->link; + + if (link) { DRM_DEBUG_DRIVER("Disabling replay...\n"); - stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL); + link->dc->link_srv->edp_set_replay_allow_active(stream->link, &replay_active, true, false, NULL); return true; } return false; } + +/* + * amdgpu_dm_replay_disable_all() - disable replay f/w + * if replay is enabled on any stream + * + * Return: true if success + */ +bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm) +{ + DRM_DEBUG_DRIVER("Disabling replay if replay is enabled on any stream\n"); + return dc_set_replay_allow_active(dm->dc, false); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h index 01cba3cd6246..f0d30eb47312 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h @@ -40,7 +40,9 @@ enum replay_enable_option { bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable); -bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector); +bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector); +bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector); bool amdgpu_dm_replay_disable(struct dc_stream_state *stream); +bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm); #endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index d9e33c6bccd9..0005f5f8f34f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -52,4 +52,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc func_name, line); } +void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx) +{ +} + +void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx) +{ +} + /**** power component interfaces ****/ diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index 0f580ea37576..133af994a08c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -37,7 +37,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_encoder.h> #include <drm/drm_atomic.h> -#include "dcn10/dcn10_optc.h" +#include "dc/inc/hw/optc.h" #include "dc/inc/core_types.h" diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c new file mode 100644 index 000000000000..08c494a7a21b --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services_types.h" + +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_wb.h" +#include "amdgpu_display.h" +#include "dc.h" + +#include <drm/drm_edid.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_modeset_helper_vtables.h> + +static const u32 amdgpu_dm_wb_formats[] = { + DRM_FORMAT_XRGB2101010, +}; + +static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_framebuffer *fb; + const struct drm_display_mode *mode = &crtc_state->mode; + bool found = false; + uint8_t i; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", + fb->width, fb->height); + return -EINVAL; + } + + for (i = 0; i < sizeof(amdgpu_dm_wb_formats) / sizeof(u32); i++) { + if (fb->format->format == amdgpu_dm_wb_formats[i]) + found = true; + } + + if (!found) { + DRM_DEBUG_KMS("Invalid pixel format %p4cc\n", + &fb->format->format); + return -EINVAL; + } + + return 0; +} + + +static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector) +{ + /* Maximum resolution supported by DWB */ + return drm_add_modes_noedid(connector, 3840, 2160); +} + +static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector, + struct drm_writeback_job *job) +{ + struct amdgpu_framebuffer *afb; + struct drm_gem_object *obj; + struct amdgpu_device *adev; + struct amdgpu_bo *rbo; + uint32_t domain; + int r; + + if (!job->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + afb = to_amdgpu_framebuffer(job->fb); + obj = job->fb->obj[0]; + rbo = gem_to_amdgpu_bo(obj); + adev = amdgpu_ttm_adev(rbo->tbo.bdev); + + r = amdgpu_bo_reserve(rbo, true); + if (r) { + dev_err(adev->dev, "fail to reserve bo (%d)\n", r); + return r; + } + + r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); + if (r) { + dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); + goto error_unlock; + } + + domain = amdgpu_display_supported_domains(adev, rbo->flags); + + r = amdgpu_bo_pin(rbo, domain); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to pin framebuffer with error %d\n", r); + goto error_unlock; + } + + r = amdgpu_ttm_alloc_gart(&rbo->tbo); + if (unlikely(r != 0)) { + DRM_ERROR("%p bind failed\n", rbo); + goto error_unpin; + } + + amdgpu_bo_unreserve(rbo); + + afb->address = amdgpu_bo_gpu_offset(rbo); + + amdgpu_bo_ref(rbo); + + return 0; + +error_unpin: + amdgpu_bo_unpin(rbo); + +error_unlock: + amdgpu_bo_unreserve(rbo); + return r; +} + +static void amdgpu_dm_wb_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct amdgpu_bo *rbo; + int r; + + if (!job->fb) + return; + + rbo = gem_to_amdgpu_bo(job->fb->obj[0]); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) { + DRM_ERROR("failed to reserve rbo before unpin\n"); + return; + } + + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + amdgpu_bo_unref(&rbo); +} + +static const struct drm_encoder_helper_funcs amdgpu_dm_wb_encoder_helper_funcs = { + .atomic_check = amdgpu_dm_wb_encoder_atomic_check, +}; + +static const struct drm_connector_funcs amdgpu_dm_wb_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = amdgpu_dm_connector_funcs_reset, + 
.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs amdgpu_dm_wb_conn_helper_funcs = { + .get_modes = amdgpu_dm_wb_connector_get_modes, + .prepare_writeback_job = amdgpu_dm_wb_prepare_job, + .cleanup_writeback_job = amdgpu_dm_wb_cleanup_job, +}; + +int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_wb_connector *wbcon, + uint32_t link_index) +{ + struct dc *dc = dm->dc; + struct dc_link *link = dc_get_link_at_index(dc, link_index); + int res = 0; + + wbcon->link = link; + + drm_connector_helper_add(&wbcon->base.base, &amdgpu_dm_wb_conn_helper_funcs); + + res = drm_writeback_connector_init(&dm->adev->ddev, &wbcon->base, + &amdgpu_dm_wb_connector_funcs, + &amdgpu_dm_wb_encoder_helper_funcs, + amdgpu_dm_wb_formats, + ARRAY_SIZE(amdgpu_dm_wb_formats), + amdgpu_dm_get_encoder_crtc_mask(dm->adev)); + + if (res) + return res; + /* + * Some of the properties below require access to state, like bpc. + * Allocate some default initial connector state with our reset helper. + */ + if (wbcon->base.base.funcs->reset) + wbcon->base.base.funcs->reset(&wbcon->base.base); + + return 0; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h new file mode 100644 index 000000000000..13d31c857dee --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_WB_H__ +#define __AMDGPU_DM_WB_H__ + +#include <drm/drm_writeback.h> + +int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_wb_connector *dm_wbcon, + uint32_t link_index); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 3a169b78e7e4..4e9fb1742877 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -22,7 +22,7 @@ # # Makefile for Display Core (dc) component. 
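Aside on the writeback connector added above: its atomic check rejects a job whenever the framebuffer size differs from the CRTC mode, or the pixel format is missing from the (currently single-entry) format table; the init path sizes the same table with ARRAY_SIZE. A minimal standalone sketch of that gate, with illustrative names rather than the driver's structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* fourcc helper; the value below matches DRM_FORMAT_XRGB2101010 ('XR30'). */
#define FOURCC(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

static const uint32_t wb_formats[] = {
	FOURCC('X', 'R', '3', '0'),
};

/* true when the buffer matches the mode exactly and uses a listed format */
static bool wb_job_valid(uint32_t fb_w, uint32_t fb_h, uint32_t fb_fourcc,
			 uint32_t mode_w, uint32_t mode_h)
{
	size_t i;

	if (fb_w != mode_w || fb_h != mode_h)
		return false;	/* writeback buffer must match the CRTC exactly */

	for (i = 0; i < ARRAY_SIZE(wb_formats); i++)
		if (fb_fourcc == wb_formats[i])
			return true;

	return false;
}

The sketch returns on the first match; the driver's loop instead latches a found flag and keeps scanning, which is equivalent for this table.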
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc +DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp ifdef CONFIG_DRM_AMD_DC_FP @@ -34,12 +34,8 @@ DC_LIBS += dcn21 DC_LIBS += dcn201 DC_LIBS += dcn30 DC_LIBS += dcn301 -DC_LIBS += dcn302 -DC_LIBS += dcn303 DC_LIBS += dcn31 DC_LIBS += dcn314 -DC_LIBS += dcn315 -DC_LIBS += dcn316 DC_LIBS += dcn32 DC_LIBS += dcn321 DC_LIBS += dcn35 @@ -51,7 +47,6 @@ DC_LIBS += dce120 DC_LIBS += dce112 DC_LIBS += dce110 -DC_LIBS += dce100 DC_LIBS += dce80 ifdef CONFIG_DRM_AMD_DC_SI @@ -65,7 +60,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI include $(AMD_DC) DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ -dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o +dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o DISPLAY_CORE += dc_vm_helper.o diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index e295a839ab47..bd1f60ecaba4 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -101,9 +101,44 @@ void convert_float_matrix( } } +static struct fixed31_32 int_frac_to_fixed_point(uint16_t arg, + uint8_t integer_bits, + uint8_t fractional_bits) +{ + struct fixed31_32 result; + uint16_t sign_mask = 1 << (fractional_bits + integer_bits); + uint16_t value_mask = sign_mask - 1; + + result.value = (long long)(arg & value_mask) << + (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits); + + if (arg & sign_mask) + result = dc_fixpt_neg(result); + + return result; +} + +/** + * convert_hw_matrix - converts HW values into fixed31_32 matrix. + * @matrix: fixed point 31.32 matrix + * @reg: array of register values + * @buffer_size: size of the array of register values + * + * Converts HW register spec defined format S2D13 into a fixed-point 31.32 + * matrix. 
+ */ +void convert_hw_matrix(struct fixed31_32 *matrix, + uint16_t *reg, + uint32_t buffer_size) +{ + for (int i = 0; i < buffer_size; ++i) + matrix[i] = int_frac_to_fixed_point(reg[i], 2, 13); +} + static uint32_t find_gcd(uint32_t a, uint32_t b) { - uint32_t remainder = 0; + uint32_t remainder; + while (b != 0) { remainder = a % b; a = b; diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h index 81da4e6f7a1a..a433cef78496 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.h +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h @@ -41,6 +41,10 @@ void convert_float_matrix( void reduce_fraction(uint32_t num, uint32_t den, uint32_t *out_num, uint32_t *out_den); +void convert_hw_matrix(struct fixed31_32 *matrix, + uint16_t *reg, + uint32_t buffer_size); + static inline unsigned int log_2(unsigned int num) { return ilog2(num); diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index f2dfa96f9ef5..b30c2cdc1a61 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -23,8 +23,6 @@ * */ -#include <linux/slab.h> - #include "resource.h" #include "dm_services.h" #include "dce_calcs.h" @@ -94,7 +92,7 @@ static void calculate_bandwidth( const uint32_t s_high = 7; const uint32_t dmif_chunk_buff_margin = 1; - uint32_t max_chunks_fbc_mode; + uint32_t max_chunks_fbc_mode = 0; int32_t num_cursor_lines; int32_t i, j, k; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 6450853fea94..bc16db69a663 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -44,8 +44,6 @@ #include "bios_parser_common.h" -#include "dc.h" - #define THREE_PERCENT_OF_10000 300 #define LAST_RECORD_TYPE 0xff @@ -1731,6 +1729,7 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1( return 0; } + /** * get_ss_entry_number_from_internal_ss_info_tbl_V3_1 * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 7cdb1a8a0ba0..9fe0020bcb9c 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1014,13 +1014,20 @@ static enum bp_result get_ss_info_v4_5( DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage); break; case AS_SIGNAL_TYPE_DISPLAY_PORT: - ss_info->spread_spectrum_percentage = + if (bp->base.integrated_info) { + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage); + ss_info->spread_spectrum_percentage = + bp->base.integrated_info->gpuclk_ss_percentage; + ss_info->type.CENTER_MODE = + bp->base.integrated_info->gpuclk_ss_type; + } else { + ss_info->spread_spectrum_percentage = disp_cntl_tbl->dp_ss_percentage; - ss_info->spread_spectrum_range = + ss_info->spread_spectrum_range = disp_cntl_tbl->dp_ss_rate_10hz * 10; - if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE) - ss_info->type.CENTER_MODE = true; - + if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE) + ss_info->type.CENTER_MODE = true; + } DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage); break; case AS_SIGNAL_TYPE_GPU_PLL: @@ -1587,8 +1594,6 @@ static 
bool bios_parser_is_device_id_supported( return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0; break; } - - return false; } static uint32_t bios_parser_get_ss_entry_number( @@ -1691,7 +1696,7 @@ static enum bp_result bios_parser_enable_disp_power_gating( static enum bp_result bios_parser_enable_lvtma_control( struct dc_bios *dcb, uint8_t uc_pwr_on, - uint8_t panel_instance, + uint8_t pwrseq_instance, uint8_t bypass_panel_control_wait) { struct bios_parser *bp = BP_FROM_DCB(dcb); @@ -1699,7 +1704,7 @@ static enum bp_result bios_parser_enable_lvtma_control( if (!bp->cmd_tbl.enable_lvtma_control) return BP_RESULT_FAILURE; - return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait); + return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, pwrseq_instance, bypass_panel_control_wait); } static bool bios_parser_is_accelerated_mode( @@ -1843,19 +1848,21 @@ static enum bp_result get_firmware_info_v3_2( /* Vega12 */ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); if (!smu_info_v3_2) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; } else if (revision.minor == 3) { /* Vega20 */ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); if (!smu_info_v3_3) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; } @@ -2214,22 +2221,22 @@ static enum bp_result bios_parser_get_disp_connector_caps_info( switch (bp->object_info_tbl.revision.minor) { case 4: - default: - object = get_bios_object(bp, object_id); - - if (!object) - return BP_RESULT_BADINPUT; - - record = get_disp_connector_caps_record(bp, object); - if (!record) - return BP_RESULT_NORECORD; - - info->INTERNAL_DISPLAY = - (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0; - info->INTERNAL_DISPLAY_BL = - (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0; - break; - case 5: + default: + object = get_bios_object(bp, object_id); + + if (!object) + return BP_RESULT_BADINPUT; + + record = get_disp_connector_caps_record(bp, object); + if (!record) + return BP_RESULT_NORECORD; + + info->INTERNAL_DISPLAY = + (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0; + info->INTERNAL_DISPLAY_BL = + (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 
1 : 0; + break; + case 5: object_path_v3 = get_bios_object_from_path_v3(bp, object_id); if (!object_path_v3) @@ -2391,7 +2398,6 @@ static enum bp_result get_vram_info_v30( return result; } - /* * get_integrated_info_v11 * @@ -2416,10 +2422,11 @@ static enum bp_result get_integrated_info_v11( info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); if (info_v11 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v11->gpucapinfo); /* @@ -2631,11 +2638,12 @@ static enum bp_result get_integrated_info_v2_1( info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); if (info_v2_1 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_1->gpucapinfo); /* @@ -2793,11 +2801,11 @@ static enum bp_result get_integrated_info_v2_2( info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); - if (info_v2_2 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_2->gpucapinfo); /* @@ -2814,6 +2822,8 @@ static enum bp_result get_integrated_info_v2_2( info->ma_channel_number = info_v2_2->umachannelnumber; info->dp_ss_control = le16_to_cpu(info_v2_2->reserved1); + info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage; + info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type; for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) { info->ext_disp_conn_info.gu_id[i] = @@ -2936,6 +2946,7 @@ static enum bp_result construct_integrated_info( result = get_integrated_info_v2_1(bp, info); break; case 2: + case 3: result = get_integrated_info_v2_2(bp, info); break; default: @@ -3323,27 +3334,28 @@ static enum bp_result get_bracket_layout_record( DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n"); return BP_RESULT_BADINPUT; } + tbl = &bp->object_info_tbl; v1_4 = tbl->v1_4; v1_5 = tbl->v1_5; result = BP_RESULT_NORECORD; switch (bp->object_info_tbl.revision.minor) { - case 4: - default: - for (i = 0; i < v1_4->number_of_path; ++i) { - if (bracket_layout_id == - v1_4->display_path[i].display_objid) { - result = update_slot_layout_info(dcb, i, slot_layout_info); - break; - } + case 4: + default: + for (i = 0; i < v1_4->number_of_path; ++i) { + if (bracket_layout_id == v1_4->display_path[i].display_objid) { + result = update_slot_layout_info(dcb, i, slot_layout_info); + break; } - break; - case 5: - for (i = 0; i < v1_5->number_of_path; ++i) - result = update_slot_layout_info_v2(dcb, i, slot_layout_info); - break; + } + break; + case 5: + for (i = 0; i < v1_5->number_of_path; ++i) + result = update_slot_layout_info_v2(dcb, i, slot_layout_info); + break; } + return result; } @@ -3352,9 +3364,7 @@ static enum bp_result bios_get_board_layout_info( struct board_layout_info *board_layout_info) { unsigned int i; - struct bios_parser *bp; - static enum bp_result record_result; unsigned int max_slots; @@ -3364,7 +3374,6 @@ static 
enum bp_result bios_get_board_layout_info( 0, 0 }; - bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { @@ -3545,7 +3554,6 @@ static const struct dc_vbios_funcs vbios_funcs = { .bios_parser_destroy = firmware_parser_destroy, .get_board_layout_info = bios_get_board_layout_info, - /* TODO: use this fn in hw init?*/ .pack_data_tables = bios_parser_pack_data_tables, .get_atom_dc_golden_table = bios_get_atom_dc_golden_table, diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 818a529cacc3..2bcae0643e61 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -37,7 +37,7 @@ #define EXEC_BIOS_CMD_TABLE(command, params)\ (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GetIndexIntoMasterTable(COMMAND, command), \ - (uint32_t *)&params) == 0) + (uint32_t *)&params, sizeof(params)) == 0) #define BIOS_CMD_TABLE_REVISION(command, frev, crev)\ amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ @@ -399,7 +399,7 @@ static enum bp_result transmitter_control_v1_6( static void init_transmitter_control(struct bios_parser *bp) { uint8_t frev; - uint8_t crev; + uint8_t crev = 0; if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl, frev, crev) == false) diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 90a02d7bd3da..cc000833d300 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -49,7 +49,7 @@ #define EXEC_BIOS_CMD_TABLE(fname, params)\ (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GET_INDEX_INTO_MASTER_TABLE(command, fname), \ - (uint32_t *)&params) == 0) + (uint32_t *)&params, sizeof(params)) == 0) #define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\ amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ @@ -123,7 +123,7 @@ static void encoder_control_dmcub( sizeof(cmd.digx_encoder_control.header); cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig; - dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result encoder_control_digx_v1_5( @@ -225,7 +225,7 @@ static enum bp_result transmitter_control_fallback( static void init_transmitter_control(struct bios_parser *bp) { uint8_t frev; - uint8_t crev; + uint8_t crev = 0; BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev); @@ -259,7 +259,7 @@ static void transmitter_control_dmcub( sizeof(cmd.dig1_transmitter_control.header); cmd.dig1_transmitter_control.transmitter_control.dig = *dig; - dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result transmitter_control_v1_6( @@ -321,7 +321,7 @@ static void transmitter_control_dmcub_v1_7( sizeof(cmd.dig1_transmitter_control.header); cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig; - dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result transmitter_control_v1_7( @@ -429,7 +429,7 @@ static void set_pixel_clock_dmcub( sizeof(cmd.set_pixel_clock.header);
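Back in the conversion.c hunk, the new int_frac_to_fixed_point() decodes the S2D13 register layout as sign-magnitude: bit 15 is the sign, bits 14:13 the integer part, bits 12:0 the fraction, so one LSB is 1/8192 and the magnitude tops out just under 4.0. A hedged standalone sketch of the same decode, returning a double in place of the driver's fixed31_32:

#include <assert.h>
#include <stdint.h>

/* Sign-magnitude S2D13: bit 15 sign, bits 14:13 integer, bits 12:0 fraction. */
static double s2d13_to_double(uint16_t reg)
{
	double magnitude = (reg & 0x7fff) / 8192.0;	/* units of 2^-13 */

	return (reg & 0x8000) ? -magnitude : magnitude;
}

int main(void)
{
	assert(s2d13_to_double(0x2000) == 1.0);		/* 0x2000 = 8192/8192 */
	assert(s2d13_to_double(0xa000) == -1.0);	/* same magnitude, sign bit set */
	assert(s2d13_to_double(0x0001) == 1.0 / 8192.0);
	return 0;
}

convert_hw_matrix() then simply applies this decode across a register array, which is why it only needs the buffer size and the fixed (2, 13) split.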
cmd.set_pixel_clock.pixel_clock.clk = *clk; - dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result set_pixel_clock_v7( @@ -796,7 +796,7 @@ static void enable_disp_power_gating_dmcub( sizeof(cmd.enable_disp_power_gating.header); cmd.enable_disp_power_gating.power_gating.pwr = *pwr; - dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_disp_power_gating_v2_1( @@ -976,7 +976,7 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id) static enum bp_result enable_lvtma_control( struct bios_parser *bp, uint8_t uc_pwr_on, - uint8_t panel_instance, + uint8_t pwrseq_instance, uint8_t bypass_panel_control_wait); static void init_enable_lvtma_control(struct bios_parser *bp) @@ -989,7 +989,7 @@ static void init_enable_lvtma_control(struct bios_parser *bp) static void enable_lvtma_control_dmcub( struct dc_dmub_srv *dmcub, uint8_t uc_pwr_on, - uint8_t panel_instance, + uint8_t pwrseq_instance, uint8_t bypass_panel_control_wait) { @@ -1002,17 +1002,17 @@ static void enable_lvtma_control_dmcub( DMUB_CMD__VBIOS_LVTMA_CONTROL; cmd.lvtma_control.data.uc_pwr_action = uc_pwr_on; - cmd.lvtma_control.data.panel_inst = - panel_instance; + cmd.lvtma_control.data.pwrseq_inst = + pwrseq_instance; cmd.lvtma_control.data.bypass_panel_control_wait = bypass_panel_control_wait; - dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static enum bp_result enable_lvtma_control( struct bios_parser *bp, uint8_t uc_pwr_on, - uint8_t panel_instance, + uint8_t pwrseq_instance, uint8_t bypass_panel_control_wait) { enum bp_result result = BP_RESULT_FAILURE; @@ -1021,7 +1021,7 @@ static enum bp_result enable_lvtma_control( bp->base.ctx->dc->debug.dmub_command_table) { enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv, uc_pwr_on, - panel_instance, + pwrseq_instance, bypass_panel_control_wait); return BP_RESULT_OK; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h index b6d09bf6cf72..41c8c014397f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h @@ -96,7 +96,7 @@ struct cmd_tbl { struct bios_parser *bp, uint8_t id); enum bp_result (*enable_lvtma_control)(struct bios_parser *bp, uint8_t uc_pwr_on, - uint8_t panel_instance, + uint8_t pwrseq_instance, uint8_t bypass_panel_control_wait); }; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 9d347960e2b0..117fc6d4c1de 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -81,6 +81,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2( case DCN_VERSION_3_2: case DCN_VERSION_3_21: case DCN_VERSION_3_5: + case DCN_VERSION_3_51: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 3e73c4e59d40..a2b4ff2cff16 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -23,12 +23,11 @@ * */ -#include <linux/slab.h> - #include "dal_asic_id.h" #include "dc_types.h" #include 
"dccg.h" #include "clk_mgr_internal.h" +#include "dc_state_priv.h" #include "link.h" #include "dce100/dce_clk_mgr.h" @@ -63,7 +62,7 @@ int clk_mgr_helper_get_active_display_cnt( /* Don't count SubVP phantom pipes as part of active * display count */ - if (stream->mall_stream_config.type == SUBVP_PHANTOM) + if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; /* @@ -273,7 +272,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } - if (asic_id.chip_id == DEVICE_ID_NV_13FE) { + if (ctx->dce_version == DCN_VERSION_2_01) { dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } @@ -330,16 +329,14 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p } break; case AMDGPU_FAMILY_GC_11_0_0: { - struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); - - if (clk_mgr == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } + struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); - dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); - return &clk_mgr->base; - break; + if (clk_mgr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + return &clk_mgr->base; } case AMDGPU_FAMILY_GC_11_0_1: { @@ -368,7 +365,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p } break; -#endif /* CONFIG_DRM_AMD_DC_FP - Family RV */ +#endif /* CONFIG_DRM_AMD_DC_FP */ default: ASSERT(0); /* Unknown Asic */ break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index 26feefbb8990..2a5dd3a296b2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -131,7 +131,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); int dprefclk_wdivider; int dprefclk_src_sel; - int dp_ref_clk_khz; + int dp_ref_clk_khz = 600000; int target_div; /* ASSERT DP Reference Clock source is from DFS*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 60761ff3cbf1..369421e46c52 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -23,9 +23,6 @@ * */ -#include <linux/slab.h> - -#include "reg_helper.h" #include "core_types.h" #include "clk_mgr_internal.h" #include "rv1_clk_mgr.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c deleted file mode 100644 index 61dd12198a3c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2012-16 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "reg_helper.h" -#include "clk_mgr_internal.h" -#include "rv1_clk_mgr_clk.h" - -#include "ip/Discovery/hwid.h" -#include "ip/Discovery/v1/ip_offset_1.h" -#include "ip/CLK/clk_10_0_default.h" -#include "ip/CLK/clk_10_0_offset.h" -#include "ip/CLK/clk_10_0_reg.h" -#include "ip/CLK/clk_10_0_sh_mask.h" - -#include "dce100/dce_clk_mgr.h" - -#define CLK_BASE_INNER(inst) \ - CLK_BASE__INST ## inst ## _SEG0 - - -#define CLK_REG(reg_name, block, inst)\ - CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## _ ## inst ## _ ## reg_name - -#define REG(reg_name) \ - CLK_REG(reg_name, CLK0, 0) - - -/* Only used by testing framework*/ -void rv1_dump_clk_registers(struct clk_state_registers *regs, struct clk_bypass *bypass, struct clk_mgr *clk_mgr_base) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - - regs->CLK0_CLK8_CURRENT_CNT = REG_READ(CLK0_CLK8_CURRENT_CNT) / 10; //dcf clk - - bypass->dcfclk_bypass = REG_READ(CLK0_CLK8_BYPASS_CNTL) & 0x0007; - if (bypass->dcfclk_bypass < 0 || bypass->dcfclk_bypass > 4) - bypass->dcfclk_bypass = 0; - - - regs->CLK0_CLK8_DS_CNTL = REG_READ(CLK0_CLK8_DS_CNTL) / 10; //dcf deep sleep divider - - regs->CLK0_CLK8_ALLOW_DS = REG_READ(CLK0_CLK8_ALLOW_DS); //dcf deep sleep allow - - regs->CLK0_CLK10_CURRENT_CNT = REG_READ(CLK0_CLK10_CURRENT_CNT) / 10; //dpref clk - - bypass->dispclk_pypass = REG_READ(CLK0_CLK10_BYPASS_CNTL) & 0x0007; - if (bypass->dispclk_pypass < 0 || bypass->dispclk_pypass > 4) - bypass->dispclk_pypass = 0; - - regs->CLK0_CLK11_CURRENT_CNT = REG_READ(CLK0_CLK11_CURRENT_CNT) / 10; //disp clk - - bypass->dprefclk_bypass = REG_READ(CLK0_CLK11_BYPASS_CNTL) & 0x0007; - if (bypass->dprefclk_bypass < 0 || bypass->dprefclk_bypass > 4) - bypass->dprefclk_bypass = 0; - -} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 89b79dd39628..19897fa52e7e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -26,7 +26,6 @@ #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" -#include <linux/delay.h> #include "rv1_clk_mgr_vbios_smu.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 5ee87965a078..bb4f3bd7532e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -503,7 +503,7 @@ static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c index 9c90090e7351..f77840dd051e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c @@ -100,7 +100,15 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, if (clk_mgr_base->clks.dispclk_khz == 0 || dc->debug.force_clock_mode & 0x1) { + /* this is from resume or boot up, if forced_clock cfg option + * used, we bypass program dispclk and DPPCLK, but need set them + * for S3. + */ + force_reset = true; + /* force_clock_mode 0x1: force reset the clock even it is the + * same clock as long as it is in Passive level. + */ dcn2_read_clocks_from_hw_dentist(clk_mgr_base); } @@ -150,11 +158,14 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { if (dpp_clock_lowered) { + // if clock is being lowered, increase DTO before lowering refclk dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn20_update_clocks_update_dentist(clk_mgr, context); } else { + // if clock is being raised, increase refclk before lowering DTO if (update_dppclk || update_dispclk) dcn20_update_clocks_update_dentist(clk_mgr, context); + // always update dtos unless clock is lowered and not safe to lower dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 0c6a4ab72b1d..5ef0879f6ad9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -548,7 +548,7 @@ static void rn_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_l clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } @@ -642,7 +642,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params j = -1; - ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported FCLK DPM levels exceed maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ @@ -707,9 +708,7 @@ void rn_clk_mgr_construct( int is_green_sardine = 0; struct clk_log_info log_info = {0}; -#if defined(CONFIG_DRM_AMD_DC_FP) is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev); -#endif clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn21_funcs; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 8c9d45e5b13b..23b390245b5d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -26,6 +26,10 @@ #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" +#include "dm_helpers.h" + +#include "rn_clk_mgr_vbios_smu.h" + #include <linux/delay.h> #include "renoir_ip_offset.h" @@ -33,8 +37,6 @@ #include "mp/mp_12_0_0_offset.h" #include "mp/mp_12_0_0_sh_mask.h" -#include "rn_clk_mgr_vbios_smu.h" - #define REG(reg_name) \ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) @@ -120,7 +122,10 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, result = rn_smu_wait_for_response(clk_mgr, 10, 200000); - ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd); + if (IS_SMU_TIMEOUT(result)) { + ASSERT(0); + dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); + } /* Actual dispclk set is returned in the parameter register */ return REG_READ(MP1_SMN_C2PMSG_83); @@ -185,10 +190,6 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 3271c8c7905d..8083a553c60e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -474,7 +474,7 @@ static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct d clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } @@ -560,11 +560,19 @@ void dcn3_clk_mgr_construct( dce_clock_read_ss_info(clk_mgr); clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); + if (!clk_mgr->base.bw_params) { + BREAK_TO_DEBUGGER(); + return; + } /* need physical address of table to give to PMFW */ clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), &clk_mgr->wm_range_table_addr); + if (!clk_mgr->wm_range_table) { + BREAK_TO_DEBUGGER(); + return; + } } void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c index bdbf18306698..3253115a153d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c @@ -23,7 +23,6 @@ * */ -#include <linux/delay.h> #include "dcn30_clk_mgr_smu_msg.h" #include "clk_mgr_internal.h" @@ -54,6 +53,7 @@ */ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) { + const uint32_t initial_max_retries = max_retries; uint32_t reg = 0; do { @@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal 
*clk_mgr, un /* handle DALSMC_Result_CmdRejectedBusy? */ - /* Log? */ + TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); return reg; } @@ -89,6 +89,8 @@ static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + result = dcn30_smu_wait_for_response(clk_mgr, 10, 200000); if (IS_SMU_TIMEOUT(result)) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c index e4f96b6fd79d..b4fb17b7a096 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c @@ -29,6 +29,7 @@ #include <linux/delay.h> #include "dcn301_smu.h" +#include "dm_helpers.h" #include "vangogh_ip_offset.h" @@ -120,7 +121,10 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000); - ASSERT(result == VBIOSSMC_Result_OK); + if (IS_SMU_TIMEOUT(result)) { + ASSERT(0); + dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); + } /* Actual dispclk set is returned in the parameter register */ return REG_READ(MP1_SMN_C2PMSG_83); @@ -180,10 +184,6 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index a5489fe6875f..191d8b969d19 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta int i; for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) { + if (i >= VG_NUM_DCFCLK_DPM_LEVELS) + break; if (clock_table->SocVoltage[i] == voltage) return clock_table->DcfClocks[i]; } @@ -564,7 +566,8 @@ static void vg_clk_mgr_helper_populate_bw_params( j = -1; - ASSERT(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported FCLK DPM levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 3db4ef564b99..12a7752758b8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -253,7 +253,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) @@ -562,7 +562,8 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk j = -1; - ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported 
pstate levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 32279c5db724..f201628e4e98 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -23,7 +23,6 @@ * */ -#include <linux/delay.h> #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" @@ -202,10 +201,6 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 7326b7565846..a84f1e376dee 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -87,6 +87,20 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L +#define regCLK1_CLK2_BYPASS_CNTL 0x029c +#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0 + +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0 +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10 +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L +#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L + +#define regCLK6_0_CLK6_spll_field_8 0x464b +#define regCLK6_0_CLK6_spll_field_8_BASE_IDX 0 + +#define CLK6_0_CLK6_spll_field_8__spll_ssc_en__SHIFT 0xd +#define CLK6_0_CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L + #define REG(reg_name) \ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) @@ -131,35 +145,63 @@ static int dcn314_get_active_display_cnt_wa( return display_count; } -static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) +static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, + bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = safe_to_lower + ? 
&context->res_ctx.pipe_ctx[i] + : &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { - struct stream_encoder *stream_enc = pipe->stream_res.stream_enc; - if (disable) { - if (stream_enc && stream_enc->funcs->disable_fifo) - pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc); + if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); - pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); } else { pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); - - if (stream_enc && stream_enc->funcs->enable_fifo) - pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc); } } } } +bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + uint32_t ssc_enable; + + REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable); + + return ssc_enable == 1; +} + +void dcn314_init_clocks(struct clk_mgr *clk_mgr) +{ + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; + clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; + + // to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk + if (dcn314_is_spll_ssc_enabled(clk_mgr)) + clk_mgr->dp_dto_source_clock_in_khz = + dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz); + else + clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz; +} + void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) @@ -252,11 +294,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - dcn314_disable_otg_wa(clk_mgr_base, context, true); + dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - dcn314_disable_otg_wa(clk_mgr_base, context, false); + dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; } @@ -284,7 +326,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) @@ -436,6 +478,11 @@ static DpmClocks314_t dummy_clocks; static struct dcn314_watermarks dummy_wms = { 0 }; +static struct dcn314_ss_info_table ss_info_table = { + .ss_divider = 1000, + .ss_percentage = {0, 0, 375, 375, 375} +}; + static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table) { int i, num_valid_sets; @@ -708,13 +755,31 @@ static struct clk_mgr_funcs 
dcn314_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn314_update_clocks, - .init_clocks = dcn31_init_clocks, + .init_clocks = dcn314_init_clocks, .enable_pme_wa = dcn314_enable_pme_wa, .are_clock_states_equal = dcn314_are_clock_states_equal, .notify_wm_ranges = dcn314_notify_wm_ranges }; extern struct clk_mgr_funcs dcn3_fpga_funcs; +static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) +{ + uint32_t clock_source; + //uint32_t ssc_enable; + + REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); + //REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable); + + if (dcn314_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) { + clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; + + if (clk_mgr->dprefclk_ss_percentage != 0) { + clk_mgr->ss_on_dprefclk = true; + clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider; + } + } +} + void dcn314_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn314 *clk_mgr, @@ -782,6 +847,7 @@ void dcn314_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); + dcn314_read_ss_info_from_lut(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ clk_mgr->base.base.bw_params = &dcn314_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h index 171f84340eb2..002c28e80720 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h @@ -28,6 +28,8 @@ #define __DCN314_CLK_MGR_H__ #include "clk_mgr_internal.h" +#define DCN314_NUM_CLOCK_SOURCES 5 + struct dcn314_watermarks; struct dcn314_smu_watermark_set { @@ -40,9 +42,18 @@ struct clk_mgr_dcn314 { struct dcn314_smu_watermark_set smu_wm_set; }; +struct dcn314_ss_info_table { + uint32_t ss_divider; + uint32_t ss_percentage[DCN314_NUM_CLOCK_SOURCES]; +}; + bool dcn314_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b); +bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base); + +void dcn314_init_clocks(struct clk_mgr *clk_mgr); + void dcn314_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c index 07baa10a8647..c4af406146b7 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c @@ -220,12 +220,6 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", - actual_dcfclk_set_mhz, - actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h index 047d19ea919c..78ca1e5c5e9e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h @@ -37,34 +37,34 @@ typedef enum { } WCK_RATIO_e; typedef struct { - uint32_t FClk; - uint32_t MemClk; - 
uint32_t Voltage; - uint8_t WckRatio; - uint8_t Spare[3]; + uint32_t FClk; + uint32_t MemClk; + uint32_t Voltage; + uint8_t WckRatio; + uint8_t Spare[3]; } DfPstateTable314_t; //Freq in MHz //Voltage in milli volts with 2 fractional bits typedef struct { - uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; - uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; - uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; - uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; - uint32_t VClocks[NUM_VCN_DPM_LEVELS]; - uint32_t DClocks[NUM_VCN_DPM_LEVELS]; - uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; - DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks[NUM_VCN_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; - uint8_t NumDcfClkLevelsEnabled; - uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk - uint8_t NumSocClkLevelsEnabled; - uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk - uint8_t NumDfPstatesEnabled; - uint8_t spare[3]; + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk + uint8_t NumDfPstatesEnabled; + uint8_t spare[3]; - uint32_t MinGfxClk; - uint32_t MaxGfxClk; + uint32_t MinGfxClk; + uint32_t MaxGfxClk; } DpmClocks314_t; struct dcn314_watermarks { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index b2c4f97afc8b..5506cf9b3672 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, */ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; if (safe_to_lower) { + if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { + dcn315_smu_set_dtbclk(clk_mgr, false); + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + } /* check that we're not already in lower */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { display_count = dcn315_get_active_display_cnt_wa(dc, context); @@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, } } } else { + if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { + dcn315_smu_set_dtbclk(clk_mgr, true); + clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + } /* check that we're not already in D0 */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { union display_idle_optimization_u idle_info = { 0 }; @@ -232,7 +240,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, @@ -334,7 +342,7 @@ static struct wm_table lpddr5_wm_table = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, + .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, 
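
A note on the dcn315 hunk above: DTBCLK transitions are now gated on safe_to_lower, so the SMU is asked to turn DTBCLK off only on the lowering pass and on only on the raising pass, mirroring the usual two-phase clock update (raise before the new state applies, lower once it is safe). A compact sketch of that gating with simplified types; smu_set_dtbclk() stands in for dcn315_smu_set_dtbclk():

    #include <stdbool.h>

    struct clk_state { bool dtbclk_en; };

    static void smu_set_dtbclk(bool enable) { (void)enable; /* SMU message elided */ }

    static void update_dtbclk(struct clk_state *cur, const struct clk_state *new,
                              bool safe_to_lower)
    {
        if (safe_to_lower && cur->dtbclk_en && !new->dtbclk_en) {
            smu_set_dtbclk(false);          /* lowering pass: turn off */
            cur->dtbclk_en = new->dtbclk_en;
        } else if (!safe_to_lower && !cur->dtbclk_en && new->dtbclk_en) {
            smu_set_dtbclk(true);           /* raising pass: turn on */
            cur->dtbclk_en = new->dtbclk_en;
        }
    }
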
.sr_enter_plus_exit_time_us = 14.5, .valid = true, @@ -342,7 +350,7 @@ static struct wm_table lpddr5_wm_table = { { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, + .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, @@ -350,7 +358,7 @@ static struct wm_table lpddr5_wm_table = { { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, + .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, @@ -358,7 +366,7 @@ static struct wm_table lpddr5_wm_table = { { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, + .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 1042cf1a3ab0..2d14346b680e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -29,6 +29,7 @@ #include "dm_helpers.h" #include "dcn315_smu.h" #include "mp/mp_13_0_5_offset.h" +#include "logger_types.h" #define MAX_INSTANCE 6 #define MAX_SEGMENT 6 @@ -69,7 +70,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D #define REG_NBIO(reg_name) \ (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name) -#include "logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ CTX->logger @@ -215,10 +215,6 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 09151cc56ce4..20ca7afa9cb4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa( return display_count; } -static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) +static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, + bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = safe_to_lower + ? 
&context->res_ctx.pipe_ctx[i] + : &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL || - dc_is_virtual_signal(pipe->stream->signal))) { + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc)) { if (disable) { - pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); } else pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); @@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - dcn316_disable_otg_wa(clk_mgr_base, context, true); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - dcn316_disable_otg_wa(clk_mgr_base, context, false); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; } @@ -239,7 +244,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, @@ -480,7 +485,8 @@ static void dcn316_clk_mgr_helper_populate_bw_params( j = -1; - ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported pstate levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c index 3ed19197a755..8b82092b91cd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c @@ -189,10 +189,6 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request VBIOSSMC_MSG_SetHardMinDcfclkByFreq, khz_to_mhz_ceil(requested_dcfclk_khz)); -#ifdef DBG - smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -#endif - return actual_dcfclk_set_mhz * 1000; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index a496930b1f9c..ff5fdc7b1198 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -25,23 +25,22 @@ #include "dccg.h" #include "clk_mgr_internal.h" - #include "dcn32/dcn32_clk_mgr_smu_msg.h" #include "dcn20/dcn20_clk_mgr.h" #include "dce100/dce_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" +#include "dcn32/dcn32_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" #include "link.h" - +#include "dc_state_priv.h" #include "atomfirmware.h" #include "smu13_driver_if.h" #include "dcn/dcn_3_2_0_offset.h" #include "dcn/dcn_3_2_0_sh_mask.h" -#include 
"dcn32/dcn32_clk_mgr.h" #include "dml/dcn32/dcn32_fpu.h" #define DCN_BASE__INST0_SEG1 0x000000C0 @@ -217,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950) clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950; + /* DPPCLK */ + dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK, + &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz, + &num_entries_per_clk->num_dppclk_levels); + num_levels = num_entries_per_clk->num_dppclk_levels; + clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK); + //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x + if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950) + clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950; + if (num_entries_per_clk->num_dcfclk_levels && num_entries_per_clk->num_dtbclk_levels && num_entries_per_clk->num_dispclk_levels) @@ -241,13 +250,15 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz); } + for (i = 0; i < num_levels; i++) + if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950) + clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950; + /* Get UCLK, update bounding box */ clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base); - DC_FP_START(); /* WM range table */ dcn32_build_wm_range_table(clk_mgr); - DC_FP_END(); } static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, @@ -388,7 +399,15 @@ static void dcn32_update_clocks_update_dentist( uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider; if (clk_mgr->smu_present) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz)); + /* + * SMU uses discrete dispclk presets. We applied + * the same formula to increase our dispclk_khz + * to the next matching discrete value. By + * contract, we should use the preset dispclk + * floored in MHz to describe the intended clock. + */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, + khz_to_mhz_floor(temp_dispclk_khz)); if (dc->debug.override_dispclk_programming) { REG_GET(DENTIST_DISPCLK_CNTL, @@ -427,7 +446,15 @@ static void dcn32_update_clocks_update_dentist( /* do requested DISPCLK updates*/ if (clk_mgr->smu_present) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz)); + /* + * SMU uses discrete dispclk presets. We applied + * the same formula to increase our dispclk_khz + * to the next matching discrete value. By + * contract, we should use the preset dispclk + * floored in MHz to describe the intended clock. 
+ */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, + khz_to_mhz_floor(clk_mgr->base.clks.dispclk_khz)); if (dc->debug.override_dispclk_programming) { REG_GET(DENTIST_DISPCLK_CNTL, @@ -458,20 +485,58 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) return 0; } -static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr) +static bool dcn32_check_native_scaling(struct pipe_ctx *pipe) +{ + bool is_native_scaling = false; + int width = pipe->plane_state->src_rect.width; + int height = pipe->plane_state->src_rect.height; + + if (pipe->stream->timing.h_addressable == width && + pipe->stream->timing.v_addressable == height && + pipe->plane_state->dst_rect.width == width && + pipe->plane_state->dst_rect.height == height) + is_native_scaling = true; + + return is_native_scaling; +} + +static void dcn32_auto_dpm_test_log( + struct dc_clocks *new_clocks, + struct clk_mgr_internal *clk_mgr, + struct dc_state *context) { - unsigned int dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK - unsigned int dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK - unsigned int dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK - unsigned int dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK - unsigned int dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK - unsigned int fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK + unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg, + fclk_khz_reg, mall_ss_size_bytes; + int dramclk_khz_override, fclk_khz_override, num_fclk_levels; + + struct pipe_ctx *pipe_ctx_list[MAX_PIPES]; + int active_pipe_count = 0; + + for (int i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { + pipe_ctx_list[active_pipe_count] = pipe_ctx; + active_pipe_count++; + } + } + + msleep(5); + + mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes; + + dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK + dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK + dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK + dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK + dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK + fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK // Overrides for these clocks in case there is no p_state change support - int dramclk_khz_override = new_clocks->dramclk_khz; - int fclk_khz_override = new_clocks->fclk_khz; + dramclk_khz_override = new_clocks->dramclk_khz; + fclk_khz_override = new_clocks->fclk_khz; - int num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; + num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; if (!new_clocks->p_state_change_support) { dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000; @@ -488,16 +553,49 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr // // AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n" //////////////////////////////////////////////////////////////////////////// - if (new_clocks && + if (new_clocks && active_pipe_count > 0 && new_clocks->dramclk_khz > 0 && new_clocks->fclk_khz > 0 && new_clocks->dcfclk_khz > 0 && new_clocks->dppclk_khz > 0) { + uint32_t pix_clk_list[MAX_PIPES] = {0}; + int p_state_list[MAX_PIPES] = {0}; + int 
disp_src_width_list[MAX_PIPES] = {0}; + int disp_src_height_list[MAX_PIPES] = {0}; + uint64_t disp_src_refresh_list[MAX_PIPES] = {0}; + bool is_scaled_list[MAX_PIPES] = {0}; + + for (int i = 0; i < active_pipe_count; i++) { + struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i]; + uint64_t refresh_rate; + + pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz; + p_state_list[i] = curr_pipe_ctx->p_state_type; + + refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 + + curr_pipe_ctx->stream->timing.v_total * curr_pipe_ctx->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total); + disp_src_refresh_list[i] = refresh_rate; + + if (curr_pipe_ctx->plane_state) { + is_scaled_list[i] = !(dcn32_check_native_scaling(curr_pipe_ctx)); + disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width; + disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height; + } + } + DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - " "dcfclk:%d - dppclk:%d - dispclk_hw:%d - " "dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - " - "dtbclk_hw:%d - fclk_hw:%d\n", + "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - " + "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - " + "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - " + "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - " + "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - " + "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - " + "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n", dramclk_khz_override, fclk_khz_override, new_clocks->dcfclk_khz, @@ -507,7 +605,14 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg, - fclk_khz_reg); + fclk_khz_reg, + pix_clk_list[0], pix_clk_list[1], pix_clk_list[2], pix_clk_list[3], + mall_ss_size_bytes, + p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3], + disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0], + disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1], + disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2], + disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]); } } @@ -607,8 +712,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, * since we calculate mode support based on softmax being the max UCLK * frequency. 
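
The refresh-rate computation in the loop above is an integer ceiling division: pix_clk_100hz is in 100 Hz units, so the rate is ceil(pix_clk_100hz * 100 / (h_total * v_total)), implemented by adding divisor - 1 to the numerator before the two div_u64() steps. The same arithmetic as a standalone helper (plain division stands in for div_u64() outside the kernel):

    #include <stdint.h>

    /* ceil(pix_clk_100hz * 100 / (h_total * v_total)), i.e. frames per second */
    static uint64_t refresh_hz(uint64_t pix_clk_100hz,
                               uint64_t h_total, uint64_t v_total)
    {
        uint64_t rate = pix_clk_100hz * 100 + h_total * v_total - 1;

        rate /= v_total;    /* div_u64() in the kernel version */
        rate /= h_total;
        return rate;
    }

    /* e.g. pix_clk_100hz = 1485000 (148.5 MHz) with a 2200 x 1125 total -> 60 Hz */
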
*/ - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, - dc->clk_mgr->bw_params->dc_mode_softmax_memclk); + if (dc->debug.disable_dc_mode_overwrite) { + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); + } else + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, + dc->clk_mgr->bw_params->dc_mode_softmax_memclk); } else { dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz); } @@ -641,8 +750,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */ if (clk_mgr_base->clks.p_state_change_support && (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) && - !dc->work_arounds.clock_update_disable_mask.uclk) + !dc->work_arounds.clock_update_disable_mask.uclk) { + if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite) + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, + max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz))); + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)); + } if (clk_mgr_base->clks.num_ways != new_clocks->num_ways && clk_mgr_base->clks.num_ways > new_clocks->num_ways) { @@ -659,7 +773,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; if (clk_mgr->smu_present && !dpp_clock_lowered) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz)); + /* + * SMU uses discrete dppclk presets. We applied + * the same formula to increase our dppclk_khz + * to the next matching discrete value. By + * contract, we should use the preset dppclk + * floored in MHz to describe the intended clock. + */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, + khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz)); update_dppclk = true; } @@ -680,6 +802,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, /* DCCG requires KHz precision for DTBCLK */ clk_mgr_base->clks.ref_dtbclk_khz = dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz)); + dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz); } @@ -689,7 +812,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn32_update_clocks_update_dentist(clk_mgr, context); if (clk_mgr->smu_present) - dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz)); + /* + * SMU uses discrete dppclk presets. We applied + * the same formula to increase our dppclk_khz + * to the next matching discrete value. By + * contract, we should use the preset dppclk + * floored in MHz to describe the intended clock. 
+ */ + dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, + khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz)); } else { /* if clock is being raised, increase refclk before lowering DTO */ if (update_dppclk || update_dispclk) @@ -708,7 +839,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.dispclk_khz / 1000 / 7); if (dc->config.enable_auto_dpm_test_logs) { - dcn32_auto_dpm_test_log(new_clocks, clk_mgr); + dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context); } } @@ -1077,11 +1208,19 @@ void dcn32_clk_mgr_construct( clk_mgr->smu_present = false; clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); + if (!clk_mgr->base.bw_params) { + BREAK_TO_DEBUGGER(); + return; + } /* need physical address of table to give to PMFW */ clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), &clk_mgr->wm_range_table_addr); + if (!clk_mgr->wm_range_table) { + BREAK_TO_DEBUGGER(); + return; + } } void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c index df244b175fdb..f2f60478b1a6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c @@ -49,6 +49,7 @@ */ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) { + const uint32_t initial_max_retries = max_retries; uint32_t reg = 0; do { @@ -62,6 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un udelay(delay_us); } while (max_retries--); + TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); + return reg; } @@ -79,6 +82,8 @@ static bool dcn32_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + /* Wait for response */ if (dcn32_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) { if (param_out) @@ -115,6 +120,8 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m *total_delay_us += delay_us; } while (max_retries--); + TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx); + return reg; } @@ -135,6 +142,8 @@ static bool dcn32_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mgr /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + /* Wait for response */ if (dcn32_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) { if (param_out) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h index a34c258c19dc..5c44ab0e8667 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h @@ -36,12 +36,10 @@ #define DALSMC_MSG_SetCabForUclkPstate 0x12 #define DALSMC_Result_OK 0x1 -void -dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable); -void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); -void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); 
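
The comment blocks repeated through the dcn32 hunks above encode a units contract: DC first rounds its kHz request up to a value matching one of the SMU's discrete presets, but dcn32_smu_set_hard_min_by_freq() takes whole MHz, so the already-matched kHz value must be floored; rounding up again with khz_to_mhz_ceil() could overshoot into the next preset. Minimal sketches of the two helpers, assuming the obvious shapes for their kernel namesakes:

    #include <stdint.h>

    static inline uint32_t khz_to_mhz_ceil(uint32_t khz)
    {
        return (khz + 999) / 1000;
    }

    static inline uint32_t khz_to_mhz_floor(uint32_t khz)
    {
        return khz / 1000;
    }

    /* A 643250 kHz preset: floor -> 643 MHz (the intended preset),
     * ceil -> 644 MHz, which may select the next preset up. */
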
+void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable); void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); +void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index f80917f6153b..6c9b4e6491a5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -50,6 +50,7 @@ #include "dc_dmub_srv.h" #include "link.h" #include "logger_types.h" + #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger @@ -72,6 +73,14 @@ #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L +#define regCLK5_0_CLK5_spll_field_8 0x464b +#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0 + +#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd +#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L + +#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 + #define REG(reg_name) \ (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) @@ -80,12 +89,12 @@ static int dcn35_get_active_display_cnt_wa( struct dc *dc, - struct dc_state *context) + struct dc_state *context, + int *all_active_disps) { - int i, display_count; + int i, display_count = 0; bool tmds_present = false; - display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; @@ -103,7 +112,8 @@ static int dcn35_get_active_display_cnt_wa( link->link_enc->funcs->is_dig_enabled(link->link_enc)) display_count++; } - + if (all_active_disps != NULL) + *all_active_disps = display_count; /* WA for hang on HDMI after display off back on*/ if (display_count == 0 && tmds_present) display_count = 1; @@ -111,32 +121,28 @@ static int dcn35_get_active_display_cnt_wa( return display_count; } -static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) +static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, + bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = safe_to_lower + ? 
&context->res_ctx.pipe_ctx[i] + : &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) { - struct stream_encoder *stream_enc = pipe->stream_res.stream_enc; - + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc)) { if (disable) { - if (stream_enc && stream_enc->funcs->disable_fifo) - pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc); - if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); } else { pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); - - if (stream_enc && stream_enc->funcs->enable_fifo) - pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc); } } } @@ -220,14 +226,19 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; + int display_count = 0; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; + int all_active_disps = 0; if (dc->work_arounds.skip_clock_update) return; + display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps); + if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz) + new_clocks->ref_dtbclk_khz = 600000; + /* * if it is safe to lower, but we are already in the lower state, we don't have to do anything * also if safe to lower is false, we just go in the higher state @@ -241,12 +252,12 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, } if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { - dcn35_smu_set_dtbclk(clk_mgr, false); + if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk) + dcn35_smu_set_dtbclk(clk_mgr, false); clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; } /* check that we're not already in lower */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { - display_count = dcn35_get_active_display_cnt_wa(dc, context); /* if we can go lower, go lower */ if (display_count == 0) clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; @@ -261,8 +272,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { dcn35_smu_set_dtbclk(clk_mgr, true); - dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz); clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; + + dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz); + clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz; } /* check that we're not already in D0 */ @@ -301,26 +314,21 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - dcn35_disable_otg_wa(clk_mgr_base, context, true); + dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - dcn35_disable_otg_wa(clk_mgr_base, context, false); + dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; } - if (!new_clocks->dtbclk_en) { - new_clocks->ref_dtbclk_khz = 600000; - } - /* clock limits are received with 
MHz precision, divide by 1000 to prevent setting clocks at every call */ if (!dc->debug.disable_dtb_ref_clk_switch && - should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000)) { - /* DCCG requires KHz precision for DTBCLK */ - dcn35_smu_set_dtbclk(clk_mgr, true); - - dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz); + should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, + clk_mgr_base->clks.ref_dtbclk_khz / 1000)) { + dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz); + clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz; } if (dpp_clock_lowered) { @@ -344,7 +352,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) @@ -385,19 +393,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base) dcn35_smu_enable_pme_wa(clk_mgr); } -void dcn35_init_clocks(struct clk_mgr *clk_mgr) -{ - uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; - - memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); - - // Assumption is that boot state always supports pstate - clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk - clk_mgr->clks.p_state_change_support = true; - clk_mgr->clks.prev_p_state_change_support = true; - clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; - clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; -} bool dcn35_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b) @@ -419,11 +414,49 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a, } static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, - struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) + struct clk_mgr_dcn35 *clk_mgr) { +} + +static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dc_context *ctx = clk_mgr->base.ctx; + uint32_t ssc_enable; + + REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable); + + return ssc_enable == 1; +} +static void init_clk_states(struct clk_mgr *clk_mgr) +{ + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + + if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD) + clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; + clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; } +void dcn35_init_clocks(struct clk_mgr *clk_mgr) +{ + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + init_clk_states(clk_mgr); + + // to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk + if (dcn35_is_spll_ssc_enabled(clk_mgr)) + clk_mgr->dp_dto_source_clock_in_khz = + dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz); + else + clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz; + +} static struct clk_bw_params 
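
init_clk_states() above zeroes the cached clock state but deliberately saves and restores ref_dtbclk_khz across the memset(), and on new enough PMFW (SMU_VER_THRESHOLD) primes dtbclk_en to true so that the first commit issues a DTBCLK disable request. The save/restore idiom in isolation, with the struct trimmed to the fields involved:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct dc_clocks {
        uint32_t ref_dtbclk_khz;
        bool dtbclk_en;
        bool p_state_change_support;
    };

    static void init_clk_states(struct dc_clocks *clks, bool new_enough_fw)
    {
        uint32_t ref_dtbclk = clks->ref_dtbclk_khz;     /* save */

        memset(clks, 0, sizeof(*clks));

        if (new_enough_fw)
            clks->dtbclk_en = true;     /* first commit will request a disable */
        clks->ref_dtbclk_khz = ref_dtbclk;              /* restore */
        clks->p_state_change_support = true;            /* boot state supports p-state */
    }
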
dcn35_bw_params = { .vram_type = Ddr4MemType, .num_channels = 1, @@ -439,32 +472,32 @@ static struct wm_table ddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, } @@ -476,32 +509,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, } @@ -516,6 +549,23 @@ static struct dcn35_ss_info_table ss_info_table = { .ss_percentage = {0, 0, 375, 375, 375} }; +static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) +{ + struct dc_context *ctx = clk_mgr->base.ctx; + uint32_t clock_source; + + REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); + // If it's DFS mode, clock_source is 0. 
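+	// Only sources with a non-zero ss_percentage entry above carry spread;
+	// sources 0 (the DFS path) and 1 have none, so no adjustment is applied.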
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) { + clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; + + if (clk_mgr->dprefclk_ss_percentage != 0) { + clk_mgr->ss_on_dprefclk = true; + clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider; + } + } +} + static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table) { int i, num_valid_sets; @@ -649,27 +699,50 @@ static unsigned int convert_wck_ratio(uint8_t wck_ratio) return 1; } +static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry) +{ + return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2; +} + static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr, struct integrated_info *bios_info, DpmClocks_t_dcn35 *clock_table) { struct clk_bw_params *bw_params = clk_mgr->base.bw_params; struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1]; - uint32_t max_pstate = 0, max_uclk = 0, max_fclk = 0; - uint32_t min_pstate = 0, max_dispclk = 0, max_dppclk = 0; + uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0; + uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0; + uint32_t num_memps, num_fclk, num_dcfclk; int i; - for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) { - if (is_valid_clock_value(clock_table->MemPstateTable[i].UClk) && - clock_table->MemPstateTable[i].UClk > max_uclk) { - max_uclk = clock_table->MemPstateTable[i].UClk; + /* Determine min/max p-state values. */ + num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS : + clock_table->NumMemPstatesEnabled; + for (i = 0; i < num_memps; i++) { + uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]); + + if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) { + max_dram_speed_mts = dram_speed_mts; max_pstate = i; } } - /* We expect the table to contain at least one valid Uclk entry. */ - ASSERT(is_valid_clock_value(max_uclk)); + min_dram_speed_mts = max_dram_speed_mts; + min_pstate = max_pstate; + + for (i = 0; i < num_memps; i++) { + uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]); + + if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) { + min_dram_speed_mts = dram_speed_mts; + min_pstate = i; + } + } + /* We expect the table to contain at least one valid P-state entry. */ + ASSERT(clock_table->NumMemPstatesEnabled && + is_valid_clock_value(max_dram_speed_mts) && + is_valid_clock_value(min_dram_speed_mts)); /* dispclk and dppclk can be max at any voltage, same number of levels for both */ if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS && @@ -679,47 +752,50 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled); } else { + /* Invalid number of entries in the table from PMFW. 
*/ ASSERT(0); } - if (clock_table->NumFclkLevelsEnabled <= NUM_FCLK_DPM_LEVELS) - max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, - clock_table->NumFclkLevelsEnabled); - for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) { - uint32_t min_uclk = clock_table->MemPstateTable[0].UClk; - int j; + /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */ + ASSERT(clock_table->NumDcfClkLevelsEnabled > 0); - for (j = 1; j < clock_table->NumMemPstatesEnabled; j++) { - if (is_valid_clock_value(clock_table->MemPstateTable[j].UClk) && - clock_table->MemPstateTable[j].UClk < min_uclk && - clock_table->MemPstateTable[j].Voltage <= clock_table->SocVoltage[i]) { - min_uclk = clock_table->MemPstateTable[j].UClk; - min_pstate = j; - } - } + num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS : + clock_table->NumFclkLevelsEnabled; + max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk); + num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS : + clock_table->NumDcfClkLevelsEnabled; + for (i = 0; i < num_dcfclk; i++) { + int j; + + /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--) if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i]) - break; + break; bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz; bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz; bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz; - bw_params->clk_table.entries[i].fclk_mhz = max_fclk; + + /* Now update clocks we do read */ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk; bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage; bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i]; bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i]; bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; - bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio( - clock_table->MemPstateTable[min_pstate].WckRatio); - } + bw_params->clk_table.entries[i].wck_ratio = + convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio); + + /* Dcfclk and Fclk are tied, but at a different ratio */ + bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]); + } /* Make sure to include at least one entry at highest pstate */ if (max_pstate != min_pstate || i == 0) { if (i > MAX_NUM_DPM_LVL - 1) i = MAX_NUM_DPM_LVL - 1; + bw_params->clk_table.entries[i].fclk_mhz = max_fclk; bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk; bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage; @@ -735,6 +811,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk } bw_params->clk_table.num_entries = i--; + /* Make sure all highest clocks are included*/ bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS); bw_params->clk_table.entries[i].dispclk_mhz = @@ -753,6 +830,11 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk bw_params->clk_table.num_entries_per_clk.num_fclk_levels = 
clock_table->NumFclkLevelsEnabled; bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled; bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled; + + /* + * Set any 0 clocks to max default setting. Not an issue for + * power since we aren't doing switching in such case anyway + */ for (i = 0; i < bw_params->clk_table.num_entries; i++) { if (!bw_params->clk_table.entries[i].fclk_mhz) { bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz; @@ -801,41 +883,13 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) struct dc_state *context = dc->current_state; if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { - display_count = dcn35_get_active_display_cnt_wa(dc, context); + display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL); /* if we can go lower, go lower */ if (display_count == 0) clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; } } -static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr); - - if (dc->config.disable_ips == 0) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { - val |= DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } - - if (!allow_idle) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } - - dcn35_smu_write_ips_scratch(clk_mgr, val); -} - static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -855,16 +909,9 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) return ips_supported; } -static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - - return dcn35_smu_read_ips_scratch(clk_mgr); -} - static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr) { - dcn35_init_clocks(clk_mgr); + init_clk_states(clk_mgr); /* TODO: Implement the functions and remove the ifndef guard */ } @@ -949,8 +996,6 @@ static struct clk_mgr_funcs dcn35_funcs = { .set_low_power_state = dcn35_set_low_power_state, .exit_low_power_state = dcn35_exit_low_power_state, .is_ips_supported = dcn35_is_ips_supported, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state }; struct clk_mgr_funcs dcn35_fpga_funcs = { @@ -960,21 +1005,6 @@ struct clk_mgr_funcs dcn35_fpga_funcs = { .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, }; -static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) -{ - uint32_t clock_source; - struct dc_context *ctx = clk_mgr->base.ctx; - - REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); - - clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; - - if (clk_mgr->dprefclk_ss_percentage != 0) { - clk_mgr->ss_on_dprefclk = true; - clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider; - } -} - void dcn35_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn35 *clk_mgr, @@ -982,7 +1012,6 @@ void 
dcn35_clk_mgr_construct( struct dccg *dccg) { struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 }; - struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn35_funcs; @@ -1035,15 +1064,11 @@ void dcn35_clk_mgr_construct( dcn35_bw_params.wm_table = ddr5_wm_table; } /* Saved clocks configured at boot for debug purposes */ - dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr); clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base); - clk_mgr->base.base.clks.ref_dtbclk_khz = dcn35_smu_get_dtbclk(&clk_mgr->base); - - if (!clk_mgr->base.base.clks.ref_dtbclk_khz) - dcn35_smu_set_dtbclk(&clk_mgr->base, true); + clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; - clk_mgr->base.base.clks.dtbclk_en = true; dce_clock_read_ss_info(&clk_mgr->base); /*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/ @@ -1114,7 +1139,7 @@ void dcn35_clk_mgr_construct( dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, smu_dpm_clks.dpm_clks); - if (ctx->dc->config.disable_ips == 0) { + if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) { bool ips_support = false; /*avoid call pmfw at init*/ @@ -1124,10 +1149,9 @@ void dcn35_clk_mgr_construct( ctx->dc->debug.disable_dpp_power_gate = false; ctx->dc->debug.disable_hubp_power_gate = false; ctx->dc->debug.disable_dsc_power_gate = false; - ctx->dc->debug.disable_hpo_power_gate = false; } else { /*let's reset the config control flag*/ - ctx->dc->config.disable_ips = 1; /*pmfw not support it, disable it all*/ + ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/ } } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index b6b8c3ca1572..1399b41dfd1c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -116,6 +116,9 @@ static uint32_t dcn35_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un msleep(delay_us/1000); else if (delay_us > 0) udelay(delay_us); + + if (clk_mgr->base.ctx->dc->debug.disable_timeout) + max_retries++; } while (max_retries--); return res_val; @@ -276,7 +279,7 @@ void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, u clk_mgr, VBIOSSMC_MSG_SetDisplayIdleOptimizations, idle_info); - smu_print("VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %d\n", idle_info); + smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %x\n", __func__, idle_info); } void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) @@ -295,7 +298,7 @@ void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool e clk_mgr, VBIOSSMC_MSG_SetDisplayIdleOptimizations, idle_info.data); - smu_print("dcn35_smu_enable_phy_refclk_pwrdwn = %d\n", enable ? 1 : 0); + smu_print("%s smu_enable_phy_refclk_pwrdwn = %d\n", __func__, enable ? 
1 : 0); } void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) @@ -307,6 +310,7 @@ void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) clk_mgr, VBIOSSMC_MSG_UpdatePmeRestore, 0); + smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__); } void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high) @@ -347,7 +351,7 @@ void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support) { - unsigned int msg_id, param; + unsigned int msg_id, param, retv; if (!clk_mgr->smu_present) return; @@ -357,27 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst case DCN_ZSTATE_SUPPORT_ALLOW: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 9) | (1 << 8); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = 0x%x\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_DISALLOW: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = 0; + smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = 0x%x\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = 0x%x\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 8); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = 0x%x\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 8); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = 0x%x\n", __func__, param); break; default: //DCN_ZSTATE_SUPPORT_UNKNOWN @@ -387,11 +396,11 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst } - dcn35_smu_send_msg_with_param( + retv = dcn35_smu_send_msg_with_param( clk_mgr, msg_id, param); - smu_print("dcn35_smu_set_zstate_support msg_id = %d, param = %d\n", msg_id, param); + smu_print("%s: msg_id = %d, param = 0x%x, return = 0x%x\n", __func__, msg_id, param, retv); } int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr) @@ -405,7 +414,7 @@ int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_GetDprefclkFreq, 0); - smu_print("dcn35_smu_get_DPREF clk = %d mhz\n", dprefclk); + smu_print("%s: SMU DPREF clk = %d mhz\n", __func__, dprefclk); return dprefclk * 1000; } @@ -420,7 +429,7 @@ int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_GetDtbclkFreq, 0); - smu_print("dcn35_smu_get_dtbclk = %d mhz\n", dtbclk); + smu_print("%s: get_dtbclk = %dmhz\n", __func__, dtbclk); return dtbclk * 1000; } /* Arg = 1: Turn DTB on; 0: Turn DTB CLK OFF. when it is on, it is 600MHZ */ @@ -433,39 +442,48 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable) clk_mgr, VBIOSSMC_MSG_SetDtbClk, enable); - smu_print("dcn35_smu_set_dtbclk = %d \n", enable ? 1 : 0); + smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ? 1 : 0); } void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) { + if (!clk_mgr->smu_present) + return; + dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown, enable); + smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 
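
The dcn35 SMU wrappers above now share a guard pattern: if no SMU is present, return early (0 for the query variants) instead of issuing the message, and log the return value on the way out. The shape of it, reduced to essentials (names and message ID are illustrative):

    #include <stdbool.h>

    static int send_msg_with_param(int msg_id, int param)
    {
        (void)msg_id; (void)param;
        return 1;   /* register handshake elided */
    }

    /* Guarded query: "no SMU" degrades to "not supported" rather than a hang */
    static int smu_get_ips_supported(bool smu_present)
    {
        int retv;

        if (!smu_present)
            return 0;

        retv = send_msg_with_param(42 /* e.g. VBIOSSMC_MSG_QueryIPS2Support */, 0);
        return retv;
    }
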
1 : 0); } int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) { - return dcn35_smu_send_msg_with_param( + int retv; + + if (!clk_mgr->smu_present) + return 0; + + retv = dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_DispPsrExit, 0); + smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv); + return retv; } int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) { - return dcn35_smu_send_msg_with_param( + int retv; + + if (!clk_mgr->smu_present) + return 0; + + retv = dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_QueryIPS2Support, 0); -} -void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param) -{ - REG_WRITE(MP1_SMN_C2PMSG_71, param); -} - -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) -{ - return REG_READ(MP1_SMN_C2PMSG_71); + //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv); + return retv; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h index 2b8e6959a03d..06cd3cc6d36e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h @@ -198,6 +198,4 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr); -void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param); -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_35_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 74c21d98b4de..236876d95185 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -34,6 +34,9 @@ #include "dce/dce_hwseq.h" #include "resource.h" +#include "dc_state.h" +#include "dc_state_priv.h" +#include "dc_plane_priv.h" #include "gpio_service_interface.h" #include "clk_mgr.h" @@ -210,7 +213,8 @@ static bool create_links( connectors_num, num_virtual_links); - for (i = 0; i < connectors_num; i++) { + // condition loop on link_count to allow skipping invalid indices + for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { struct link_init_data link_init_params = {0}; struct dc_link *link; @@ -384,6 +388,30 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) *perf_trace = NULL; } +static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust) +{ + if (!dc || !stream || !adjust) + return false; + + if (!dc->current_state) + return false; + + int i; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream == stream && pipe->stream_res.tg) { + if (dc->hwss.set_long_vtotal) + dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max); + + return true; + } + } + + return false; +} + /** * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR * @dc: dc reference @@ -412,10 +440,21 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, if (dc->optimized_required || dc->wm_optimized_required) return false; + dc_exit_ips_for_hw_access(dc); + stream->adjust.v_total_max = adjust->v_total_max; stream->adjust.v_total_mid = adjust->v_total_mid; stream->adjust.v_total_mid_frame_num = 
adjust->v_total_mid_frame_num; stream->adjust.v_total_min = adjust->v_total_min; + stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt; + + if (dc->caps.max_v_total != 0 && + (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) { + if (adjust->allow_otg_v_count_halt) + return set_long_vtotal(dc, stream, adjust); + else + return false; + } for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -452,6 +491,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, int i = 0; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -482,6 +523,8 @@ bool dc_stream_get_crtc_position(struct dc *dc, bool ret = false; struct crtc_position position; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -519,7 +562,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, cmd.secure_display.roi_info.y_end = rect->y + rect->height; } - dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); + dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } static inline void @@ -601,6 +644,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, if (pipe == NULL) return false; + dc_exit_ips_for_hw_access(dc); + /* By default, capture the full frame */ param.windowa_x_start = 0; param.windowa_y_start = 0; @@ -660,6 +705,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, struct pipe_ctx *pipe; struct timing_generator *tg; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream) @@ -684,6 +731,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, int i; struct pipe_ctx *pipe_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { @@ -719,6 +768,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, if (option > DITHER_OPTION_MAX) return; + dc_exit_ips_for_hw_access(stream->ctx->dc); + stream->dither_option = option; memset(¶ms, 0, sizeof(params)); @@ -743,6 +794,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre bool ret = false; struct pipe_ctx *pipes; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { pipes = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -760,6 +813,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) bool ret = false; struct pipe_ctx *pipes; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { @@ -786,6 +841,8 @@ void dc_stream_set_static_screen_params(struct dc *dc, struct pipe_ctx *pipes_affected[MAX_PIPES]; int num_pipes_affected = 0; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < num_streams; i++) { struct dc_stream_state *stream = streams[i]; @@ -808,7 +865,7 @@ static void dc_destruct(struct dc *dc) link_enc_cfg_init(dc, dc->current_state); if (dc->current_state) { - dc_release_state(dc->current_state); + dc_state_release(dc->current_state); dc->current_state = NULL; } @@ -1020,18 +1077,6 @@ static bool dc_construct(struct dc *dc, } #endif - /* Creation of current_state must occur after 
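
Per the hunk above, dc_stream_adjust_vmin_vmax() now rejects ranges beyond dc->caps.max_v_total unless the caller set allow_otg_v_count_halt, in which case the request is rerouted to the long-vtotal helper instead of the normal DRR programming. A condensed sketch of that routing decision (field names from the hunk; the cap value is a placeholder):

	#include <stdbool.h>

	#define MAX_V_TOTAL 8191	/* placeholder for dc->caps.max_v_total */

	enum drr_route { DRR_REJECT, DRR_LONG_VTOTAL, DRR_NORMAL };

	static enum drr_route route_drr(unsigned int v_total_min,
					unsigned int v_total_max,
					bool allow_otg_v_count_halt)
	{
		if (MAX_V_TOTAL != 0 &&
		    (v_total_max > MAX_V_TOTAL || v_total_min > MAX_V_TOTAL))
			return allow_otg_v_count_halt ? DRR_LONG_VTOTAL
						      : DRR_REJECT;

		return DRR_NORMAL;	/* programmed via the usual pipe walk */
	}
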
dc->dml - * is initialized in dc_create_resource_pool because - * on creation it copies the contents of dc->dml - */ - - dc->current_state = dc_create_state(dc); - - if (!dc->current_state) { - dm_error("%s: failed to create validate ctx\n", __func__); - goto fail; - } - if (!create_links(dc, init_params->num_virtual_links)) goto fail; @@ -1041,7 +1086,16 @@ static bool dc_construct(struct dc *dc, if (!create_link_encoders(dc)) goto fail; - dc_resource_state_construct(dc, dc->current_state); + /* Creation of current_state must occur after dc->dml + * is initialized in dc_create_resource_pool because + * on creation it copies the contents of dc->dml + */ + dc->current_state = dc_state_create(dc, NULL); + + if (!dc->current_state) { + dm_error("%s: failed to create validate ctx\n", __func__); + goto fail; + } return true; @@ -1085,7 +1139,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc, } } -static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { if (dc->ctx->dce_version >= DCN_VERSION_1_0) { memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); @@ -1105,9 +1159,9 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) - get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) - get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); } } } @@ -1115,7 +1169,7 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; - struct dc_state *dangling_context = dc_create_state(dc); + struct dc_state *dangling_context = dc_state_create_current_copy(dc); struct dc_state *current_ctx; struct pipe_ctx *pipe; struct timing_generator *tg; @@ -1123,8 +1177,6 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) if (dangling_context == NULL) return; - dc_resource_state_copy_construct(dc->current_state, dangling_context); - for (i = 0; i < dc->res_pool->pipe_count; i++) { struct dc_stream_state *old_stream = dc->current_state->res_ctx.pipe_ctx[i].stream; @@ -1161,6 +1213,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) } if (should_disable && old_stream) { + bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM; pipe = &dc->current_state->res_ctx.pipe_ctx[i]; tg = pipe->stream_res.tg; /* When disabling plane for a phantom pipe, we must turn on the @@ -1169,22 +1222,29 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) * state that can result in underflow or hang when enabling it * again for different use. 
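
The construct hunk moves current_state creation until after resource pool and link setup because dc_state_create() snapshots dc->dml, which only becomes valid once dc_create_resource_pool() has run. A reduced sketch of that ordering dependency (all types here are stand-ins):

	#include <stdbool.h>
	#include <stdlib.h>

	struct dml_stub { int ip_params; };
	struct state_stub { struct dml_stub dml_copy; };
	struct dc_stub { struct dml_stub dml; struct state_stub *current_state; };

	/* Copies dc->dml by value, like dc_state_create() does. */
	static struct state_stub *state_create(struct dc_stub *dc)
	{
		struct state_stub *s = calloc(1, sizeof(*s));

		if (s)
			s->dml_copy = dc->dml;
		return s;
	}

	static bool construct_sketch(struct dc_stub *dc)
	{
		dc->dml.ip_params = 42;			/* resource pool init */
		dc->current_state = state_create(dc);	/* must come after */
		return dc->current_state != NULL;
	}
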
*/ - if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (is_phantom) { if (tg->funcs->enable_crtc) { int main_pipe_width, main_pipe_height; + struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream); - main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width; - main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height; + main_pipe_width = old_paired_stream->dst.width; + main_pipe_height = old_paired_stream->dst.height; if (dc->hwss.blank_phantom) dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); tg->funcs->enable_crtc(tg); } } - dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); + + if (is_phantom) + dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true); + else + dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); - if (pipe->stream && pipe->plane_state) - dc_update_viusal_confirm_color(dc, context, pipe); + if (pipe->stream && pipe->plane_state) { + set_p_state_switch_method(dc, context, pipe); + dc_update_visual_confirm_color(dc, context, pipe); + } if (dc->hwss.apply_ctx_for_surface) { apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); @@ -1203,7 +1263,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) * The OTG is set to disable on falling edge of VUPDATE so the plane disable * will still get it's double buffer update. */ - if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (is_phantom) { if (tg->funcs->disable_phantom_crtc) tg->funcs->disable_phantom_crtc(tg); } @@ -1212,7 +1272,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) current_ctx = dc->current_state; dc->current_state = dangling_context; - dc_release_state(current_ctx); + dc_state_release(current_ctx); } static void disable_vbios_mode_if_required( @@ -1246,7 +1306,7 @@ static void disable_vbios_mode_if_required( if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { unsigned int enc_inst, tg_inst = 0; - unsigned int pix_clk_100hz; + unsigned int pix_clk_100hz = 0; enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (enc_inst != ENGINE_ID_UNKNOWN) { @@ -1276,6 +1336,54 @@ static void disable_vbios_mode_if_required( } } +/** + * wait_for_blank_complete - wait for all active OPPs to finish pending blank + * pattern updates + * + * @dc: [in] dc reference + * @context: [in] hardware context in use + */ +static void wait_for_blank_complete(struct dc *dc, + struct dc_state *context) +{ + struct pipe_ctx *opp_head; + struct dce_hwseq *hws = dc->hwseq; + int i; + + if (!hws->funcs.wait_for_blank_complete) + return; + + for (i = 0; i < MAX_PIPES; i++) { + opp_head = &context->res_ctx.pipe_ctx[i]; + + if (!resource_is_pipe_type(opp_head, OPP_HEAD) || + dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM) + continue; + + hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp); + } +} + +static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context) +{ + struct pipe_ctx *otg_master; + struct timing_generator *tg; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + otg_master = &context->res_ctx.pipe_ctx[i]; + if (!resource_is_pipe_type(otg_master, OTG_MASTER) || + dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM) + continue; + tg = otg_master->stream_res.tg; + if 
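
Phantom-pipe bookkeeping above moves off the stream-embedded mall_stream_config and behind dc_state_get_stream_subvp_type() and dc_state_get_paired_subvp_stream(), so the SubVP relationship is owned by the state object rather than the stream. A sketch of the accessor shape (enum values from the diff; the lookup storage is illustrative):

	enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

	struct stream_stub { int id; };

	/* Illustrative per-state bookkeeping; the real driver keeps this in
	 * struct dc_state rather than on the stream. */
	struct state_stub {
		struct stream_stub *streams[6];
		enum mall_stream_type subvp_type[6];
		struct stream_stub *paired[6];
		int count;
	};

	static enum mall_stream_type
	get_stream_subvp_type(const struct state_stub *state,
			      const struct stream_stub *stream)
	{
		int i;

		for (i = 0; i < state->count; i++)
			if (state->streams[i] == stream)
				return state->subvp_type[i];
		return SUBVP_NONE;
	}
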
(tg->funcs->wait_odm_doublebuffer_pending_clear) + tg->funcs->wait_odm_doublebuffer_pending_clear(tg); + } + + /* ODM update may require to reprogram blank pattern for each OPP */ + wait_for_blank_complete(dc, context); +} + static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) { int i; @@ -1284,7 +1392,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) int count = 0; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) continue; /* Timeout 100 ms */ @@ -1510,7 +1618,7 @@ static void program_timing_sync( } for (k = 0; k < group_size; k++) { - struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); + struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream); status->timing_sync_info.group_id = num_group; status->timing_sync_info.group_size = group_size; @@ -1521,7 +1629,7 @@ static void program_timing_sync( } - /* remove any other pipes that are already been synced */ + /* remove any other unblanked pipes as they have already been synced */ if (dc->config.use_pipe_ctx_sync_logic) { /* check pipe's syncd to decide which pipe to be removed */ for (j = 1; j < group_size; j++) { @@ -1534,6 +1642,7 @@ static void program_timing_sync( pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; } } else { + /* remove any other pipes by checking valid plane */ for (j = j + 1; j < group_size; j++) { bool is_blanked; @@ -1554,7 +1663,7 @@ static void program_timing_sync( if (group_size > 1) { if (sync_type == TIMING_SYNCHRONIZABLE) { dc->hwss.enable_timing_synchronization( - dc, group_index, group_size, pipe_set); + dc, ctx, group_index, group_size, pipe_set); } else if (sync_type == VBLANK_SYNCHRONIZABLE) { dc->hwss.enable_vblanks_synchronization( @@ -1684,7 +1793,7 @@ bool dc_validate_boot_timing(const struct dc *dc, return false; if (dc_is_dp_signal(link->connector_signal)) { - unsigned int pix_clk_100hz; + unsigned int pix_clk_100hz = 0; uint32_t numOdmPipes = 1; uint32_t id_src[4] = {0}; @@ -1726,6 +1835,9 @@ bool dc_validate_boot_timing(const struct dc *dc, return false; } + if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) + return false; + if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) { DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); return false; @@ -1759,6 +1871,8 @@ void dc_enable_stereo( int i, j; struct pipe_ctx *pipe; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (context != NULL) { pipe = &context->res_ctx.pipe_ctx[i]; @@ -1778,6 +1892,8 @@ void dc_enable_stereo( void dc_trigger_sync(struct dc *dc, struct dc_state *context) { if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { + dc_exit_ips_for_hw_access(dc); + enable_timing_multisync(dc, context); program_timing_sync(dc, context); } @@ -1836,7 +1952,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; /* Check old context for SubVP */ - subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); + subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); if (subvp_prev_use) break; } @@ -1962,8 +2078,17 @@ static enum dc_status 
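
dc_validate_boot_timing() picks up two hardening changes above: pix_clk_100hz is zero-initialized so a path that never reads the clock back cannot compare stack garbage, and sinks advertising DP 128b/132b channel coding are excluded from seamless boot outright. A reduced sketch of the check order (the DPCD bit follows the hunk; everything else is stubbed):

	#include <stdbool.h>

	struct dpcd_stub { unsigned int dp_128b_132b_supported : 1; };

	static bool boot_timing_ok(const struct dpcd_stub *dpcd,
				   unsigned int pix_clk_100hz, /* 0 = unread */
				   unsigned int requested_pix_clk_100hz)
	{
		if (pix_clk_100hz != requested_pix_clk_100hz)
			return false;

		/* UHBR (128b/132b) links never take the seamless-boot path. */
		if (dpcd->dp_128b_132b_supported)
			return false;

		return true;
	}
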
dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->stream_count == 0) { /* Must wait for no flips to be pending before doing optimize bw */ wait_for_no_pipes_pending(dc, context); + /* + * optimized dispclk depends on ODM setup. Need to wait for ODM + * update pending complete before optimizing bandwidth. + */ + wait_for_odm_update_pending_complete(dc, context); /* pplib is notified if disp_num changed */ dc->hwss.optimize_bandwidth(dc, context); + /* Need to do otg sync again as otg could be out of sync due to otg + * workaround applied during clock update + */ + dc_trigger_sync(dc, context); } if (dc->hwss.update_dsc_pg) @@ -1990,9 +2115,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c old_state = dc->current_state; dc->current_state = context; - dc_release_state(old_state); + dc_state_release(old_state); - dc_retain_state(dc->current_state); + dc_state_retain(dc->current_state); return result; } @@ -2004,8 +2129,7 @@ static bool commit_minimal_transition_state(struct dc *dc, * dc_commit_streams - Commit current stream state * * @dc: DC object with the commit state to be configured in the hardware - * @streams: Array with a list of stream state - * @stream_count: Total of streams + * @params: Parameters for the commit, including the streams to be committed * * Function responsible for commit streams change to the hardware. * @@ -2013,9 +2137,7 @@ static bool commit_minimal_transition_state(struct dc *dc, * Return DC_OK if everything work as expected, otherwise, return a dc_status * code. */ -enum dc_status dc_commit_streams(struct dc *dc, - struct dc_stream_state *streams[], - uint8_t stream_count) +enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params) { int i, j; struct dc_state *context; @@ -2024,16 +2146,22 @@ enum dc_status dc_commit_streams(struct dc *dc, struct pipe_ctx *pipe; bool handle_exit_odm2to1 = false; + if (!params) + return DC_ERROR_UNEXPECTED; + if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) return res; - if (!streams_changed(dc, streams, stream_count)) + if (!streams_changed(dc, params->streams, params->stream_count) && + dc->current_state->power_source == params->power_source) return res; - DC_LOG_DC("%s: %d streams\n", __func__, stream_count); + dc_exit_ips_for_hw_access(dc); - for (i = 0; i < stream_count; i++) { - struct dc_stream_state *stream = streams[i]; + DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count); + + for (i = 0; i < params->stream_count; i++) { + struct dc_stream_state *stream = params->streams[i]; struct dc_stream_status *status = dc_stream_get_status(stream); dc_stream_log(dc, stream); @@ -2051,7 +2179,7 @@ enum dc_status dc_commit_streams(struct dc *dc, * scenario, it uses extra pipes than needed to reduce power consumption * We need to switch off this feature to make room for new streams. 
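
The commit tail above now waits for pending ODM double-buffer updates before optimize_bandwidth(), since the optimized dispclk depends on the final ODM layout, and then re-runs OTG sync because the clock-update workaround can knock timing generators out of step. The required ordering, sketched with stubbed helpers:

	static void wait_no_pipes_pending(void)    { /* flips drained */ }
	static void wait_odm_update_complete(void) { /* ODM DB clear + blank */ }
	static void optimize_bandwidth(void)       { /* lower clocks */ }
	static void trigger_otg_sync(void)         { /* re-align OTGs */ }

	static void post_commit_optimize(void)
	{
		wait_no_pipes_pending();	/* no flips in flight */
		wait_odm_update_complete();	/* dispclk depends on ODM setup */
		optimize_bandwidth();		/* now safe to drop clocks */
		trigger_otg_sync();		/* clock workaround may desync OTGs */
	}
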
*/ - if (stream_count > dc->current_state->stream_count && + if (params->stream_count > dc->current_state->stream_count && dc->current_state->stream_count == 1) { for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -2063,13 +2191,13 @@ enum dc_status dc_commit_streams(struct dc *dc, if (handle_exit_odm2to1) res = commit_minimal_transition_state(dc, dc->current_state); - context = dc_create_state(dc); + context = dc_state_create_current_copy(dc); if (!context) goto context_alloc_fail; - dc_resource_state_copy_construct_current(dc, context); + context->power_source = params->power_source; - res = dc_validate_with_context(dc, set, stream_count, context, false); + res = dc_validate_with_context(dc, set, params->stream_count, context, false); if (res != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; @@ -2077,16 +2205,16 @@ enum dc_status dc_commit_streams(struct dc *dc, res = dc_commit_state_no_check(dc, context); - for (i = 0; i < stream_count; i++) { + for (i = 0; i < params->stream_count; i++) { for (j = 0; j < context->stream_count; j++) { - if (streams[i]->stream_id == context->streams[j]->stream_id) - streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; + if (params->streams[i]->stream_id == context->streams[j]->stream_id) + params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; - if (dc_is_embedded_signal(streams[i]->signal)) { - struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); + if (dc_is_embedded_signal(params->streams[i]->signal)) { + struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]); if (dc->hwss.is_abm_supported) - status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); + status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]); else status->is_abm_supported = true; } @@ -2094,7 +2222,7 @@ enum dc_status dc_commit_streams(struct dc *dc, } fail: - dc_release_state(context); + dc_state_release(context); context_alloc_fail: @@ -2148,7 +2276,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) pipe = &context->res_ctx.pipe_ctx[i]; // Don't check flip pending on phantom pipes - if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) + if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)) continue; /* Must set to false to start with, due to OR in update function */ @@ -2206,7 +2334,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) if (context->res_ctx.pipe_ctx[i].stream == NULL || context->res_ctx.pipe_ctx[i].plane_state == NULL) { context->res_ctx.pipe_ctx[i].pipe_idx = i; - dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); + dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]); } process_deferred_updates(dc); @@ -2221,110 +2349,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) dc->wm_optimized_required = false; } -static void init_state(struct dc *dc, struct dc_state *context) -{ - /* Each context must have their own instance of VBA and in order to - * initialize and obtain IP and SOC the base DML instance from DC is - * initially copied into every context - */ - memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); -} - -struct dc_state *dc_create_state(struct dc *dc) -{ - struct dc_state *context = kvzalloc(sizeof(struct dc_state), - GFP_KERNEL); - - if (!context) - return NULL; - - init_state(dc, 
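
dc_commit_streams() now takes a single dc_commit_streams_params and early-outs when neither the stream set nor the power source changed. A hypothetical caller under the new signature (only fields visible in the hunk; the power-source value is an assumed placeholder, not a confirmed constant):

	struct dc;			/* opaque here */
	struct dc_stream_state;		/* opaque here */

	/* Shape implied by the hunk: streams, stream_count, power_source. */
	struct commit_params_sketch {
		struct dc_stream_state **streams;
		unsigned char stream_count;
		int power_source;	/* assumed: a DC power-source value */
	};

	static void commit_streams_sketch(struct dc *dc,
					  struct dc_stream_state **streams,
					  unsigned char count, int power_source)
	{
		struct commit_params_sketch params = {
			.streams = streams,
			.stream_count = count,
			.power_source = power_source,
		};

		/* dc_commit_streams(dc, &params); -- illustrative call site */
		(void)dc; (void)params;
	}
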
context); - -#ifdef CONFIG_DRM_AMD_DC_FP - if (dc->debug.using_dml2) { - dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2); - } -#endif - kref_init(&context->refcount); - - return context; -} - -struct dc_state *dc_copy_state(struct dc_state *src_ctx) -{ - int i, j; - struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); -#ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_context *dml2 = NULL; -#endif - - if (!new_ctx) - return NULL; - memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); - -#ifdef CONFIG_DRM_AMD_DC_FP - if (new_ctx->bw_ctx.dml2) { - dml2 = kzalloc(sizeof(struct dml2_context), GFP_KERNEL); - if (!dml2) - return NULL; - - memcpy(dml2, src_ctx->bw_ctx.dml2, sizeof(struct dml2_context)); - new_ctx->bw_ctx.dml2 = dml2; - } -#endif - - for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; - - if (cur_pipe->top_pipe) - cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; - - if (cur_pipe->bottom_pipe) - cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; - - if (cur_pipe->prev_odm_pipe) - cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; - - if (cur_pipe->next_odm_pipe) - cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; - - } - - for (i = 0; i < new_ctx->stream_count; i++) { - dc_stream_retain(new_ctx->streams[i]); - for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) - dc_plane_state_retain( - new_ctx->stream_status[i].plane_states[j]); - } - - kref_init(&new_ctx->refcount); - - return new_ctx; -} - -void dc_retain_state(struct dc_state *context) -{ - kref_get(&context->refcount); -} - -static void dc_state_free(struct kref *kref) -{ - struct dc_state *context = container_of(kref, struct dc_state, refcount); - dc_resource_state_destruct(context); - -#ifdef CONFIG_DRM_AMD_DC_FP - dml2_destroy(context->bw_ctx.dml2); - context->bw_ctx.dml2 = 0; -#endif - - kvfree(context); -} - -void dc_release_state(struct dc_state *context) -{ - kref_put(&context->refcount, dc_state_free); -} - bool dc_set_generic_gpio_for_stereo(bool enable, struct gpio_service *gpio_service) { @@ -2523,6 +2547,10 @@ static enum surface_update_type get_scaling_info_update_type( /* Changing clip size of a large surface may result in MPC slice count change */ update_flags->bits.bandwidth_change = 1; + if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width || + u->scaling_info->clip_rect.height != u->surface->clip_rect.height) + update_flags->bits.clip_size_change = 1; + if (u->scaling_info->src_rect.x != u->surface->src_rect.x || u->scaling_info->src_rect.y != u->surface->src_rect.y || u->scaling_info->clip_rect.x != u->surface->clip_rect.x @@ -2536,7 +2564,8 @@ static enum surface_update_type get_scaling_info_update_type( || update_flags->bits.scaling_change) return UPDATE_TYPE_FULL; - if (update_flags->bits.position_change) + if (update_flags->bits.position_change || + update_flags->bits.clip_size_change) return UPDATE_TYPE_MED; return UPDATE_TYPE_FAST; @@ -2582,6 +2611,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (u->gamut_remap_matrix) update_flags->bits.gamut_remap_change = 1; + if (u->blend_tf) + update_flags->bits.gamma_change = 1; + if (u->gamma) { enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; @@ -2826,55 +2858,45 @@ static void copy_surface_update_to_plane( srf_update->plane_info->layer_index; } - if (srf_update->gamma && - 
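
The helpers deleted above (init_state, dc_create_state, dc_copy_state, dc_retain_state, dc_release_state) show the kref lifecycle that the replacement dc_state_retain()/dc_state_release() API preserves: retain is kref_get(), release is kref_put() with a destructor that tears the state down. The same pattern in a user-space sketch, with a plain atomic counter standing in for kref:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct state_sketch {
		atomic_int refcount;
		/* ... pipe contexts, streams, bandwidth context ... */
	};

	static struct state_sketch *state_create_sketch(void)
	{
		struct state_sketch *s = calloc(1, sizeof(*s));

		if (s)
			atomic_init(&s->refcount, 1);	/* kref_init() */
		return s;
	}

	static void state_retain_sketch(struct state_sketch *s)
	{
		atomic_fetch_add(&s->refcount, 1);	/* kref_get() */
	}

	static void state_release_sketch(struct state_sketch *s)
	{
		if (atomic_fetch_sub(&s->refcount, 1) == 1)
			free(s);		/* kref_put() destructor */
	}
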
(surface->gamma_correction != - srf_update->gamma)) { - memcpy(&surface->gamma_correction->entries, + if (srf_update->gamma) { + memcpy(&surface->gamma_correction.entries, &srf_update->gamma->entries, sizeof(struct dc_gamma_entries)); - surface->gamma_correction->is_identity = + surface->gamma_correction.is_identity = srf_update->gamma->is_identity; - surface->gamma_correction->num_entries = + surface->gamma_correction.num_entries = srf_update->gamma->num_entries; - surface->gamma_correction->type = + surface->gamma_correction.type = srf_update->gamma->type; } - if (srf_update->in_transfer_func && - (surface->in_transfer_func != - srf_update->in_transfer_func)) { - surface->in_transfer_func->sdr_ref_white_level = + if (srf_update->in_transfer_func) { + surface->in_transfer_func.sdr_ref_white_level = srf_update->in_transfer_func->sdr_ref_white_level; - surface->in_transfer_func->tf = + surface->in_transfer_func.tf = srf_update->in_transfer_func->tf; - surface->in_transfer_func->type = + surface->in_transfer_func.type = srf_update->in_transfer_func->type; - memcpy(&surface->in_transfer_func->tf_pts, + memcpy(&surface->in_transfer_func.tf_pts, &srf_update->in_transfer_func->tf_pts, sizeof(struct dc_transfer_func_distributed_points)); } - if (srf_update->func_shaper && - (surface->in_shaper_func != - srf_update->func_shaper)) - memcpy(surface->in_shaper_func, srf_update->func_shaper, - sizeof(*surface->in_shaper_func)); + if (srf_update->func_shaper) + memcpy(&surface->in_shaper_func, srf_update->func_shaper, + sizeof(surface->in_shaper_func)); - if (srf_update->lut3d_func && - (surface->lut3d_func != - srf_update->lut3d_func)) - memcpy(surface->lut3d_func, srf_update->lut3d_func, - sizeof(*surface->lut3d_func)); + if (srf_update->lut3d_func) + memcpy(&surface->lut3d_func, srf_update->lut3d_func, + sizeof(surface->lut3d_func)); if (srf_update->hdr_mult.value) surface->hdr_mult = srf_update->hdr_mult; - if (srf_update->blend_tf && - (surface->blend_tf != - srf_update->blend_tf)) - memcpy(surface->blend_tf, srf_update->blend_tf, - sizeof(*surface->blend_tf)); + if (srf_update->blend_tf) + memcpy(&surface->blend_tf, srf_update->blend_tf, + sizeof(surface->blend_tf)); if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = @@ -2905,14 +2927,13 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->dst.height && update->dst.width) stream->dst = update->dst; - if (update->out_transfer_func && - stream->out_transfer_func != update->out_transfer_func) { - stream->out_transfer_func->sdr_ref_white_level = + if (update->out_transfer_func) { + stream->out_transfer_func.sdr_ref_white_level = update->out_transfer_func->sdr_ref_white_level; - stream->out_transfer_func->tf = update->out_transfer_func->tf; - stream->out_transfer_func->type = + stream->out_transfer_func.tf = update->out_transfer_func->tf; + stream->out_transfer_func.type = update->out_transfer_func->type; - memcpy(&stream->out_transfer_func->tf_pts, + memcpy(&stream->out_transfer_func.tf_pts, &update->out_transfer_func->tf_pts, sizeof(struct dc_transfer_func_distributed_points)); } @@ -2994,11 +3015,9 @@ static void copy_stream_update_to_stream(struct dc *dc, update->dsc_config->num_slices_v != 0); /* Use temporarry context for validating new DSC config */ - struct dc_state *dsc_validate_context = dc_create_state(dc); + struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state); if (dsc_validate_context) { - dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); - 
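
copy_surface_update_to_plane() above stops chasing separately allocated gamma/transfer-function objects and copies into members now embedded in the plane state by value, which drops both a NULL-deref hazard and the old pointers-differ self-copy guard. A reduced sketch of the after shape:

	#include <string.h>

	struct tf_sketch { int type; int pts[8]; };

	/* After the refactor the plane owns its transfer function by value,
	 * so there is no destination pointer to validate before writing. */
	struct plane_sketch { struct tf_sketch in_transfer_func; };

	static void apply_tf_update(struct plane_sketch *plane,
				    const struct tf_sketch *update)
	{
		if (!update)	/* only the update side can be absent */
			return;

		memcpy(&plane->in_transfer_func, update,
		       sizeof(plane->in_transfer_func));
	}
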
stream->timing.dsc_cfg = *update->dsc_config; stream->timing.flags.DSC = enable_dsc; if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { @@ -3007,7 +3026,7 @@ static void copy_stream_update_to_stream(struct dc *dc, update->dsc_config = NULL; } - dc_release_state(dsc_validate_context); + dc_state_release(dsc_validate_context); } else { DC_ERROR("Failed to allocate new validate context for DSC change\n"); update->dsc_config = NULL; @@ -3015,8 +3034,8 @@ static void copy_stream_update_to_stream(struct dc *dc, } } -static void backup_plane_states_for_stream( - struct dc_plane_state plane_states[MAX_SURFACE_NUM], +static void backup_planes_and_stream_state( + struct dc_scratch_space *scratch, struct dc_stream_state *stream) { int i; @@ -3025,12 +3044,14 @@ static void backup_plane_states_for_stream( if (!status) return; - for (i = 0; i < status->plane_count; i++) - plane_states[i] = *status->plane_states[i]; + for (i = 0; i < status->plane_count; i++) { + scratch->plane_states[i] = *status->plane_states[i]; + } + scratch->stream_state = *stream; } -static void restore_plane_states_for_stream( - struct dc_plane_state plane_states[MAX_SURFACE_NUM], +static void restore_planes_and_stream_state( + struct dc_scratch_space *scratch, struct dc_stream_state *stream) { int i; @@ -3039,10 +3060,69 @@ static void restore_plane_states_for_stream( if (!status) return; - for (i = 0; i < status->plane_count; i++) - *status->plane_states[i] = plane_states[i]; + for (i = 0; i < status->plane_count; i++) { + *status->plane_states[i] = scratch->plane_states[i]; + } + *stream = scratch->stream_state; +} + +/** + * update_seamless_boot_flags() - Helper function for updating seamless boot flags + * + * @dc: Current DC state + * @context: New DC state to be programmed + * @surface_count: Number of surfaces that have an updated + * @stream: Corresponding stream to be updated in the current flip + * + * Updating seamless boot flags do not need to be part of the commit sequence. This + * helper function will update the seamless boot flags on each flip (if required) + * outside of the HW commit sequence (fast or slow). + * + * Return: void + */ +static void update_seamless_boot_flags(struct dc *dc, + struct dc_state *context, + int surface_count, + struct dc_stream_state *stream) +{ + if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { + /* Optimize seamless boot flag keeps clocks and watermarks high until + * first flip. After first flip, optimization is required to lower + * bandwidth. Important to note that it is expected UEFI will + * only light up a single display on POST, therefore we only expect + * one stream with seamless boot flag set. + */ + if (stream->apply_seamless_boot_optimization) { + stream->apply_seamless_boot_optimization = false; + + if (get_seamless_boot_stream_count(context) == 0) + dc->optimized_required = true; + } + } } +/** + * update_planes_and_stream_state() - The function takes planes and stream + * updates as inputs and determines the appropriate update type. If update type + * is FULL, the function allocates a new context, populates and validates it. + * Otherwise, it updates current dc context. The function will return both + * new_context and new_update_type back to the caller. The function also backs + * up both current and new contexts into corresponding dc state scratch memory. + * TODO: The function does too many things, and even conditionally allocates dc + * context memory implicitly. We should consider to break it down. 
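
Plane backups above move from a bare plane_states array into a dc_scratch_space that also snapshots the stream itself, and dc keeps two such slots (current_state and new_state) so either side of an update can be restored. A sketch of the symmetric pair (sizes and types are stand-ins):

	struct plane_sk  { int address; };
	struct stream_sk { int timing; };

	/* Mirrors the dc_scratch_space idea: whole-object value snapshots. */
	struct scratch_sk {
		struct plane_sk planes[4];
		struct stream_sk stream;
	};

	static void backup_sk(struct scratch_sk *scratch,
			      struct plane_sk **planes, int count,
			      const struct stream_sk *stream)
	{
		int i;

		for (i = 0; i < count; i++)
			scratch->planes[i] = *planes[i];
		scratch->stream = *stream;
	}

	static void restore_sk(const struct scratch_sk *scratch,
			       struct plane_sk **planes, int count,
			       struct stream_sk *stream)
	{
		int i;

		for (i = 0; i < count; i++)
			*planes[i] = scratch->planes[i];
		*stream = scratch->stream;
	}
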
+ * + * @dc: Current DC state + * @srf_updates: an array of surface updates + * @surface_count: surface update count + * @stream: Corresponding stream to be updated + * @stream_update: stream update + * @new_update_type: [out] determined update type by the function + * @new_context: [out] new context allocated and validated if update type is + * FULL, reference to current context if update type is less than FULL. + * + * Return: true if a valid update is populated into new_context, false + * otherwise. + */ static bool update_planes_and_stream_state(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -3066,9 +3146,10 @@ static bool update_planes_and_stream_state(struct dc *dc, } context = dc->current_state; - backup_plane_states_for_stream(dc->current_state->scratch.plane_states, stream); update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); + if (update_type == UPDATE_TYPE_FULL) + backup_planes_and_stream_state(&dc->scratch.current_state, stream); /* update current stream with the new updates */ copy_stream_update_to_stream(dc, context, stream, stream_update); @@ -3106,30 +3187,27 @@ static bool update_planes_and_stream_state(struct dc *dc, new_planes[i] = srf_updates[i].surface; /* initialize scratch memory for building context */ - context = dc_create_state(dc); + context = dc_state_create_copy(dc->current_state); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); return false; } - dc_resource_state_copy_construct( - dc->current_state, context); - /* For each full update, remove all existing phantom pipes first. * Ensures that we have enough pipes for newly added MPO planes */ - if (dc->res_pool->funcs->remove_phantom_pipes) - dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); /*remove old surfaces from context */ - if (!dc_rem_all_planes_for_stream(dc, stream, context)) { + if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) { BREAK_TO_DEBUGGER(); goto fail; } /* add surface to context */ - if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { + if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { BREAK_TO_DEBUGGER(); goto fail; @@ -3140,7 +3218,10 @@ static bool update_planes_and_stream_state(struct dc *dc, for (i = 0; i < surface_count; i++) { struct dc_plane_state *surface = srf_updates[i].surface; - if (update_type >= UPDATE_TYPE_MED) { + if (update_type != UPDATE_TYPE_MED) + continue; + if (surface->update_flags.bits.clip_size_change || + surface->update_flags.bits.position_change) { for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; @@ -3154,40 +3235,21 @@ static bool update_planes_and_stream_state(struct dc *dc, if (update_type == UPDATE_TYPE_FULL) { if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { - /* For phantom pipes we remove and create a new set of phantom pipes - * for each full update (because we don't know if we'll need phantom - * pipes until after the first round of validation). However, if validation - * fails we need to keep the existing phantom pipes (because we don't update - * the dc->current_state). 
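
The MED-update loop above no longer rebuilds scaling parameters for every surface; it only does so when the new clip_size_change flag or position_change is set, matching the flags introduced in get_scaling_info_update_type(). The gate, in isolation:

	#include <stdbool.h>

	enum upd_type { UPD_FAST, UPD_MED, UPD_FULL };

	struct upd_flags_sk { bool clip_size_change; bool position_change; };

	static bool needs_scaling_rebuild(enum upd_type type,
					  const struct upd_flags_sk *f)
	{
		if (type != UPD_MED)	/* FULL takes the validation path */
			return false;
		return f->clip_size_change || f->position_change;
	}
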
- * - * The phantom stream/plane refcount is decremented for validation because - * we assume it'll be removed (the free comes when the dc_state is freed), - * but if validation fails we have to increment back the refcount so it's - * consistent. - */ - if (dc->res_pool->funcs->retain_phantom_pipes) - dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state); BREAK_TO_DEBUGGER(); goto fail; } - - for (i = 0; i < context->stream_count; i++) { - struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx, - context->streams[i]); - - if (otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) - resource_build_test_pattern_params(&context->res_ctx, otg_master); - } } + update_seamless_boot_flags(dc, context, surface_count, stream); *new_context = context; *new_update_type = update_type; - backup_plane_states_for_stream(context->scratch.plane_states, stream); + if (update_type == UPDATE_TYPE_FULL) + backup_planes_and_stream_state(&dc->scratch.new_state, stream); return true; fail: - dc_release_state(context); + dc_state_release(context); return false; @@ -3271,12 +3333,26 @@ static void commit_planes_do_stream_update(struct dc *dc, } if (stream_update->pending_test_pattern) { - dc_link_dp_set_test_pattern(stream->link, + /* + * test pattern params depends on ODM topology + * changes that we could be applying to front + * end. Since at the current stage front end + * changes are not yet applied. We can only + * apply test pattern in hw based on current + * state and populate the final test pattern + * params in new state. If current and new test + * pattern params are different as result of + * different ODM topology being used, it will be + * detected and handle during front end + * programming update. + */ + dc->link_srv->dp_set_test_pattern(stream->link, stream->test_pattern.type, stream->test_pattern.color_space, stream->test_pattern.p_link_settings, stream->test_pattern.p_custom_pattern, stream->test_pattern.cust_pattern_size); + resource_build_test_pattern_params(&context->res_ctx, pipe_ctx); } if (stream_update->dpms_off) { @@ -3333,6 +3409,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s if (stream->link->replay_settings.config.replay_supported) return true; + if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level) + return true; + return false; } @@ -3370,6 +3449,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc, if (srf_updates[i].surface->flip_immediate) continue; + update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, sizeof(flip_addr->dirty_rects)); @@ -3383,7 +3463,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc, update_dirty_rect->panel_inst = panel_inst; update_dirty_rect->pipe_idx = j; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } } } @@ -3485,18 +3565,27 @@ static void commit_planes_for_stream_fast(struct dc *dc, { int i, j; struct pipe_ctx *top_pipe_to_program = NULL; + struct dc_stream_status *stream_status = NULL; + + dc_exit_ips_for_hw_access(dc); + dc_z10_restore(dc); top_pipe_to_program = resource_get_otg_master_for_stream( &context->res_ctx, stream); - if (dc->debug.visual_confirm) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + if (!top_pipe_to_program) + return; + + for (i = 
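
dc_dmub_should_send_dirty_rect_cmd() above gains a third consumer: ABM on DCN 3.5 and newer. A sketch of the combined eligibility test (the Replay and ABM legs are in the hunk; the PSR leg is assumed from the surrounding function, which this diff does not fully show):

	#include <stdbool.h>

	static bool should_send_dirty_rects(bool psr_enabled,
					    bool replay_supported,
					    int dcn_version,
					    unsigned int abm_level)
	{
		if (psr_enabled)	/* assumed pre-existing leg */
			return true;
		if (replay_supported)
			return true;
		/* New here: ABM on DCN 3.5+ consumes dirty rects too. */
		return dcn_version >= 35 && abm_level != 0;
	}
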
0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) { + set_p_state_switch_method(dc, context, pipe); - if (pipe->stream && pipe->plane_state) - dc_update_viusal_confirm_color(dc, context, pipe); + if (dc->debug.visual_confirm) + dc_update_visual_confirm_color(dc, context, pipe); } } @@ -3520,6 +3609,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, } } + stream_status = dc_state_get_stream_status(context, stream); + build_dmub_cmd_list(dc, srf_updates, surface_count, @@ -3532,7 +3623,9 @@ static void commit_planes_for_stream_fast(struct dc *dc, context->dmub_cmd_count, context->block_sequence, &(context->block_sequence_steps), - top_pipe_to_program); + top_pipe_to_program, + stream_status, + context); hwss_execute_sequence(dc, context->block_sequence, context->block_sequence_steps); @@ -3545,7 +3638,7 @@ static void commit_planes_for_stream_fast(struct dc *dc, top_pipe_to_program->stream->update_flags.raw = 0; } -static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context) +static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context) { /* * This function calls HWSS to wait for any potentially double buffered @@ -3583,6 +3676,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state } } } + wait_for_odm_update_pending_complete(dc, dc_context); } static void commit_planes_for_stream(struct dc *dc, @@ -3604,10 +3698,23 @@ static void commit_planes_for_stream(struct dc *dc, // dc->current_state anymore, so we have to cache it before we apply // the new SubVP context subvp_prev_use = false; + dc_exit_ips_for_hw_access(dc); + dc_z10_restore(dc); if (update_type == UPDATE_TYPE_FULL) wait_for_outstanding_hw_updates(dc, context); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) { + set_p_state_switch_method(dc, context, pipe); + + if (dc->debug.visual_confirm) + dc_update_visual_confirm_color(dc, context, pipe); + } + } + if (update_type == UPDATE_TYPE_FULL) { dc_allow_idle_optimizations(dc, false); @@ -3623,12 +3730,12 @@ static void commit_planes_for_stream(struct dc *dc, top_pipe_to_program = resource_get_otg_master_for_stream( &context->res_ctx, stream); - + ASSERT(top_pipe_to_program != NULL); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; // Check old context for SubVP - subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); + subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); if (subvp_prev_use) break; } @@ -3636,20 +3743,12 @@ static void commit_planes_for_stream(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { subvp_curr_use = true; break; } } - if (dc->debug.visual_confirm) - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (pipe->stream && pipe->plane_state) - dc_update_viusal_confirm_color(dc, context, pipe); - } - if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { struct pipe_ctx *mpcc_pipe; struct pipe_ctx *odm_pipe; @@ -3915,7 +4014,9 @@ static void 
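
Both commit paths now run the same per-pipe prologue seen above: set_p_state_switch_method() is recorded unconditionally for every active pipe, while the visual-confirm recolor stays behind the debug option. The loop shape, reduced:

	#include <stdbool.h>

	struct pipe_sk { bool has_stream; bool has_plane; };

	static void set_p_state_method_sk(struct pipe_sk *p)    { (void)p; }
	static void update_visual_confirm_sk(struct pipe_sk *p) { (void)p; }

	static void prepare_pipes(struct pipe_sk *pipes, int count,
				  bool visual_confirm)
	{
		int i;

		for (i = 0; i < count; i++) {
			if (!pipes[i].has_stream || !pipes[i].has_plane)
				continue;
			set_p_state_method_sk(&pipes[i]);	/* always */
			if (visual_confirm)			/* debug only */
				update_visual_confirm_sk(&pipes[i]);
		}
	}
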
commit_planes_for_stream(struct dc *dc, * programming has completed (we turn on phantom OTG in order * to complete the plane disable for phantom pipes). */ - dc->hwss.apply_ctx_to_hw(dc, context); + + if (dc->hwss.disable_phantom_streams) + dc->hwss.disable_phantom_streams(dc, context); } if (update_type != UPDATE_TYPE_FAST) @@ -4021,7 +4122,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) { subvp_active = true; break; } @@ -4053,24 +4154,14 @@ struct pipe_split_policy_backup { bool dynamic_odm_policy; bool subvp_policy; enum pipe_split_policy mpc_policy; + char force_odm[MAX_PIPES]; }; -static void release_minimal_transition_state(struct dc *dc, - struct dc_state *context, struct pipe_split_policy_backup *policy) -{ - dc_release_state(context); - /* restore previous pipe split and odm policy */ - if (!dc->config.is_vmin_only_asic) - dc->debug.pipe_split_policy = policy->mpc_policy; - dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy; - dc->debug.force_disable_subvp = policy->subvp_policy; -} - -static struct dc_state *create_minimal_transition_state(struct dc *dc, - struct dc_state *base_context, struct pipe_split_policy_backup *policy) +static void backup_and_set_minimal_pipe_split_policy(struct dc *dc, + struct dc_state *context, + struct pipe_split_policy_backup *policy) { - struct dc_state *minimal_transition_context = dc_create_state(dc); - unsigned int i, j; + int i; if (!dc->config.is_vmin_only_asic) { policy->mpc_policy = dc->debug.pipe_split_policy; @@ -4080,73 +4171,252 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, dc->debug.enable_single_display_2to1_odm_policy = false; policy->subvp_policy = dc->debug.force_disable_subvp; dc->debug.force_disable_subvp = true; + for (i = 0; i < context->stream_count; i++) { + policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments; + context->streams[i]->debug.force_odm_combine_segments = 0; + } +} - dc_resource_state_copy_construct(base_context, minimal_transition_context); +static void restore_minimal_pipe_split_policy(struct dc *dc, + struct dc_state *context, + struct pipe_split_policy_backup *policy) +{ + uint8_t i; - /* commit minimal state */ - if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { - for (i = 0; i < minimal_transition_context->stream_count; i++) { - struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i]; + if (!dc->config.is_vmin_only_asic) + dc->debug.pipe_split_policy = policy->mpc_policy; + dc->debug.enable_single_display_2to1_odm_policy = + policy->dynamic_odm_policy; + dc->debug.force_disable_subvp = policy->subvp_policy; + for (i = 0; i < context->stream_count; i++) + context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i]; +} - for (j = 0; j < stream_status->plane_count; j++) { - struct dc_plane_state *plane_state = stream_status->plane_states[j]; +static void release_minimal_transition_state(struct dc *dc, + struct dc_state *minimal_transition_context, + struct dc_state *base_context, + struct pipe_split_policy_backup *policy) +{ + restore_minimal_pipe_split_policy(dc, base_context, policy); + dc_state_release(minimal_transition_context); +} - /* force vsync flip when 
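
pipe_split_policy_backup above grows a per-stream force_odm array, and the new backup_and_set_minimal_pipe_split_policy()/restore_minimal_pipe_split_policy() pair keeps save and restore symmetric, including clearing forced ODM combine for the minimal transition. The symmetry, sketched:

	struct stream_dbg_sk { char force_odm_combine_segments; };

	struct policy_backup_sk {
		int mpc_policy;
		char force_odm[6];	/* one per stream, size illustrative */
	};

	static void backup_and_clear_sk(struct policy_backup_sk *b,
					struct stream_dbg_sk *streams,
					int count)
	{
		int i;

		for (i = 0; i < count; i++) {
			b->force_odm[i] = streams[i].force_odm_combine_segments;
			streams[i].force_odm_combine_segments = 0; /* minimal */
		}
	}

	static void restore_policy_sk(const struct policy_backup_sk *b,
				      struct stream_dbg_sk *streams, int count)
	{
		int i;

		for (i = 0; i < count; i++)
			streams[i].force_odm_combine_segments = b->force_odm[i];
	}
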
reconfiguring pipes to prevent underflow - * and corruption - */ - plane_state->flip_immediate = false; - } - } +static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context) +{ + uint8_t i; + int j; + struct dc_stream_status *stream_status; + + for (i = 0; i < context->stream_count; i++) { + stream_status = &context->stream_status[i]; + + for (j = 0; j < stream_status->plane_count; j++) + stream_status->plane_states[j]->flip_immediate = false; + } +} + +static struct dc_state *create_minimal_transition_state(struct dc *dc, + struct dc_state *base_context, struct pipe_split_policy_backup *policy) +{ + struct dc_state *minimal_transition_context = NULL; + + minimal_transition_context = dc_state_create_copy(base_context); + if (!minimal_transition_context) + return NULL; + + backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); + /* commit minimal state */ + if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { + /* prevent underflow and corruption when reconfiguring pipes */ + force_vsync_flip_in_minimal_transition_context(minimal_transition_context); } else { - /* this should never happen */ - release_minimal_transition_state(dc, minimal_transition_context, policy); + /* + * This should never happen, minimal transition state should + * always be validated first before adding pipe split features. + */ + release_minimal_transition_state(dc, minimal_transition_context, base_context, policy); BREAK_TO_DEBUGGER(); minimal_transition_context = NULL; } return minimal_transition_context; } -static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, - struct dc_state *context, - struct dc_stream_state *stream) +static bool is_pipe_topology_transition_seamless_with_intermediate_step( + struct dc *dc, + struct dc_state *initial_state, + struct dc_state *intermediate_state, + struct dc_state *final_state) +{ + return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state, + intermediate_state) && + dc->hwss.is_pipe_topology_transition_seamless(dc, + intermediate_state, final_state); +} + +static void swap_and_release_current_context(struct dc *dc, + struct dc_state *new_context, struct dc_stream_state *stream) +{ + + int i; + struct dc_state *old = dc->current_state; + struct pipe_ctx *pipe_ctx; + + /* Since memory free requires elevated IRQ, an interrupt + * request is generated by mem free. If this happens + * between freeing and reassigning the context, our vsync + * interrupt will call into dc and cause a memory + * corruption. Hence, we first reassign the context, + * then free the old context. 
+ */ + dc->current_state = new_context; + dc_state_release(old); + + // clear any forced full updates + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx = &new_context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state && pipe_ctx->stream == stream) + pipe_ctx->plane_state->force_full_update = false; + } +} + +static int initialize_empty_surface_updates( + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates) +{ + struct dc_stream_status *status = dc_stream_get_status(stream); + int i; + + if (!status) + return 0; + + for (i = 0; i < status->plane_count; i++) + srf_updates[i].surface = status->plane_states[i]; + + return status->plane_count; +} + +static bool commit_minimal_transition_based_on_new_context(struct dc *dc, + struct dc_state *new_context, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + int surface_count) { bool success = false; - struct dc_state *minimal_transition_context; struct pipe_split_policy_backup policy; + struct dc_state *intermediate_context = + create_minimal_transition_state(dc, new_context, + &policy); - /* commit based on new context */ - minimal_transition_context = create_minimal_transition_state(dc, - context, &policy); - if (minimal_transition_context) { - if (dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, minimal_transition_context) && - dc->hwss.is_pipe_topology_transition_seamless( - dc, minimal_transition_context, context)) { - DC_LOG_DC("%s base = new state\n", __func__); - success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK; + if (intermediate_context) { + if (is_pipe_topology_transition_seamless_with_intermediate_step( + dc, + dc->current_state, + intermediate_context, + new_context)) { + DC_LOG_DC("commit minimal transition state: base = new state\n"); + commit_planes_for_stream(dc, srf_updates, + surface_count, stream, NULL, + UPDATE_TYPE_FULL, intermediate_context); + swap_and_release_current_context( + dc, intermediate_context, stream); + dc_state_retain(dc->current_state); + success = true; } - release_minimal_transition_state(dc, minimal_transition_context, &policy); - } - - if (!success) { - /* commit based on current context */ - restore_plane_states_for_stream(dc->current_state->scratch.plane_states, stream); - minimal_transition_context = create_minimal_transition_state(dc, - dc->current_state, &policy); - if (minimal_transition_context) { - if (dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, minimal_transition_context) && - dc->hwss.is_pipe_topology_transition_seamless( - dc, minimal_transition_context, context)) { - DC_LOG_DC("%s base = current state\n", __func__); - success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK; - } - release_minimal_transition_state(dc, minimal_transition_context, &policy); + release_minimal_transition_state( + dc, intermediate_context, new_context, &policy); + } + return success; +} + +static bool commit_minimal_transition_based_on_current_context(struct dc *dc, + struct dc_state *new_context, struct dc_stream_state *stream) +{ + bool success = false; + struct pipe_split_policy_backup policy; + struct dc_state *intermediate_context; + struct dc_state *old_current_state = dc->current_state; + struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0}; + int surface_count; + + /* + * Both current and new contexts share the same stream and plane state + * pointers. 
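
swap_and_release_current_context() above encodes a subtle ordering rule spelled out in its comment: publish the new context before releasing the old one, because freeing memory can raise an interrupt whose vsync handler dereferences dc->current_state. The safe ordering in isolation:

	struct ctx_sk { int refcount; };
	struct dc_sk  { struct ctx_sk *current_state; };

	static void state_release_sk(struct ctx_sk *c) { (void)c; /* may free */ }

	static void swap_then_release(struct dc_sk *dc, struct ctx_sk *new_ctx)
	{
		struct ctx_sk *old = dc->current_state;

		/* Reassign first: an interrupt firing during the release
		 * below must never observe a freed current_state. */
		dc->current_state = new_ctx;
		state_release_sk(old);
	}
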
When new context is validated, stream and planes get + * populated with new updates such as new plane addresses. This makes + * the current context no longer valid because stream and planes are + * modified from the original. We backup current stream and plane states + * into scratch space whenever we are populating new context. So we can + * restore the original values back by calling the restore function now. + * This restores back the original stream and plane states associated + * with the current state. + */ + restore_planes_and_stream_state(&dc->scratch.current_state, stream); + dc_state_retain(old_current_state); + intermediate_context = create_minimal_transition_state(dc, + old_current_state, &policy); + + if (intermediate_context) { + if (is_pipe_topology_transition_seamless_with_intermediate_step( + dc, + dc->current_state, + intermediate_context, + new_context)) { + DC_LOG_DC("commit minimal transition state: base = current state\n"); + surface_count = initialize_empty_surface_updates( + stream, srf_updates); + commit_planes_for_stream(dc, srf_updates, + surface_count, stream, NULL, + UPDATE_TYPE_FULL, intermediate_context); + swap_and_release_current_context( + dc, intermediate_context, stream); + dc_state_retain(dc->current_state); + success = true; } - restore_plane_states_for_stream(context->scratch.plane_states, stream); + release_minimal_transition_state(dc, intermediate_context, + old_current_state, &policy); } + dc_state_release(old_current_state); + /* + * Restore stream and plane states back to the values associated with + * new context. + */ + restore_planes_and_stream_state(&dc->scratch.new_state, stream); + return success; +} - ASSERT(success); +/** + * commit_minimal_transition_state_in_dc_update - Commit a minimal state based + * on current or new context + * + * @dc: DC structure, used to get the current state + * @new_context: New context + * @stream: Stream getting the update for the flip + * @srf_updates: Surface updates + * @surface_count: Number of surfaces + * + * The function takes in current state and new state and determine a minimal + * transition state as the intermediate step which could make the transition + * between current and new states seamless. If found, it will commit the minimal + * transition state and update current state to this minimal transition state + * and return true, if not, it will return false. 
+ * + * Return: + * Return True if the minimal transition succeeded, false otherwise + */ +static bool commit_minimal_transition_state_in_dc_update(struct dc *dc, + struct dc_state *new_context, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + int surface_count) +{ + bool success = commit_minimal_transition_based_on_new_context( + dc, new_context, stream, srf_updates, + surface_count); + if (!success) + success = commit_minimal_transition_based_on_current_context(dc, + new_context, stream); + if (!success) + DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n"); return success; } @@ -4193,7 +4463,7 @@ static bool commit_minimal_transition_state(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { subvp_in_use = true; break; } @@ -4229,12 +4499,14 @@ static bool commit_minimal_transition_state(struct dc *dc, dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" : "Unknown"); + dc_state_retain(transition_base_context); transition_context = create_minimal_transition_state(dc, transition_base_context, &policy); if (transition_context) { ret = dc_commit_state_no_check(dc, transition_context); - release_minimal_transition_state(dc, transition_context, &policy); + release_minimal_transition_state(dc, transition_context, transition_base_context, &policy); } + dc_state_release(transition_base_context); if (ret != DC_OK) { /* this should never happen */ @@ -4252,41 +4524,6 @@ static bool commit_minimal_transition_state(struct dc *dc, return true; } -/** - * update_seamless_boot_flags() - Helper function for updating seamless boot flags - * - * @dc: Current DC state - * @context: New DC state to be programmed - * @surface_count: Number of surfaces that have an updated - * @stream: Corresponding stream to be updated in the current flip - * - * Updating seamless boot flags do not need to be part of the commit sequence. This - * helper function will update the seamless boot flags on each flip (if required) - * outside of the HW commit sequence (fast or slow). - * - * Return: void - */ -static void update_seamless_boot_flags(struct dc *dc, - struct dc_state *context, - int surface_count, - struct dc_stream_state *stream) -{ - if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { - /* Optimize seamless boot flag keeps clocks and watermarks high until - * first flip. After first flip, optimization is required to lower - * bandwidth. Important to note that it is expected UEFI will - * only light up a single display on POST, therefore we only expect - * one stream with seamless boot flag set. 
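
commit_minimal_transition_state_in_dc_update() above is a two-attempt wrapper: try an intermediate state derived from the new context, fall back to one derived from the current context, and only then log a non-seamless transition. The control flow, reduced:

	#include <stdbool.h>
	#include <stdio.h>

	static bool try_base_new(void)     { return false; /* stub attempt */ }
	static bool try_base_current(void) { return true;  /* stub fallback */ }

	static bool commit_minimal_transition_sk(void)
	{
		bool success = try_base_new();

		if (!success)
			success = try_base_current();
		if (!success)
			fprintf(stderr, "non-seamless pipe topology update\n");
		return success;
	}
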
- */ - if (stream->apply_seamless_boot_optimization) { - stream->apply_seamless_boot_optimization = false; - - if (get_seamless_boot_stream_count(context) == 0) - dc->optimized_required = true; - } - } -} - static void populate_fast_updates(struct dc_fast_update *fast_update, struct dc_surface_update *srf_updates, int surface_count, @@ -4348,7 +4585,6 @@ static bool full_update_required(struct dc *dc, srf_updates[i].in_transfer_func || srf_updates[i].func_shaper || srf_updates[i].lut3d_func || - srf_updates[i].blend_tf || srf_updates[i].surface->force_full_update || (srf_updates[i].flip_addr && srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || @@ -4407,60 +4643,131 @@ static bool fast_update_only(struct dc *dc, && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); } -static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc, +static bool update_planes_and_stream_v1(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, - struct dc_state *context) + struct dc_stream_update *stream_update, + struct dc_state *state) { - struct pipe_ctx *cur_pipe, *new_pipe; - bool cur_is_odm_in_use, new_is_odm_in_use; - struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); - struct dc_stream_status *new_stream_status = stream_get_status(context, stream); + const struct dc_stream_status *stream_status; + enum surface_update_type update_type; + struct dc_state *context; + struct dc_context *dc_ctx = dc->ctx; + int i, j; + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; - if (!dc->debug.enable_single_display_2to1_odm_policy || - !dc->config.enable_windowed_mpo_odm) - /* skip the check if windowed MPO ODM or dynamic ODM is turned - * off. 
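
full_update_required() above drops blend_tf from its trigger list: a blend transfer-function update is now tracked through the gamma_change update flag set in det_surface_update(), so it no longer forces a FULL pass by itself. The slimmed trigger set, sketched:

	#include <stdbool.h>

	struct srf_update_sk {
		const void *in_transfer_func;
		const void *func_shaper;
		const void *lut3d_func;
		/* blend_tf intentionally absent: folded into the
		 * gamma_change update flag in the detection pass. */
	};

	static bool forces_full_update(const struct srf_update_sk *u)
	{
		return u->in_transfer_func || u->func_shaper || u->lut3d_func;
	}
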
- */ - return false; + dc_exit_ips_for_hw_access(dc); - if (context == dc->current_state) - /* skip the check for fast update */ - return false; + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); + stream_status = dc_stream_get_status(stream); + context = dc->current_state; - if (new_stream_status->plane_count != cur_stream_status->plane_count) - /* plane count changed, not a plane scaling update so not the - * case we are looking for - */ - return false; + update_type = dc_check_update_surfaces_for_stream( + dc, srf_updates, surface_count, stream_update, stream_status); - cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream); - new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream); - cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1; - new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1; - if (cur_is_odm_in_use == new_is_odm_in_use) - /* ODM state isn't changed, not the case we are looking for */ - return false; + if (update_type >= UPDATE_TYPE_FULL) { - if (dc->hwss.is_pipe_topology_transition_seamless && - dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, context)) - /* transition can be achieved without the need for committing - * minimal transition state first + /* initialize scratch memory for building context */ + context = dc_state_create_copy(state); + if (context == NULL) { + DC_ERROR("Failed to allocate new validate context!\n"); + return false; + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) + new_pipe->plane_state->force_full_update = true; + } + } else if (update_type == UPDATE_TYPE_FAST) { + /* + * Previous frame finished and HW is ready for optimization. 
*/ - return false; + dc_post_update_surfaces_to_stream(dc); + } + + for (i = 0; i < surface_count; i++) { + struct dc_plane_state *surface = srf_updates[i].surface; + + copy_surface_update_to_plane(surface, &srf_updates[i]); + + if (update_type >= UPDATE_TYPE_MED) { + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = + &context->res_ctx.pipe_ctx[j]; + + if (pipe_ctx->plane_state != surface) + continue; + + resource_build_scaling_params(pipe_ctx); + } + } + } + + copy_stream_update_to_stream(dc, context, stream, stream_update); + + if (update_type >= UPDATE_TYPE_FULL) { + if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { + DC_ERROR("Mode validation failed for stream update!\n"); + dc_state_release(context); + return false; + } + } + + TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); + + if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && + !dc->debug.enable_legacy_fast_update) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } + /*update current_State*/ + if (dc->current_state != context) { + + struct dc_state *old = dc->current_state; + dc->current_state = context; + dc_state_release(old); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state && pipe_ctx->stream == stream) + pipe_ctx->plane_state->force_full_update = false; + } + } + + /* Legacy optimization path for DCE. */ + if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { + dc_post_update_surfaces_to_stream(dc); + TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); + } return true; } -bool dc_update_planes_and_stream(struct dc *dc, +static bool update_planes_and_stream_v2(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update) { struct dc_state *context; enum surface_update_type update_type; - int i; - struct mall_temp_config mall_temp_config; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; /* In cases where MPO and split or ODM are used transitions can @@ -4484,7 +4791,7 @@ bool dc_update_planes_and_stream(struct dc *dc, /* on plane addition, minimal state is the current one */ if (force_minimal_pipe_splitting && is_plane_addition && !commit_minimal_transition_state(dc, dc->current_state)) - return false; + return false; if (!update_planes_and_stream_state( dc, @@ -4498,46 +4805,19 @@ bool dc_update_planes_and_stream(struct dc *dc, /* on plane removal, minimal state is the new one */ if (force_minimal_pipe_splitting && !is_plane_addition) { - /* Since all phantom pipes are removed in full validation, - * we have to save and restore the subvp/mall config when - * we do a minimal transition since the flags marking the - * pipe as subvp/phantom will be cleared (dc copy constructor - * creates a shallow copy). 
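
update_planes_and_stream_v2() commits its minimal transitions against different bases depending on direction: the current state before a plane addition, and the freshly built context after a plane removal. A condensed view of that ordering (illustrative fragment, arguments as in the surrounding code):

        if (force_minimal_pipe_splitting && is_plane_addition &&
            !commit_minimal_transition_state(dc, dc->current_state))
                return false;               /* split pipes before adding */

        if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
                                            stream, stream_update,
                                            &update_type, &context))
                return false;

        if (force_minimal_pipe_splitting && !is_plane_addition) {
                if (!commit_minimal_transition_state(dc, context)) {
                        dc_state_release(context);
                        return false;       /* split pipes after removing */
                }
                update_type = UPDATE_TYPE_FULL;
        }
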
- */ - if (dc->res_pool->funcs->save_mall_state) - dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); if (!commit_minimal_transition_state(dc, context)) { - dc_release_state(context); + dc_state_release(context); return false; } - if (dc->res_pool->funcs->restore_mall_state) - dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); - - /* If we do a minimal transition with plane removal and the context - * has subvp we also have to retain back the phantom stream / planes - * since the refcount is decremented as part of the min transition - * (we commit a state with no subvp, so the phantom streams / planes - * had to be removed). - */ - if (dc->res_pool->funcs->retain_phantom_pipes) - dc->res_pool->funcs->retain_phantom_pipes(dc, context); update_type = UPDATE_TYPE_FULL; } - /* when windowed MPO ODM is supported, we need to handle a special case - * where we can transition between ODM combine and MPC combine due to - * plane scaling update. This transition will require us to commit - * minimal transition state. The condition to trigger this update can't - * be predicted by could_mpcc_tree_change_for_active_pipes because we - * can only determine it after DML validation. Therefore we can't rely - * on the existing commit minimal transition state sequence. Instead - * we have to add additional handling here to handle this transition - * with its own special sequence. - */ - if (should_commit_minimal_transition_for_windowed_mpo_odm(dc, stream, context)) - commit_minimal_transition_state_for_windowed_mpo_odm(dc, - context, stream); - update_seamless_boot_flags(dc, context, surface_count, stream); + if (dc->hwss.is_pipe_topology_transition_seamless && + !dc->hwss.is_pipe_topology_transition_seamless( + dc, dc->current_state, context)) + commit_minimal_transition_state_in_dc_update(dc, context, stream, + srf_updates, surface_count); + if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, @@ -4563,143 +4843,33 @@ bool dc_update_planes_and_stream(struct dc *dc, update_type, context); } - - if (dc->current_state != context) { - - /* Since memory free requires elevated IRQL, an interrupt - * request is generated by mem free. If this happens - * between freeing and reassigning the context, our vsync - * interrupt will call into dc and cause a memory - * corruption BSOD. Hence, we first reassign the context, - * then free the old context. 
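
The removed comment explains the ordering constraint that swap_and_release_current_context() now owns: publish the new state before releasing the old one, so a vsync interrupt landing in between never walks freed memory. A minimal sketch under that assumption (the real helper, defined earlier in this file, also handles per-stream bookkeeping such as clearing force_full_update):

        static void swap_current_context_sketch(struct dc *dc,
                                                struct dc_state *context)
        {
                struct dc_state *old = dc->current_state;

                dc->current_state = context;    /* publish first... */
                dc_state_release(old);          /* ...then free */
        }
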
- */ - - struct dc_state *old = dc->current_state; - - dc->current_state = context; - dc_release_state(old); - - // clear any forced full updates - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->plane_state && pipe_ctx->stream == stream) - pipe_ctx->plane_state->force_full_update = false; - } - } + if (dc->current_state != context) + swap_and_release_current_context(dc, context, stream); return true; } -void dc_commit_updates_for_stream(struct dc *dc, - struct dc_surface_update *srf_updates, - int surface_count, +static void commit_planes_and_stream_update_on_current_context(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update, - struct dc_state *state) + enum surface_update_type update_type) { - const struct dc_stream_status *stream_status; - enum surface_update_type update_type; - struct dc_state *context; - struct dc_context *dc_ctx = dc->ctx; - int i, j; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; - populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); - stream_status = dc_stream_get_status(stream); - context = dc->current_state; - - update_type = dc_check_update_surfaces_for_stream( - dc, srf_updates, surface_count, stream_update, stream_status); - - /* TODO: Since change commit sequence can have a huge impact, - * we decided to only enable it for DCN3x. However, as soon as - * we get more confident about this change we'll need to enable - * the new sequence for all ASICs. - */ - if (dc->ctx->dce_version >= DCN_VERSION_3_2) { - /* - * Previous frame finished and HW is ready for optimization. - */ - if (update_type == UPDATE_TYPE_FAST) - dc_post_update_surfaces_to_stream(dc); - - dc_update_planes_and_stream(dc, srf_updates, - surface_count, stream, - stream_update); - return; - } - - if (update_type >= update_surface_trace_level) - update_surface_trace(dc, srf_updates, surface_count); - - - if (update_type >= UPDATE_TYPE_FULL) { - - /* initialize scratch memory for building context */ - context = dc_create_state(dc); - if (context == NULL) { - DC_ERROR("Failed to allocate new validate context!\n"); - return; - } - - dc_resource_state_copy_construct(state, context); - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - - if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) - new_pipe->plane_state->force_full_update = true; - } - } else if (update_type == UPDATE_TYPE_FAST) { - /* - * Previous frame finished and HW is ready for optimization. 
- */ - dc_post_update_surfaces_to_stream(dc); - } - - - for (i = 0; i < surface_count; i++) { - struct dc_plane_state *surface = srf_updates[i].surface; - - copy_surface_update_to_plane(surface, &srf_updates[i]); - - if (update_type >= UPDATE_TYPE_MED) { - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe_ctx = - &context->res_ctx.pipe_ctx[j]; - - if (pipe_ctx->plane_state != surface) - continue; - - resource_build_scaling_params(pipe_ctx); - } - } - } - - copy_stream_update_to_stream(dc, context, stream, stream_update); - - if (update_type >= UPDATE_TYPE_FULL) { - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { - DC_ERROR("Mode validation failed for stream update!\n"); - dc_release_state(context); - return; - } - } - - TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); - - update_seamless_boot_flags(dc, context, surface_count, stream); - if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && - !dc->debug.enable_legacy_fast_update) { + ASSERT(update_type < UPDATE_TYPE_FULL); + populate_fast_updates(fast_update, srf_updates, surface_count, + stream_update); + if (fast_update_only(dc, fast_update, srf_updates, surface_count, + stream_update, stream) && + !dc->debug.enable_legacy_fast_update) commit_planes_for_stream_fast(dc, srf_updates, surface_count, stream, stream_update, update_type, - context); - } else { + dc->current_state); + else commit_planes_for_stream( dc, srf_updates, @@ -4707,32 +4877,133 @@ void dc_commit_updates_for_stream(struct dc *dc, stream, stream_update, update_type, - context); - } - /*update current_State*/ - if (dc->current_state != context) { + dc->current_state); +} - struct dc_state *old = dc->current_state; +static void commit_planes_and_stream_update_with_new_context(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type, + struct dc_state *new_context) +{ + ASSERT(update_type >= UPDATE_TYPE_FULL); + if (!dc->hwss.is_pipe_topology_transition_seamless(dc, + dc->current_state, new_context)) + /* + * It is required by the feature design that all pipe topologies + * using extra free pipes for power saving purposes such as + * dynamic ODM or SubVp shall only be enabled when it can be + * transitioned seamlessly to AND from its minimal transition + * state. A minimal transition state is defined as the same dc + * state but with all power saving features disabled. So it uses + * the minimum pipe topology. When we can't seamlessly + * transition from state A to state B, we will insert the + * minimal transition state A' or B' in between so seamless + * transition between A and B can be made possible. 
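
The added comment states the contract: any topology that borrows free pipes for power savings must be seamlessly reachable from its minimal transition state, and when a direct A-to-B switch is not seamless, a minimal state A' or B' is committed in between. The gate reduces to this fragment, condensed from commit_planes_and_stream_update_with_new_context():

        if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
                        dc->current_state, new_context))
                commit_minimal_transition_state_in_dc_update(dc, new_context,
                                stream, srf_updates, surface_count);

        commit_planes_for_stream(dc, srf_updates, surface_count, stream,
                                 stream_update, update_type, new_context);
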
+ */ + commit_minimal_transition_state_in_dc_update(dc, new_context, + stream, srf_updates, surface_count); - dc->current_state = context; - dc_release_state(old); + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + new_context); +} - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; +static bool update_planes_and_stream_v3(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update) +{ + struct dc_state *new_context; + enum surface_update_type update_type; - if (pipe_ctx->plane_state && pipe_ctx->stream == stream) - pipe_ctx->plane_state->force_full_update = false; - } - } + /* + * When this function returns true and new_context is not equal to + * current state, the function allocates and validates a new dc state + * and assigns it to new_context. The function expects that the caller + * is responsible to free this memory when new_context is no longer + * used. We swap current with new context and free current instead. So + * new_context's memory will live until the next full update after it is + * replaced by a newer context. Refer to the use of + * swap_and_free_current_context below. + */ + if (!update_planes_and_stream_state(dc, srf_updates, surface_count, + stream, stream_update, &update_type, + &new_context)) + return false; - /* Legacy optimization path for DCE. */ - if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { - dc_post_update_surfaces_to_stream(dc); - TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); + if (new_context == dc->current_state) { + commit_planes_and_stream_update_on_current_context(dc, + srf_updates, surface_count, stream, + stream_update, update_type); + } else { + commit_planes_and_stream_update_with_new_context(dc, + srf_updates, surface_count, stream, + stream_update, update_type, new_context); + swap_and_release_current_context(dc, new_context, stream); } - return; + return true; +} + +bool dc_update_planes_and_stream(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update) +{ + dc_exit_ips_for_hw_access(dc); + /* + * update planes and stream version 3 separates FULL and FAST updates + * to their own sequences. It aims to clean up frequent checks for + * update type resulting unnecessary branching in logic flow. It also + * adds a new commit minimal transition sequence, which detects the need + * for minimal transition based on the actual comparison of current and + * new states instead of "predicting" it based on per feature software + * policy.i.e could_mpcc_tree_change_for_active_pipes. The new commit + * minimal transition sequence is made universal to any power saving + * features that would use extra free pipes such as Dynamic ODM/MPC + * Combine, MPO or SubVp. Therefore there is no longer a need to + * specially handle compatibility problems with transitions among those + * features as they are now transparent to the new sequence. 
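
Together with dc_commit_updates_for_stream() just below, the entry points now form a three-tier dispatch on ASIC generation, restated compactly here (version constants as used in the patch itself):

        if (dc->ctx->dce_version > DCN_VERSION_3_51)
                update_planes_and_stream_v3(dc, srf_updates, surface_count,
                                            stream, stream_update);
        else if (dc->ctx->dce_version >= DCN_VERSION_3_2)
                update_planes_and_stream_v2(dc, srf_updates, surface_count,
                                            stream, stream_update);
        else
                update_planes_and_stream_v1(dc, srf_updates, surface_count,
                                            stream, stream_update, state);
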
+ */ + if (dc->ctx->dce_version > DCN_VERSION_3_51) + return update_planes_and_stream_v3(dc, srf_updates, + surface_count, stream, stream_update); + return update_planes_and_stream_v2(dc, srf_updates, + surface_count, stream, stream_update); +} +void dc_commit_updates_for_stream(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + struct dc_state *state) +{ + dc_exit_ips_for_hw_access(dc); + /* TODO: Since change commit sequence can have a huge impact, + * we decided to only enable it for DCN3x. However, as soon as + * we get more confident about this change we'll need to enable + * the new sequence for all ASICs. + */ + if (dc->ctx->dce_version > DCN_VERSION_3_51) { + update_planes_and_stream_v3(dc, srf_updates, surface_count, + stream, stream_update); + return; + } + if (dc->ctx->dce_version >= DCN_VERSION_3_2) { + update_planes_and_stream_v2(dc, srf_updates, surface_count, + stream, stream_update); + return; + } + update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, + stream_update, state); } uint8_t dc_get_current_stream_count(struct dc *dc) @@ -4775,8 +5046,13 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) void dc_power_down_on_boot(struct dc *dc) { if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && - dc->hwss.power_down_on_boot) + dc->hwss.power_down_on_boot) { + + if (dc->caps.ips_support) + dc_exit_ips_for_hw_access(dc); + dc->hwss.power_down_on_boot(dc); + } } void dc_set_power_state( @@ -4788,7 +5064,9 @@ void dc_set_power_state( switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: - dc_resource_state_construct(dc, dc->current_state); + dc_state_construct(dc, dc->current_state); + + dc_exit_ips_for_hw_access(dc); dc_z10_restore(dc); @@ -4803,7 +5081,7 @@ void dc_set_power_state( default: ASSERT(dc->current_state->stream_count == 0); - dc_resource_state_destruct(dc->current_state); + dc_state_destruct(dc->current_state); break; } @@ -4880,12 +5158,48 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable) return true; } -void dc_allow_idle_optimizations(struct dc *dc, bool allow) +/* enable/disable eDP Replay without specify stream for eDP */ +bool dc_set_replay_allow_active(struct dc *dc, bool active) +{ + int i; + bool allow_active; + + for (i = 0; i < dc->current_state->stream_count; i++) { + struct dc_link *link; + struct dc_stream_state *stream = dc->current_state->streams[i]; + + link = stream->link; + if (!link) + continue; + + if (link->replay_settings.replay_feature_enabled) { + if (active && !link->replay_settings.replay_allow_active) { + allow_active = true; + if (!dc_link_set_replay_allow_active(link, &allow_active, + false, false, NULL)) + return false; + } else if (!active && link->replay_settings.replay_allow_active) { + allow_active = false; + if (!dc_link_set_replay_allow_active(link, &allow_active, + true, false, NULL)) + return false; + } + } + } + + return true; +} + +void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) { if (dc->debug.disable_idle_power_optimizations) return; - if (dc->caps.ips_support && dc->config.disable_ips) + if (allow != dc->idle_optimizations_allowed) + DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, + dc->idle_optimizations_allowed, allow, caller_name); + + if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) return; if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) @@ -4899,24 +5213,24 @@ void 
dc_allow_idle_optimizations(struct dc *dc, bool allow) dc->idle_optimizations_allowed = allow; } -bool dc_dmub_is_ips_idle_state(struct dc *dc) +void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) { - uint32_t idle_state = 0; + if (dc->caps.ips_support) + dc_allow_idle_optimizations_internal(dc, false, caller_name); +} +bool dc_dmub_is_ips_idle_state(struct dc *dc) +{ if (dc->debug.disable_idle_power_optimizations) return false; - if (!dc->caps.ips_support || dc->config.disable_ips) + if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) return false; - if (dc->hwss.get_idle_state) - idle_state = dc->hwss.get_idle_state(dc); - - if ((idle_state & DMUB_IPS1_ALLOW_MASK) || - (idle_state & DMUB_IPS2_ALLOW_MASK)) - return true; + if (!dc->ctx->dmub_srv) + return false; - return false; + return dc->ctx->dmub_srv->idle_allowed; } /* set min and max memory clock to lowest and highest DPM level, respectively */ @@ -5036,10 +5350,13 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) } dc->clk_mgr->dc_mode_softmax_enabled = enable; } -bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, +bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, struct dc_cursor_attributes *cursor_attr) { - if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr)) + if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr)) return true; return false; } @@ -5073,18 +5390,28 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) */ bool dc_is_dmub_outbox_supported(struct dc *dc) { - /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ - if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && - dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && - !dc->debug.dpia_debug.bits.disable_dpia) - return true; + switch (dc->ctx->asic_id.chip_family) { - if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 && - !dc->debug.dpia_debug.bits.disable_dpia) - return true; + case FAMILY_YELLOW_CARP: + /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ + if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && + !dc->debug.dpia_debug.bits.disable_dpia) + return true; + break; + + case AMDGPU_FAMILY_GC_11_0_1: + case AMDGPU_FAMILY_GC_11_5_0: + if (!dc->debug.dpia_debug.bits.disable_dpia) + return true; + break; + + default: + break; + } /* dmub aux needs dmub notifications to be enabled */ return dc->debug.enable_dmub_aux_for_legacy_ddc; + } /** @@ -5181,7 +5508,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, ); } - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -5235,7 +5562,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc, cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; - if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { + if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { /* command is not processed by dmub */ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; return is_cmd_complete; @@ -5278,7 +5605,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, 
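
The dm_execute_dmub_cmd() call sites in these hunks move to dc_wake_and_execute_dmub_cmd(), which, consistent with the IPS work elsewhere in this series, brings the DMCUB firmware out of idle before queuing the command. A plausible shape for such a wrapper; the actual implementation lives in dc_dmub_srv.c and may differ:

        /* Assumed, plausible wrapper body; not the patch's code. */
        static bool wake_and_execute_sketch(const struct dc_context *ctx,
                                            union dmub_rb_cmd *cmd,
                                            enum dm_dmub_wait_type wait_type)
        {
                dc_exit_ips_for_hw_access(ctx->dc);     /* leave IPS first */
                return dm_execute_dmub_cmd(ctx, cmd, wait_type);
        }
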
cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; - if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) /* command is not processed by dmub */ return DC_ERROR_UNEXPECTED; @@ -5316,7 +5643,7 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; cmd.dpia_hpd_int_enable.enable = hpd_int_enable; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); } @@ -5415,6 +5742,8 @@ bool dc_abm_save_restore( struct dc_link *link = stream->sink->link; struct dc_link *edp_links[MAX_NUM_EDP]; + if (link->replay_settings.replay_feature_enabled) + return false; /*find primary pipe associated with stream*/ for (i = 0; i < MAX_PIPES; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index fe07160932d6..5c1d3017aefd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -31,6 +31,7 @@ #include "basics/dc_common.h" #include "resource.h" #include "dc_dmub_srv.h" +#include "dc_state_priv.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) @@ -391,10 +392,10 @@ void get_hdr_visual_confirm_color( switch (top_pipe_ctx->plane_res.scl_data.format) { case PIXEL_FORMAT_ARGB2101010: - if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { + if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) { /* HDR10, ARGB2101010 - set border color to red */ color->color_r_cr = color_value; - } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { /* FreeSync 2 ARGB2101010 - set border color to pink */ color->color_r_cr = color_value; color->color_b_cb = color_value; @@ -402,10 +403,10 @@ void get_hdr_visual_confirm_color( is_sdr = true; break; case PIXEL_FORMAT_FP16: - if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { + if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) { /* HDR10, FP16 - set border color to blue */ color->color_b_cb = color_value; - } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { /* FreeSync 2 HDR - set border color to green */ color->color_g_y = color_value; } else @@ -425,45 +426,130 @@ void get_hdr_visual_confirm_color( } void get_subvp_visual_confirm_color( - struct dc *dc, - struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color) { uint32_t color_value = MAX_TG_COLOR_VALUE; - bool enable_subvp = false; - int i; - - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context) - return; + if (pipe_ctx) { + switch (pipe_ctx->p_state_type) { + case P_STATE_SUB_VP: + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + break; + case P_STATE_DRR_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = 0; + break; + case P_STATE_V_BLANK_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = 0; + color->color_b_cb 
= color_value; + break; + default: + break; + } + } +} - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +void get_mclk_switch_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; - if (pipe->stream && pipe->stream->mall_stream_config.paired_stream && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - /* SubVP enable - red */ - color->color_g_y = 0; + if (pipe_ctx) { + switch (pipe_ctx->p_state_type) { + case P_STATE_V_BLANK: + color->color_r_cr = color_value; + color->color_g_y = color_value; color->color_b_cb = 0; + break; + case P_STATE_FPO: + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = color_value; + break; + case P_STATE_V_ACTIVE: color->color_r_cr = color_value; - enable_subvp = true; - - if (pipe_ctx->stream == pipe->stream) - return; + color->color_g_y = 0; + color->color_b_cb = color_value; + break; + case P_STATE_SUB_VP: + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + break; + case P_STATE_DRR_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = 0; + break; + case P_STATE_V_BLANK_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = 0; + color->color_b_cb = color_value; + break; + default: break; } } +} - if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) { - color->color_r_cr = 0; - if (pipe_ctx->stream->allow_freesync == 1) { - /* SubVP enable and DRR on - green */ - color->color_b_cb = 0; - color->color_g_y = color_value; +void set_p_state_switch_method( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx) +{ + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + bool enable_subvp; + + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context) + return; + + if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != + dm_dram_clock_change_unsupported) { + /* MCLK switching is supported */ + if (!pipe_ctx->has_vactive_margin) { + /* In Vblank - yellow */ + pipe_ctx->p_state_type = P_STATE_V_BLANK; + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { + /* FPO + Vblank - cyan */ + pipe_ctx->p_state_type = P_STATE_FPO; + } } else { - /* SubVP enable and No DRR - blue */ - color->color_g_y = 0; - color->color_b_cb = color_value; + /* In Vactive - pink */ + pipe_ctx->p_state_type = P_STATE_V_ACTIVE; + } + + /* SubVP */ + enable_subvp = false; + + for (int i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && dc_state_get_paired_subvp_stream(context, pipe->stream) && + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + /* SubVP enable - red */ + pipe_ctx->p_state_type = P_STATE_SUB_VP; + enable_subvp = true; + + if (pipe_ctx->stream == pipe->stream) + return; + break; + } + } + + if (enable_subvp && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_NONE) { + if (pipe_ctx->stream->allow_freesync == 1) { + /* SubVP enable and DRR on - green */ + pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP; + } else { + /* SubVP enable and No DRR - blue */ + pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP; + } } } } @@ -472,8 +558,10 @@ void hwss_build_fast_sequence(struct dc *dc, struct dc_dmub_cmd *dc_dmub_cmd, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], - int *num_steps, - struct pipe_ctx *pipe_ctx) + unsigned int *num_steps, + struct pipe_ctx *pipe_ctx, + struct 
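
With p_state_type now stored on the pipe by set_p_state_switch_method(), the visual-confirm helpers reduce to a lookup from state to debug color. A hypothetical table-driven equivalent of the switch statements above (struct tg_color field order assumed to be r_cr, g_y, b_cb; not the patch's code):

        static const struct tg_color p_state_color_sketch[] = {
                [P_STATE_V_BLANK]        = { MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0 }, /* yellow */
                [P_STATE_FPO]            = { 0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE }, /* cyan */
                [P_STATE_V_ACTIVE]       = { MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE }, /* pink */
                [P_STATE_SUB_VP]         = { MAX_TG_COLOR_VALUE, 0, 0 },                  /* red */
                [P_STATE_DRR_SUB_VP]     = { 0, MAX_TG_COLOR_VALUE, 0 },                  /* green */
                [P_STATE_V_BLANK_SUB_VP] = { 0, 0, MAX_TG_COLOR_VALUE },                  /* blue */
        };
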
dc_stream_status *stream_status, + struct dc_state *context) { struct dc_plane_state *plane = pipe_ctx->plane_state; struct dc_stream_state *stream = pipe_ctx->stream; @@ -490,7 +578,8 @@ void hwss_build_fast_sequence(struct dc *dc, if (dc->hwss.subvp_pipe_control_lock_fast) { block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true; - block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip = + plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN; block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } @@ -529,7 +618,7 @@ void hwss_build_fast_sequence(struct dc *dc, } if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) && - current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + stream_status->mall_stream_config.type == SUBVP_MAIN) { block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv; block_sequence[*num_steps].params.subvp_save_surf_addr.addr = ¤t_mpc_pipe->plane_state->address; block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index; @@ -612,7 +701,8 @@ void hwss_build_fast_sequence(struct dc *dc, if (dc->hwss.subvp_pipe_control_lock_fast) { block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc; block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false; - block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx; + block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip = + plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN; block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } @@ -724,7 +814,7 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params) union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd; enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type; - dm_execute_dmub_cmd(ctx, cmd, wait_type); + dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type); } void hwss_program_manual_trigger(union block_sequence_params *params) @@ -812,42 +902,6 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params) dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index); } -void get_mclk_switch_visual_confirm_color( - struct dc *dc, - struct dc_state *context, - struct pipe_ctx *pipe_ctx, - struct tg_color *color) -{ - uint32_t color_value = MAX_TG_COLOR_VALUE; - struct vba_vars_st *vba = &context->bw_ctx.dml.vba; - - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context) - return; - - if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != - dm_dram_clock_change_unsupported) { - /* MCLK switching is supported */ - if (!pipe_ctx->has_vactive_margin) { - /* In Vblank - yellow */ - color->color_r_cr = color_value; - color->color_g_y = color_value; - - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - /* FPO + Vblank - cyan */ - color->color_r_cr = 0; - color->color_g_y = color_value; - color->color_b_cb = color_value; - } - } else { - /* In Vactive - pink */ - color->color_r_cr = color_value; - color->color_b_cb = color_value; - } - /* SubVP */ - 
get_subvp_visual_confirm_color(dc, context, pipe_ctx, color); - } -} - void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index ed94187c2afa..c6c35037bdb8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -467,6 +467,13 @@ bool dc_link_setup_psr(struct dc_link *link, return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context); } +bool dc_link_set_replay_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts) +{ + return link->dc->link_srv->edp_set_replay_allow_active(link, allow_active, wait, + force_static, power_opts); +} + bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state) { return link->dc->link_srv->edp_get_replay_state(link, state); @@ -497,7 +504,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) link->dc->link_srv->enable_hpd_filter(link, enable); } -bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count) +bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count) { return dc->link_srv->validate_dpia_bandwidth(streams, count); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 1d48278cba96..15819416a2f3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -42,6 +42,7 @@ #include "link_enc_cfg.h" #include "link.h" #include "clk_mgr.h" +#include "dc_state_priv.h" #include "virtual/virtual_link_hwss.h" #include "link/hwss/link_hwss_dio.h" #include "link/hwss/link_hwss_dpia.h" @@ -69,9 +70,10 @@ #include "dcn314/dcn314_resource.h" #include "dcn315/dcn315_resource.h" #include "dcn316/dcn316_resource.h" -#include "../dcn32/dcn32_resource.h" -#include "../dcn321/dcn321_resource.h" +#include "dcn32/dcn32_resource.h" +#include "dcn321/dcn321_resource.h" #include "dcn35/dcn35_resource.h" +#include "dcn351/dcn351_resource.h" #define VISUAL_CONFIRM_BASE_DEFAULT 3 #define VISUAL_CONFIRM_BASE_MIN 1 @@ -194,6 +196,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) break; case AMDGPU_FAMILY_GC_11_5_0: dc_version = DCN_VERSION_3_5; + if (ASICREV_IS_GC_11_0_4(asic_id.hw_internal_rev)) + dc_version = DCN_VERSION_3_51; break; default: dc_version = DCE_VERSION_UNKNOWN; @@ -302,6 +306,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, case DCN_VERSION_3_5: res_pool = dcn35_create_resource_pool(init_data, dc); break; + case DCN_VERSION_3_51: + res_pool = dcn351_create_resource_pool(init_data, dc); + break; #endif /* CONFIG_DRM_AMD_DC_FP */ default: break; @@ -333,7 +340,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, return res_pool; } -void dc_destroy_resource_pool(struct dc *dc) +void dc_destroy_resource_pool(struct dc *dc) { if (dc) { if (dc->res_pool) @@ -1450,6 +1457,9 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx, controller_color_space = convert_dp_to_controller_color_space( otg_master->stream->test_pattern.color_space); + if (controller_test_pattern == CONTROLLER_DP_TEST_PATTERN_VIDEOMODE) + return; + odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads); odm_slice_width = h_active 
/ odm_cnt; @@ -1478,6 +1488,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx); bool res = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Invalid input */ @@ -1489,9 +1500,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) return false; } - pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( - pipe_ctx->plane_state->format); - /* Timing borders are part of vactive that we are also supposed to skip in addition * to any stream dst offset. Since dm logic assumes dst is in addressable * space we need to add the left and top borders to dst offsets temporarily. @@ -1503,6 +1511,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) /* Calculate H and V active size */ pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width; pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height; + pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( + pipe_ctx->plane_state->format); /* depends on h_active */ calculate_recout(pipe_ctx); @@ -1764,6 +1774,53 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx( return free_pipe_idx; } +int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool) +{ + int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; + const struct pipe_ctx *new_pipe, *cur_pipe; + int i; + + for (i = 0; i < pool->pipe_count; i++) { + cur_pipe = &cur_res_ctx->pipe_ctx[i]; + new_pipe = &new_res_ctx->pipe_ctx[i]; + + if (resource_is_pipe_type(cur_pipe, OTG_MASTER) && + resource_is_pipe_type(new_pipe, FREE_PIPE)) { + free_pipe_idx = i; + break; + } + } + + return free_pipe_idx; +} + +int resource_find_free_pipe_used_as_cur_sec_dpp( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool) +{ + int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; + const struct pipe_ctx *new_pipe, *cur_pipe; + int i; + + for (i = 0; i < pool->pipe_count; i++) { + cur_pipe = &cur_res_ctx->pipe_ctx[i]; + new_pipe = &new_res_ctx->pipe_ctx[i]; + + if (resource_is_pipe_type(cur_pipe, DPP_PIPE) && + !resource_is_pipe_type(cur_pipe, OPP_HEAD) && + resource_is_pipe_type(new_pipe, FREE_PIPE)) { + free_pipe_idx = i; + break; + } + } + + return free_pipe_idx; +} + int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, @@ -1810,23 +1867,6 @@ int resource_find_any_free_pipe(struct resource_context *new_res_ctx, bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type) { -#ifdef DBG - if (pipe_ctx->stream == NULL) { - /* a free pipe with dangling states */ - ASSERT(!pipe_ctx->plane_state); - ASSERT(!pipe_ctx->prev_odm_pipe); - ASSERT(!pipe_ctx->next_odm_pipe); - ASSERT(!pipe_ctx->top_pipe); - ASSERT(!pipe_ctx->bottom_pipe); - } else if (pipe_ctx->top_pipe) { - /* a secondary DPP pipe must be signed to a plane */ - ASSERT(pipe_ctx->plane_state) - } - /* Add more checks here to prevent corrupted pipe ctx. It is very hard - * to debug this issue afterwards because we can't pinpoint the code - * location causing inconsistent pipe context states. 
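
Both new search helpers above share one shape: walk the pipe array, require the slot to be a FREE_PIPE in the new context, and filter on the role it played in the current context. A hypothetical predicate-parameterized generalization (not part of the patch):

        typedef bool (*cur_pipe_pred_t)(const struct pipe_ctx *cur_pipe);

        static int find_free_pipe_matching(
                        const struct resource_context *cur_res_ctx,
                        struct resource_context *new_res_ctx,
                        const struct resource_pool *pool,
                        cur_pipe_pred_t pred)
        {
                int i;

                for (i = 0; i < pool->pipe_count; i++)
                        if (pred(&cur_res_ctx->pipe_ctx[i]) &&
                            resource_is_pipe_type(&new_res_ctx->pipe_ctx[i],
                                                  FREE_PIPE))
                                return i;

                return FREE_PIPE_INDEX_NOT_FOUND;
        }
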
- */ -#endif switch (type) { case OTG_MASTER: return !pipe_ctx->prev_odm_pipe && @@ -2155,46 +2195,91 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, } } -void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state) +static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state, + struct pipe_ctx *otg_master, int stream_idx) { - struct pipe_ctx *otg_master; struct pipe_ctx *opp_heads[MAX_PIPES]; struct pipe_ctx *dpp_pipes[MAX_PIPES]; - int stream_idx, slice_idx, dpp_idx, plane_idx, slice_count, dpp_count; + int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count; bool is_primary; DC_LOGGER_INIT(dc->ctx->logger); + slice_count = resource_get_opp_heads_for_otg_master(otg_master, + &state->res_ctx, opp_heads); + for (slice_idx = 0; slice_idx < slice_count; slice_idx++) { + plane_idx = -1; + if (opp_heads[slice_idx]->plane_state) { + dpp_count = resource_get_dpp_pipes_for_opp_head( + opp_heads[slice_idx], + &state->res_ctx, + dpp_pipes); + for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) { + is_primary = !dpp_pipes[dpp_idx]->top_pipe || + dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state; + if (is_primary) + plane_idx++; + resource_log_pipe(dc, dpp_pipes[dpp_idx], + stream_idx, slice_idx, + plane_idx, slice_count, + is_primary); + } + } else { + resource_log_pipe(dc, opp_heads[slice_idx], + stream_idx, slice_idx, plane_idx, + slice_count, true); + } + + } +} + +static int resource_stream_to_stream_idx(struct dc_state *state, + struct dc_stream_state *stream) +{ + int i, stream_idx = -1; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) { + stream_idx = i; + break; + } + + /* never return negative array index */ + if (stream_idx == -1) { + ASSERT(0); + return 0; + } + + return stream_idx; +} + +void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state) +{ + struct pipe_ctx *otg_master; + int stream_idx, phantom_stream_idx; + DC_LOGGER_INIT(dc->ctx->logger); + DC_LOG_DC(" pipe topology update"); DC_LOG_DC(" ________________________"); for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) { + if (state->streams[stream_idx]->is_phantom) + continue; + otg_master = resource_get_otg_master_for_stream( &state->res_ctx, state->streams[stream_idx]); - slice_count = resource_get_opp_heads_for_otg_master(otg_master, - &state->res_ctx, opp_heads); - for (slice_idx = 0; slice_idx < slice_count; slice_idx++) { - plane_idx = -1; - if (opp_heads[slice_idx]->plane_state) { - dpp_count = resource_get_dpp_pipes_for_opp_head( - opp_heads[slice_idx], - &state->res_ctx, - dpp_pipes); - for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) { - is_primary = !dpp_pipes[dpp_idx]->top_pipe || - dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state; - if (is_primary) - plane_idx++; - resource_log_pipe(dc, dpp_pipes[dpp_idx], - stream_idx, slice_idx, - plane_idx, slice_count, - is_primary); - } - } else { - resource_log_pipe(dc, opp_heads[slice_idx], - stream_idx, slice_idx, plane_idx, - slice_count, true); - } + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); + } + if (state->phantom_stream_count > 0) { + DC_LOG_DC(" | (phantom pipes) |"); + for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) { + if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN) + continue; + phantom_stream_idx = resource_stream_to_stream_idx(state, + state->stream_status[stream_idx].mall_stream_config.paired_stream); + 
otg_master = resource_get_otg_master_for_stream( + &state->res_ctx, state->streams[phantom_stream_idx]); + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); } } DC_LOG_DC(" |________________________|\n"); @@ -2233,7 +2318,7 @@ static struct pipe_ctx *get_last_dpp_pipe_in_mpcc_combine( } static bool update_pipe_params_after_odm_slice_count_change( - const struct dc_stream_state *stream, + struct pipe_ctx *otg_master, struct dc_state *context, const struct resource_pool *pool) { @@ -2243,9 +2328,15 @@ static bool update_pipe_params_after_odm_slice_count_change( for (i = 0; i < pool->pipe_count && result; i++) { pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream == stream && pipe->plane_state) + if (pipe->stream == otg_master->stream && pipe->plane_state) result = resource_build_scaling_params(pipe); } + + if (pool->funcs->build_pipe_pix_clk_params) + pool->funcs->build_pipe_pix_clk_params(otg_master); + + resource_build_test_pattern_params(&context->res_ctx, otg_master); + return result; } @@ -2433,6 +2524,9 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context, struct pipe_ctx *otg_master = resource_get_otg_master_for_stream( &context->res_ctx, stream); + if (!otg_master) + return; + ASSERT(resource_get_odm_slice_count(otg_master) == 1); ASSERT(otg_master->plane_state == NULL); ASSERT(otg_master->stream_res.stream_enc); @@ -2601,13 +2695,19 @@ bool resource_append_dpp_pipes_for_plane_composition( struct pipe_ctx *otg_master_pipe, struct dc_plane_state *plane_state) { + bool success; if (otg_master_pipe->plane_state == NULL) - return add_plane_to_opp_head_pipes(otg_master_pipe, + success = add_plane_to_opp_head_pipes(otg_master_pipe, plane_state, new_ctx); else - return acquire_secondary_dpp_pipes_and_add_plane( + success = acquire_secondary_dpp_pipes_and_add_plane( otg_master_pipe, plane_state, new_ctx, cur_ctx, pool); + if (success) + /* when appending a plane mpc slice count changes from 0 to 1 */ + success = update_pipe_params_after_mpc_slice_count_change( + plane_state, new_ctx, pool); + return success; } void resource_remove_dpp_pipes_for_plane_composition( @@ -2928,7 +3028,7 @@ bool resource_update_pipes_for_stream_with_slice_count( otg_master, new_ctx, pool); if (result) result = update_pipe_params_after_odm_slice_count_change( - otg_master->stream, new_ctx, pool); + otg_master, new_ctx, pool); return result; } @@ -2942,7 +3042,7 @@ bool resource_update_pipes_for_plane_with_slice_count( int i; int dpp_pipe_count; int cur_slice_count; - struct pipe_ctx *dpp_pipes[MAX_PIPES]; + struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0}; bool result = true; dpp_pipe_count = resource_get_dpp_pipes_for_plane(plane, @@ -2967,189 +3067,6 @@ bool resource_update_pipes_for_plane_with_slice_count( return result; } -bool dc_add_plane_to_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context) -{ - struct resource_pool *pool = dc->res_pool; - struct pipe_ctx *otg_master_pipe; - struct dc_stream_status *stream_status = NULL; - bool added = false; - - stream_status = dc_stream_get_status_from_state(context, stream); - if (stream_status == NULL) { - dm_error("Existing stream not found; failed to attach surface!\n"); - goto out; - } else if (stream_status->plane_count == MAX_SURFACE_NUM) { - dm_error("Surface: can not attach plane_state %p! 
Maximum is: %d\n", - plane_state, MAX_SURFACE_NUM); - goto out; - } - - otg_master_pipe = resource_get_otg_master_for_stream( - &context->res_ctx, stream); - added = resource_append_dpp_pipes_for_plane_composition(context, - dc->current_state, pool, otg_master_pipe, plane_state); - - if (added) { - stream_status->plane_states[stream_status->plane_count] = - plane_state; - stream_status->plane_count++; - dc_plane_state_retain(plane_state); - } - -out: - return added; -} - -bool dc_remove_plane_from_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context) -{ - int i; - struct dc_stream_status *stream_status = NULL; - struct resource_pool *pool = dc->res_pool; - - if (!plane_state) - return true; - - for (i = 0; i < context->stream_count; i++) - if (context->streams[i] == stream) { - stream_status = &context->stream_status[i]; - break; - } - - if (stream_status == NULL) { - dm_error("Existing stream not found; failed to remove plane.\n"); - return false; - } - - resource_remove_dpp_pipes_for_plane_composition( - context, pool, plane_state); - - for (i = 0; i < stream_status->plane_count; i++) { - if (stream_status->plane_states[i] == plane_state) { - dc_plane_state_release(stream_status->plane_states[i]); - break; - } - } - - if (i == stream_status->plane_count) { - dm_error("Existing plane_state not found; failed to detach it!\n"); - return false; - } - - stream_status->plane_count--; - - /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */ - for (; i < stream_status->plane_count; i++) - stream_status->plane_states[i] = stream_status->plane_states[i + 1]; - - stream_status->plane_states[stream_status->plane_count] = NULL; - - if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) - /* ODM combine could prevent us from supporting more planes - * we will reset ODM slice count back to 1 when all planes have - * been removed to maximize the amount of planes supported when - * new planes are added. - */ - resource_update_pipes_for_stream_with_slice_count( - context, dc->current_state, dc->res_pool, stream, 1); - - return true; -} - -/** - * dc_rem_all_planes_for_stream - Remove planes attached to the target stream. - * - * @dc: Current dc state. - * @stream: Target stream, which we want to remove the attached plans. - * @context: New context. - * - * Return: - * Return true if DC was able to remove all planes from the target - * stream, otherwise, return false. 
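
dc_rem_all_planes_for_stream(), superseded by dc_state_rem_all_planes_for_stream() later in this diff, snapshots the plane pointers before releasing any of them, because each successful removal compacts stream_status->plane_states in place and would otherwise invalidate the iteration. The removed body makes the idiom explicit (fragment):

        /* Iterate over a stable copy; removal shifts the live array. */
        struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
        int i, old_plane_count = stream_status->plane_count;

        for (i = 0; i < old_plane_count; i++)
                del_planes[i] = stream_status->plane_states[i];

        for (i = 0; i < old_plane_count; i++)
                if (!dc_remove_plane_from_context(dc, stream, del_planes[i],
                                                  context))
                        return false;
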
- */ -bool dc_rem_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_state *context) -{ - int i, old_plane_count; - struct dc_stream_status *stream_status = NULL; - struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; - - for (i = 0; i < context->stream_count; i++) - if (context->streams[i] == stream) { - stream_status = &context->stream_status[i]; - break; - } - - if (stream_status == NULL) { - dm_error("Existing stream %p not found!\n", stream); - return false; - } - - old_plane_count = stream_status->plane_count; - - for (i = 0; i < old_plane_count; i++) - del_planes[i] = stream_status->plane_states[i]; - - for (i = 0; i < old_plane_count; i++) - if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context)) - return false; - - return true; -} - -static bool add_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - const struct dc_validation_set set[], - int set_count, - struct dc_state *context) -{ - int i, j; - - for (i = 0; i < set_count; i++) - if (set[i].stream == stream) - break; - - if (i == set_count) { - dm_error("Stream %p not found in set!\n", stream); - return false; - } - - for (j = 0; j < set[i].plane_count; j++) - if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context)) - return false; - - return true; -} - -bool dc_add_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state * const *plane_states, - int plane_count, - struct dc_state *context) -{ - struct dc_validation_set set; - int i; - - set.stream = stream; - set.plane_count = plane_count; - - for (i = 0; i < plane_count; i++) - set.plane_states[i] = plane_states[i]; - - return add_all_planes_for_stream(dc, stream, &set, 1, context); -} - bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream) { @@ -3277,6 +3194,9 @@ static struct audio *find_first_free_audio( { int i, available_audio_count; + if (id == ENGINE_ID_UNKNOWN) + return NULL; + available_audio_count = pool->audio_count; for (i = 0; i < available_audio_count; i++) { @@ -3301,84 +3221,6 @@ static struct audio *find_first_free_audio( return NULL; } -/* - * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state. - */ -enum dc_status dc_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream) -{ - enum dc_status res; - DC_LOGGER_INIT(dc->ctx->logger); - - if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) { - DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); - return DC_ERROR_UNEXPECTED; - } - - new_ctx->streams[new_ctx->stream_count] = stream; - dc_stream_retain(stream); - new_ctx->stream_count++; - - res = resource_add_otg_master_for_stream_output( - new_ctx, dc->res_pool, stream); - if (res != DC_OK) - DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); - - return res; -} - -/* - * dc_remove_stream_from_ctx() - Remove a stream from a dc_state. 
- */ -enum dc_status dc_remove_stream_from_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream) -{ - int i; - struct dc_context *dc_ctx = dc->ctx; - struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream( - &new_ctx->res_ctx, stream); - - if (!del_pipe) { - DC_ERROR("Pipe not found for stream %p !\n", stream); - return DC_ERROR_UNEXPECTED; - } - - resource_update_pipes_for_stream_with_slice_count(new_ctx, - dc->current_state, dc->res_pool, stream, 1); - resource_remove_otg_master_for_stream_output( - new_ctx, dc->res_pool, stream); - - for (i = 0; i < new_ctx->stream_count; i++) - if (new_ctx->streams[i] == stream) - break; - - if (new_ctx->streams[i] != stream) { - DC_ERROR("Context doesn't have stream %p !\n", stream); - return DC_ERROR_UNEXPECTED; - } - - dc_stream_release(new_ctx->streams[i]); - new_ctx->stream_count--; - - /* Trim back arrays */ - for (; i < new_ctx->stream_count; i++) { - new_ctx->streams[i] = new_ctx->streams[i + 1]; - new_ctx->stream_status[i] = new_ctx->stream_status[i + 1]; - } - - new_ctx->streams[new_ctx->stream_count] = NULL; - memset( - &new_ctx->stream_status[new_ctx->stream_count], - 0, - sizeof(new_ctx->stream_status[0])); - - return DC_OK; -} - static struct dc_stream_state *find_pll_sharable_stream( struct dc_stream_state *stream_needs_pll, struct dc_state *context) @@ -3586,6 +3428,7 @@ static void mark_seamless_boot_stream( * |________|_______________|___________|_____________| */ static bool acquire_otg_master_pipe_for_stream( + const struct dc_state *cur_ctx, struct dc_state *new_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) @@ -3599,7 +3442,42 @@ static bool acquire_otg_master_pipe_for_stream( int pipe_idx; struct pipe_ctx *pipe_ctx = NULL; - pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool); + /* + * Upper level code is responsible to optimize unnecessary addition and + * removal for unchanged streams. So unchanged stream will keep the same + * OTG master instance allocated. When current stream is removed and a + * new stream is added, we want to reuse the OTG instance made available + * by the removed stream first. If not found, we try to avoid of using + * any free pipes already used in current context as this could tear + * down exiting ODM/MPC/MPO configuration unnecessarily. + */ + + /* + * Try to acquire the same OTG master already in use. This is not + * optimal because resetting an enabled OTG master pipe for a new stream + * requires an extra frame of wait. However there are test automation + * and eDP assumptions that rely on reusing the same OTG master pipe + * during mode change. We have to keep this logic as is for now. + */ + pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( + &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + /* + * Try to acquire a pipe not used in current resource context to avoid + * pipe swapping. + */ + if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( + &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + /* + * If pipe swapping is unavoidable, try to acquire pipe used as + * secondary DPP pipe in current state as we prioritize to support more + * streams over supporting MPO planes. 
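
The long comment in acquire_otg_master_pipe_for_stream(), together with the code shown below, encodes a strict acquisition order: (1) reuse an OTG master freed by a removed stream, (2) take a pipe unused in the current context, (3) repurpose a current secondary DPP pipe, trading MPO support for stream count, and (4) take any free pipe. Condensed fragment (function names, including the "recource" spelling, are as in the source):

        pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
                        &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
        if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
                pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
                                &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
        if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
                pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp(
                                &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
        if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
                pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
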
+ */ + if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp( + &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool); if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) { pipe_ctx = &new_ctx->res_ctx.pipe_ctx[pipe_idx]; memset(pipe_ctx, 0, sizeof(*pipe_ctx)); @@ -3659,7 +3537,7 @@ enum dc_status resource_map_pool_resources( if (!acquired) /* acquire new resources */ - acquired = acquire_otg_master_pipe_for_stream( + acquired = acquire_otg_master_pipe_for_stream(dc->current_state, context, pool, stream); pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); @@ -3742,34 +3620,6 @@ enum dc_status resource_map_pool_resources( return DC_ERROR_UNEXPECTED; } -/** - * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state - * - * @dc: copy out of dc->current_state - * @dst_ctx: copy into this - * - * This function makes a shallow copy of the current DC state and increments - * refcounts on existing streams and planes. - */ -void dc_resource_state_copy_construct_current( - const struct dc *dc, - struct dc_state *dst_ctx) -{ - dc_resource_state_copy_construct(dc->current_state, dst_ctx); -} - - -void dc_resource_state_construct( - const struct dc *dc, - struct dc_state *dst_ctx) -{ - dst_ctx->clk_mgr = dc->clk_mgr; - - /* Initialise DIG link encoder resource tracking variables. */ - link_enc_cfg_init(dc, dst_ctx); -} - - bool dc_resource_is_dsc_encoding_supported(const struct dc *dc) { if (dc->res_pool == NULL) @@ -3813,6 +3663,31 @@ static bool planes_changed_for_existing_stream(struct dc_state *context, return false; } +static bool add_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + const struct dc_validation_set set[], + int set_count, + struct dc_state *state) +{ + int i, j; + + for (i = 0; i < set_count; i++) + if (set[i].stream == stream) + break; + + if (i == set_count) { + dm_error("Stream %p not found in set!\n", stream); + return false; + } + + for (j = 0; j < set[i].plane_count; j++) + if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state)) + return false; + + return true; +} + /** * dc_validate_with_context - Validate and update the potential new stream in the context object * @@ -3918,7 +3793,8 @@ enum dc_status dc_validate_with_context(struct dc *dc, unchanged_streams[i], set, set_count)) { - if (!dc_rem_all_planes_for_stream(dc, + + if (!dc_state_rem_all_planes_for_stream(dc, unchanged_streams[i], context)) { res = DC_FAIL_DETACH_SURFACES; @@ -3940,12 +3816,24 @@ enum dc_status dc_validate_with_context(struct dc *dc, } } - if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { - res = DC_FAIL_DETACH_SURFACES; - goto fail; + if (dc_state_get_stream_subvp_type(context, del_streams[i]) == SUBVP_PHANTOM) { + /* remove phantoms specifically */ + if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_streams[i], context, true)) { + res = DC_FAIL_DETACH_SURFACES; + goto fail; + } + + res = dc_state_remove_phantom_stream(dc, context, del_streams[i]); + dc_state_release_phantom_stream(dc, context, del_streams[i]); + } else { + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { + res = DC_FAIL_DETACH_SURFACES; + goto fail; + } + + res = dc_state_remove_stream(dc, context, del_streams[i]); } - res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); if (res != DC_OK) goto fail; } @@ -3968,7 +3856,7 @@ enum dc_status 
dc_validate_with_context(struct dc *dc, /* Add new streams and then add all planes for the new stream */ for (i = 0; i < add_streams_count; i++) { calculate_phy_pix_clks(add_streams[i]); - res = dc_add_stream_to_ctx(dc, context, add_streams[i]); + res = dc_state_add_stream(dc, context, add_streams[i]); if (res != DC_OK) goto fail; @@ -4202,7 +4090,7 @@ static void set_avi_info_frame( } if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR && - stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { hdmi_info.bits.EC0_EC2 = 0; hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; } @@ -4474,84 +4362,6 @@ static void set_vtem_info_packet( *info_packet = stream->vtem_infopacket; } -void dc_resource_state_destruct(struct dc_state *context) -{ - int i, j; - - for (i = 0; i < context->stream_count; i++) { - for (j = 0; j < context->stream_status[i].plane_count; j++) - dc_plane_state_release( - context->stream_status[i].plane_states[j]); - - context->stream_status[i].plane_count = 0; - dc_stream_release(context->streams[i]); - context->streams[i] = NULL; - } - context->stream_count = 0; - context->stream_mask = 0; - memset(&context->res_ctx, 0, sizeof(context->res_ctx)); - memset(&context->pp_display_cfg, 0, sizeof(context->pp_display_cfg)); - memset(&context->dcn_bw_vars, 0, sizeof(context->dcn_bw_vars)); - context->clk_mgr = NULL; - memset(&context->bw_ctx.bw, 0, sizeof(context->bw_ctx.bw)); - memset(context->block_sequence, 0, sizeof(context->block_sequence)); - context->block_sequence_steps = 0; - memset(context->dc_dmub_cmd, 0, sizeof(context->dc_dmub_cmd)); - context->dmub_cmd_count = 0; - memset(&context->perf_params, 0, sizeof(context->perf_params)); - memset(&context->scratch, 0, sizeof(context->scratch)); -} - -void dc_resource_state_copy_construct( - const struct dc_state *src_ctx, - struct dc_state *dst_ctx) -{ - int i, j; - struct kref refcount = dst_ctx->refcount; -#ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_context *dml2 = NULL; - - // Need to preserve allocated dml2 context - if (src_ctx->clk_mgr->ctx->dc->debug.using_dml2) - dml2 = dst_ctx->bw_ctx.dml2; -#endif - - *dst_ctx = *src_ctx; - -#ifdef CONFIG_DRM_AMD_DC_FP - // Preserve allocated dml2 context - if (src_ctx->clk_mgr->ctx->dc->debug.using_dml2) - dst_ctx->bw_ctx.dml2 = dml2; -#endif - - for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i]; - - if (cur_pipe->top_pipe) - cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; - - if (cur_pipe->bottom_pipe) - cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; - - if (cur_pipe->next_odm_pipe) - cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; - - if (cur_pipe->prev_odm_pipe) - cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; - } - - for (i = 0; i < dst_ctx->stream_count; i++) { - dc_stream_retain(dst_ctx->streams[i]); - for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++) - dc_plane_state_retain( - dst_ctx->stream_status[i].plane_states[j]); - } - - /* context refcount should not be overridden */ - dst_ctx->refcount = refcount; - -} - struct clock_source *dc_resource_find_first_free_pll( struct resource_context *res_ctx, const struct resource_pool *pool) @@ -4731,7 +4541,7 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, option = DITHER_OPTION_SPATIAL8; break; case COLOR_DEPTH_101010: - option = 
DITHER_OPTION_SPATIAL10; + option = DITHER_OPTION_TRUN10; break; default: option = DITHER_OPTION_DISABLE; @@ -4757,6 +4567,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; + if (option == DITHER_OPTION_TRUN10) + fmt_bit_depth->flags.TRUNCATE_MODE = 1; } /* special case - Formatter can only reduce by 4 bits at most. @@ -5190,6 +5002,9 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy( sec_next = sec_pipe->next_odm_pipe; sec_prev = sec_pipe->prev_odm_pipe; + if (pri_pipe == NULL) + return false; + *sec_pipe = *pri_pipe; sec_pipe->top_pipe = sec_top; @@ -5271,9 +5086,45 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return true; - else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 2160 && + else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return true; return false; } + +void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options) +{ + dml2_options->callbacks.dc = dc; + dml2_options->callbacks.build_scaling_params = &resource_build_scaling_params; + dml2_options->callbacks.build_test_pattern_params = &resource_build_test_pattern_params; + dml2_options->callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dml2_options->callbacks.get_mpc_slice_count = &resource_get_mpc_slice_count; + dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dml2_options->callbacks.get_odm_slice_count = &resource_get_odm_slice_count; + dml2_options->callbacks.get_opp_head = &resource_get_opp_head; + dml2_options->callbacks.get_otg_master_for_stream = &resource_get_otg_master_for_stream; + dml2_options->callbacks.get_opp_heads_for_otg_master = &resource_get_opp_heads_for_otg_master; + dml2_options->callbacks.get_dpp_pipes_for_plane = &resource_get_dpp_pipes_for_plane; + dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status; + dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id; + + dml2_options->svp_pstate.callbacks.dc = dc; + dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; + dml2_options->svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; + dml2_options->svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; + dml2_options->svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; + dml2_options->svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; + dml2_options->svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; + dml2_options->svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; + 
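+	/*
+	 * Note the two groups being wired here: .callbacks gives the DML2
+	 * wrapper its generic resource/dc_state queries, while
+	 * .svp_pstate.callbacks exposes only the phantom stream/plane
+	 * management DML2 needs for SubVP p-state switching.
+	 */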
dml2_options->svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; + dml2_options->svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; + dml2_options->svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; + dml2_options->svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; + dml2_options->svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; + dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes; + dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c index 5f6392ae31a6..cd6570a1e20e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -61,7 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification /* For HPD/HPD RX, convert dpia port index into link index */ if (notify->type == DMUB_NOTIFICATION_HPD || notify->type == DMUB_NOTIFICATION_HPD_IRQ || - notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || + notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) { notify->link_index = get_link_index_from_dpia_port_index(dc, notify->link_index); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c new file mode 100644 index 000000000000..76bb05f4d6bf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -0,0 +1,918 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "core_types.h" +#include "core_status.h" +#include "dc_state.h" +#include "dc_state_priv.h" +#include "dc_stream_priv.h" +#include "dc_plane_priv.h" + +#include "dm_services.h" +#include "resource.h" +#include "link_enc_cfg.h" + +#include "dml2/dml2_wrapper.h" +#include "dml2/dml2_internal_types.h" + +#define DC_LOGGER \ + dc->ctx->logger +#define DC_LOGGER_INIT(logger) + +/* Private dc_state helper functions */ +static bool dc_state_track_phantom_stream(struct dc_state *state, + struct dc_stream_state *phantom_stream) +{ + if (state->phantom_stream_count >= MAX_PHANTOM_PIPES) + return false; + + state->phantom_streams[state->phantom_stream_count++] = phantom_stream; + + return true; +} + +static bool dc_state_untrack_phantom_stream(struct dc_state *state, struct dc_stream_state *phantom_stream) +{ + bool res = false; + int i; + + /* first find phantom stream in the dc_state */ + for (i = 0; i < state->phantom_stream_count; i++) { + if (state->phantom_streams[i] == phantom_stream) { + state->phantom_streams[i] = NULL; + res = true; + break; + } + } + + /* failed to find stream in state */ + if (!res) + return res; + + /* trim back phantom streams */ + state->phantom_stream_count--; + for (; i < state->phantom_stream_count; i++) + state->phantom_streams[i] = state->phantom_streams[i + 1]; + + return res; +} + +static bool dc_state_is_phantom_stream_tracked(struct dc_state *state, struct dc_stream_state *phantom_stream) +{ + int i; + + for (i = 0; i < state->phantom_stream_count; i++) { + if (state->phantom_streams[i] == phantom_stream) + return true; + } + + return false; +} + +static bool dc_state_track_phantom_plane(struct dc_state *state, + struct dc_plane_state *phantom_plane) +{ + if (state->phantom_plane_count >= MAX_PHANTOM_PIPES) + return false; + + state->phantom_planes[state->phantom_plane_count++] = phantom_plane; + + return true; +} + +static bool dc_state_untrack_phantom_plane(struct dc_state *state, struct dc_plane_state *phantom_plane) +{ + bool res = false; + int i; + + /* first find phantom plane in the dc_state */ + for (i = 0; i < state->phantom_plane_count; i++) { + if (state->phantom_planes[i] == phantom_plane) { + state->phantom_planes[i] = NULL; + res = true; + break; + } + } + + /* failed to find plane in state */ + if (!res) + return res; + + /* trim back phantom planes */ + state->phantom_plane_count--; + for (; i < state->phantom_plane_count; i++) + state->phantom_planes[i] = state->phantom_planes[i + 1]; + + return res; +} + +static bool dc_state_is_phantom_plane_tracked(struct dc_state *state, struct dc_plane_state *phantom_plane) +{ + int i; + + for (i = 0; i < state->phantom_plane_count; i++) { + if (state->phantom_planes[i] == phantom_plane) + return true; + } + + return false; +} + +static void dc_state_copy_internal(struct dc_state *dst_state, struct dc_state *src_state) +{ + int i, j; + + memcpy(dst_state, src_state, sizeof(struct dc_state)); + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *cur_pipe = &dst_state->res_ctx.pipe_ctx[i]; + + if (cur_pipe->top_pipe) + cur_pipe->top_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; + + if (cur_pipe->bottom_pipe) + cur_pipe->bottom_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; + + if (cur_pipe->prev_odm_pipe) + cur_pipe->prev_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; + + if (cur_pipe->next_odm_pipe) + cur_pipe->next_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; + 
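+		/*
+		 * Note: after the whole-struct memcpy above, top_pipe/bottom_pipe
+		 * and the ODM neighbour pointers still reference src_state's
+		 * pipe_ctx array; the fixups above rebase them onto dst_state via
+		 * pipe_idx, which is stable across the copy.
+		 */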
} + + /* retain phantoms */ + for (i = 0; i < dst_state->phantom_stream_count; i++) + dc_stream_retain(dst_state->phantom_streams[i]); + + for (i = 0; i < dst_state->phantom_plane_count; i++) + dc_plane_state_retain(dst_state->phantom_planes[i]); + + /* retain streams and planes */ + for (i = 0; i < dst_state->stream_count; i++) { + dc_stream_retain(dst_state->streams[i]); + for (j = 0; j < dst_state->stream_status[i].plane_count; j++) + dc_plane_state_retain( + dst_state->stream_status[i].plane_states[j]); + } + +} + +static void init_state(struct dc *dc, struct dc_state *state) +{ + /* Each context must have their own instance of VBA and in order to + * initialize and obtain IP and SOC the base DML instance from DC is + * initially copied into every context + */ + memcpy(&state->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); +} + +/* Public dc_state functions */ +struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) +{ +#ifdef CONFIG_DRM_AMD_DC_FP + struct dml2_configuration_options *dml2_opt = &dc->dml2_options; +#endif + struct dc_state *state = kvzalloc(sizeof(struct dc_state), + GFP_KERNEL); + + if (!state) + return NULL; + + init_state(dc, state); + dc_state_construct(dc, state); + state->power_source = params ? params->power_source : DC_POWER_SOURCE_AC; + +#ifdef CONFIG_DRM_AMD_DC_FP + if (dc->debug.using_dml2) { + dml2_opt->use_clock_dc_limits = false; + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2); + + dml2_opt->use_clock_dc_limits = true; + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source); + } +#endif + + kref_init(&state->refcount); + + return state; +} + +void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state) +{ + struct kref refcount = dst_state->refcount; +#ifdef CONFIG_DRM_AMD_DC_FP + struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2; + struct dml2_context *dst_dml2_dc_power_source = dst_state->bw_ctx.dml2_dc_power_source; +#endif + + dc_state_copy_internal(dst_state, src_state); + +#ifdef CONFIG_DRM_AMD_DC_FP + dst_state->bw_ctx.dml2 = dst_dml2; + if (src_state->bw_ctx.dml2) + dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2); + + dst_state->bw_ctx.dml2_dc_power_source = dst_dml2_dc_power_source; + if (src_state->bw_ctx.dml2_dc_power_source) + dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source); +#endif + + /* context refcount should not be overridden */ + dst_state->refcount = refcount; +} + +struct dc_state *dc_state_create_copy(struct dc_state *src_state) +{ + struct dc_state *new_state; + + new_state = kvmalloc(sizeof(struct dc_state), + GFP_KERNEL); + if (!new_state) + return NULL; + + dc_state_copy_internal(new_state, src_state); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (src_state->bw_ctx.dml2 && + !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) { + dc_state_release(new_state); + return NULL; + } + + if (src_state->bw_ctx.dml2_dc_power_source && + !dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source)) { + dc_state_release(new_state); + return NULL; + } +#endif + + kref_init(&new_state->refcount); + + return new_state; +} + +void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state) +{ + dc_state_copy(dst_state, dc->current_state); +} + +struct dc_state *dc_state_create_current_copy(struct dc *dc) +{ + return dc_state_create_copy(dc->current_state); +} + +void dc_state_construct(struct dc *dc, struct dc_state *state) +{ + state->clk_mgr = dc->clk_mgr; + + /* Initialise 
DIG link encoder resource tracking variables. */ + if (dc->res_pool) + link_enc_cfg_init(dc, state); +} + +void dc_state_destruct(struct dc_state *state) +{ + int i, j; + + for (i = 0; i < state->stream_count; i++) { + for (j = 0; j < state->stream_status[i].plane_count; j++) + dc_plane_state_release( + state->stream_status[i].plane_states[j]); + + state->stream_status[i].plane_count = 0; + dc_stream_release(state->streams[i]); + state->streams[i] = NULL; + } + state->stream_count = 0; + + /* release tracked phantoms */ + for (i = 0; i < state->phantom_stream_count; i++) { + dc_stream_release(state->phantom_streams[i]); + state->phantom_streams[i] = NULL; + } + state->phantom_stream_count = 0; + + for (i = 0; i < state->phantom_plane_count; i++) { + dc_plane_state_release(state->phantom_planes[i]); + state->phantom_planes[i] = NULL; + } + state->phantom_plane_count = 0; + + state->stream_mask = 0; + memset(&state->res_ctx, 0, sizeof(state->res_ctx)); + memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg)); + memset(&state->dcn_bw_vars, 0, sizeof(state->dcn_bw_vars)); + state->clk_mgr = NULL; + memset(&state->bw_ctx.bw, 0, sizeof(state->bw_ctx.bw)); + memset(state->block_sequence, 0, sizeof(state->block_sequence)); + state->block_sequence_steps = 0; + memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd)); + state->dmub_cmd_count = 0; + memset(&state->perf_params, 0, sizeof(state->perf_params)); +} + +void dc_state_retain(struct dc_state *state) +{ + kref_get(&state->refcount); +} + +static void dc_state_free(struct kref *kref) +{ + struct dc_state *state = container_of(kref, struct dc_state, refcount); + + dc_state_destruct(state); + +#ifdef CONFIG_DRM_AMD_DC_FP + dml2_destroy(state->bw_ctx.dml2); + state->bw_ctx.dml2 = 0; + + dml2_destroy(state->bw_ctx.dml2_dc_power_source); + state->bw_ctx.dml2_dc_power_source = 0; +#endif + + kvfree(state); +} + +void dc_state_release(struct dc_state *state) +{ + if (state != NULL) + kref_put(&state->refcount, dc_state_free); +} +/* + * dc_state_add_stream() - Add a new dc_stream_state to a dc_state. + */ +enum dc_status dc_state_add_stream( + const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream) +{ + enum dc_status res; + + DC_LOGGER_INIT(dc->ctx->logger); + + if (state->stream_count >= dc->res_pool->timing_generator_count) { + DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); + return DC_ERROR_UNEXPECTED; + } + + state->streams[state->stream_count] = stream; + dc_stream_retain(stream); + state->stream_count++; + + res = resource_add_otg_master_for_stream_output( + state, dc->res_pool, stream); + if (res != DC_OK) + DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); + + return res; +} + +/* + * dc_state_remove_stream() - Remove a stream from a dc_state. 
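+ *
+ * Usage sketch (illustrative only, error handling trimmed): the state
+ * holds its own reference on every stream it tracks, so the caller's
+ * reference is dropped separately:
+ *
+ *	state = dc_state_create_current_copy(dc);
+ *	if (state && dc_state_add_stream(dc, state, stream) == DC_OK) {
+ *		... validate / commit ...
+ *		dc_state_remove_stream(dc, state, stream);
+ *	}
+ *	dc_state_release(state);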
+ */ +enum dc_status dc_state_remove_stream( + const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream) +{ + int i; + struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream( + &state->res_ctx, stream); + + if (!del_pipe) { + dm_error("Pipe not found for stream %p !\n", stream); + return DC_ERROR_UNEXPECTED; + } + + resource_update_pipes_for_stream_with_slice_count(state, + dc->current_state, dc->res_pool, stream, 1); + resource_remove_otg_master_for_stream_output( + state, dc->res_pool, stream); + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) + break; + + if (state->streams[i] != stream) { + dm_error("Context doesn't have stream %p !\n", stream); + return DC_ERROR_UNEXPECTED; + } + + dc_stream_release(state->streams[i]); + state->stream_count--; + + /* Trim back arrays */ + for (; i < state->stream_count; i++) { + state->streams[i] = state->streams[i + 1]; + state->stream_status[i] = state->stream_status[i + 1]; + } + + state->streams[state->stream_count] = NULL; + memset( + &state->stream_status[state->stream_count], + 0, + sizeof(state->stream_status[0])); + + return DC_OK; +} + +bool dc_state_add_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state) +{ + struct resource_pool *pool = dc->res_pool; + struct pipe_ctx *otg_master_pipe; + struct dc_stream_status *stream_status = NULL; + bool added = false; + + stream_status = dc_state_get_stream_status(state, stream); + if (stream_status == NULL) { + dm_error("Existing stream not found; failed to attach surface!\n"); + goto out; + } else if (stream_status->plane_count == MAX_SURFACE_NUM) { + dm_error("Surface: cannot attach plane_state %p! Maximum is: %d\n", + plane_state, MAX_SURFACE_NUM); + goto out; + } + + if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) + /* ODM combine could prevent us from supporting more planes; + * we will reset the ODM slice count back to 1 when all planes + * have been removed, to maximize the number of planes supported + * when new planes are added. + */ + resource_update_pipes_for_stream_with_slice_count( + state, dc->current_state, dc->res_pool, stream, 1); + + otg_master_pipe = resource_get_otg_master_for_stream( + &state->res_ctx, stream); + if (otg_master_pipe) + added = resource_append_dpp_pipes_for_plane_composition(state, + dc->current_state, pool, otg_master_pipe, plane_state); + + if (added) { + stream_status->plane_states[stream_status->plane_count] = + plane_state; + stream_status->plane_count++; + dc_plane_state_retain(plane_state); + } + +out: + return added; +} + +bool dc_state_remove_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state) +{ + int i; + struct dc_stream_status *stream_status = NULL; + struct resource_pool *pool = dc->res_pool; + + if (!plane_state) + return true; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) { + stream_status = &state->stream_status[i]; + break; + } + + if (stream_status == NULL) { + dm_error("Existing stream not found; failed to remove plane.\n"); + return false; + } + + resource_remove_dpp_pipes_for_plane_composition( + state, pool, plane_state); + + for (i = 0; i < stream_status->plane_count; i++) { + if (stream_status->plane_states[i] == plane_state) { + dc_plane_state_release(stream_status->plane_states[i]); + break; + } + } + + if (i == stream_status->plane_count) { + dm_error("Existing plane_state not found; failed to detach it!\n"); + return false; + } + + stream_status->plane_count--; + + /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */ + for (; i < stream_status->plane_count; i++) + stream_status->plane_states[i] = stream_status->plane_states[i + 1]; + + stream_status->plane_states[stream_status->plane_count] = NULL; + + if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) + /* ODM combine could prevent us from supporting more planes; + * we will reset the ODM slice count back to 1 when all planes + * have been removed, to maximize the number of planes supported + * when new planes are added. + */ + resource_update_pipes_for_stream_with_slice_count( + state, dc->current_state, dc->res_pool, stream, 1); + + return true; +} + +/** + * dc_state_rem_all_planes_for_stream - Remove planes attached to the target stream. + * + * @dc: Current DC instance. + * @stream: Target stream from which the attached planes are to be removed. + * @state: Context from which the planes are to be removed. + * + * Return: + * Return true if DC was able to remove all planes from the target + * stream; otherwise, return false. 
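+ *
+ * Note: plane pointers are snapshotted into a local del_planes[] array
+ * before removal, because dc_state_remove_plane() compacts
+ * stream_status->plane_states in place and iterating the live array
+ * while removing would skip entries.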
+ */ +bool dc_state_rem_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_state *state) +{ + int i, old_plane_count; + struct dc_stream_status *stream_status = NULL; + struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) { + stream_status = &state->stream_status[i]; + break; + } + + if (stream_status == NULL) { + dm_error("Existing stream %p not found!\n", stream); + return false; + } + + old_plane_count = stream_status->plane_count; + + for (i = 0; i < old_plane_count; i++) + del_planes[i] = stream_status->plane_states[i]; + + for (i = 0; i < old_plane_count; i++) + if (!dc_state_remove_plane(dc, stream, del_planes[i], state)) + return false; + + return true; +} + +bool dc_state_add_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state * const *plane_states, + int plane_count, + struct dc_state *state) +{ + int i; + bool result = true; + + for (i = 0; i < plane_count; i++) + if (!dc_state_add_plane(dc, stream, plane_states[i], state)) { + result = false; + break; + } + + return result; +} + +/* Private dc_state functions */ + +/** + * dc_state_get_stream_status - Get stream status from given dc state + * @state: DC state to find the stream status in + * @stream: The stream to get the stream status for + * + * The given stream is expected to exist in the given dc state. Otherwise, NULL + * will be returned. + */ +struct dc_stream_status *dc_state_get_stream_status( + struct dc_state *state, + const struct dc_stream_state *stream) +{ + uint8_t i; + + if (state == NULL) + return NULL; + + for (i = 0; i < state->stream_count; i++) { + if (stream == state->streams[i]) + return &state->stream_status[i]; + } + + return NULL; +} + +enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state, + const struct pipe_ctx *pipe_ctx) +{ + return dc_state_get_stream_subvp_type(state, pipe_ctx->stream); +} + +enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state, + const struct dc_stream_state *stream) +{ + int i; + + enum mall_stream_type type = SUBVP_NONE; + + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i] == stream) { + type = state->stream_status[i].mall_stream_config.type; + break; + } + } + + return type; +} + +struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state, + const struct dc_stream_state *stream) +{ + int i; + + struct dc_stream_state *paired_stream = NULL; + + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i] == stream) { + paired_stream = state->stream_status[i].mall_stream_config.paired_stream; + break; + } + } + + return paired_stream; +} + +struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *main_stream) +{ + struct dc_stream_state *phantom_stream; + + DC_LOGGER_INIT(dc->ctx->logger); + + phantom_stream = dc_create_stream_for_sink(main_stream->sink); + + if (!phantom_stream) { + DC_LOG_ERROR("Failed to allocate phantom stream.\n"); + return NULL; + } + + /* track phantom stream in dc_state */ + dc_state_track_phantom_stream(state, phantom_stream); + + phantom_stream->is_phantom = true; + phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; + phantom_stream->dpms_off = true; + + return phantom_stream; +} + +void dc_state_release_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state 
*phantom_stream) +{ + DC_LOGGER_INIT(dc->ctx->logger); + + if (!dc_state_untrack_phantom_stream(state, phantom_stream)) { + DC_LOG_ERROR("Failed to free phantom stream %p in dc state %p.\n", phantom_stream, state); + return; + } + + dc_stream_release(phantom_stream); +} + +struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc, + struct dc_state *state, + struct dc_plane_state *main_plane) +{ + struct dc_plane_state *phantom_plane = dc_create_plane_state(dc); + + DC_LOGGER_INIT(dc->ctx->logger); + + if (!phantom_plane) { + DC_LOG_ERROR("Failed to allocate phantom plane.\n"); + return NULL; + } + + /* track phantom inside dc_state */ + dc_state_track_phantom_plane(state, phantom_plane); + + phantom_plane->is_phantom = true; + + return phantom_plane; +} + +void dc_state_release_phantom_plane(const struct dc *dc, + struct dc_state *state, + struct dc_plane_state *phantom_plane) +{ + DC_LOGGER_INIT(dc->ctx->logger); + + if (!dc_state_untrack_phantom_plane(state, phantom_plane)) { + DC_LOG_ERROR("Failed to free phantom plane %p in dc state %p.\n", phantom_plane, state); + return; + } + + dc_plane_state_release(phantom_plane); +} + +/* add phantom streams to context and generate correct meta inside dc_state */ +enum dc_status dc_state_add_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream, + struct dc_stream_state *main_stream) +{ + struct dc_stream_status *main_stream_status; + struct dc_stream_status *phantom_stream_status; + enum dc_status res = dc_state_add_stream(dc, state, phantom_stream); + + /* check if stream is tracked */ + if (res == DC_OK && !dc_state_is_phantom_stream_tracked(state, phantom_stream)) { + /* stream must be tracked if added to state */ + dc_state_track_phantom_stream(state, phantom_stream); + } + + /* setup subvp meta */ + main_stream_status = dc_state_get_stream_status(state, main_stream); + phantom_stream_status = dc_state_get_stream_status(state, phantom_stream); + phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM; + phantom_stream_status->mall_stream_config.paired_stream = main_stream; + main_stream_status->mall_stream_config.type = SUBVP_MAIN; + main_stream_status->mall_stream_config.paired_stream = phantom_stream; + + return res; +} + +enum dc_status dc_state_remove_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream) +{ + struct dc_stream_status *main_stream_status; + struct dc_stream_status *phantom_stream_status; + + /* reset subvp meta */ + phantom_stream_status = dc_state_get_stream_status(state, phantom_stream); + main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream); + phantom_stream_status->mall_stream_config.type = SUBVP_NONE; + phantom_stream_status->mall_stream_config.paired_stream = NULL; + if (main_stream_status) { + main_stream_status->mall_stream_config.type = SUBVP_NONE; + main_stream_status->mall_stream_config.paired_stream = NULL; + } + + /* remove stream from state */ + return dc_state_remove_stream(dc, state, phantom_stream); +} + +bool dc_state_add_phantom_plane( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state *phantom_plane, + struct dc_state *state) +{ + bool res = dc_state_add_plane(dc, phantom_stream, phantom_plane, state); + + /* check if stream is tracked */ + if (res && !dc_state_is_phantom_plane_tracked(state, phantom_plane)) { + /* stream must be tracked if added to state */ + 
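+		/*
+		 * Phantom lifecycle, end to end (sketch; error paths elided):
+		 * create + add pair the phantom with its main stream via the
+		 * SubVP meta, remove + release undo the pairing, the tracking
+		 * and the extra reference:
+		 *
+		 *	ph = dc_state_create_phantom_stream(dc, state, main_stream);
+		 *	dc_state_add_phantom_stream(dc, state, ph, main_stream);
+		 *	...
+		 *	dc_state_remove_phantom_stream(dc, state, ph);
+		 *	dc_state_release_phantom_stream(dc, state, ph);
+		 */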
dc_state_track_phantom_plane(state, phantom_plane); + } + + return res; +} + +bool dc_state_remove_phantom_plane( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state *phantom_plane, + struct dc_state *state) +{ + return dc_state_remove_plane(dc, phantom_stream, phantom_plane, state); +} + +bool dc_state_rem_all_phantom_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_state *state, + bool should_release_planes) +{ + int i, old_plane_count; + struct dc_stream_status *stream_status = NULL; + struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == phantom_stream) { + stream_status = &state->stream_status[i]; + break; + } + + if (stream_status == NULL) { + dm_error("Existing stream %p not found!\n", phantom_stream); + return false; + } + + old_plane_count = stream_status->plane_count; + + for (i = 0; i < old_plane_count; i++) + del_planes[i] = stream_status->plane_states[i]; + + for (i = 0; i < old_plane_count; i++) { + if (!dc_state_remove_plane(dc, phantom_stream, del_planes[i], state)) + return false; + if (should_release_planes) + dc_state_release_phantom_plane(dc, state, del_planes[i]); + } + + return true; +} + +bool dc_state_add_all_phantom_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state * const *phantom_planes, + int plane_count, + struct dc_state *state) +{ + return dc_state_add_all_planes_for_stream(dc, phantom_stream, phantom_planes, plane_count, state); +} + +bool dc_state_remove_phantom_streams_and_planes( + const struct dc *dc, + struct dc_state *state) +{ + int i; + bool removed_phantom = false; + struct dc_stream_state *phantom_stream = NULL; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && pipe->stream && dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { + phantom_stream = pipe->stream; + + dc_state_rem_all_phantom_planes_for_stream(dc, phantom_stream, state, false); + dc_state_remove_phantom_stream(dc, state, phantom_stream); + removed_phantom = true; + } + } + return removed_phantom; +} + +void dc_state_release_phantom_streams_and_planes( + const struct dc *dc, + struct dc_state *state) +{ + int i; + + for (i = 0; i < state->phantom_stream_count; i++) + dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]); + + for (i = 0; i < state->phantom_plane_count; i++) + dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]); +} + +struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id) +{ + struct dc_stream_state *stream = NULL; + int i; + + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i] && state->streams[i]->stream_id == id) { + stream = state->streams[i]; + break; + } + } + + return stream; +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 6ed40b6c6178..5c7e4884cac2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -31,6 +31,8 @@ #include "ipp.h" #include "timing_generator.h" #include "dc_dmub_srv.h" +#include "dc_state_priv.h" +#include "dc_stream_priv.h" #define DC_LOGGER dc->ctx->logger @@ -54,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) } } -static bool dc_stream_construct(struct 
dc_stream_state *stream, +bool dc_stream_construct(struct dc_stream_state *stream, struct dc_sink *dc_sink_data) { uint32_t i = 0; @@ -114,26 +116,23 @@ static bool dc_stream_construct(struct dc_stream_state *stream, update_stream_signal(stream, dc_sink_data); - stream->out_transfer_func = dc_create_transfer_func(); - if (stream->out_transfer_func == NULL) { - dc_sink_release(dc_sink_data); - return false; - } - stream->out_transfer_func->type = TF_TYPE_BYPASS; + stream->out_transfer_func.type = TF_TYPE_BYPASS; - stream->stream_id = stream->ctx->dc_stream_id_count; - stream->ctx->dc_stream_id_count++; + dc_stream_assign_stream_id(stream); return true; } -static void dc_stream_destruct(struct dc_stream_state *stream) +void dc_stream_destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); - if (stream->out_transfer_func != NULL) { - dc_transfer_func_release(stream->out_transfer_func); - stream->out_transfer_func = NULL; - } +} + +void dc_stream_assign_stream_id(struct dc_stream_state *stream) +{ + /* MSB is reserved to indicate phantoms */ + stream->stream_id = stream->ctx->dc_stream_id_count; + stream->ctx->dc_stream_id_count++; } void dc_stream_retain(struct dc_stream_state *stream) @@ -193,11 +192,7 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) if (new_stream->sink) dc_sink_retain(new_stream->sink); - if (new_stream->out_transfer_func) - dc_transfer_func_retain(new_stream->out_transfer_func); - - new_stream->stream_id = new_stream->ctx->dc_stream_id_count; - new_stream->ctx->dc_stream_id_count++; + dc_stream_assign_stream_id(new_stream); /* If using dynamic encoder assignment, wait till stream committed to assign encoder. */ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign) @@ -209,31 +204,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) } /** - * dc_stream_get_status_from_state - Get stream status from given dc state - * @state: DC state to find the stream status in - * @stream: The stream to get the stream status for - * - * The given stream is expected to exist in the given dc state. Otherwise, NULL - * will be returned. - */ -struct dc_stream_status *dc_stream_get_status_from_state( - struct dc_state *state, - struct dc_stream_state *stream) -{ - uint8_t i; - - if (state == NULL) - return NULL; - - for (i = 0; i < state->stream_count; i++) { - if (stream == state->streams[i]) - return &state->stream_status[i]; - } - - return NULL; -} - -/** * dc_stream_get_status() - Get current stream status of the given stream state * @stream: The stream to get the stream status for. 
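 *
 * (Note: lookup by an explicit state now lives in dc_state.c as
 * dc_state_get_stream_status(); this wrapper only resolves the stream
 * against dc->current_state, as the hunk below shows.)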
* @@ -244,7 +214,7 @@ struct dc_stream_status *dc_stream_get_status( struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; - return dc_stream_get_status_from_state(dc->current_state, stream); + return dc_state_get_stream_status(dc->current_state, stream); } static void program_cursor_attributes( @@ -337,7 +307,7 @@ bool dc_stream_set_cursor_attributes( program_cursor_attributes(dc, stream, attributes); /* re-enable idle optimizations if necessary */ - if (reset_idle_optimizations) + if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) dc_allow_idle_optimizations(dc, true); return true; @@ -412,7 +382,7 @@ bool dc_stream_set_cursor_position( program_cursor_position(dc, stream, position); /* re-enable idle optimizations if necessary */ - if (reset_idle_optimizations) + if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) dc_allow_idle_optimizations(dc, true); return true; @@ -441,7 +411,9 @@ bool dc_stream_add_writeback(struct dc *dc, return false; } - wb_info->dwb_params.out_transfer_func = stream->out_transfer_func; + dc_exit_ips_for_hw_access(dc); + + wb_info->dwb_params.out_transfer_func = &stream->out_transfer_func; dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; dwb->dwb_is_drc = false; @@ -465,16 +437,37 @@ bool dc_stream_add_writeback(struct dc *dc, if (dc->hwss.enable_writeback) { struct dc_stream_status *stream_status = dc_stream_get_status(stream); struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; - dwb->otg_inst = stream_status->primary_otg_inst; + if (stream_status) + dwb->otg_inst = stream_status->primary_otg_inst; + } + + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { + dm_error("DC: update_bandwidth failed!\n"); + return false; + } + + /* enable writeback */ + if (dc->hwss.enable_writeback) { + struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + + if (dwb->funcs->is_enabled(dwb)) { + /* writeback pipe already enabled, only need to update */ + dc->hwss.update_writeback(dc, wb_info, dc->current_state); + } else { + /* Enable writeback pipe from scratch*/ + dc->hwss.enable_writeback(dc, wb_info, dc->current_state); + } } + return true; } -bool dc_stream_remove_writeback(struct dc *dc, +bool dc_stream_fc_disable_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst) { - int i = 0, j = 0; + struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; + if (stream == NULL) { dm_error("DC: dc_stream is NULL!\n"); return false; @@ -490,27 +483,67 @@ bool dc_stream_remove_writeback(struct dc *dc, return false; } -// stream->writeback_info[dwb_pipe_inst].wb_enabled = false; - for (i = 0; i < stream->num_wb_info; i++) { - /*dynamic update*/ - if (stream->writeback_info[i].wb_enabled && - stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) { - stream->writeback_info[i].wb_enabled = false; - } + dc_exit_ips_for_hw_access(dc); + + if (dwb->funcs->set_fc_enable) + dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE); + + return true; +} + +bool dc_stream_remove_writeback(struct dc *dc, + struct dc_stream_state *stream, + uint32_t dwb_pipe_inst) +{ + unsigned int i, j; + if (stream == NULL) { + dm_error("DC: dc_stream is NULL!\n"); + return false; + } + + if (dwb_pipe_inst >= MAX_DWB_PIPES) { + dm_error("DC: writeback pipe is invalid!\n"); + return false; + } + + if (stream->num_wb_info > MAX_DWB_PIPES) { + dm_error("DC: num_wb_info is invalid!\n"); + return false; } /* remove writeback info for disabled writeback pipes from stream */ for (i = 0, j = 0; i < stream->num_wb_info; 
i++) { if (stream->writeback_info[i].wb_enabled) { - if (j < i) - /* trim the array */ + + if (stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) + stream->writeback_info[i].wb_enabled = false; + + /* trim the array */ + if (j < i) { memcpy(&stream->writeback_info[j], &stream->writeback_info[i], sizeof(struct dc_writeback_info)); - j++; + j++; + } } } stream->num_wb_info = j; + /* recalculate and apply DML parameters */ + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { + dm_error("DC: update_bandwidth failed!\n"); + return false; + } + + dc_exit_ips_for_hw_access(dc); + + /* disable writeback */ + if (dc->hwss.disable_writeback) { + struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; + + if (dwb->funcs->is_enabled(dwb)) + dc->hwss.disable_writeback(dc, dwb_pipe_inst); + } + return true; } @@ -518,6 +551,8 @@ bool dc_stream_warmup_writeback(struct dc *dc, int num_dwb, struct dc_writeback_info *wb_info) { + dc_exit_ips_for_hw_access(dc); + if (dc->hwss.mmhubbub_warmup) return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); else @@ -530,10 +565,12 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) struct resource_context *res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; - if (res_ctx->pipe_ctx[i].stream != stream) + if (res_ctx->pipe_ctx[i].stream != stream || !tg) continue; return tg->funcs->get_frame_count(tg); @@ -558,6 +595,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream, dc = stream->ctx->dc; res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; @@ -589,10 +628,12 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, struct resource_context *res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; - if (res_ctx->pipe_ctx[i].stream != stream) + if (res_ctx->pipe_ctx[i].stream != stream || !tg) continue; tg->funcs->get_scanoutpos(tg, @@ -625,6 +666,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) if (i == MAX_PIPES) return true; + dc_exit_ips_for_hw_access(dc); + return dc->hwss.dmdata_status_done(pipe); } @@ -659,6 +702,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, pipe_ctx->stream->dmdata_address = attr->address; + dc_exit_ips_for_hw_access(dc); + dc->hwss.program_dmdata_engine(pipe_ctx); if (hubp->funcs->dmdata_set_attributes != NULL && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index a80e45300783..067f6555cfdf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -32,63 +32,52 @@ #include "transform.h" #include "dpp.h" +#include "dc_plane_priv.h" + /******************************************************************************* * Private functions ******************************************************************************/ -static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) +void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) { plane_state->ctx = ctx; - plane_state->gamma_correction = dc_create_gamma(); - if (plane_state->gamma_correction != NULL) - plane_state->gamma_correction->is_identity = true; + 
plane_state->gamma_correction.is_identity = true; - plane_state->in_transfer_func = dc_create_transfer_func(); - if (plane_state->in_transfer_func != NULL) { - plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - } - plane_state->in_shaper_func = dc_create_transfer_func(); - if (plane_state->in_shaper_func != NULL) { - plane_state->in_shaper_func->type = TF_TYPE_BYPASS; - } + plane_state->in_transfer_func.type = TF_TYPE_BYPASS; - plane_state->lut3d_func = dc_create_3dlut_func(); + plane_state->in_shaper_func.type = TF_TYPE_BYPASS; - plane_state->blend_tf = dc_create_transfer_func(); - if (plane_state->blend_tf != NULL) { - plane_state->blend_tf->type = TF_TYPE_BYPASS; - } + plane_state->lut3d_func.state.raw = 0; + + plane_state->blend_tf.type = TF_TYPE_BYPASS; plane_state->pre_multiplied_alpha = true; } -static void dc_plane_destruct(struct dc_plane_state *plane_state) +void dc_plane_destruct(struct dc_plane_state *plane_state) { - if (plane_state->gamma_correction != NULL) { - dc_gamma_release(&plane_state->gamma_correction); - } - if (plane_state->in_transfer_func != NULL) { - dc_transfer_func_release( - plane_state->in_transfer_func); - plane_state->in_transfer_func = NULL; - } - if (plane_state->in_shaper_func != NULL) { - dc_transfer_func_release( - plane_state->in_shaper_func); - plane_state->in_shaper_func = NULL; - } - if (plane_state->lut3d_func != NULL) { - dc_3dlut_func_release( - plane_state->lut3d_func); - plane_state->lut3d_func = NULL; - } - if (plane_state->blend_tf != NULL) { - dc_transfer_func_release( - plane_state->blend_tf); - plane_state->blend_tf = NULL; + // no more pointers to free within dc_plane_state +} + + +/* dc_state is passed in separately since it may differ from the current dc state accessible from plane_state e.g. 
+ * if the driver is doing an update from an old context to a new one and the caller wants the pipe mask for the new + * context rather than the existing one + */ +uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state) +{ + uint8_t pipe_mask = 0; + int i; + + for (i = 0; i < plane_state->ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state == plane_state && pipe_ctx->plane_res.hubp) + pipe_mask |= 1 << pipe_ctx->plane_res.hubp->inst; } + return pipe_mask; } /******************************************************************************* @@ -101,7 +90,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, /*register_flip_interrupt(surface);*/ } -struct dc_plane_state *dc_create_plane_state(struct dc *dc) +struct dc_plane_state *dc_create_plane_state(const struct dc *dc) { struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), GFP_KERNEL); @@ -159,6 +148,8 @@ const struct dc_plane_status *dc_plane_get_status( break; } + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 6e54ca055fcb..3c33c3bcbe2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -27,6 +27,8 @@ #define DC_INTERFACE_H_ #include "dc_types.h" +#include "dc_state.h" +#include "dc_plane.h" #include "grph_object_defs.h" #include "logger_types.h" #include "hdcp_msg_types.h" @@ -42,6 +44,8 @@ #include "dml2/dml2_wrapper.h" +#include "dmub/inc/dmub_cmd.h" + struct abm_save_restore; /* forward declaration */ @@ -49,7 +53,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.256" +#define DC_VER "3.2.281" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -217,6 +221,7 @@ struct dc_dmub_caps { bool mclk_sw; bool subvp_psr; bool gecc_enable; + uint8_t fams_ver; }; struct dc_caps { @@ -304,12 +309,12 @@ struct dc_dcc_setting { unsigned int max_compressed_blk_size; unsigned int max_uncompressed_blk_size; bool independent_64b_blks; - //These bitfields to be used starting with DCN + //These bitfields to be used starting with DCN 3.0 struct { - uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case) - uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN - uint32_t dcc_256_128_128 : 1; //available starting with DCN - uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case) + uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case) + uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0 + uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0 + uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case) } dcc_controls; }; @@ -427,11 +432,15 @@ struct dc_config { bool force_bios_enable_lttpr; uint8_t force_bios_fixed_vs; int sdpif_request_limit_words_per_umc; - bool use_old_fixed_vs_sequence; bool dc_mode_clk_limit_support; bool EnableMinDispClkODM; bool enable_auto_dpm_test_logs; unsigned int disable_ips; + unsigned int disable_ips_in_vpb; + bool usb4_bw_alloc_support; + bool allow_0_dtb_clk; + bool use_assr_psp_message; + bool support_edp0_on_dp1; }; enum visual_confirm { @@ -461,6 +470,12 @@ enum 
dml_hostvm_override_opts { DML_HOSTVM_OVERRIDE_TRUE = 0x2, }; +enum dc_replay_power_opts { + replay_power_opt_invalid = 0x0, + replay_power_opt_smu_opt_static_screen = 0x1, + replay_power_opt_z10_static_screen = 0x10, +}; + enum dcc_option { DCC_ENABLE = 0, DCC_DISABLE = 1, @@ -684,6 +699,8 @@ enum pg_hw_pipe_resources { PG_MPCC, PG_OPP, PG_OPTC, + PG_DPSTREAM, + PG_HDMISTREAM, PG_HW_PIPE_RESOURCES_NUM_ELEMENT }; @@ -874,6 +891,7 @@ struct dc_debug_options { unsigned int seamless_boot_odm_combine; unsigned int force_odm_combine_4to1; //bit vector based on otg inst int minimum_z8_residency_time; + int minimum_z10_residency_time; bool disable_z9_mpc; unsigned int force_fclk_khz; bool enable_tri_buf; @@ -955,7 +973,6 @@ struct dc_debug_options { unsigned int min_prefetch_in_strobe_ns; bool disable_unbounded_requesting; bool dig_fifo_off_in_blank; - bool temp_mst_deallocation_sequence; bool override_dispclk_programming; bool otg_crc_db; bool disallow_dispclk_dppclk_ds; @@ -978,9 +995,17 @@ struct dc_debug_options { bool psp_disabled_wa; unsigned int ips2_eval_delay_us; unsigned int ips2_entry_delay_us; + bool optimize_ips_handshake; + bool disable_dmub_reallow_idle; + bool disable_timeout; + bool disable_extblankadj; + bool enable_idle_reg_checks; + unsigned int static_screen_wait_frames; + bool force_chroma_subsampling_1tap; + bool disable_422_left_edge_pixel; + unsigned int force_cositing; }; -struct gpu_info_soc_bounding_box_v1_0; /* Generic structure that can be used to query properties of DC. More fields * can be added as required. @@ -989,75 +1014,6 @@ struct dc_current_properties { unsigned int cursor_size_limit; }; -struct dc { - struct dc_debug_options debug; - struct dc_versions versions; - struct dc_caps caps; - struct dc_cap_funcs cap_funcs; - struct dc_config config; - struct dc_bounding_box_overrides bb_overrides; - struct dc_bug_wa work_arounds; - struct dc_context *ctx; - struct dc_phy_addr_space_config vm_pa_config; - - uint8_t link_count; - struct dc_link *links[MAX_PIPES * 2]; - struct link_service *link_srv; - - struct dc_state *current_state; - struct resource_pool *res_pool; - - struct clk_mgr *clk_mgr; - - /* Display Engine Clock levels */ - struct dm_pp_clock_levels sclk_lvls; - - /* Inputs into BW and WM calculations. */ - struct bw_calcs_dceip *bw_dceip; - struct bw_calcs_vbios *bw_vbios; - struct dcn_soc_bounding_box *dcn_soc; - struct dcn_ip_params *dcn_ip; - struct display_mode_lib dml; - - /* HW functions */ - struct hw_sequencer_funcs hwss; - struct dce_hwseq *hwseq; - - /* Require to optimize clocks and bandwidth for added/removed planes */ - bool optimized_required; - bool wm_optimized_required; - bool idle_optimizations_allowed; - bool enable_c20_dtm_b0; - - /* Require to maintain clocks and bandwidth for UEFI enabled HW */ - - /* FBC compressor */ - struct compressor *fbc_compressor; - - struct dc_debug_data debug_data; - struct dpcd_vendor_signature vendor_signature; - - const char *build_id; - struct vm_helper *vm_helper; - - uint32_t *dcn_reg_offsets; - uint32_t *nbio_reg_offsets; - uint32_t *clk_reg_offsets; - - /* Scratch memory */ - struct { - struct { - /* - * For matching clock_limits table in driver with table - * from PMFW. 
- */ - struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; - } update_bw_bounding_box; - } scratch; - - struct dml2_configuration_options dml2_options; -}; - enum frame_buffer_mode { FRAME_BUFFER_MODE_LOCAL_ONLY = 0, FRAME_BUFFER_MODE_ZFB_ONLY, @@ -1237,6 +1193,7 @@ union surface_update_flags { uint32_t rotation_change:1; uint32_t swizzle_change:1; uint32_t scaling_change:1; + uint32_t clip_size_change: 1; uint32_t position_change:1; uint32_t in_transfer_func_change:1; uint32_t input_csc_change:1; @@ -1261,6 +1218,8 @@ union surface_update_flags { uint32_t raw; }; +#define DC_REMOVE_PLANE_POINTERS 1 + struct dc_plane_state { struct dc_plane_address address; struct dc_plane_flip_time time; @@ -1275,8 +1234,8 @@ struct dc_plane_state { struct dc_plane_dcc_param dcc; - struct dc_gamma *gamma_correction; - struct dc_transfer_func *in_transfer_func; + struct dc_gamma gamma_correction; + struct dc_transfer_func in_transfer_func; struct dc_bias_and_scale *bias_and_scale; struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; @@ -1288,9 +1247,9 @@ struct dc_plane_state { enum dc_color_space color_space; - struct dc_3dlut *lut3d_func; - struct dc_transfer_func *in_shaper_func; - struct dc_transfer_func *blend_tf; + struct dc_3dlut lut3d_func; + struct dc_transfer_func in_shaper_func; + struct dc_transfer_func blend_tf; struct dc_transfer_func *gamcor_tf; enum surface_pixel_format format; @@ -1326,6 +1285,7 @@ struct dc_plane_state { struct tg_color visual_confirm_color; bool is_statically_allocated; + enum chroma_cositing cositing; }; struct dc_plane_info { @@ -1344,6 +1304,96 @@ struct dc_plane_info { int global_alpha_value; bool input_csc_enabled; int layer_index; + enum chroma_cositing cositing; +}; + +#include "dc_stream.h" + +struct dc_scratch_space { + /* used to temporarily backup plane states of a stream during + * dc update. The reason is that plane states are overwritten + * with surface updates in dc update. Once they are overwritten + * current state is no longer valid. We want to temporarily + * store current value in plane states so we can still recover + * a valid current state during dc update. + */ + struct dc_plane_state plane_states[MAX_SURFACE_NUM]; + + struct dc_stream_state stream_state; +}; + +struct dc { + struct dc_debug_options debug; + struct dc_versions versions; + struct dc_caps caps; + struct dc_cap_funcs cap_funcs; + struct dc_config config; + struct dc_bounding_box_overrides bb_overrides; + struct dc_bug_wa work_arounds; + struct dc_context *ctx; + struct dc_phy_addr_space_config vm_pa_config; + + uint8_t link_count; + struct dc_link *links[MAX_LINKS]; + struct link_service *link_srv; + + struct dc_state *current_state; + struct resource_pool *res_pool; + + struct clk_mgr *clk_mgr; + + /* Display Engine Clock levels */ + struct dm_pp_clock_levels sclk_lvls; + + /* Inputs into BW and WM calculations. 
*/ + struct bw_calcs_dceip *bw_dceip; + struct bw_calcs_vbios *bw_vbios; + struct dcn_soc_bounding_box *dcn_soc; + struct dcn_ip_params *dcn_ip; + struct display_mode_lib dml; + + /* HW functions */ + struct hw_sequencer_funcs hwss; + struct dce_hwseq *hwseq; + + /* Require to optimize clocks and bandwidth for added/removed planes */ + bool optimized_required; + bool wm_optimized_required; + bool idle_optimizations_allowed; + bool enable_c20_dtm_b0; + + /* Require to maintain clocks and bandwidth for UEFI enabled HW */ + + /* FBC compressor */ + struct compressor *fbc_compressor; + + struct dc_debug_data debug_data; + struct dpcd_vendor_signature vendor_signature; + + const char *build_id; + struct vm_helper *vm_helper; + + uint32_t *dcn_reg_offsets; + uint32_t *nbio_reg_offsets; + uint32_t *clk_reg_offsets; + + /* Scratch memory */ + struct { + struct { + /* + * For matching clock_limits table in driver with table + * from PMFW. + */ + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + } update_bw_bounding_box; + struct dc_scratch_space current_state; + struct dc_scratch_space new_state; + struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack + } scratch; + + struct dml2_configuration_options dml2_options; + enum dc_acpi_cm_power_state power_state; + }; struct dc_scaling_info { @@ -1388,13 +1438,6 @@ struct dc_surface_update { /* * Create a new surface with default parameters; */ -struct dc_plane_state *dc_create_plane_state(struct dc *dc); -const struct dc_plane_status *dc_plane_get_status( - const struct dc_plane_state *plane_state); - -void dc_plane_state_retain(struct dc_plane_state *plane_state); -void dc_plane_state_release(struct dc_plane_state *plane_state); - void dc_gamma_retain(struct dc_gamma *dc_gamma); void dc_gamma_release(struct dc_gamma **dc_gamma); struct dc_gamma *dc_create_gamma(void); @@ -1458,37 +1501,25 @@ enum dc_status dc_validate_global_state( struct dc_state *new_ctx, bool fast_validate); - -void dc_resource_state_construct( - const struct dc *dc, - struct dc_state *dst_ctx); - bool dc_acquire_release_mpc_3dlut( struct dc *dc, bool acquire, struct dc_stream_state *stream, struct dc_3dlut **lut, struct dc_transfer_func **shaper); -void dc_resource_state_copy_construct( - const struct dc_state *src_ctx, - struct dc_state *dst_ctx); - -void dc_resource_state_copy_construct_current( - const struct dc *dc, - struct dc_state *dst_ctx); - -void dc_resource_state_destruct(struct dc_state *context); - bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); +void get_audio_check(struct audio_info *aud_modes, + struct audio_check *aud_chk); +/* + * Set up streams and links associated to drive sinks + * The streams parameter is an absolute set of all active streams. + * + * After this call: + * Phy, Encoder, Timing Generator are programmed and enabled. + * New streams are enabled with blank stream; no memory read. 
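+ *
+ * Calling convention sketch (the params field names shown are an
+ * assumption, not spelled out in this hunk):
+ *
+ *	struct dc_commit_streams_params params = {
+ *		.streams = streams,
+ *		.stream_count = stream_count,
+ *	};
+ *	status = dc_commit_streams(dc, &params);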
+ */
+enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params);
-enum dc_status dc_commit_streams(struct dc *dc,
- struct dc_stream_state *streams[],
- uint8_t stream_count);
-
-struct dc_state *dc_create_state(struct dc *dc);
-struct dc_state *dc_copy_state(struct dc_state *src_ctx);
-void dc_retain_state(struct dc_state *context);
-void dc_release_state(struct dc_state *context);
 struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
 struct dc_stream_state *stream,
@@ -1540,7 +1571,13 @@ struct dc_link {
 bool is_dig_mapping_flexible;
 bool hpd_status; /* HPD status of link without physical HPD pin. */
 bool is_hpd_pending; /* Indicates a newly received HPD */
- bool is_automated; /* Indicates automated testing */
+
+ /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
+ * for every link training. This is incompatible with DP LL compliance automation,
+ * which expects the same link settings to be used every retry on a link loss.
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+ bool skip_fallback_on_link_loss;
 bool edp_sink_present;
@@ -1574,7 +1611,19 @@ struct dc_link {
 enum engine_id dpia_preferred_eng_id;
 bool test_pattern_enabled;
+ /* Pending/Current test pattern are only used to perform and track
+ * FIXED_VS retimer test pattern/lane adjustment override state.
+ * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
+ * to perform specific lane adjust overrides before setting certain
+ * PHY test patterns. In cases when lane adjust and set test pattern
+ * calls are not performed atomically (i.e. when performing link training),
+ * pending_test_pattern will be invalid or contain a non-PHY test pattern
+ * and current_test_pattern will contain the required context for any future
+ * set pattern/set lane adjust to transition between override state(s).
+ */
 enum dp_test_pattern current_test_pattern;
+ enum dp_test_pattern pending_test_pattern;
+
 union compliance_test_state compliance_test_state;
 void *priv;
@@ -1608,7 +1657,6 @@ struct dc_link {
 enum edp_revision edp_revision;
 union dpcd_sink_ext_caps dpcd_sink_ext_caps;
- struct backlight_settings backlight_settings;
 struct psr_settings psr_settings;
 struct replay_settings replay_settings;
@@ -2092,6 +2140,20 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
 const struct dc_stream_state *stream, struct psr_config *psr_config,
 struct psr_context *psr_context);
+/*
+ * Communicate with DMUB to allow or disallow Panel Replay on the specified link:
+ *
+ * @link: pointer to the dc_link struct instance
+ * @enable: enable (active) or disable (inactive) Replay
+ * @wait: whether the state transition needs to wait for the active set to complete.
+ * @force_static: force Replay into the disabled (inactive) state
+ * @power_opts: power optimization parameters to send to DMUB.
+ *
+ * return: true if Replay active is allowed, false otherwise.
+ */
+bool dc_link_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
+
 bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
 /* On eDP links this function call will stall until T12 has elapsed.
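The hunk above replaces the old three-argument dc_commit_streams() with a single parameter struct. A minimal caller sketch, assuming struct dc_commit_streams_params simply bundles the stream array and count that the old signature took directly (the field names below are illustrative, not confirmed by this diff):

	static enum dc_status commit_all_streams(struct dc *dc,
						 struct dc_stream_state **streams,
						 uint8_t stream_count)
	{
		struct dc_commit_streams_params params = {
			.streams = streams,
			.stream_count = stream_count,
		};

		/* Per the comment above: PHY, encoder and timing generator are
		 * programmed and enabled, and new streams come up blanked with
		 * no memory read. */
		return dc_commit_streams(dc, &params);
	}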
@@ -2187,11 +2249,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link( * * @dc: pointer to dc struct * @stream: pointer to all possible streams - * @num_streams: number of valid DPIA streams + * @count: number of valid DPIA streams * * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE */ -bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, +bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count); /* Sink Interfaces - A sink corresponds to a display output device */ @@ -2212,11 +2274,9 @@ struct dc_sink_dsc_caps { // 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology), // 'false' if they are sink's DSC caps bool is_virtual_dpcd_dsc; -#if defined(CONFIG_DRM_AMD_DC_FP) // 'true' if MST topology supports DSC passthrough for sink // 'false' if MST topology does not support DSC passthrough bool is_dsc_passthrough_supported; -#endif struct dsc_dec_dpcd_caps dsc_dec_caps; }; @@ -2314,10 +2374,17 @@ bool dc_is_dmcu_initialized(struct dc *dc); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); -bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, - struct dc_cursor_attributes *cursor_attr); +bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, + struct dc_cursor_attributes *cursor_attr); + +#define dc_allow_idle_optimizations(dc, allow) dc_allow_idle_optimizations_internal(dc, allow, __func__) +#define dc_exit_ips_for_hw_access(dc) dc_exit_ips_for_hw_access_internal(dc, __func__) -void dc_allow_idle_optimizations(struct dc *dc, bool allow); +void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, const char *caller_name); +void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name); bool dc_dmub_is_ips_idle_state(struct dc *dc); /* set min and max memory clock to lowest and highest DPM level, respectively */ @@ -2336,6 +2403,9 @@ void dc_hardware_release(struct dc *dc); void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc); bool dc_set_psr_allow_active(struct dc *dc, bool enable); + +bool dc_set_replay_allow_active(struct dc *dc, bool active); + void dc_z10_restore(const struct dc *dc); void dc_z10_save_init(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index be9aa1a71847..26940d94d8fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -140,7 +140,7 @@ struct dc_vbios_funcs { enum bp_result (*enable_lvtma_control)( struct dc_bios *bios, uint8_t uc_pwr_on, - uint8_t panel_instance, + uint8_t pwrseq_instance, uint8_t bypass_panel_control_wait); enum bp_result (*get_soc_bb_info)( diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index ba142bef626b..2293a92df3be 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -23,6 +23,7 @@ * */ +#include "dm_services.h" #include "dc.h" #include "dc_dmub_srv.h" #include "../dmub/dmub_srv.h" @@ -33,6 +34,8 @@ #include "cursor_reg_cache.h" #include "resource.h" #include "clk_mgr.h" +#include "dc_state_priv.h" 
+#include "dc_plane_priv.h" #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger @@ -73,7 +76,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) struct dc_context *dc_ctx = dc_dmub_srv->ctx; enum dmub_status status; - status = dmub_srv_wait_for_idle(dmub, 100000); + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); + if (status != DMUB_STATUS_OK) { DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); @@ -120,6 +126,97 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv, } } +bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, + unsigned int count, + union dmub_rb_cmd *cmd_list) +{ + struct dc_context *dc_ctx; + struct dmub_srv *dmub; + enum dmub_status status; + int i; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + dc_ctx = dc_dmub_srv->ctx; + dmub = dc_dmub_srv->dmub; + + for (i = 0 ; i < count; i++) { + // Queue command + status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + + if (status == DMUB_STATUS_QUEUE_FULL) { + /* Execute and wait for queue to become empty again. */ + status = dmub_srv_cmd_execute(dmub); + if (status == DMUB_STATUS_POWER_STATE_D3) + return false; + + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); + + /* Requeue the command. */ + status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); + } + + if (status != DMUB_STATUS_OK) { + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error queueing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } + return false; + } + } + + status = dmub_srv_cmd_execute(dmub); + if (status != DMUB_STATUS_OK) { + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } + return false; + } + + return true; +} + +bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, + enum dm_dmub_wait_type wait_type, + union dmub_rb_cmd *cmd_list) +{ + struct dmub_srv *dmub; + enum dmub_status status; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + dmub = dc_dmub_srv->dmub; + + // Wait for DMUB to process command + if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); + + if (status != DMUB_STATUS_OK) { + DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + if (!dmub->debug.timeout_occured) { + dmub->debug.timeout_occured = true; + dmub->debug.timeout_cmd = *cmd_list; + dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx); + } + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + return false; + } + + // Copy data back from ring buffer into command + if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) + dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); + } + + return true; +} + bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) { return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type); @@ -144,7 +241,10 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. 
*/ - dmub_srv_cmd_execute(dmub); + status = dmub_srv_cmd_execute(dmub); + if (status == DMUB_STATUS_POWER_STATE_D3) + return false; + dmub_srv_wait_for_idle(dmub, 100000); /* Requeue the command. */ @@ -152,22 +252,31 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun } if (status != DMUB_STATUS_OK) { - DC_ERROR("Error queueing DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error queueing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } } status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { - DC_ERROR("Error starting DMUB execution: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } // Wait for DMUB to process command if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { - status = dmub_srv_wait_for_idle(dmub, 100000); + if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (status != DMUB_STATUS_OK); + } else + status = dmub_srv_wait_for_idle(dmub, 100000); if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); @@ -208,17 +317,11 @@ bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv) bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, unsigned int stream_mask) { - struct dmub_srv *dmub; - const uint32_t timeout = 30; - if (!dc_dmub_srv || !dc_dmub_srv->dmub) return false; - dmub = dc_dmub_srv->dmub; - - return dmub_srv_send_gpint_command( - dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK, - stream_mask, timeout) == DMUB_STATUS_OK; + return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK, + stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT); } bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv) @@ -267,7 +370,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) @@ -281,7 +384,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); // Send the command to the DMCUB. - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream) @@ -374,7 +477,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header); // Send the command to the DMCUB. 
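Throughout this file the raw dm_execute_dmub_cmd() calls are being swapped for dc_wake_and_execute_dmub_cmd(), which exits idle optimizations before submission and re-allows them afterwards. A hedged sketch of the resulting call pattern, in kernel context (the header type/sub_type assignments are elided, as they fall outside the diff context shown; only the payload_bytes line appears in these hunks):

	static void send_dmub_cmd_sketch(struct dc *dc)
	{
		union dmub_rb_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		/* ... fill in cmd header type/sub_type and payload here ... */
		cmd.drr_update.header.payload_bytes =
			sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

		/* Wakes DMCUB if it is idle, runs the command and waits,
		 * then re-allows idle when safe. */
		dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}

The swap itself, as made just below for the FW-assisted memory clock switch command: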
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -395,7 +498,7 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data); /* If command was processed, copy feature caps to dmub srv */ - if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && + if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_feature_caps.header.ret_status == 0) { memcpy(&dc_dmub_srv->dmub->feature_caps, &cmd.query_feature_caps.query_feature_caps_data, @@ -420,7 +523,7 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst; // If command was processed, copy feature caps to dmub srv - if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && + if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.visual_confirm_color.header.ret_status == 0) { memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color, &cmd.visual_confirm_color.visual_confirm_color_data, @@ -431,10 +534,11 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi /** * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command * - * @dc: [in] current dc state + * @dc: [in] pointer to dc object * @subvp_pipe: [in] pipe_ctx for the SubVP pipe * @vblank_pipe: [in] pipe_ctx for the DRR pipe * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info + * @context: [in] DC state for access to phantom stream * * Populate the DMCUB SubVP command with DRR pipe info. All the information * required for calculating the SubVP + DRR microschedule is populated here. @@ -445,12 +549,14 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi * 3. 
Populate the drr_info with the min and max supported vtotal values */ static void populate_subvp_cmd_drr_info(struct dc *dc, + struct dc_state *context, struct pipe_ctx *subvp_pipe, struct pipe_ctx *vblank_pipe, struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data) { + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; - struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + struct dc_crtc_timing *phantom_timing = &phantom_stream->timing; struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing; uint16_t drr_frame_us = 0; uint16_t min_drr_supported_us = 0; @@ -538,7 +644,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, continue; // Find the SubVP pipe - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) break; } @@ -555,7 +661,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, if (vblank_pipe->stream->ignore_msa_timing_param && (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed)) - populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data); + populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data); } /** @@ -580,10 +686,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc, uint32_t subvp0_prefetch_us = 0; uint32_t subvp1_prefetch_us = 0; uint32_t prefetch_delta_us = 0; - struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing; - struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing; + struct dc_stream_state *phantom_stream0 = NULL; + struct dc_stream_state *phantom_stream1 = NULL; + struct dc_crtc_timing *phantom_timing0 = NULL; + struct dc_crtc_timing *phantom_timing1 = NULL; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL; + phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream); + phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream); + phantom_timing0 = &phantom_stream0->timing; + phantom_timing1 = &phantom_stream1->timing; + subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) * (uint64_t)phantom_timing0->h_total * 1000000), (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); @@ -633,8 +746,9 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc, uint32_t j; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index]; + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; - struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + struct dc_crtc_timing *phantom_timing = &phantom_stream->timing; uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den; pipe_data->mode = SUBVP; @@ -681,21 +795,22 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc, } else if (subvp_pipe->next_odm_pipe) { pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx; } else { - 
pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0; + pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF; } // Find phantom pipe index based on phantom stream for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j]; - if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) { + if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) && + phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) { pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst; if (phantom_pipe->bottom_pipe) { pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst; } else if (phantom_pipe->next_odm_pipe) { pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst; } else { - pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0; + pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF; } break; } @@ -722,6 +837,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, union dmub_rb_cmd cmd; struct pipe_ctx *subvp_pipes[2]; uint32_t wm_val_refclk = 0; + enum mall_stream_type pipe_mall_type; memset(&cmd, 0, sizeof(cmd)); // FW command for SUBVP @@ -737,7 +853,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) subvp_pipes[subvp_count++] = pipe; } @@ -745,6 +861,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; @@ -755,12 +872,11 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.paired_stream && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + pipe_mall_type == SUBVP_MAIN) { populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } else if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + pipe_mall_type == SUBVP_NONE) { // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where // we run through DML without calculating "natural" P-state support populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); @@ -782,7 +898,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? 
wm_val_refclk : 0xFFFF;
 }
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
@@ -795,12 +911,15 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
 void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
 {
 struct dmub_diagnostic_data diag_data = {0};
+ uint32_t i;
 if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
 DC_LOG_ERROR("%s: invalid parameters.", __func__);
 return;
 }
+ DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);
+
 if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
 DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
 return;
@@ -824,7 +943,8 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
 DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]);
 DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]);
 DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]);
- DC_LOG_DEBUG(" pc : %08x", diag_data.pc);
+ for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
+ DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]);
 DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr);
 DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr);
 DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr);
@@ -1019,7 +1139,7 @@ void dc_send_update_cursor_info_to_dmu(
 pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
 /* Combine 2nd cmds update_cursor_info to DMU */
- dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 }
@@ -1033,25 +1153,20 @@ bool dc_dmub_check_min_version(struct dmub_srv *srv)
 void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
 {
 struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
- struct dmub_srv *dmub;
- enum dmub_status status;
- static const uint32_t timeout_us = 30;
 if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
 DC_LOG_ERROR("%s: invalid parameters.", __func__);
 return;
 }
- dmub = dc_dmub_srv->dmub;
-
- status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 0x0010, timeout_us);
- if (status != DMUB_STATUS_OK) {
+ if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
+ 0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
 DC_LOG_ERROR("timeout updating trace buffer mask word\n");
 return;
 }
- status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 0x0000, timeout_us);
- if (status != DMUB_STATUS_OK) {
+ if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
+ 0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
 DC_LOG_ERROR("timeout updating trace buffer mask word\n");
 return;
 }
@@ -1066,17 +1181,28 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
 bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
 {
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
 enum dmub_status status;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return true;
+
 if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
 return true;
+ dc_ctx = dc_dmub_srv->ctx;
+
 if (wait) {
- status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
- if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
- return false;
+ if
(dc_dmub_srv->ctx->dc->debug.disable_timeout) { + do { + status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); + } while (status != DMUB_STATUS_OK); + } else { + status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status); + return false; + } } } else return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub); @@ -1084,13 +1210,35 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) return true; } -void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) +static int count_active_streams(const struct dc *dc) +{ + int i, count = 0; + + for (i = 0; i < dc->current_state->stream_count; ++i) { + struct dc_stream_state *stream = dc->current_state->streams[i]; + + if (stream && !stream->dpms_off) + count += 1; + } + + return count; +} + +static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) { + volatile const struct dmub_shared_state_ips_fw *ips_fw; + struct dc_dmub_srv *dc_dmub_srv; union dmub_rb_cmd cmd = {0}; if (dc->debug.dmcub_emulation) return; + if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) + return; + + dc_dmub_srv = dc->ctx->dmub_srv; + ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; + memset(&cmd, 0, sizeof(cmd)); cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE; @@ -1101,79 +1249,349 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle; if (allow_idle) { - if (dc->hwss.set_idle_state) - dc->hwss.set_idle_state(dc, true); + volatile struct dmub_shared_state_ips_driver *ips_driver = + &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; + union dmub_shared_state_ips_driver_signals new_signals; + + DC_LOG_IPS( + "%s wait idle (ips1_commit=%d ips2_commit=%d)", + __func__, + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + + dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); + + memset(&new_signals, 0, sizeof(new_signals)); + + if (dc->config.disable_ips == DMUB_IPS_ENABLE || + dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + new_signals.bits.allow_z10 = 1; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { + new_signals.bits.allow_ips1 = 1; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) { + /* TODO: Move this logic out to hwseq */ + if (count_active_streams(dc) == 0) { + /* IPS2 - Display off */ + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + new_signals.bits.allow_z10 = 1; + } else { + /* RCG only */ + new_signals.bits.allow_pg = 0; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 0; + new_signals.bits.allow_z10 = 0; + } + } + + ips_driver->signals = new_signals; } - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + DC_LOG_IPS( + "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)", + __func__, + allow_idle, + 
ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
+ /* NOTE: This does not use the "wake" interface since this is part of the wake path. */
+ /* We also do not perform a wait since DMCUB could enter idle after the notification. */
+ dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+
+ /* Register access should stop at this point. */
+ if (allow_idle)
+ dc_dmub_srv->needs_idle_wake = true;
 }

-void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
+static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
 {
- const uint32_t max_num_polls = 10000;
- uint32_t allow_state = 0;
- uint32_t commit_state = 0;
- uint32_t i;
+ struct dc_dmub_srv *dc_dmub_srv;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
 if (dc->debug.dmcub_emulation)
 return;
- if (!dc->idle_optimizations_allowed)
+ if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
 return;
- if (dc->hwss.get_idle_state &&
- dc->hwss.set_idle_state &&
- dc->clk_mgr->funcs->exit_low_power_state) {
+ dc_dmub_srv = dc->ctx->dmub_srv;
+
+ if (dc->clk_mgr->funcs->exit_low_power_state) {
+ volatile const struct dmub_shared_state_ips_fw *ips_fw =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
+ volatile struct dmub_shared_state_ips_driver *ips_driver =
+ &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
+ union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
+
+ rcg_exit_count = ips_fw->rcg_exit_count;
+ ips1_exit_count = ips_fw->ips1_exit_count;
+ ips2_exit_count = ips_fw->ips2_exit_count;
+
+ ips_driver->signals.all = 0;
+
+ DC_LOG_IPS(
+ "%s (allow ips1=%d ips2=%d) (commit ips1=%d ips2=%d) (count rcg=%d ips1=%d ips2=%d)",
+ __func__,
+ ips_driver->signals.bits.allow_ips1,
+ ips_driver->signals.bits.allow_ips2,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->rcg_entry_count,
+ ips_fw->ips1_entry_count,
+ ips_fw->ips2_entry_count);
+
+ /* Note: register access has technically not resumed for DCN here, but we
+ * need to message PMFW through our standard register interface.
+ */ + dc_dmub_srv->needs_idle_wake = false; - allow_state = dc->hwss.get_idle_state(dc); - dc->hwss.set_idle_state(dc, false); + if (prev_driver_signals.bits.allow_ips2 && + (!dc->debug.optimize_ips_handshake || + ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { + DC_LOG_IPS( + "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + + if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit) + udelay(dc->debug.ips2_eval_delay_us); + + if (ips_fw->signals.bits.ips2_commit) { + DC_LOG_IPS( + "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); - if (allow_state & DMUB_IPS2_ALLOW_MASK) { - // Wait for evaluation time - udelay(dc->debug.ips2_eval_delay_us); - commit_state = dc->hwss.get_idle_state(dc); - if (commit_state & DMUB_IPS2_COMMIT_MASK) { // Tell PMFW to exit low power state dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + DC_LOG_IPS( + "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + // Wait for IPS2 entry upper bound udelay(dc->debug.ips2_entry_delay_us); + + DC_LOG_IPS( + "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); - for (i = 0; i < max_num_polls; ++i) { - commit_state = dc->hwss.get_idle_state(dc); - if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) - break; + DC_LOG_IPS( + "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + while (ips_fw->signals.bits.ips2_commit) udelay(1); - } - ASSERT(i < max_num_polls); + + DC_LOG_IPS( + "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); - /* TODO: See if we can return early here - IPS2 should go - * back directly to IPS0 and clear the flags, but it will - * be safer to directly notify DMCUB of this. 
- */ - allow_state = dc->hwss.get_idle_state(dc); + DC_LOG_IPS( + "resync inbox1 (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + + dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub); } } dc_dmub_srv_notify_idle(dc, false); - if (allow_state & DMUB_IPS1_ALLOW_MASK) { - for (i = 0; i < max_num_polls; ++i) { - commit_state = dc->hwss.get_idle_state(dc); - if (!(commit_state & DMUB_IPS1_COMMIT_MASK)) - break; + if (prev_driver_signals.bits.allow_ips1) { + DC_LOG_IPS( + "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + while (ips_fw->signals.bits.ips1_commit) udelay(1); - } - ASSERT(i < max_num_polls); + + DC_LOG_IPS( + "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); } } if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); + + DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)", + __func__, + rcg_exit_count, + ips1_exit_count, + ips2_exit_count); +} + +void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState) +{ + struct dmub_srv *dmub; + + if (!dc_dmub_srv) + return; + + dmub = dc_dmub_srv->dmub; + + if (powerState == DC_ACPI_CM_POWER_STATE_D0) + dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0); + else + dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3); +} + +void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle) +{ + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return; + + if (dc_dmub_srv->idle_allowed == allow_idle) + return; + + DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle); + + /* + * Entering a low power state requires a driver notification. + * Powering up the hardware requires notifying PMFW and DMCUB. + * Clearing the driver idle allow requires a DMCUB command. + * DMCUB commands requires the DMCUB to be powered up and restored. + */ + + if (!allow_idle) { + dc_dmub_srv->idle_exit_counter += 1; + + dc_dmub_srv_exit_low_power_state(dc); + /* + * Idle is considered fully exited only after the sequence above + * fully completes. If we have a race of two threads exiting + * at the same time then it's safe to perform the sequence + * twice as long as we're not re-entering. + * + * Infinite command submission is avoided by using the + * dm_execute_dmub_cmd submission instead of the "wake" helpers. + */ + dc_dmub_srv->idle_allowed = false; + + dc_dmub_srv->idle_exit_counter -= 1; + if (dc_dmub_srv->idle_exit_counter < 0) { + ASSERT(0); + dc_dmub_srv->idle_exit_counter = 0; + } + } else { + /* Consider idle as notified prior to the actual submission to + * prevent multiple entries. 
*/ + dc_dmub_srv->idle_allowed = true; + + dc_dmub_srv_notify_idle(dc, allow_idle); + } +} + +bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, + enum dm_dmub_wait_type wait_type) +{ + return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type); +} + +bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, + union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; + bool result = false, reallow_idle = false; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + if (count == 0) + return true; + + if (dc_dmub_srv->idle_allowed) { + dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false); + reallow_idle = true; + } + + /* + * These may have different implementations in DM, so ensure + * that we guide it to the expected helper. + */ + if (count > 1) + result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type); + else + result = dm_execute_dmub_cmd(ctx, cmd, wait_type); + + if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && + !ctx->dc->debug.disable_dmub_reallow_idle) + dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); + + return result; +} + +static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, + uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type) +{ + struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; + const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30; + enum dmub_status status; + + if (response) + *response = 0; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us); + if (status != DMUB_STATUS_OK) { + if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT) + return true; + + return false; + } + + if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) + dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response); + + return true; +} + +bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, + uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type) +{ + struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; + bool result = false, reallow_idle = false; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return false; + + if (dc_dmub_srv->idle_allowed) { + dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false); + reallow_idle = true; + } + + result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type); + + if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && + !ctx->dc->debug.disable_dmub_reallow_idle) + dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); + + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 31150b214394..2c5866211f60 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -35,6 +35,7 @@ struct pipe_ctx; struct dc_crtc_timing_adjust; struct dc_crtc_timing; struct dc_state; +struct dc_surface_update; struct dc_reg_helper_state { bool gather_in_progress; @@ -50,12 +51,24 @@ struct dc_dmub_srv { struct dc_context *ctx; void *dm; + + int32_t idle_exit_counter; + bool idle_allowed; + bool needs_idle_wake; }; void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv); +bool dc_dmub_srv_cmd_list_queue_execute(struct 
dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list);
+
+bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
+ enum dm_dmub_wait_type wait_type,
+ union dmub_rb_cmd *cmd_list);
+
 bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
 bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type);
@@ -92,6 +105,59 @@ void dc_dmub_srv_enable_dpia_trace(const struct dc *dc);
 void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index);
 bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait);
-void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle);
-void dc_dmub_srv_exit_low_power_state(const struct dc *dc);
+
+void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle);
+
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState);
+
+/**
+ * dc_wake_and_execute_dmub_cmd() - Wrapper for DMUB command execution.
+ *
+ * Refer to dc_wake_and_execute_dmub_cmd_list() for usage and limitations.
+ * This function is a convenience wrapper for a single command execution.
+ *
+ * @ctx: DC context
+ * @cmd: The command to send/receive
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
+ enum dm_dmub_wait_type wait_type);
+
+/**
+ * dc_wake_and_execute_dmub_cmd_list() - Wrapper for DMUB command list execution.
+ *
+ * If the DMCUB hardware was asleep then it wakes the DMUB before
+ * executing the command and attempts to re-enter if the command
+ * submission was successful.
+ *
+ * This should be the preferred command submission interface provided
+ * the DC lock is acquired.
+ *
+ * Otherwise, entry/exit of idle power optimizations must be performed
+ * manually through dc_allow_idle_optimizations().
+ *
+ * @ctx: DC context
+ * @count: Number of commands to send/receive
+ * @cmd: Array of commands to send
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
+ union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+/**
+ * dc_wake_and_execute_gpint()
+ *
+ * @ctx: DC context
+ * @command_code: The command ID to send to DMCUB
+ * @param: The parameter to message DMCUB
+ * @response: Optional response out value - may be NULL.
+ * @wait_type: The wait behavior for the execution + */ +bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, + uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type); + #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 35ae245ef722..519c3df78ee5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -137,12 +137,18 @@ enum dp_link_encoding { enum dp_test_link_rate { DP_TEST_LINK_RATE_RBR = 0x06, + DP_TEST_LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane + DP_TEST_LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane DP_TEST_LINK_RATE_HBR = 0x0A, + DP_TEST_LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2) - 3.24 Gbps/Lane + DP_TEST_LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane DP_TEST_LINK_RATE_HBR2 = 0x14, + DP_TEST_LINK_RATE_RATE_8 = 0x19, // Rate_8 - 6.75 Gbps/Lane DP_TEST_LINK_RATE_HBR3 = 0x1E, DP_TEST_LINK_RATE_UHBR10 = 0x01, DP_TEST_LINK_RATE_UHBR20 = 0x02, - DP_TEST_LINK_RATE_UHBR13_5 = 0x03, + DP_TEST_LINK_RATE_UHBR13_5_LEGACY = 0x03, /* For backward compatibility*/ + DP_TEST_LINK_RATE_UHBR13_5 = 0x04, }; struct dc_link_settings { @@ -916,16 +922,6 @@ struct dpcd_usb4_dp_tunneling_info { uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; -#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT -#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3 -#endif -#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0 -#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230 -#endif -#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250 -#endif - union dp_main_line_channel_coding_cap { struct { uint8_t DP_8b_10b_SUPPORTED :1; @@ -1231,8 +1227,7 @@ union replay_enable_and_configuration { unsigned char FREESYNC_PANEL_REPLAY_MODE :1; unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1; unsigned char STATE_TRANSITION_ERROR_DETECTION :1; - unsigned char RESERVED0 :1; - unsigned char RESERVED1 :4; + unsigned char RESERVED :5; } bits; unsigned char raw; }; @@ -1376,6 +1371,12 @@ struct dp_trace { #ifndef DP_TUNNELING_STATUS #define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */ #endif +#ifndef DP_TUNNELING_MAX_LINK_RATE +#define DP_TUNNELING_MAX_LINK_RATE 0xE0028 /* 1.4a */ +#endif +#ifndef DP_TUNNELING_MAX_LANE_COUNT +#define DP_TUNNELING_MAX_LANE_COUNT 0xE0029 /* 1.4a */ +#endif #ifndef DPTX_BW_ALLOCATION_MODE_CONTROL #define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */ #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index cb6eaddab720..8f9a67825615 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write( cmd_buf->header.payload_bytes = sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; - dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); + dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); @@ -67,7 +67,7 @@ static inline void submit_dmub_burst_write( cmd_buf->header.payload_bytes = sizeof(uint32_t) * offload->reg_seq_count; - dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); + dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); @@ -80,7 +80,7 @@ static inline void submit_dmub_reg_wait( { struct 
dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; - dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); + dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); memset(cmd_buf, 0, sizeof(*cmd_buf)); offload->reg_seq_count = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 9649934ea186..2ad7f60805f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -244,7 +244,7 @@ enum pixel_format { #define DC_MAX_DIRTY_RECTS 3 struct dc_flip_addrs { struct dc_plane_address address; - unsigned int flip_timestamp_in_us; + unsigned long long flip_timestamp_in_us; bool flip_immediate; /* TODO: add flip duration for FreeSync */ bool triplebuffer_flips; @@ -465,6 +465,7 @@ struct dc_cursor_mi_param { struct fixed31_32 v_scale_ratio; enum dc_rotation_angle rotation; bool mirror; + struct dc_stream_state *stream; }; /* IPP related types */ @@ -737,6 +738,13 @@ enum scanning_type { SCANNING_TYPE_UNDEFINED }; +enum chroma_cositing { + CHROMA_COSITING_NONE, + CHROMA_COSITING_LEFT, + CHROMA_COSITING_TOPLEFT, + CHROMA_COSITING_COUNT +}; + struct dc_crtc_timing_flags { uint32_t INTERLACE :1; uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1, @@ -826,9 +834,7 @@ struct dc_dsc_config { uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */ bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */ int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ -#if defined(CONFIG_DRM_AMD_DC_FP) bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */ -#endif bool is_dp; /* indicate if DSC is applied based on DP's capability */ uint32_t mst_pbn; /* pbn of display on dsc mst hub */ const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */ @@ -941,6 +947,7 @@ struct dc_crtc_timing { uint32_t hdmi_vic; uint32_t rid; uint32_t fr_index; + uint32_t frl_uncompressed_video_bandwidth_in_kbps; enum dc_timing_3d_format timing_3d_format; enum dc_color_depth display_color_depth; enum dc_pixel_encoding pixel_encoding; @@ -974,6 +981,7 @@ struct dc_crtc_timing_adjust { uint32_t v_total_max; uint32_t v_total_mid; uint32_t v_total_mid_frame_num; + uint32_t allow_otg_v_count_halt; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h new file mode 100644 index 000000000000..44afcd989224 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -0,0 +1,38 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_PLANE_H_ +#define _DC_PLANE_H_ + +#include "dc.h" +#include "dc_hw_types.h" + +struct dc_plane_state *dc_create_plane_state(const struct dc *dc); +const struct dc_plane_status *dc_plane_get_status( + const struct dc_plane_state *plane_state); +void dc_plane_state_retain(struct dc_plane_state *plane_state); +void dc_plane_state_release(struct dc_plane_state *plane_state); + +#endif /* _DC_PLANE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h new file mode 100644 index 000000000000..ab13335f1d01 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h @@ -0,0 +1,35 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_PLANE_PRIV_H_ +#define _DC_PLANE_PRIV_H_ + +#include "dc_plane.h" + +void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state); +void dc_plane_destruct(struct dc_plane_state *plane_state); +uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state); + +#endif /* _DC_PLANE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h new file mode 100644 index 000000000000..caa45db50232 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -0,0 +1,78 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_STATE_H_ +#define _DC_STATE_H_ + +#include "dc.h" +#include "inc/core_status.h" + +struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params); +void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state); +struct dc_state *dc_state_create_copy(struct dc_state *src_state); +void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state); +struct dc_state *dc_state_create_current_copy(struct dc *dc); +void dc_state_construct(struct dc *dc, struct dc_state *state); +void dc_state_destruct(struct dc_state *state); +void dc_state_retain(struct dc_state *state); +void dc_state_release(struct dc_state *state); + +enum dc_status dc_state_add_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); + +enum dc_status dc_state_remove_stream( + const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); + +bool dc_state_add_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state); + +bool dc_state_remove_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state); + +bool dc_state_rem_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_state *state); + +bool dc_state_add_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state * const *plane_states, + int plane_count, + struct dc_state *state); + +struct dc_stream_status *dc_state_get_stream_status( + struct dc_state *state, + const struct dc_stream_state *stream); +#endif /* _DC_STATE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h new file mode 100644 index 000000000000..615086d74d32 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h @@ -0,0 +1,104 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_PRIV_H_
+#define _DC_STATE_PRIV_H_
+
+#include "dc_state.h"
+#include "dc_stream.h"
+
+struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id);
+
+/* Get the type of the provided resource (none, phantom, main) based on the provided
+ * context. If the context is unavailable, determine only if phantom or not.
+ */
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx);
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+ const struct dc_stream_state *stream);
+
+/* Gets the phantom stream if main is provided, gets the main if phantom is provided. */
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+ const struct dc_stream_state *stream);
+
+/* Allocates a phantom stream or plane and returns a pointer to the object */
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream);
+struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane);
+
+/* Deallocates a phantom stream or plane */
+void dc_state_release_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream);
+void dc_state_release_phantom_plane(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *phantom_plane);
+
+/* Add/remove a phantom stream to/from the context and generate SubVP metadata */
+enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream);
+enum dc_status dc_state_remove_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream);
+
+bool dc_state_add_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state);
+
+bool dc_state_remove_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state);
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_state *state,
+ bool should_release_planes);
+
+bool dc_state_add_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state * const *phantom_planes,
+ int plane_count,
+ struct dc_state *state);
+
+bool dc_state_remove_phantom_streams_and_planes(
+ const struct dc *dc,
+ struct dc_state *state);
+
+void dc_state_release_phantom_streams_and_planes(
+ const struct dc *dc,
+ struct dc_state *state);
+
+#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e61eea6db29c..e5dbbc6089a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -38,6 +38,14 @@ struct timing_sync_info {
 bool master;
 };
+struct mall_stream_config {
+ /* MALL stream config to indicate if the stream is phantom or
not. + * We will use a phantom stream to indicate that the pipe is phantom. + */ + enum mall_stream_type type; + struct dc_stream_state *paired_stream; // master / slave stream +}; + struct dc_stream_status { int primary_otg_inst; int stream_enc_inst; @@ -50,6 +58,7 @@ struct dc_stream_status { struct timing_sync_info timing_sync_info; struct dc_plane_state *plane_states[MAX_SURFACE_NUM]; bool is_abm_supported; + struct mall_stream_config mall_stream_config; }; enum hubp_dmdata_mode { @@ -147,31 +156,6 @@ struct test_pattern { #define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR) -enum mall_stream_type { - SUBVP_NONE, // subvp not in use - SUBVP_MAIN, // subvp in use, this stream is main stream - SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream -}; - -struct mall_stream_config { - /* MALL stream config to indicate if the stream is phantom or not. - * We will use a phantom stream to indicate that the pipe is phantom. - */ - enum mall_stream_type type; - struct dc_stream_state *paired_stream; // master / slave stream -}; - -/* Temp struct used to save and restore MALL config - * during validation. - * - * TODO: Move MALL config into dc_state instead of stream struct - * to avoid needing to save/restore. - */ -struct mall_temp_config { - struct mall_stream_config mall_stream_config[MAX_PIPES]; - bool is_phantom_plane[MAX_PIPES]; -}; - struct dc_stream_debug_options { char force_odm_combine_segments; }; @@ -206,7 +190,7 @@ struct dc_stream_state { PHYSICAL_ADDRESS_LOC dmdata_address; bool use_dynamic_meta; - struct dc_transfer_func *out_transfer_func; + struct dc_transfer_func out_transfer_func; struct colorspace_transform gamut_remap_matrix; struct dc_csc_transform csc_color_matrix; @@ -301,7 +285,7 @@ struct dc_stream_state { bool has_non_synchronizable_pclk; bool vblank_synchronized; bool fpo_in_use; - struct mall_stream_config mall_stream_config; + bool is_phantom; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 @@ -415,45 +399,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, uint32_t *h_position, uint32_t *v_position); -enum dc_status dc_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream); - -enum dc_status dc_remove_stream_from_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream); - - -bool dc_add_plane_to_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context); - -bool dc_remove_plane_from_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context); - -bool dc_rem_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_state *context); - -bool dc_add_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state * const *plane_states, - int plane_count, - struct dc_state *context); - bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info); +bool dc_stream_fc_disable_writeback(struct dc *dc, + struct dc_stream_state *stream, + uint32_t dwb_pipe_inst); + bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst); @@ -475,14 +428,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream); /* - * Set up streams and links associated to drive sinks - * The streams 
parameter is an absolute set of all active streams. - * - * After this call: - * Phy, Encoder, Timing Generator are programmed and enabled. - * New streams are enabled with blank stream; no memory read. - */ -/* * Enable stereo when commit_streams is not required, * for example, frame alternate. */ @@ -514,9 +459,6 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); void dc_stream_retain(struct dc_stream_state *dc_stream); void dc_stream_release(struct dc_stream_state *dc_stream); -struct dc_stream_status *dc_stream_get_status_from_state( - struct dc_state *state, - struct dc_stream_state *stream); struct dc_stream_status *dc_stream_get_status( struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h index 3341ef71009b..7476fd52ce2b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: MIT */ /* * Copyright 2023 Advanced Micro Devices, Inc. * @@ -24,30 +23,15 @@ * */ -#include "core_types.h" -#include "dcn35_dpp.h" -#include "reg_helper.h" +#ifndef _DC_STREAM_PRIV_H_ +#define _DC_STREAM_PRIV_H_ -#define REG(reg) dpp->tf_regs->reg +#include "dc_stream.h" -#define CTX dpp->base.ctx +bool dc_stream_construct(struct dc_stream_state *stream, + struct dc_sink *dc_sink_data); +void dc_stream_destruct(struct dc_stream_state *stream); -#undef FN -#define FN(reg_name, field_name) \ - ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \ - ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name +void dc_stream_assign_stream_id(struct dc_stream_state *stream); -bool dpp35_construct(struct dcn3_dpp *dpp, struct dc_context *ctx, - uint32_t inst, const struct dcn3_dpp_registers *tf_regs, - const struct dcn35_dpp_shift *tf_shift, - const struct dcn35_dpp_mask *tf_mask) -{ - return dpp32_construct(dpp, ctx, inst, tf_regs, - (const struct dcn3_dpp_shift *)(tf_shift), - (const struct dcn3_dpp_mask *)(tf_mask)); -} - -void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable) -{ - REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable); -} +#endif // _DC_STREAM_PRIV_H_ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 40dc51853d62..0f66d00ef80f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -177,6 +177,7 @@ struct dc_panel_patch { unsigned int disable_fams; unsigned int skip_avmute; unsigned int mst_start_top_delay; + unsigned int remove_sink_ext_caps; }; struct dc_edid_caps { @@ -421,7 +422,7 @@ struct dc_dwb_params { enum dwb_capture_rate capture_rate; /* controls the frame capture rate */ struct scaling_taps scaler_taps; /* Scaling taps */ enum dwb_subsample_position subsample_position; - struct dc_transfer_func *out_transfer_func; + const struct dc_transfer_func *out_transfer_func; }; /* audio*/ @@ -990,10 +991,6 @@ struct link_mst_stream_allocation_table { struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM]; }; -struct backlight_settings { - uint32_t backlight_millinits; -}; - /* PSR feature flags */ struct psr_settings { bool psr_feature_enabled; // PSR is supported by sink @@ -1021,6 +1018,25 @@ enum replay_coasting_vtotal_type { PR_COASTING_TYPE_NUM, }; +enum replay_link_off_frame_count_level { + PR_LINK_OFF_FRAME_COUNT_FAIL = 0x0, + PR_LINK_OFF_FRAME_COUNT_GOOD = 0x2, + PR_LINK_OFF_FRAME_COUNT_BEST = 0x6, +}; + +/* + * This is general 
interface for Replay to
+ * pass a 32-bit variable to DMUB.
+ * The Message_type indicates which variable is
+ * passed to DMUB.
+ */
+enum replay_FW_Message_type {
+	Replay_Msg_Not_Support = -1,
+	Replay_Set_Timing_Sync_Supported,
+	Replay_Set_Residency_Frameupdate_Timer,
+	Replay_Set_Pseudo_VTotal,
+};
+
 union replay_error_status {
 	struct {
 		unsigned char STATE_TRANSITION_ERROR :1;
@@ -1032,24 +1048,54 @@ union replay_error_status {
 };
 
 struct replay_config {
-	bool replay_supported; // Replay feature is supported
-	unsigned int replay_power_opt_supported; // Power opt flags that are supported
-	bool replay_smu_opt_supported; // SMU optimization is supported
-	unsigned int replay_enable_option; // Replay enablement option
-	uint32_t debug_flags; // Replay debug flags
-	bool replay_timing_sync_supported; // Replay desync is supported
-	union replay_error_status replay_error_status; // Replay error status
-};
-
-/* Replay feature flags */
+	/* Replay feature is supported */
+	bool replay_supported;
+	/* Replay capability is supported per DPCD and EDID caps */
+	bool replay_cap_support;
+	/* Power opt flags that are supported */
+	unsigned int replay_power_opt_supported;
+	/* SMU optimization is supported */
+	bool replay_smu_opt_supported;
+	/* Replay enablement option */
+	unsigned int replay_enable_option;
+	/* Replay debug flags */
+	uint32_t debug_flags;
+	/* Replay timing sync is supported */
+	bool replay_timing_sync_supported;
+	/* Force-disable the desync error check */
+	bool force_disable_desync_error_check;
+	/* A desync error HPD was received */
+	bool received_desync_error_hpd;
+	/* Fast resync in ultra sleep mode (long vblank) is supported */
+	bool replay_support_fast_resync_in_ultra_sleep_mode;
+	/* Replay error status */
+	union replay_error_status replay_error_status;
+};
+
+/* Replay feature flags */
 struct replay_settings {
-	struct replay_config config; // Replay configuration
-	bool replay_feature_enabled; // Replay feature is ready for activating
-	bool replay_allow_active; // Replay is currently active
-	unsigned int replay_power_opt_active; // Power opt flags that are activated currently
-	bool replay_smu_opt_enable; // SMU optimization is enabled
-	uint16_t coasting_vtotal; // Current Coasting vtotal
-	uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table
+	/* Replay configuration */
+	struct replay_config config;
+	/* Replay feature is ready for activating */
+	bool replay_feature_enabled;
+	/* Replay is currently active */
+	bool replay_allow_active;
+	/* Replay is allowed to use long vblank */
+	bool replay_allow_long_vblank;
+	/* Power opt flags that are activated currently */
+	unsigned int replay_power_opt_active;
+	/* SMU optimization is enabled */
+	bool replay_smu_opt_enable;
+	/* Current Coasting vtotal */
+	uint32_t coasting_vtotal;
+	/* Coasting vtotal table */
+	uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+	/* Maximum link off frame count */
+	enum replay_link_off_frame_count_level link_off_frame_count_level;
+	/* Pseudo vtotal used with ABM + IPS on full-screen video, which can improve IPS residency */
+	uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+	/* Last pseudo vtotal set in DMUB */
+	uint16_t last_pseudo_vtotal;
 };
 
 /* To split out "global" and "per-panel" config settings.
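/*
 * Editor's note: a minimal usage sketch, not part of the patch. With the
 * single coasting_vtotal field now backed by a per-reason table, a caller
 * might collapse the table back into the one value handed to the firmware.
 * The helper name below is hypothetical; it only assumes the replay_settings
 * layout introduced in the hunk above.
 */
static uint32_t replay_max_coasting_vtotal(const struct replay_settings *rs)
{
	uint32_t vtotal = rs->coasting_vtotal;
	int i;

	/* Take the largest vtotal requested by any coasting reason. */
	for (i = 0; i < PR_COASTING_TYPE_NUM; i++)
		if (rs->coasting_vtotal_table[i] > vtotal)
			vtotal = rs->coasting_vtotal_table[i];

	return vtotal;
}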
@@ -1101,25 +1147,50 @@ struct dc_panel_config {
 	} ilr;
 };
 
+#define MAX_SINKS_PER_LINK 4
+
 /*
  * USB4 DPIA BW ALLOCATION STRUCTS
  */
 struct dc_dpia_bw_alloc {
-	int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
-	int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
-	int sink_max_bw; // The Max BW that sink can require/support
+	int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
+	int link_verified_bw; // The verified BW that the link can allocate and use (verified already)
+	int link_max_bw; // The max BW that the link can require/support
+	int allocated_bw; // The actual allocated BW for this DPIA
 	int estimated_bw; // The estimated available BW for this DPIA
 	int bw_granularity; // BW Granularity
+	int dp_overhead; // DP overhead in dp tunneling
 	bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
 	bool response_ready; // Response ready from the CM side
+	uint8_t nrd_max_lane_count; // Non-reduced max lane count
+	uint8_t nrd_max_link_rate; // Non-reduced max link rate
 };
 
-#define MAX_SINKS_PER_LINK 4
-
 enum dc_hpd_enable_select {
 	HPD_EN_FOR_ALL_EDP = 0,
 	HPD_EN_FOR_PRIMARY_EDP_ONLY,
 	HPD_EN_FOR_SECONDARY_EDP_ONLY,
 };
 
+enum mall_stream_type {
+	SUBVP_NONE, // subvp not in use
+	SUBVP_MAIN, // subvp in use, this stream is main stream
+	SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
+};
+
+enum dc_power_source_type {
+	DC_POWER_SOURCE_AC, // wall power
+	DC_POWER_SOURCE_DC, // battery power
+};
+
+struct dc_state_create_params {
+	enum dc_power_source_type power_source;
+};
+
+struct dc_commit_streams_params {
+	struct dc_stream_state **streams;
+	uint8_t stream_count;
+	enum dc_power_source_type power_source;
+};
+
 #endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 874b132fe1d7..a6006776333d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -135,7 +135,7 @@ static void dmcu_set_backlight_level(
 			0, 1, 80000);
 }
 
-static void dce_abm_init(struct abm *abm, uint32_t backlight)
+static void dce_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
 {
 	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
@@ -162,7 +162,7 @@ static void dce_abm_init(struct abm *abm, uint32_t backlight)
 			BL1_PWM_TARGET_ABM_LEVEL, backlight);
 
 	REG_UPDATE(BL1_PWM_USER_LEVEL,
-			BL1_PWM_USER_LEVEL, backlight);
+			BL1_PWM_USER_LEVEL, user_level);
 
 	REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
 			ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index c50aa30614be..051e4c2b4cf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -128,21 +128,6 @@
 	SRI(DC_ABM1_ACE_THRES_12, ABM, id), \
 	NBIO_SR(BIOS_SCRATCH_2)
 
-#define ABM_DCN32_REG_LIST(id)\
-	SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
-	SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
-	SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
-	SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
-	SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
-	SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
-	SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
-	SRI(BL1_PWM_USER_LEVEL, ABM, id), \
-	SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
-	SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
-	SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
-	SRI(DC_ABM1_ACE_THRES_12, ABM, id), \
-
NBIO_SR(BIOS_SCRATCH_2) - #define ABM_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 140598f18bbd..12f3c35b3a34 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -239,27 +239,294 @@ static void check_audio_bandwidth_hdmi( } } } +static struct fixed31_32 get_link_symbol_clk_freq_mhz(enum dc_link_rate link_rate) +{ + switch (link_rate) { + case LINK_RATE_LOW: + return dc_fixpt_from_int(162); /* 162 MHz */ + case LINK_RATE_HIGH: + return dc_fixpt_from_int(270); /* 270 MHz */ + case LINK_RATE_HIGH2: + return dc_fixpt_from_int(540); /* 540 MHz */ + case LINK_RATE_HIGH3: + return dc_fixpt_from_int(810); /* 810 MHz */ + case LINK_RATE_UHBR10: + return dc_fixpt_from_fraction(3125, 10); /* 312.5 MHz */ + case LINK_RATE_UHBR13_5: + return dc_fixpt_from_fraction(421875, 1000); /* 421.875 MHz */ + case LINK_RATE_UHBR20: + return dc_fixpt_from_int(625); /* 625 MHz */ + default: + /* Unexpected case, this requires debug if encountered. */ + ASSERT(0); + return dc_fixpt_from_int(0); + } +} + +struct dp_audio_layout_config { + uint8_t layouts_per_sample_denom; + uint8_t symbols_per_layout; + uint8_t max_layouts_per_audio_sdp; +}; + +static void get_audio_layout_config( + uint32_t channel_count, + enum dp_link_encoding encoding, + struct dp_audio_layout_config *output) +{ + /* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP, + * with each layout being the same size (8ch layout). + */ + if (encoding == DP_8b_10b_ENCODING) { + if (channel_count == 2) { + output->layouts_per_sample_denom = 4; + output->symbols_per_layout = 40; + output->max_layouts_per_audio_sdp = 1; + } else if (channel_count == 8 || channel_count == 6) { + output->layouts_per_sample_denom = 1; + output->symbols_per_layout = 40; + output->max_layouts_per_audio_sdp = 1; + } + } else if (encoding == DP_128b_132b_ENCODING) { + if (channel_count == 2) { + output->layouts_per_sample_denom = 4; + output->symbols_per_layout = 10; + output->max_layouts_per_audio_sdp = 1; + } else if (channel_count == 8 || channel_count == 6) { + output->layouts_per_sample_denom = 1; + output->symbols_per_layout = 10; + output->max_layouts_per_audio_sdp = 1; + } + } +} -/*For DP SST, calculate if specified sample rates can fit into a given timing */ -static void check_audio_bandwidth_dpsst( +static uint32_t get_av_stream_map_lane_count( + enum dp_link_encoding encoding, + enum dc_lane_count lane_count, + bool is_mst) +{ + uint32_t av_stream_map_lane_count = 0; + + if (encoding == DP_8b_10b_ENCODING) { + if (!is_mst) + av_stream_map_lane_count = lane_count; + else + av_stream_map_lane_count = 4; + } else if (encoding == DP_128b_132b_ENCODING) { + av_stream_map_lane_count = 4; + } + + ASSERT(av_stream_map_lane_count != 0); + + return av_stream_map_lane_count; +} + +static uint32_t get_audio_sdp_overhead( + enum dp_link_encoding encoding, + enum dc_lane_count lane_count, + bool is_mst) +{ + uint32_t audio_sdp_overhead = 0; + + if (encoding == DP_8b_10b_ENCODING) { + if (is_mst) + audio_sdp_overhead = 16; /* 4 * 2 + 8 */ + else + audio_sdp_overhead = lane_count * 2 + 8; + } else if (encoding == DP_128b_132b_ENCODING) { + audio_sdp_overhead = 10; /* 4 x 2.5 */ + } + + ASSERT(audio_sdp_overhead != 0); + + return audio_sdp_overhead; +} + +static uint32_t calculate_required_audio_bw_in_symbols( const struct audio_crtc_info 
*crtc_info, + const struct dp_audio_layout_config *layout_config, uint32_t channel_count, - union audio_sample_rates *sample_rates) + uint32_t sample_rate_hz, + uint32_t av_stream_map_lane_count, + uint32_t audio_sdp_overhead) +{ + /* DP spec recommends between 1.05 to 1.1 safety margin to prevent sample under-run */ + struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100); + struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction( + crtc_info->requested_pixel_clock_100Hz, crtc_info->h_total * 10); + struct fixed31_32 samples_per_line; + struct fixed31_32 layouts_per_line; + struct fixed31_32 symbols_per_sdp_max_layout; + struct fixed31_32 remainder; + uint32_t num_sdp_with_max_layouts; + uint32_t required_symbols_per_hblank; + + samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000); + samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz); + layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config->layouts_per_sample_denom); + + num_sdp_with_max_layouts = dc_fixpt_floor( + dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp)); + symbols_per_sdp_max_layout = dc_fixpt_from_int( + layout_config->max_layouts_per_audio_sdp * layout_config->symbols_per_layout); + symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead); + symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin); + required_symbols_per_hblank = num_sdp_with_max_layouts; + required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) / + av_stream_map_lane_count) * av_stream_map_lane_count; + + if (num_sdp_with_max_layouts != dc_fixpt_ceil( + dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp))) { + remainder = dc_fixpt_sub_int(layouts_per_line, + num_sdp_with_max_layouts * layout_config->max_layouts_per_audio_sdp); + remainder = dc_fixpt_mul_int(remainder, layout_config->symbols_per_layout); + remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead); + remainder = dc_fixpt_mul(remainder, audio_sdp_margin); + required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) / + av_stream_map_lane_count) * av_stream_map_lane_count; + } + + return required_symbols_per_hblank; +} + +/* Current calculation only applicable for 8b/10b MST and 128b/132b SST/MST. 
+ */ +static uint32_t calculate_available_hblank_bw_in_symbols( + const struct audio_crtc_info *crtc_info, + const struct audio_dp_link_info *dp_link_info) { - /* do nothing */ + uint64_t hblank = crtc_info->h_total - crtc_info->h_active; + struct fixed31_32 hblank_time_msec = + dc_fixpt_from_fraction(hblank * 10, crtc_info->requested_pixel_clock_100Hz); + struct fixed31_32 lsclkfreq_mhz = + get_link_symbol_clk_freq_mhz(dp_link_info->link_rate); + struct fixed31_32 average_stream_sym_bw_frac; + struct fixed31_32 peak_stream_bw_kbps; + struct fixed31_32 bits_per_pixel; + struct fixed31_32 link_bw_kbps; + struct fixed31_32 available_stream_sym_count; + uint32_t available_hblank_bw = 0; /* in stream symbols */ + + if (crtc_info->dsc_bits_per_pixel) { + bits_per_pixel = dc_fixpt_from_fraction(crtc_info->dsc_bits_per_pixel, 16); + } else { + switch (crtc_info->color_depth) { + case COLOR_DEPTH_666: + bits_per_pixel = dc_fixpt_from_int(6); + break; + case COLOR_DEPTH_888: + bits_per_pixel = dc_fixpt_from_int(8); + break; + case COLOR_DEPTH_101010: + bits_per_pixel = dc_fixpt_from_int(10); + break; + case COLOR_DEPTH_121212: + bits_per_pixel = dc_fixpt_from_int(12); + break; + default: + /* Default to commonly supported color depth. */ + bits_per_pixel = dc_fixpt_from_int(8); + break; + } + + bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 3); + + if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 3); + bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 2); + } else if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR420) { + bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 2); + } + } + + /* Use simple stream BW calculation because mainlink overhead is + * accounted for separately in the audio BW calculations. 
+ */ + peak_stream_bw_kbps = dc_fixpt_from_fraction(crtc_info->requested_pixel_clock_100Hz, 10); + peak_stream_bw_kbps = dc_fixpt_mul(peak_stream_bw_kbps, bits_per_pixel); + link_bw_kbps = dc_fixpt_from_int(dp_link_info->link_bandwidth_kbps); + average_stream_sym_bw_frac = dc_fixpt_div(peak_stream_bw_kbps, link_bw_kbps); + + available_stream_sym_count = dc_fixpt_mul_int(hblank_time_msec, 1000); + available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, lsclkfreq_mhz); + available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, average_stream_sym_bw_frac); + available_hblank_bw = dc_fixpt_floor(available_stream_sym_count); + available_hblank_bw *= dp_link_info->lane_count; + available_hblank_bw -= crtc_info->dsc_num_slices * 4; /* EOC overhead */ + + if (available_hblank_bw < dp_link_info->hblank_min_symbol_width) + available_hblank_bw = dp_link_info->hblank_min_symbol_width; + + if (available_hblank_bw < 12) + available_hblank_bw = 0; + else + available_hblank_bw -= 12; /* Main link overhead */ + + return available_hblank_bw; } -/*For DP MST, calculate if specified sample rates can fit into a given timing */ -static void check_audio_bandwidth_dpmst( +static void check_audio_bandwidth_dp( const struct audio_crtc_info *crtc_info, + const struct audio_dp_link_info *dp_link_info, uint32_t channel_count, union audio_sample_rates *sample_rates) { - /* do nothing */ + struct dp_audio_layout_config layout_config = {0}; + uint32_t available_hblank_bw; + uint32_t av_stream_map_lane_count; + uint32_t audio_sdp_overhead; + + /* TODO: Add validation for SST 8b/10 case */ + if (!dp_link_info->is_mst && dp_link_info->encoding == DP_8b_10b_ENCODING) + return; + + available_hblank_bw = calculate_available_hblank_bw_in_symbols( + crtc_info, dp_link_info); + av_stream_map_lane_count = get_av_stream_map_lane_count( + dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst); + audio_sdp_overhead = get_audio_sdp_overhead( + dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst); + get_audio_layout_config( + channel_count, dp_link_info->encoding, &layout_config); + + if (layout_config.max_layouts_per_audio_sdp == 0 || + layout_config.symbols_per_layout == 0 || + layout_config.layouts_per_sample_denom == 0) { + return; + } + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 192000, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_192 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 176400, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_176_4 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 96000, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_96 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 88200, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_88_2 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 48000, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_48 = 0; + if (available_hblank_bw < calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 44100, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_44_1 = 0; + if (available_hblank_bw < 
calculate_required_audio_bw_in_symbols( + crtc_info, &layout_config, channel_count, 32000, + av_stream_map_lane_count, audio_sdp_overhead)) + sample_rates->rate.RATE_32 = 0; } static void check_audio_bandwidth( const struct audio_crtc_info *crtc_info, + const struct audio_dp_link_info *dp_link_info, uint32_t channel_count, enum signal_type signal, union audio_sample_rates *sample_rates) @@ -271,12 +538,9 @@ static void check_audio_bandwidth( break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT: - check_audio_bandwidth_dpsst( - crtc_info, channel_count, sample_rates); - break; case SIGNAL_TYPE_DISPLAY_PORT_MST: - check_audio_bandwidth_dpmst( - crtc_info, channel_count, sample_rates); + check_audio_bandwidth_dp( + crtc_info, dp_link_info, channel_count, sample_rates); break; default: break; @@ -394,7 +658,8 @@ void dce_aud_az_configure( struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, - const struct audio_info *audio_info) + const struct audio_info *audio_info, + const struct audio_dp_link_info *dp_link_info) { struct dce_audio *aud = DCE_AUD(audio); @@ -529,6 +794,7 @@ void dce_aud_az_configure( check_audio_bandwidth( crtc_info, + dp_link_info, channel_count, signal, &sample_rates); @@ -588,6 +854,7 @@ void dce_aud_az_configure( check_audio_bandwidth( crtc_info, + dp_link_info, 8, signal, &sample_rate); @@ -782,7 +1049,7 @@ static void get_azalia_clock_info_dp( /*audio_dto_module = dpDtoSourceClockInkhz * 10,000; * [khz] ->[100Hz] */ azalia_clock_info->audio_dto_module = - pll_info->dp_dto_source_clock_in_khz * 10; + pll_info->audio_dto_source_clock_in_khz * 10; } void dce_aud_wall_dto_setup( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h index dbd2cfed0603..539f881928d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h @@ -170,7 +170,8 @@ void dce_aud_az_disable(struct audio *audio); void dce_aud_az_configure(struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, - const struct audio_info *audio_info); + const struct audio_info *audio_info, + const struct audio_dp_link_info *dp_link_info); void dce_aud_wall_dto_setup(struct audio *audio, enum signal_type signal, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 5d3f6fa1011e..b5e0289d2fe8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -975,6 +975,12 @@ static bool dcn31_program_pix_clk( look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10); struct bp_pixel_clock_parameters bp_pc_params = {0}; enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; + + // Apply ssed(spread spectrum) dpref clock for edp only. 
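+	/*
+	 * Editor's note (annotation, not part of the original patch): "ssed"
+	 * in the comment above refers to the spread-spectrum-modulated DPREF
+	 * clock. When the clock manager reports a non-zero
+	 * dp_dto_source_clock_in_khz, the DP DTO below is driven from that
+	 * modulated reference instead of the nominal dprefclk; the override
+	 * is deliberately limited to eDP links using 8b/10b encoding.
+	 */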
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 + && pix_clk_params->signal_type == SIGNAL_TYPE_EDP + && encoding == DP_8b_10b_ENCODING) + dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz; // For these signal types Driver to program DP_DTO without calling VBIOS Command table if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) { if (e) { @@ -1088,6 +1094,7 @@ static bool get_pixel_clk_frequency_100hz( struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); unsigned int clock_hz = 0; unsigned int modulo_hz = 0; + unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz; if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) { clock_hz = REG_READ(PHASE[inst]); @@ -1100,7 +1107,7 @@ static bool get_pixel_clk_frequency_100hz( modulo_hz = REG_READ(MODULO[inst]); if (modulo_hz) *pixel_clk_khz = div_u64((uint64_t)clock_hz* - clock_source->ctx->dc->clk_mgr->dprefclk_khz*10, + dp_dto_ref_khz*10, modulo_hz); else *pixel_clk_khz = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index a2f48d46d199..ee601a6897a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -22,9 +22,6 @@ * Authors: AMD * */ - -#include <linux/delay.h> - #include "resource.h" #include "dce_i2c.h" #include "dce_i2c_hw.h" @@ -315,9 +312,6 @@ static bool setup_engine( /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); - /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ - REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); - /*set SW requested I2c speed to default, if API calls in it will be override later*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h index f98400efdd9b..e34e445a4013 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h @@ -181,6 +181,7 @@ struct dce_mem_input_registers { SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\ SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\ SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\ SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\ SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\ SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h index bf1ffc3629c7..3d9be87aae45 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h @@ -111,6 +111,7 @@ enum dce110_opp_reg_type { OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\ OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\ OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c index e8570060d007..5bca67407c5b 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c @@ -290,4 +290,5 @@ void dce_panel_cntl_construct( dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs; dce_panel_cntl->base.ctx = init_data->ctx; dce_panel_cntl->base.inst = init_data->inst; + dce_panel_cntl->base.pwrseq_inst = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index 670d5ab9d998..2b1673d69ea8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -1408,7 +1408,7 @@ void dce110_opp_set_csc_default( static void program_pwl(struct dce_transform *xfm_dce, const struct pwl_params *params) { - int retval; + uint32_t retval; uint8_t max_tries = 10; uint8_t counter = 0; uint32_t i = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index d3e6544022b7..ccc154b0281c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -57,18 +57,22 @@ static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst return ret; } -static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight) +static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight, uint32_t user_level) { - dmub_abm_init(abm, backlight); + dmub_abm_init(abm, backlight, user_level); } static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm) { + dc_allow_idle_optimizations(abm->ctx->dc, false); + return dmub_abm_get_current_backlight(abm); } static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm) { + dc_allow_idle_optimizations(abm->ctx->dc, false); + return dmub_abm_get_target_backlight(abm); } @@ -145,7 +149,11 @@ static bool dmub_abm_save_restore_ex( return ret; } -static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) +static bool dmub_abm_set_pipe_ex(struct abm *abm, + uint32_t otg_inst, + uint32_t option, + uint32_t panel_inst, + uint32_t pwrseq_inst) { bool ret = false; unsigned int feature_support; @@ -153,7 +161,7 @@ static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t op feature_support = abm_feature_support(abm, panel_inst); if (feature_support == ABM_LCD_SUPPORT) - ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst); + ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst, pwrseq_inst); return ret; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index 592a8f7a1c6d..b851fc65f5b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -34,11 +34,7 @@ #include "reg_helper.h" #include "fixed31_32.h" -#ifdef _WIN32 -#include "atombios.h" -#else #include "atom.h" -#endif #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) @@ -76,10 +72,10 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } -void dmub_abm_init(struct abm *abm, uint32_t backlight) +void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level) { struct dce_abm *dce_abm = TO_DMUB_ABM(abm); @@ 
-106,7 +102,7 @@ void dmub_abm_init(struct abm *abm, uint32_t backlight) BL1_PWM_TARGET_ABM_LEVEL, backlight); REG_UPDATE(BL1_PWM_USER_LEVEL, - BL1_PWM_USER_LEVEL, backlight); + BL1_PWM_USER_LEVEL, user_level); REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, @@ -155,7 +151,7 @@ bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask) cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -186,7 +182,7 @@ void dmub_abm_init_config(struct abm *abm, cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } @@ -203,7 +199,7 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -246,7 +242,7 @@ bool dmub_abm_save_restore( cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); // Copy iramtable data into local structure memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); @@ -254,7 +250,11 @@ bool dmub_abm_save_restore( return true; } -bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) +bool dmub_abm_set_pipe(struct abm *abm, + uint32_t otg_inst, + uint32_t option, + uint32_t panel_inst, + uint32_t pwrseq_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; @@ -264,12 +264,13 @@ bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint cmd.abm_set_pipe.header.type = DMUB_CMD__ABM; cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE; cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst; + cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst; cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option; cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst; cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -291,7 +292,7 @@ bool dmub_abm_set_backlight_level(struct abm *abm, cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h index 853564d7f471..761685e5b8c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h @@ -30,7 +30,7 @@ struct abm_save_restore; -void dmub_abm_init(struct abm *abm, uint32_t backlight); +void 
dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level); bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask); unsigned int dmub_abm_get_current_backlight(struct abm *abm); unsigned int dmub_abm_get_target_backlight(struct abm *abm); @@ -44,7 +44,7 @@ bool dmub_abm_save_restore( struct dc_context *dc, unsigned int panel_inst, struct abm_save_restore *pData); -bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst); +bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst); bool dmub_abm_set_backlight_level(struct abm *abm, unsigned int backlight_pwm_u16_16, unsigned int frame_ramp, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 2aa0e01a6891..bf636b28e3e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -47,7 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv, if (!lock) cmd.lock_hw.lock_hw_data.should_release = 1; - dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, @@ -65,5 +65,9 @@ bool should_use_dmub_lock(struct dc_link *link) { if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) return true; + + if (link->replay_settings.replay_feature_enabled) + return true; + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c index d8009b2dc56a..98a778996e1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c @@ -48,5 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv) sizeof(cmd.outbox1_enable.header); cmd.outbox1_enable.enable = true; - dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 9d4170a356a2..3e243e407bb8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -105,23 +105,18 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state) */ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state, uint8_t panel_inst) { - struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; uint32_t raw_state = 0; uint32_t retry_count = 0; - enum dmub_status status; do { // Send gpint command and wait for ack - status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, panel_inst, 30); - - if (status == DMUB_STATUS_OK) { - // GPINT was executed, get response - dmub_srv_get_gpint_response(srv, &raw_state); + if (dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__GET_PSR_STATE, panel_inst, &raw_state, + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { *state = convert_psr_state(raw_state); - } else + } else { // Return invalid state when GPINT times out *state = PSR_STATE_INVALID; - + } } while (++retry_count <= 1000 && *state == PSR_STATE_INVALID); // Assert if max retry hit @@ -171,7 +166,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state * cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst; cmd.psr_set_version.header.payload_bytes = 
sizeof(struct dmub_cmd_psr_set_version_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -199,7 +194,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8 cmd.psr_enable.header.payload_bytes = 0; // Send header only - dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Below loops 1000 x 500us = 500 ms. * Exit PSR may need to wait 1-2 frames to power up. Timeout after at @@ -248,7 +243,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_ cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst; - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -267,7 +262,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub, cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle; cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su; - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -286,7 +281,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst; - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -423,7 +418,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->relock_delay_frame_cnt = 2; copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height; - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -444,7 +439,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst) cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC; cmd.psr_enable.header.payload_bytes = 0; - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -452,13 +447,11 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst) */ static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst) { - struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub; uint16_t param = (uint16_t)(panel_inst << 8); /* Send gpint command and wait for ack */ - dmub_srv_send_gpint_command(srv, DMUB_GPINT__PSR_RESIDENCY, param, 30); - - dmub_srv_get_gpint_response(srv, residency); + dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__PSR_RESIDENCY, param, residency, + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); } static const struct dmub_psr_funcs psr_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index 28149e53c2a6..4f559a025cf0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -244,7 +244,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, uint16_t param = (uint16_t)(panel_inst << 8); if (is_alpm) - param |= REPLAY_RESIDENCY_MODE_ALPM; + param |= 
REPLAY_RESIDENCY_FIELD_MODE_ALPM; if (is_start) param |= REPLAY_RESIDENCY_ENABLE; @@ -258,13 +258,97 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, *residency = 0; } +/* + * Set REPLAY power optimization flags and coasting vtotal. + */ +static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub, + unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_set_power_opt_and_coasting_vtotal.header.type = DMUB_CMD__REPLAY; + cmd.replay_set_power_opt_and_coasting_vtotal.header.sub_type = + DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL; + cmd.replay_set_power_opt_and_coasting_vtotal.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal); + cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.power_opt = power_opt; + cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.panel_inst = panel_inst; + cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal; + + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +/* + * send Replay general cmd to DMUB. + */ +static void dmub_replay_send_cmd(struct dmub_replay *dmub, + enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element) +{ + union dmub_rb_cmd cmd; + struct dc_context *ctx = NULL; + + if (dmub == NULL || cmd_element == NULL) + return; + + ctx = dmub->ctx; + if (ctx != NULL) { + + if (msg != Replay_Msg_Not_Support) { + memset(&cmd, 0, sizeof(cmd)); + //Header + cmd.replay_set_timing_sync.header.type = DMUB_CMD__REPLAY; + } else + return; + } else + return; + + switch (msg) { + case Replay_Set_Timing_Sync_Supported: + //Header + cmd.replay_set_timing_sync.header.sub_type = + DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED; + cmd.replay_set_timing_sync.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_timing_sync); + //Cmd Body + cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst = + cmd_element->sync_data.panel_inst; + cmd.replay_set_timing_sync.replay_set_timing_sync_data.timing_sync_supported = + cmd_element->sync_data.timing_sync_supported; + break; + case Replay_Set_Residency_Frameupdate_Timer: + //Header + cmd.replay_set_frameupdate_timer.header.sub_type = + DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER; + cmd.replay_set_frameupdate_timer.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer); + //Cmd Body + cmd.replay_set_frameupdate_timer.data.panel_inst = + cmd_element->panel_inst; + cmd.replay_set_frameupdate_timer.data.enable = + cmd_element->timer_data.enable; + cmd.replay_set_frameupdate_timer.data.frameupdate_count = + cmd_element->timer_data.frameupdate_count; + break; + case Replay_Msg_Not_Support: + default: + return; + break; + } + + dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + static const struct dmub_replay_funcs replay_funcs = { - .replay_copy_settings = dmub_replay_copy_settings, - .replay_enable = dmub_replay_enable, - .replay_get_state = dmub_replay_get_state, - .replay_set_power_opt = dmub_replay_set_power_opt, - .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal, - .replay_residency = dmub_replay_residency, + .replay_copy_settings = dmub_replay_copy_settings, + .replay_enable = dmub_replay_enable, + .replay_get_state = dmub_replay_get_state, + .replay_set_power_opt = dmub_replay_set_power_opt, + 
.replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal, + .replay_residency = dmub_replay_residency, + .replay_set_power_opt_and_coasting_vtotal = dmub_replay_set_power_opt_and_coasting_vtotal, + .replay_send_cmd = dmub_replay_send_cmd, }; /* diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h index e8385bbf51fc..3613aff994d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -45,10 +45,14 @@ struct dmub_replay_funcs { struct replay_context *replay_context, uint8_t panel_inst); void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt, uint8_t panel_inst); + void (*replay_send_cmd)(struct dmub_replay *dmub, + enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element); void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal, uint8_t panel_inst); void (*replay_residency)(struct dmub_replay *dmub, uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm); + void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub, + unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal); }; struct dmub_replay *dmub_replay_create(struct dc_context *ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile deleted file mode 100644 index 0d2f6bbf7558..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright 2017 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# -# Makefile for the 'controller' sub-component of DAL. -# It provides the control and status of HW CRTC block. 
- -CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init) - -DCE100 = dce100_resource.o - -AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCE100) - - -############################################################################### -# DCE 10x -############################################################################### -ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0 -TG_DCE100 = dce100_resource.o - -AMD_DAL_TG_DCE100 = $(addprefix \ - $(AMDDALPATH)/dc/dce100/,$(TG_DCE100)) - -AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100) -endif - diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile index 695a50ed5ad2..c307f040e48f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile @@ -23,11 +23,11 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init DCE110 = dce110_timing_generator.o \ -dce110_compressor.o dce110_resource.o \ -dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \ +dce110_compressor.o dce110_opp_regamma_v.o \ +dce110_opp_csc_v.o dce110_timing_generator_v.o \ dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110)) diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile index e846ef58cab3..683866797709 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile @@ -23,10 +23,9 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init -DCE112 = dce112_compressor.o \ -dce112_resource.o +DCE112 = dce112_compressor.o AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112)) diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile index 097cf407a15d..8f508e662748 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile @@ -24,9 +24,9 @@ # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init -DCE120 = dce120_resource.o dce120_timing_generator.o \ +DCE120 = dce120_timing_generator.o AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120)) diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile index fee331accc0e..eede83ad91fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. 
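# Editor's note (annotation, not part of the patch): the repeated Makefile
# change below swaps $(call cc-disable-warning, override-init), which probes
# the compiler and only expands to -Wno-override-init when the option is
# recognized, for the literal flag; this assumes every compiler the kernel
# supports already accepts -Wno-override-init.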
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
 
 DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
 	dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 93dd68c31275..fba189d26652 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,10 +23,9 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init
 
-DCE80 = dce80_timing_generator.o \
-	dce80_resource.o
+DCE80 = dce80_timing_generator.o
 
 AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 2d2007c3e2b6..8dc7938c36d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -22,11 +22,11 @@
 #
 # Makefile for DCN.
 
-DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o \
+DCN10 = dcn10_ipp.o \
 	dcn10_hw_sequencer_debug.o \
-	dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
+	dcn10_opp.o \
 	dcn10_hubp.o dcn10_mpc.o \
-	dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+	dcn10_cm_common.o \
 	dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
 
 AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 3538973bd0c6..0b49362f71b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -24,7 +24,7 @@
  */
 #include "dc.h"
 #include "reg_helper.h"
-#include "dcn10_dpp.h"
+#include "dcn10/dcn10_dpp.h"
 #include "dcn10_cm_common.h"
 #include "custom_float.h"
 
@@ -62,6 +62,26 @@ void cm_helper_program_color_matrices(
 
 }
 
+void cm_helper_read_color_matrices(struct dc_context *ctx,
+		uint16_t *regval,
+		const struct color_matrices_reg *reg)
+{
+	uint32_t cur_csc_reg, regval0, regval1;
+	unsigned int i = 0;
+
+	for (cur_csc_reg = reg->csc_c11_c12;
+			cur_csc_reg <= reg->csc_c33_c34; cur_csc_reg++) {
+		REG_GET_2(cur_csc_reg,
+				csc_c11, &regval0,
+				csc_c12, &regval1);
+
+		regval[2 * i] = regval0;
+		regval[(2 * i) + 1] = regval1;
+
+		i++;
+	}
+}
+
 void cm_helper_program_xfer_func(
 	struct dc_context *ctx,
 	const struct pwl_params *params,
@@ -382,6 +402,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
 			i += increment) {
 		if (j == hw_points - 1)
 			break;
+		if (i >= TRANSFER_FUNC_POINTS) {
+			DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
+				     i, TRANSFER_FUNC_POINTS);
+			return false;
+		}
 		rgb_resulted[j].red = output_tf->tf_pts.red[i];
 		rgb_resulted[j].green = output_tf->tf_pts.green[i];
 		rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 0a68b63d6126..decc50b1ac53 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -114,5 +114,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 		const struct dc_transfer_func *output_tf,
 		struct pwl_params *lut_params);
 
-
+void cm_helper_read_color_matrices(struct
dc_context *ctx, + uint16_t *regval, + const struct color_matrices_reg *reg); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index d51f1ce02874..6dd355a03033 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -130,7 +130,7 @@ bool hubbub1_verify_allow_pstate_change_high( static unsigned int max_sampled_pstate_wait_us; /* data collection */ static bool forced_pstate_allow; /* help with revert wa */ - unsigned int debug_data; + unsigned int debug_data = 0; unsigned int i; if (forced_pstate_allow) { @@ -242,7 +242,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub) bool hubbub1_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -356,7 +356,7 @@ bool hubbub1_program_urgent_watermarks( bool hubbub1_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -501,7 +501,7 @@ bool hubbub1_program_stutter_watermarks( bool hubbub1_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -582,7 +582,7 @@ bool hubbub1_program_pstate_watermarks( bool hubbub1_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 4201b7627030..d1f9e63944c8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -409,7 +409,7 @@ struct dcn10_hubbub { const struct dcn_hubbub_shift *shifts; const struct dcn_hubbub_mask *masks; unsigned int debug_test_index_pstate; - struct dcn_watermark_set watermarks; + union dcn_watermark_set watermarks; }; void hubbub1_update_dchub( @@ -423,7 +423,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub); bool hubbub1_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); @@ -446,17 +446,17 @@ void hubbub1_construct(struct hubbub *hubbub, bool hubbub1_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub1_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub1_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 09784222cc03..69119b2fdce2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -692,6 +692,7 @@ struct dcn_hubp_state { uint32_t primary_meta_addr_hi; uint32_t uclk_pstate_force; uint32_t hubp_cntl; + uint32_t flip_control; }; struct dcn10_hubp { diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 92fdab731f4a..c51b717e5622 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -32,7 +32,7 @@ #include "dce/dce_hwseq.h" #include "abm.h" #include "dmcu.h" -#include "dcn10_optc.h" +#include "dcn10/dcn10_optc.h" #include "dcn10/dcn10_dpp.h" #include "dcn10/dcn10_mpc.h" #include "timing_generator.h" @@ -392,7 +392,7 @@ static unsigned int dcn10_get_mpcc_states(struct dc *dc, char *pBuf, unsigned in remaining_buffer -= chars_printed; pBuf += chars_printed; - for (i = 0; i < pool->pipe_count; i++) { + for (i = 0; i < pool->mpcc_count; i++) { struct mpcc_state s = {0}; pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 377f1ba1a81b..4d0eed7598b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1439,7 +1439,6 @@ enum signal_type dcn10_get_dig_mode( default: return SIGNAL_TYPE_NONE; } - return SIGNAL_TYPE_NONE; } void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index d980e6bd6c66..b7a89c39f445 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -167,7 +167,6 @@ struct dcn10_link_enc_registers { uint32_t DIO_LINKD_CNTL; uint32_t DIO_LINKE_CNTL; uint32_t DIO_LINKF_CNTL; - uint32_t DIG_FIFO_CTRL0; uint32_t DIO_CLK_CNTL; uint32_t DIG_BE_CLK_CNTL; }; @@ -475,9 +474,6 @@ struct dcn10_link_enc_registers { type HPO_DP_ENC_SEL;\ type HPO_HDMI_ENC_SEL -#define DCN32_LINK_ENCODER_REG_FIELD_LIST(type) \ - type DIG_FIFO_OUTPUT_PIXEL_MODE - #define DCN35_LINK_ENCODER_REG_FIELD_LIST(type) \ type DIG_BE_ENABLE;\ type DIG_RB_SWITCH_EN;\ @@ -512,7 +508,6 @@ struct dcn10_link_enc_shift { DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t); - DCN32_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN35_LINK_ENCODER_REG_FIELD_LIST(uint8_t); }; @@ -521,7 +516,6 @@ struct dcn10_link_enc_mask { DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t); - DCN32_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN35_LINK_ENCODER_REG_FIELD_LIST(uint32_t); }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 0dec57679269..71e9288d60ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -23,6 +23,7 @@ * */ +#include "core_types.h" #include "dm_services.h" #include "dcn10_opp.h" #include "reg_helper.h" @@ -160,10 +161,17 @@ static void opp1_set_pixel_encoding( struct dcn10_opp *oppn10, const struct clamping_and_pixel_encoding_params *params) { + bool force_chroma_subsampling_1tap = + oppn10->base.ctx->dc->debug.force_chroma_subsampling_1tap; + switch (params->pixel_encoding) { case PIXEL_ENCODING_RGB: case PIXEL_ENCODING_YCBCR444: + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 0, + FMT_SUBSAMPLING_MODE, 0, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); REG_UPDATE(FMT_CONTROL, 
FMT_PIXEL_ENCODING, 0); break; case PIXEL_ENCODING_YCBCR422: @@ -173,11 +181,17 @@ static void opp1_set_pixel_encoding( FMT_CBCR_BIT_REDUCTION_BYPASS, 0); break; case PIXEL_ENCODING_YCBCR420: - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2); + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 2, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 1); break; default: break; } + + if (force_chroma_subsampling_1tap) + REG_UPDATE(FMT_CONTROL, FMT_SUBSAMPLING_MODE, 0); } /** @@ -377,6 +391,7 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_set_disp_pattern_generator = NULL, .opp_program_dpg_dimensions = NULL, .dpg_is_blanked = NULL, + .dpg_is_pending = NULL, .opp_destroy = opp1_destroy }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h index 2c0ecfa5a643..c87de68a509e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h @@ -79,6 +79,8 @@ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index c429590f1298..1b96972b9d0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -127,7 +127,6 @@ struct dcn10_stream_enc_registers { uint32_t AFMT_60958_1; uint32_t AFMT_60958_2; uint32_t DIG_FE_CNTL; - uint32_t DIG_FE_CNTL2; uint32_t DIG_FIFO_STATUS; uint32_t DP_MSE_RATE_CNTL; uint32_t DP_MSE_RATE_UPDATE; @@ -570,7 +569,7 @@ struct dcn10_stream_enc_registers { type DP_SEC_GSP11_ENABLE;\ type DP_SEC_GSP11_LINE_NUM -#define SE_REG_FIELD_LIST_DCN3_2(type) \ +#define SE_REG_FIELD_LIST_DCN3_1_COMMON(type) \ type DIG_FIFO_OUTPUT_PIXEL_MODE;\ type DP_PIXEL_PER_CYCLE_PROCESSING_MODE;\ type DIG_SYMCLK_FE_ON;\ @@ -599,7 +598,7 @@ struct dcn10_stream_encoder_shift { uint8_t HDMI_ACP_SEND; SE_REG_FIELD_LIST_DCN2_0(uint8_t); SE_REG_FIELD_LIST_DCN3_0(uint8_t); - SE_REG_FIELD_LIST_DCN3_2(uint8_t); + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint8_t); SE_REG_FIELD_LIST_DCN3_5_COMMON(uint8_t); }; @@ -608,7 +607,7 @@ struct dcn10_stream_encoder_mask { uint32_t HDMI_ACP_SEND; SE_REG_FIELD_LIST_DCN2_0(uint32_t); SE_REG_FIELD_LIST_DCN3_0(uint32_t); - SE_REG_FIELD_LIST_DCN3_2(uint32_t); + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint32_t); SE_REG_FIELD_LIST_DCN3_5_COMMON(uint32_t); }; @@ -667,9 +666,6 @@ void enc1_stream_encoder_send_immediate_sdp_message( void enc1_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc); -void enc1_stream_encoder_reset_fifo( - struct stream_encoder *enc); - void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index d7dc9696a8c8..9b6070c99794 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -2,13 +2,11 @@ # # Makefile for DCN. 
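A quick aside on the .dpg_is_pending hook being introduced in this series: DCN10 leaves it NULL (see dcn10_opp_funcs above), while DCN20-derived OPPs implement it as opp2_dpg_is_pending further down. Any generation-independent caller therefore has to tolerate a missing hook. A minimal sketch of such a caller, assuming only the opp_funcs table shown in this diff; the helper itself is hypothetical, not from the driver:

/* Hypothetical wrapper: treat an absent hook as "no DPG update pending". */
static bool example_dpg_update_pending(struct output_pixel_processor *opp)
{
	if (opp->funcs->dpg_is_pending)
		return opp->funcs->dpg_is_pending(opp);

	return false;
}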
-DCN20 = dcn20_resource.o dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ - dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \ +DCN20 = dcn20_hubp.o \ + dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o -DCN20 += dcn20_dsc.o - AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN20) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index ab6d09c6fe34..ef5c22f41563 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -291,7 +291,43 @@ type SYMCLKB_FE_SRC_SEL;\ type SYMCLKC_FE_SRC_SEL;\ type SYMCLKD_FE_SRC_SEL;\ - type SYMCLKE_FE_SRC_SEL; + type SYMCLKE_FE_SRC_SEL;\ + type DTBCLK_P0_GATE_DISABLE;\ + type DTBCLK_P1_GATE_DISABLE;\ + type DTBCLK_P2_GATE_DISABLE;\ + type DTBCLK_P3_GATE_DISABLE;\ + type DSCCLK0_ROOT_GATE_DISABLE;\ + type DSCCLK1_ROOT_GATE_DISABLE;\ + type DSCCLK2_ROOT_GATE_DISABLE;\ + type DSCCLK3_ROOT_GATE_DISABLE;\ + type SYMCLKA_FE_ROOT_GATE_DISABLE;\ + type SYMCLKB_FE_ROOT_GATE_DISABLE;\ + type SYMCLKC_FE_ROOT_GATE_DISABLE;\ + type SYMCLKD_FE_ROOT_GATE_DISABLE;\ + type SYMCLKE_FE_ROOT_GATE_DISABLE;\ + type DPPCLK0_ROOT_GATE_DISABLE;\ + type DPPCLK1_ROOT_GATE_DISABLE;\ + type DPPCLK2_ROOT_GATE_DISABLE;\ + type DPPCLK3_ROOT_GATE_DISABLE;\ + type HDMISTREAMCLK0_ROOT_GATE_DISABLE;\ + type SYMCLKA_ROOT_GATE_DISABLE;\ + type SYMCLKB_ROOT_GATE_DISABLE;\ + type SYMCLKC_ROOT_GATE_DISABLE;\ + type SYMCLKD_ROOT_GATE_DISABLE;\ + type SYMCLKE_ROOT_GATE_DISABLE;\ + type PHYA_REFCLK_ROOT_GATE_DISABLE;\ + type PHYB_REFCLK_ROOT_GATE_DISABLE;\ + type PHYC_REFCLK_ROOT_GATE_DISABLE;\ + type PHYD_REFCLK_ROOT_GATE_DISABLE;\ + type PHYE_REFCLK_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK0_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK1_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK2_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK3_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK0_GATE_DISABLE;\ + type DPSTREAMCLK1_GATE_DISABLE;\ + type DPSTREAMCLK2_GATE_DISABLE;\ + type DPSTREAMCLK3_GATE_DISABLE;\ struct dccg_shift { DCCG_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c index f8667be57046..80779e85e2c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c @@ -299,6 +299,17 @@ void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params) } } + + if (dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL) { + /* Swap double buffered coefficient set */ + uint32_t wbscl_mode = REG_READ(WBSCL_MODE); + bool coef_ram_current = get_reg_field_value_ex( + wbscl_mode, dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL_CURRENT, + dwbc20->dwbc_shift->WBSCL_COEF_RAM_SEL_CURRENT); + + REG_UPDATE(WBSCL_MODE, WBSCL_COEF_RAM_SEL, !coef_ram_current); + } + } static const struct dwbc_funcs dcn20_dwbc_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 6eebcb22e317..c6f859871d11 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -570,7 +570,7 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, static bool hubbub2_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool 
safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h index 2f6146bf1d32..24a9c45988ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h @@ -85,7 +85,7 @@ struct dcn20_hubbub { const struct dcn_hubbub_shift *shifts; const struct dcn_hubbub_mask *masks; unsigned int debug_test_index_pstate; - struct dcn_watermark_set watermarks; + union dcn_watermark_set watermarks; int num_vmid; struct dcn20_vmid vmid[16]; unsigned int detile_buf_size; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 139cf31d2e45..6bba020ad6fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -1077,8 +1077,16 @@ void hubp2_cursor_set_position( if (src_y_offset < 0) src_y_offset = 0; /* Save necessary cursor info x, y position. w, h is saved in attribute func. */ - hubp->cur_rect.x = src_x_offset + param->viewport.x; - hubp->cur_rect.y = src_y_offset + param->viewport.y; + if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && + param->rotation != ROTATION_ANGLE_0) { + hubp->cur_rect.x = 0; + hubp->cur_rect.y = 0; + hubp->cur_rect.w = param->stream->timing.h_addressable; + hubp->cur_rect.h = param->stream->timing.v_addressable; + } else { + hubp->cur_rect.x = src_x_offset + param->viewport.x; + hubp->cur_rect.y = src_y_offset + param->viewport.y; + } } void hubp2_clk_cntl(struct hubp *hubp, bool enable) @@ -1323,6 +1331,12 @@ void hubp2_read_state(struct hubp *hubp) SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); + if (REG(DCHUBP_CNTL)) + s->hubp_cntl = REG_READ(DCHUBP_CNTL); + + if (REG(DCSURF_FLIP_CONTROL)) + s->flip_control = REG_READ(DCSURF_FLIP_CONTROL); + } static void hubp2_validate_dml_output(struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index efa2adf4f83d..8da3084d933f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -147,7 +147,7 @@ uint32_t DCN_CUR1_TTU_CNTL1;\ uint32_t VMID_SETTINGS_0 - +/*shared with dcn3.x*/ #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \ uint32_t FLIP_PARAMETERS_3;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index b2b266953d18..c34e04cac9a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -147,7 +147,8 @@ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\ - LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh) + LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\ + LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh) #define DPCS_DCN2_MASK_SH_LIST(mask_sh)\ DPCS_MASK_SH_LIST(mask_sh),\ @@ -231,6 +232,8 @@ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \ SRI(DPCSTX_TX_CNTL, DPCSTX, id), \ + SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \ + SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ SR(RDPCSTX0_RDPCSTX_SCRATCH) diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 5da6e44f284a..ea73473b970a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -395,9 +395,12 @@ static void mpc20_program_ogam_pwl( MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg); REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg); - } + REG_SEQ_SUBMIT(); + PERF_TRACE(); + REG_SEQ_WAIT_DONE(); + PERF_TRACE(); } static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, @@ -501,11 +504,6 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) ASSERT(!mpc_disabled); ASSERT(!mpc_idle); } - - REG_SEQ_SUBMIT(); - PERF_TRACE(); - REG_SEQ_WAIT_DONE(); - PERF_TRACE(); } static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) @@ -542,8 +540,30 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) return NULL; } +static void mpc2_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s) +{ + struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + + REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); + REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); + REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); + REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, + MPCC_ALPHA_BLND_MODE, &s->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); + REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, + MPCC_BUSY, &s->busy); + + /* Gamma block state */ + REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_inst], + MPCC_OGAM_CONFIG_STATUS, &s->rgam_mode); +} + static const struct mpc_funcs dcn20_mpc_funcs = { - .read_mpcc_state = mpc1_read_mpcc_state, + .read_mpcc_state = mpc2_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, .mpc_init = mpc1_mpc_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 0784d0198661..fbf1b6370eb2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp) (double_buffer_pending == 0); } +bool opp2_dpg_is_pending(struct output_pixel_processor *opp) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + uint32_t double_buffer_pending; + uint32_t dpg_en; + + REG_GET(DPG_CONTROL, DPG_EN, &dpg_en); + + REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending); + + return (dpg_en == 1 && double_buffer_pending == 1); +} + void opp2_program_left_edge_extra_pixel ( struct output_pixel_processor *opp, bool count) @@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = { .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, .dpg_is_blanked = opp2_dpg_is_blanked, + .dpg_is_pending = opp2_dpg_is_pending, .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index 3ab221bdd27d..8f186abd558d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions( bool opp2_dpg_is_blanked(struct 
output_pixel_processor *opp); +bool opp2_dpg_is_pending(struct output_pixel_processor *opp); + void opp2_dpg_set_blank_color( struct output_pixel_processor *opp, const struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile index 3a41a97b0729..3880db59e457 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile @@ -1,9 +1,8 @@ # SPDX-License-Identifier: MIT # # Makefile for DCN. -DCN201 = dcn201_init.o dcn201_resource.o \ - dcn201_hubbub.o\ - dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \ +DCN201 = dcn201_hubbub.o\ + dcn201_mpc.o dcn201_hubp.o dcn201_opp.o \ dcn201_dccg.o dcn201_link_encoder.o AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c index 037d265431c6..63798132ed95 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c @@ -52,7 +52,7 @@ static bool hubbub201_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -103,5 +103,5 @@ void hubbub201_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; - hubbub->detile_buf_size = 164 * 1024; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c index 35dd4bac242a..cd2bfcc51276 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c @@ -77,6 +77,7 @@ static void hubp201_program_requestor(struct hubp *hubp, MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode, CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode); + /* no need to program PTE */ REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0, CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size, MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size, @@ -99,6 +100,10 @@ static void hubp201_setup( struct _vcs_dpi_display_rq_regs_st *rq_regs, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) { + /* + * otg is locked when this func is called. Registers are double buffered. 
+ * disabling the requestors is not needed + */ hubp2_vready_at_or_After_vsync(hubp, pipe_dest); hubp201_program_requestor(hubp, rq_regs); hubp201_program_deadline(hubp, dlg_attr, ttu_attr); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h index 8b95ef251332..be25e8dc0636 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h @@ -30,6 +30,10 @@ #define DPCS_DCN201_MASK_SH_LIST(mask_sh)\ DPCS_MASK_SH_LIST(mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD_EN, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD_EN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\ @@ -44,7 +48,15 @@ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh) #define DPCS_DCN201_REG_LIST(id) \ - DPCS_DCN2_CMN_REG_LIST(id) + DPCS_DCN2_CMN_REG_LIST(id), \ + SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id) void dcn201_link_encoder_construct( struct dcn20_link_encoder *enc20, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c index 8e77db46a409..6a71ba3dfc63 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c @@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = { .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, .dpg_is_blanked = opp2_dpg_is_blanked, + .dpg_is_pending = opp2_dpg_is_pending, .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index ce1be0afae4a..ca92f5c8e7fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -2,7 +2,7 @@ # # Makefile for DCN21. 
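The dcn21 hubbub hunks below continue a pattern that runs through this whole series: every program_watermarks-style entry point is retyped from struct dcn_watermark_set to union dcn_watermark_set. The point of the union is to let one storage blob carry either the legacy watermark layout or a newer per-generation layout without touching every prototype again. A rough sketch of the shape, with illustrative field names that are not the driver's actual definition:

/* Illustrative only: one blob, several per-generation views. */
union example_watermark_set {
	struct {
		uint32_t urgent_ns;
		uint32_t pstate_change_ns;
	} legacy;	/* DCN1-era layout */
	struct {
		uint32_t urgent_ns;
		uint32_t usr_retraining_ns;
		uint32_t fclk_pstate_change_ns;
	} newer;	/* later-generation layout */
};

Code that only reads fields common to both views keeps working unchanged, which is why the signatures across hubbub1/2/201/21/3/31 can be flipped wholesale.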
-DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ +DCN21 = dcn21_hubp.o dcn21_hubbub.o \ dcn21_link_encoder.o dcn21_dccg.o AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index aeb0e0d9b70a..2546224b326a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -140,7 +140,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub, bool hubbub21_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -334,7 +334,7 @@ bool hubbub21_program_urgent_watermarks( bool hubbub21_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -487,7 +487,7 @@ bool hubbub21_program_stutter_watermarks( bool hubbub21_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -573,7 +573,7 @@ bool hubbub21_program_pstate_watermarks( bool hubbub21_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h index d8eb2bb7282c..ab2ce0313529 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h @@ -127,22 +127,22 @@ int hubbub21_init_dchub(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config); bool hubbub21_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub21_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub21_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub21_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 68cad55c72ab..e13d69a22c1c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -691,7 +691,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp, cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid; PERF_TRACE(); // TODO: remove after performance is stable. - dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); PERF_TRACE(); // TODO: remove after performance is stable. 
} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index af4d2065d2c1..c6ca70f3c061 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -23,22 +23,16 @@ # # -DCN30 := \ - dcn30_init.o \ - dcn30_hubbub.o \ +DCN30 := dcn30_hubbub.o \ dcn30_hubp.o \ - dcn30_dpp.o \ - dcn30_optc.o \ dcn30_dccg.o \ dcn30_mpc.o dcn30_vpg.o \ dcn30_afmt.o \ dcn30_dio_stream_encoder.o \ dcn30_dwb.o \ - dcn30_dpp_cm.o \ dcn30_dwb_cm.o \ dcn30_cm_common.o \ dcn30_mmhubbub.o \ - dcn30_resource.o \ dcn30_dio_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index ddb344056d40..b8327237ed44 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -26,7 +26,7 @@ #include "dm_services.h" #include "core_types.h" #include "reg_helper.h" -#include "dcn30_dpp.h" +#include "dcn30/dcn30_dpp.h" #include "basics/conversion.h" #include "dcn30_cm_common.h" #include "custom_float.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h index 35a613bb08bf..3f1da7f3a91c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h @@ -29,15 +29,9 @@ #include "dcn20/dcn20_dccg.h" -#define DCCG_REG_LIST_DCN3AG() \ - DCCG_COMMON_REG_LIST_DCN_BASE(),\ - SR(PHYASYMCLK_CLOCK_CNTL),\ - SR(PHYBSYMCLK_CLOCK_CNTL),\ - SR(PHYCSYMCLK_CLOCK_CNTL) - - #define DCCG_REG_LIST_DCN30() \ DCCG_REG_LIST_DCN2(),\ + DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 4),\ @@ -46,19 +40,10 @@ SR(PHYBSYMCLK_CLOCK_CNTL),\ SR(PHYCSYMCLK_CLOCK_CNTL) -#define DCCG_MASK_SH_LIST_DCN3AG(mask_sh) \ - DCCG_MASK_SH_LIST_DCN2_1(mask_sh),\ - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\ - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\ - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\ - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\ - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh) - #define DCCG_MASK_SH_LIST_DCN3(mask_sh) \ DCCG_MASK_SH_LIST_DCN2(mask_sh),\ + DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\ + DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c index 1fb8fd7afc95..b8e31b5ea114 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c @@ -30,8 +30,6 @@ #include "dcn30_dio_link_encoder.h" #include "stream_encoder.h" #include "dc_bios_types.h" -/* #include "dcn3ag/dcn3ag_phy_fw.h" */ - #include "gpio_service_interface.h" #define CTX \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h index f2d90f2b8bf1..5b6177c2ae98 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h @@ -55,7 +55,8 @@ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) #define LINK_ENCODER_MASK_SH_LIST_DCN30(mask_sh) \ - LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh) + LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ + LE_SF(DIG0_TMDS_DCBALANCER_CONTROL, TMDS_SYNC_DCBAL_EN, mask_sh) #define DPCS_DCN3_MASK_SH_LIST(mask_sh)\ DPCS_DCN2_MASK_SH_LIST(mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 005dbe099a7a..425b830b88d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -29,9 +29,6 @@ #include "reg_helper.h" #include "hw_shared.h" #include "dc.h" -#include "core_types.h" -#include <linux/delay.h> - #define DC_LOGGER \ enc1->base.ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c index 0d98918bf0fc..fae98cf52020 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c @@ -130,6 +130,28 @@ bool dwb3_disable(struct dwbc *dwbc) return true; } +void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + unsigned int pre_locked; + + REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked); + + /* Lock DWB registers */ + if (pre_locked == 0) + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1); + + /* Disable FC */ + REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable); + + /* Unlock DWB registers */ + if (pre_locked == 0) + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0); + + DC_LOG_DWB("%s dwb3_fc_disabled at inst = %d", __func__, dwbc->inst); +} + + bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params) { struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); @@ -226,11 +248,10 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = { .disable = dwb3_disable, .update = dwb3_update, .is_enabled = dwb3_is_enabled, + .set_fc_enable = dwb3_set_fc_enable, .set_stereo = dwb3_set_stereo, .set_new_content = dwb3_set_new_content, - .dwb_program_output_csc = NULL, .dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename - .dwb_set_scaler = NULL, }; void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h index a5d1b81e768d..0f3f7c5fbaec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h @@ -217,6 +217,7 @@ SF_DWB2(DWB_OGAM_LUT_DATA, DWBCP, 0, DWB_OGAM_LUT_DATA, mask_sh),\ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ + SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_DBG, mask_sh),\ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_HOST_SEL, mask_sh),\ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_CONFIG_MODE, mask_sh),\ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ @@ -524,6 +525,7 @@ type DWB_OGAM_LUT_DATA;\ type DWB_OGAM_LUT_WRITE_COLOR_MASK;\ type DWB_OGAM_LUT_READ_COLOR_SEL;\ + type 
DWB_OGAM_LUT_READ_DBG;\ type DWB_OGAM_LUT_HOST_SEL;\ type DWB_OGAM_LUT_CONFIG_MODE;\ type DWB_OGAM_LUT_STATUS;\ @@ -710,7 +712,7 @@ type DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET;\ type DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS;\ type DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET;\ - type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; + type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS struct dcn30_dwbc_registers { /* DCN3AG */ @@ -733,6 +735,10 @@ struct dcn30_dwbc_registers { uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT; uint32_t DWB_HOST_READ_CONTROL; uint32_t DWB_SOFT_RESET; + uint32_t DWB_DEBUG_CTRL; + uint32_t DWB_DEBUG; + uint32_t DWB_TEST_DEBUG_INDEX; + uint32_t DWB_TEST_DEBUG_DATA; /* DWBSCL */ uint32_t DWBSCL_COEF_RAM_TAP_SELECT; @@ -747,6 +753,9 @@ struct dcn30_dwbc_registers { uint32_t DWBSCL_DEST_SIZE; uint32_t DWBSCL_OVERFLOW_STATUS; uint32_t DWBSCL_OVERFLOW_COUNTER; + uint32_t DWBSCL_DEBUG; + uint32_t DWBSCL_TEST_DEBUG_INDEX; + uint32_t DWBSCL_TEST_DEBUG_DATA; /* DWBCP */ uint32_t DWB_HDR_MULT_COEF; @@ -838,6 +847,9 @@ struct dcn30_dwbc_registers { uint32_t DWB_OGAM_RAMB_REGION_28_29; uint32_t DWB_OGAM_RAMB_REGION_30_31; uint32_t DWB_OGAM_RAMB_REGION_32_33; + uint32_t DWBCP_DEBUG; + uint32_t DWBCP_TEST_DEBUG_INDEX; + uint32_t DWBCP_TEST_DEBUG_DATA; }; /* Internal enums / structs */ @@ -877,6 +889,8 @@ bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params); bool dwb3_is_enabled(struct dwbc *dwbc); +void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable); + void dwb3_set_stereo(struct dwbc *dwbc, struct dwb_stereo_params *stereo_params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c index 701c7d8bc038..03a50c32fcfe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c @@ -243,6 +243,9 @@ static bool dwb3_program_ogam_lut( return false; } + if (params->hw_points_num == 0) + return false; + REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2); current_mode = dwb3_get_ogam_current(dwbc30); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c index 152c9c5733f1..6a5af3da4b45 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c @@ -95,7 +95,7 @@ int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub, bool hubbub3_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h index 7b597908b937..ca6233e8f1f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h @@ -124,7 +124,7 @@ bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub, bool hubbub3_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index 2861d974fcf6..60a64d290352 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -316,7 +316,7 @@ bool hubp3_program_surface_flip_and_addr( return true; } -static void hubp3_program_tiling( +void hubp3_program_tiling( struct 
dcn20_hubp *hubp2, const union dc_tiling_info *info, const enum surface_pixel_format pixel_format) @@ -455,6 +455,9 @@ void hubp3_read_state(struct hubp *hubp) if (REG(DCHUBP_CNTL)) s->hubp_cntl = REG_READ(DCHUBP_CNTL); + if (REG(DCSURF_FLIP_CONTROL)) + s->flip_control = REG_READ(DCSURF_FLIP_CONTROL); + } void hubp3_setup( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h index 8a32772d4e91..b010531a7fe8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h @@ -278,6 +278,11 @@ void hubp3_setup( struct _vcs_dpi_display_rq_regs_st *rq_regs, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); +void hubp3_program_tiling( + struct dcn20_hubp *hubp2, + const union dc_tiling_info *info, + const enum surface_pixel_format pixel_format); + void hubp3_dcc_control(struct hubp *hubp, bool enable, enum hubp_ind_block_size blk_size); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index d1500b223858..fca94e50ae93 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -44,6 +44,36 @@ #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) +void mpc3_mpc_init(struct mpc *mpc) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + int opp_id; + + mpc1_mpc_init(mpc); + + for (opp_id = 0; opp_id < MAX_OPP; opp_id++) { + if (REG(MUX[opp_id])) + /* disable mpc out rate and flow control */ + REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE, + 1, MPC_OUT_FLOW_CONTROL_COUNT, 0); + } +} + +void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + mpc1_mpc_init_single_inst(mpc, mpcc_id); + + /* assuming mpc out mux is connected to opp with the same index at this + * point in time (e.g. 
transitioning from vbios to driver) + */ + if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id])) + /* disable mpc out rate and flow control */ + REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE, + 1, MPC_OUT_FLOW_CONTROL_COUNT, 0); +} + bool mpc3_is_dwb_idle( struct mpc *mpc, int dwb_id) @@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux( MPC_DWB0_MUX, 0xf); } -void mpc3_set_out_rate_control( - struct mpc *mpc, - int opp_id, - bool enable, - bool rate_2x_mode, - struct mpc_dwb_flow_control *flow_control) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_UPDATE_2(MUX[opp_id], - MPC_OUT_RATE_CONTROL_DISABLE, !enable, - MPC_OUT_RATE_CONTROL, rate_2x_mode); - - if (flow_control) - REG_UPDATE_2(MUX[opp_id], - MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode, - MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1); -} - enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id) { /*Contrary to DCN2 and DCN1 wherein a single status register field holds this info; @@ -1129,6 +1140,64 @@ void mpc3_set_gamut_remap( } } +static void read_gamut_remap(struct dcn30_mpc *mpc30, + int mpcc_id, + uint16_t *regval, + uint32_t *select) +{ + struct color_matrices_reg gam_regs; + + //current coefficient set in use + REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, select); + + gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A; + gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A; + gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A; + gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A; + + if (*select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]); + gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]); + + cm_helper_read_color_matrices( + mpc30->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]); + gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]); + + cm_helper_read_color_matrices( + mpc30->base.ctx, + regval, + &gam_regs); + + } + +} + +void mpc3_get_gamut_remap(struct mpc *mpc, + int mpcc_id, + struct mpc_grph_gamut_adjustment *adjust) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + uint16_t arr_reg_val[12] = {0}; + int select; + + read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + bool mpc3_program_3dlut( struct mpc *mpc, const struct tetrahedral_params *params, @@ -1382,12 +1451,58 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc) } } +static void mpc3_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + uint32_t rmu_status = 0xf; + + REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); + REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); + REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); + REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, + MPCC_ALPHA_BLND_MODE, &s->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); + REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, + MPCC_BUSY, &s->busy); + + /* Color blocks state */ + REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, 
&rmu_status); + + if (rmu_status == mpcc_inst) { + REG_GET(SHAPER_CONTROL[0], + MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode); + REG_GET(RMU_3DLUT_MODE[0], + MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[0], + MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(RMU_3DLUT_MODE[0], + MPC_RMU_3DLUT_SIZE, &s->lut3d_size); + } else { + REG_GET(SHAPER_CONTROL[1], + MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode); + REG_GET(RMU_3DLUT_MODE[1], + MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[1], + MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(RMU_3DLUT_MODE[1], + MPC_RMU_3DLUT_SIZE, &s->lut3d_size); + } + + REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst], + MPCC_OGAM_MODE_CURRENT, &s->rgam_mode, + MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut); +} + static const struct mpc_funcs dcn30_mpc_funcs = { - .read_mpcc_state = mpc1_read_mpcc_state, + .read_mpcc_state = mpc3_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, - .mpc_init = mpc1_mpc_init, - .mpc_init_single_inst = mpc1_mpc_init_single_inst, + .mpc_init = mpc3_mpc_init, + .mpc_init_single_inst = mpc3_mpc_init_single_inst, .update_blending = mpc2_update_blending, .cursor_lock = mpc1_cursor_lock, .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp, @@ -1404,7 +1519,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = { .set_dwb_mux = mpc3_set_dwb_mux, .disable_dwb_mux = mpc3_disable_dwb_mux, .is_dwb_idle = mpc3_is_dwb_idle, - .set_out_rate_control = mpc3_set_out_rate_control, .set_gamut_remap = mpc3_set_gamut_remap, .program_shaper = mpc3_program_shaper, .acquire_rmu = mpcc3_acquire_rmu, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h index 5198f2167c7c..ce93003dae01 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h @@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30, int num_mpcc, int num_rmu); +void mpc3_mpc_init( + struct mpc *mpc); + +void mpc3_mpc_init_single_inst( + struct mpc *mpc, + unsigned int mpcc_id); + bool mpc3_program_shaper( struct mpc *mpc, const struct pwl_params *params, @@ -1056,6 +1063,10 @@ void mpc3_set_gamut_remap( int mpcc_id, const struct mpc_grph_gamut_adjustment *adjust); +void mpc3_get_gamut_remap(struct mpc *mpc, + int mpcc_id, + struct mpc_grph_gamut_adjustment *adjust); + void mpc3_set_rmu_mux( struct mpc *mpc, int rmu_idx, @@ -1074,13 +1085,6 @@ bool mpc3_is_dwb_idle( struct mpc *mpc, int dwb_id); -void mpc3_set_out_rate_control( - struct mpc *mpc, - int opp_id, - bool enable, - bool rate_2x_mode, - struct mpc_dwb_flow_control *flow_control); - void mpc3_power_on_ogam_lut( struct mpc *mpc, int mpcc_id, bool power_on); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h index ed9a5549c389..466ba20b9c61 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h @@ -26,6 +26,7 @@ #ifndef __DAL_DCN30_VPG_H__ #define __DAL_DCN30_VPG_H__ +#include "vpg.h" #define DCN30_VPG_FROM_VPG(vpg)\ container_of(vpg, struct dcn30_vpg, base) @@ -132,28 +133,6 @@ struct dcn30_vpg_mask { VPG_DCN3_REG_FIELD_LIST(uint32_t); }; -struct vpg; - -struct vpg_funcs { - void (*update_generic_info_packet)( - struct vpg *vpg, - uint32_t packet_index, - const struct dc_info_packet *info_packet, - bool immediate_update); - - void (*vpg_poweron)( - struct vpg *vpg); - - 
void (*vpg_powerdown)( - struct vpg *vpg); -}; - -struct vpg { - const struct vpg_funcs *funcs; - struct dc_context *ctx; - int inst; -}; - struct dcn30_vpg { struct vpg base; const struct dcn30_vpg_registers *regs; diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 30fbc5e06dca..d241f665e40a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -10,9 +10,8 @@ # # Makefile for dcn30. -DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ - dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o \ - dcn301_optc.o +DCN301 = dcn301_dccg.o \ + dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h index 73db962dbc03..067e49cb238e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dccg.h @@ -56,10 +56,4 @@ struct dccg *dccg301_create( const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask); -struct dccg *dccg301_create( - struct dc_context *ctx, - const struct dccg_registers *regs, - const struct dccg_shift *dccg_shift, - const struct dccg_mask *dccg_mask); - #endif //__DCN301_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c index a046664e2031..c1959672df50 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c @@ -63,6 +63,7 @@ static const struct hubbub_funcs hubbub301_funcs = { .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, + .init_watermarks = hubbub3_init_watermarks, .hubbub_read_state = hubbub2_read_state, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c index ad0df1a72a90..9e96a3ace207 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c @@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct( dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs; dcn301_panel_cntl->base.ctx = init_data->ctx; dcn301_panel_cntl->base.inst = init_data->inst; + dcn301_panel_cntl->base.pwrseq_inst = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile deleted file mode 100644 index 95b66baf39e9..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# -# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved -# -# Authors: AMD -# -# Makefile for dcn302. - -DCN3_02 = dcn302_init.o dcn302_resource.o - -AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_02) diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile index d7b3ad780e5d..a954e316aca2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile @@ -6,7 +6,7 @@ # # Makefile for dcn303. 
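Stepping back to the mpc3_get_gamut_remap/read_gamut_remap pair added earlier: it reads which coefficient set (A or B) is currently selected, pulls the 12 matrix coefficients back through cm_helper_read_color_matrices, and converts them with convert_hw_matrix. A hedged example of how verification code could use the new readback; the helper and its comparison policy are illustrative, not from the driver:

/* Hypothetical check: does the hardware agree with the expected state? */
static bool example_verify_gamut_remap(struct mpc *mpc, int mpcc_id,
		const struct mpc_grph_gamut_adjustment *expected)
{
	struct mpc_grph_gamut_adjustment readback = { 0 };

	mpc3_get_gamut_remap(mpc, mpcc_id, &readback);

	/* A full check would also compare temperature_matrix entry by entry. */
	return readback.gamut_adjust_type == expected->gamut_adjust_type;
}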
-DCN3_03 = dcn303_init.o dcn303_resource.o +DCN3_03 = dcn303_init.o AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile index 96e45c9efb46..5d93ac16c03a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile @@ -10,8 +10,8 @@ # # Makefile for dcn31. -DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \ - dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \ +DCN31 = dcn31_hubbub.o dcn31_hubp.o \ + dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \ dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \ dcn31_afmt.o dcn31_vpg.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index e3caaacf7493..e3be0bab4007 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -34,12 +34,14 @@ DCCG_SRII(DTO_PARAM, DPPCLK, 1),\ DCCG_SRII(DTO_PARAM, DPPCLK, 2),\ DCCG_SRII(DTO_PARAM, DPPCLK, 3),\ + DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0),\ SR(PHYASYMCLK_CLOCK_CNTL),\ SR(PHYBSYMCLK_CLOCK_CNTL),\ SR(PHYCSYMCLK_CLOCK_CNTL),\ SR(PHYDSYMCLK_CLOCK_CNTL),\ SR(PHYESYMCLK_CLOCK_CNTL),\ SR(DPSTREAMCLK_CNTL),\ + SR(HDMISTREAMCLK_CNTL),\ SR(SYMCLK32_SE_CNTL),\ SR(SYMCLK32_LE_CNTL),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\ @@ -78,6 +80,8 @@ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\ + DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\ @@ -92,6 +96,8 @@ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE1_EN, mask_sh),\ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE2_EN, mask_sh),\ DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK_PIPE3_EN, mask_sh),\ + DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\ + DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 4596f3bac1b4..b2cea59ba5d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -125,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc, cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); - if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return false; return true; @@ -205,7 +205,7 @@ void dcn31_link_encoder_set_dio_phy_mux( } } -static void enc31_hw_init(struct link_encoder *enc) +void enc31_hw_init(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); @@ -436,7 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx, 
cmd.dig1_dpia_control.dpia_control = *dpia_control; - dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h index 221671563a0b..ee78ba80797c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h @@ -89,6 +89,7 @@ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ SR(RDPCSTX0_RDPCSTX_SCRATCH), \ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) @@ -222,6 +223,7 @@ SRI(RDPCSTX_PHY_FUSE1, RDPCSTX, id), \ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ SR(RDPCSTX0_RDPCSTX_SCRATCH), \ SRI(RDPCSTX_PHY_RX_LD_VAL, RDPCSTX, id),\ SRI(RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG, RDPCSTX, id) @@ -283,4 +285,6 @@ bool dcn31_link_encoder_is_in_alt_mode( void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings); +void enc31_hw_init(struct link_encoder *enc); + #endif /* __DC_LINK_ENCODER__DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c index 5b7ad38f85e0..03b4ac2f1991 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c @@ -377,7 +377,7 @@ void dcn31_hpo_dp_link_enc_update_stream_allocation_table( */ REG_WAIT(DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, 0, - 10, DP_SAT_UPDATE_MAX_RETRY); + 100, DP_SAT_UPDATE_MAX_RETRY); } void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( @@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( x), 25)); + // If y rounds up to integer, carry it over to x. 
+ if (y >> 25) { + x += 1; + y = 0; + } + switch (stream_encoder_inst) { case 0: REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 45143459eedd..678db949cfe3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -474,6 +474,10 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets( &info_frame->hdrsmd, true); + /* packetIndex 4 is reserved for the immediate SDP message; please * use another packetIndex (such as 5 or 6) for other info packets */ + if (info_frame->adaptive_sync.valid) enc->vpg->funcs->update_generic_info_packet( enc->vpg, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 5b5b5e0775fa..b906db6e7355 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -172,7 +172,7 @@ static uint32_t convert_and_clamp( static bool hubbub31_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -362,7 +362,7 @@ static bool hubbub31_program_urgent_watermarks( static bool hubbub31_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -635,7 +635,7 @@ static bool hubbub31_program_stutter_watermarks( static bool hubbub31_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -717,7 +717,7 @@ static bool hubbub31_program_pstate_watermarks( static bool hubbub31_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 217acd4e292a..20c6fe48567f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -50,9 +50,9 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub cmd->panel_cntl.header.type = DMUB_CMD__PANEL_CNTL; cmd->panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO; cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data); - cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst; + cmd->panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst; - return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); + return dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); } static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) @@ -78,14 +78,14 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) cmd.panel_cntl.header.type = DMUB_CMD__PANEL_CNTL; cmd.panel_cntl.header.sub_type = DMUB_CMD__PANEL_CNTL_HW_INIT; cmd.panel_cntl.header.payload_bytes = sizeof(cmd.panel_cntl.data); - cmd.panel_cntl.data.inst = dcn31_panel_cntl->base.inst; + cmd.panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst; cmd.panel_cntl.data.bl_pwm_cntl =
panel_cntl->stored_backlight_registers.BL_PWM_CNTL; cmd.panel_cntl.data.bl_pwm_period_cntl = panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL; cmd.panel_cntl.data.bl_pwm_ref_div1 = panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; cmd.panel_cntl.data.bl_pwm_ref_div2 = panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2; - if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + if (!dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return 0; panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl; @@ -154,7 +154,31 @@ void dcn31_panel_cntl_construct( struct dcn31_panel_cntl *dcn31_panel_cntl, const struct panel_cntl_init_data *init_data) { + uint8_t pwrseq_inst = 0xF; + dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs; dcn31_panel_cntl->base.ctx = init_data->ctx; dcn31_panel_cntl->base.inst = init_data->inst; + + switch (init_data->eng_id) { + case ENGINE_ID_DIGA: + pwrseq_inst = 0; + break; + case ENGINE_ID_DIGB: + pwrseq_inst = 1; + break; + default: + DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id); + ASSERT(false); + break; + } + + if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1) + // If supported, the power sequencer mapping follows the DIG instance + dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst; + else + /* If not supported, pwrseq is assigned in order, + * so the first pwrseq goes to the first panel instance (legacy behavior) + */ + dcn31_panel_cntl->base.pwrseq_inst = dcn31_panel_cntl->base.inst; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c index f1deb1c3c363..cfb923d85630 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c @@ -63,7 +63,12 @@ void vpg31_poweron(struct vpg *vpg) { struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg); - if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false) + uint32_t vpg_gsp_mem_pwr_state; + + REG_GET(VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, &vpg_gsp_mem_pwr_state); + + if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false && + vpg_gsp_mem_pwr_state == 0) return; REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h index 0e76eabce441..609e58dbd056 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h @@ -26,6 +26,7 @@ #ifndef __DAL_DCN31_VPG_H__ #define __DAL_DCN31_VPG_H__ +#include "vpg.h" #define DCN31_VPG_FROM_VPG(vpg)\ container_of(vpg, struct dcn31_vpg, base) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile index 72456debb99f..b134ab05aa71 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile @@ -10,8 +10,7 @@ # # Makefile for dcn314.
-DCN314 = dcn314_resource.o dcn314_init.o \ - dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o +DCN314 = dcn314_dio_stream_encoder.o dcn314_dccg.o AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile deleted file mode 100644 index 819d44a9439b..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright 2021 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# Authors: AMD -# -# Makefile for dcn316. - -DCN316 = dcn316_resource.o - -AMD_DAL_DCN316 = $(addprefix $(AMDDALPATH)/dc/dcn316/,$(DCN316)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN316) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile index 8bb251307247..a58c37165f5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile @@ -10,10 +10,10 @@ # # Makefile for dcn32. 
-DCN32 = dcn32_resource.o dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \ - dcn32_dccg.o dcn32_optc.o dcn32_mmhubbub.o dcn32_hubp.o dcn32_dpp.o \ - dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_hpo_dp_link_encoder.o \ - dcn32_resource_helpers.o dcn32_mpc.o +DCN32 = dcn32_hubbub.o dcn32_dccg.o \ + dcn32_mmhubbub.o dcn32_hubp.o dcn32_mpc.o \ + dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \ + dcn32_hpo_dp_link_encoder.o AMD_DAL_DCN32 = $(addprefix $(AMDDALPATH)/dc/dcn32/,$(DCN32)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index 501388014855..d9ff95cd2dbd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -34,6 +34,7 @@ #include "dc_bios_types.h" #include "link_enc_cfg.h" +#include "dc_dmub_srv.h" #include "gpio_service_interface.h" #ifndef MIN @@ -61,6 +62,38 @@ #define AUX_REG_WRITE(reg_name, val) \ dm_write_reg(CTX, AUX_REG(reg_name), val) +static uint8_t phy_id_from_transmitter(enum transmitter t) +{ + uint8_t phy_id; + + switch (t) { + case TRANSMITTER_UNIPHY_A: + phy_id = 0; + break; + case TRANSMITTER_UNIPHY_B: + phy_id = 1; + break; + case TRANSMITTER_UNIPHY_C: + phy_id = 2; + break; + case TRANSMITTER_UNIPHY_D: + phy_id = 3; + break; + case TRANSMITTER_UNIPHY_E: + phy_id = 4; + break; + case TRANSMITTER_UNIPHY_F: + phy_id = 5; + break; + case TRANSMITTER_UNIPHY_G: + phy_id = 6; + break; + default: + phy_id = 0; + break; + } + return phy_id; +} void enc32_hw_init(struct link_encoder *enc) { @@ -117,38 +150,50 @@ void dcn32_link_encoder_enable_dp_output( } } -static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc) +static bool query_dp_alt_from_dmub(struct link_encoder *enc, + union dmub_rb_cmd *cmd) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t dp_alt_mode_disable = 0; - bool is_usb_c_alt_mode = false; - if (enc->features.flags.bits.DP_IS_USB_C) { - /* if value == 1 alt mode is disabled, otherwise it is enabled */ - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - is_usb_c_alt_mode = (dp_alt_mode_disable == 0); - } + memset(cmd, 0, sizeof(*cmd)); + cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; + cmd->query_dp_alt.header.sub_type = + DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; + cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); + cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); - return is_usb_c_alt_mode; + if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return false; + + return true; } -static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, +bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + union dmub_rb_cmd cmd; + + if (!query_dp_alt_from_dmub(enc, &cmd)) + return false; + + return (cmd.query_dp_alt.data.is_dp_alt_disable == 0); +} + +void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) { - struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t is_in_usb_c_dp4_mode = 0; + union dmub_rb_cmd cmd; dcn10_link_encoder_get_max_link_cap(enc, link_settings); - /* in usb c dp2 mode, max lane count is 2 */ - if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) { - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); - if (!is_in_usb_c_dp4_mode) - 
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); - } + if (!query_dp_alt_from_dmub(enc, &cmd)) + return; + if (cmd.query_dp_alt.data.is_usb && + cmd.query_dp_alt.data.is_dp4 == 0) + link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); } + static const struct link_encoder_funcs dcn32_link_enc_funcs = { .read_state = link_enc2_read_state, .validate_output_with_stream = @@ -203,13 +248,13 @@ void dcn32_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; - enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; enc10->base.features = *enc_features; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; + enc10->base.transmitter = init_data->transmitter; /* set the flag to indicate whether driver poll the I2C data pin diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h index bbcfce06bec0..35d23d9db45e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h @@ -26,15 +26,7 @@ #ifndef __DC_LINK_ENCODER__DCN32_H__ #define __DC_LINK_ENCODER__DCN32_H__ -#include "dcn31/dcn31_dio_link_encoder.h" - -#define LE_DCN32_REG_LIST(id)\ - LE_DCN31_REG_LIST(id),\ - SRI(DIG_FIFO_CTRL0, DIG, id) - -#define LINK_ENCODER_MASK_SH_LIST_DCN32(mask_sh) \ - LINK_ENCODER_MASK_SH_LIST_DCN31(mask_sh),\ - LE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh) +#include "dcn30/dcn30_dio_link_encoder.h" void dcn32_link_encoder_construct( struct dcn20_link_encoder *enc20, @@ -53,4 +45,9 @@ void dcn32_link_encoder_enable_dp_output( const struct dc_link_settings *link_settings, enum clock_source_id clock_source); +bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc); + +void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings); + #endif /* __DC_LINK_ENCODER__DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h index 1be5410cce97..ca53d39561d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h @@ -177,11 +177,12 @@ SE_SF(DIG0_DIG_FE_CNTL, DIG_SYMCLK_FE_ON, mask_sh),\ SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\ SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh),\ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, mask_sh),\ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, mask_sh),\ SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET, mask_sh),\ - SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\ - SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh) + SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh) + void dcn32_dio_stream_encoder_construct( struct dcn10_stream_encoder *enc1, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index 88dfc907553d..515c4c2b4c21 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -167,7 +167,7 @@ static uint32_t convert_and_clamp( bool 
hubbub32_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -357,7 +357,7 @@ bool hubbub32_program_urgent_watermarks( bool hubbub32_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -503,7 +503,7 @@ bool hubbub32_program_stutter_watermarks( bool hubbub32_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -656,7 +656,7 @@ bool hubbub32_program_pstate_watermarks( bool hubbub32_program_usr_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -750,7 +750,7 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow) static bool hubbub32_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h index f073839a4b6d..e439ba0fa30f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h @@ -118,25 +118,25 @@ bool hubbub32_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub32_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub32_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub32_program_usr_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c index 1d052f08aff5..e408e859b355 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c @@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc) struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); int mpcc_id; - mpc1_mpc_init(mpc); + mpc3_mpc_init(mpc); if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) { @@ -71,12 +71,13 @@ void mpc32_power_on_blnd_lut( { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0, MPCC_MCM_1DLUT_MEM_PWR_DIS, power_on); + if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm) { if (power_on) { REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0); REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5); } else if (!mpc->ctx->dc->debug.disable_mem_low_power) { - ASSERT(false); /* TODO: change to mpc * dpp_base->ctx->dc->optimized_required = true; * dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; @@ -237,16 +238,19 @@ void mpc32_program_post1dlut_pwl( 
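/* Note on the MPCC_MCM_1DLUT_LUT_INDEX writes added below: this assumes
 * the 1DLUT index auto-increments on every MPCC_MCM_1DLUT_LUT_DATA
 * write, so the index has to be rewound to 0 before each write color
 * mask is switched; otherwise the green and blue passes would start
 * mid-table instead of at entry 0.
 */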
REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg); REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red); } else { + REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0); REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 4); for (i = 0 ; i < num; i++) REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg); REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red); + REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0); REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 2); for (i = 0 ; i < num; i++) REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].green_reg); REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_green); + REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0); REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 1); for (i = 0 ; i < num; i++) REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].blue_reg); @@ -987,7 +991,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, .mpc_init = mpc32_mpc_init, - .mpc_init_single_inst = mpc1_mpc_init_single_inst, + .mpc_init_single_inst = mpc3_mpc_init_single_inst, .update_blending = mpc2_update_blending, .cursor_lock = mpc1_cursor_lock, .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp, @@ -1004,7 +1008,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .set_dwb_mux = mpc3_set_dwb_mux, .disable_dwb_mux = mpc3_disable_dwb_mux, .is_dwb_idle = mpc3_is_dwb_idle, - .set_out_rate_control = mpc3_set_out_rate_control, .set_gamut_remap = mpc3_set_gamut_remap, .program_shaper = mpc32_program_shaper, .program_3dlut = mpc32_program_3dlut, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index bc5f0db23d0c..fbcd6f7bc993 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -24,35 +24,17 @@ */ // header file of functions being implemented -#include "dcn32_resource.h" +#include "dcn32/dcn32_resource.h" #include "dcn20/dcn20_resource.h" #include "dml/dcn32/display_mode_vba_util_32.h" #include "dml/dcn32/dcn32_fpu.h" +#include "dc_state_priv.h" static bool is_dual_plane(enum surface_pixel_format format) { return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } - -uint32_t dcn32_helper_mall_bytes_to_ways( - struct dc *dc, - uint32_t total_size_in_mall_bytes) -{ - uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways; - - /* add 2 lines for worst case alignment */ - cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; - - total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; - lines_per_way = total_cache_lines / dc->caps.cache_num_ways; - num_ways = cache_lines_used / lines_per_way; - if (cache_lines_used % lines_per_way > 0) - num_ways++; - - return num_ways; -} - uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -111,8 +93,10 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp( if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) { if 
(dc->debug.force_subvp_num_ways) { return dc->debug.force_subvp_num_ways; + } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) { + return dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); } else { - return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); + return 0; } } else { return 0; @@ -190,7 +174,7 @@ bool dcn32_subvp_in_use(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) + if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) return true; } return false; @@ -264,18 +248,17 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint // Do not override if a stream has multiple planes for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count > 1) { + if (context->stream_status[i].plane_count > 1) return; - } - if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) { + + if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM) stream_count++; - } } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) { if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) { @@ -290,7 +273,7 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) { if (pipe_segments[i] > 4) pipe_segments[i] = 4; @@ -337,14 +320,14 @@ void dcn32_determine_det_override(struct dc *dc, for (i = 0; i < context->stream_count; i++) { /* Don't count SubVP streams for DET allocation */ - if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) + if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM) stream_count++; } if (stream_count > 0) { stream_segments = 18 / stream_count; for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) + if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM) continue; if (context->stream_status[i].plane_count > 0) @@ -399,7 +382,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting; for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -430,71 +413,6 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, dcn32_determine_det_override(dc, context, pipes); } -/** - * dcn32_save_mall_state(): Save MALL 
(SubVP) state for fast validation cases - * - * This function saves the MALL (SubVP) case for fast validation cases. For fast validation, - * there are situations where a shallow copy of the dc->current_state is created for the - * validation. In this case we want to save and restore the mall config because we always - * teardown subvp at the beginning of validation (and don't attempt to add it back if it's - * fast validation). If we don't restore the subvp config in cases of fast validation + - * shallow copy of the dc->current_state, the dc->current_state will have a partially - * removed subvp state when we did not intend to remove it. - * - * NOTE: This function ONLY works if the streams are not moved to a different pipe in the - * validation. We don't expect this to happen in fast_validation=1 cases. - * - * @dc: Current DC state - * @context: New DC state to be programmed - * @temp_config: struct used to cache the existing MALL state - * - * Return: void - */ -void dcn32_save_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config) -{ - uint32_t i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (pipe->stream) - temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config; - - if (pipe->plane_state) - temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom; - } -} - -/** - * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases - * - * Restore the MALL state based on the previously saved state from dcn32_save_mall_state - * - * @dc: Current DC state - * @context: New DC state to be programmed, restore MALL state into here - * @temp_config: struct that has the cached MALL state - * - * Return: void - */ -void dcn32_restore_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config) -{ - uint32_t i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (pipe->stream) - pipe->stream->mall_stream_config = temp_config->mall_stream_config[i]; - - if (pipe->plane_state) - pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i]; - } -} - #define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must ensure to match value in FW) /* * Scaling factor for v_blank stretch calculations considering timing in @@ -589,13 +507,14 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream) * * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL */ -struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context) +struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context) { int refresh_rate = 0; const int minimum_refreshrate_supported = 120; struct dc_stream_state *fpo_candidate_stream = NULL; bool is_fpo_vactive = false; uint32_t fpo_vactive_margin_us = 0; + struct dc_stream_status *fpo_stream_status = NULL; if (context == NULL) return NULL; @@ -618,16 +537,28 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre DC_FP_START(); dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream); DC_FP_END(); - + if (fpo_candidate_stream) + fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream); DC_FP_START(); is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us); DC_FP_END(); 
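/* Vactive FPO path: the FPU code above both picks the candidate stream
 * and checks whether some pipe has at least
 * debug.fpo_vactive_min_active_margin_us of active margin; bail out
 * below if no such pipe exists or vactive FPO is disabled.
 */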
if (!is_fpo_vactive || dc->debug.disable_fpo_vactive) return NULL; - } else + } else { fpo_candidate_stream = context->streams[0]; + if (fpo_candidate_stream) + fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream); + } - if (!fpo_candidate_stream) + /* In DCN32/321, FPO uses per-pipe P-State force. + * If there are no planes, HUBP is power gated and + * therefore programming UCLK_PSTATE_FORCE does + * nothing (P-State will always be asserted naturally + * on a pipe whose HUBP is power gated). Therefore we + * only want to enable FPO if the FPO pipe has both + * a stream and a plane. + */ + if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0) return NULL; if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams) @@ -666,6 +597,30 @@ bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int widt } /** + * disallow_subvp_in_active_plus_blank() - Determine disallowed SubVP + DRR/vblank configs + * + * @pipe: subvp pipe to be used for the subvp + drr/vblank config + * + * Since SubVP is being enabled on more configs (such as 1080p60), we want + * to explicitly block any configs that we don't want to enable. We do not + * want to enable any 1080p60 SubVP + DRR/vblank configs since these + * are already covered by FPO. + * + * Return: True if disallowed, false otherwise + */ +static bool disallow_subvp_in_active_plus_blank(struct pipe_ctx *pipe) +{ + bool disallow = false; + + if (resource_is_pipe_type(pipe, OPP_HEAD) && + resource_is_pipe_type(pipe, DPP_PIPE)) { + if (pipe->stream->timing.v_addressable == 1080 && pipe->stream->timing.h_addressable == 1920) + disallow = true; + } + return disallow; +} + +/** * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible * * @dc: Current DC state * @context: New DC state to be programmed @@ -688,21 +643,24 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) bool drr_pipe_found = false; bool drr_psr_capable = false; uint64_t refresh_rate = 0; + bool subvp_disallow = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (resource_is_pipe_type(pipe, OPP_HEAD) && resource_is_pipe_type(pipe, DPP_PIPE)) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe_mall_type == SUBVP_MAIN) { subvp_count++; + subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); } - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (pipe_mall_type == SUBVP_NONE) { non_subvp_pipes++; drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe)); if (pipe->stream->ignore_msa_timing_param && @@ -713,7 +671,7 @@ } } - if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && + if (subvp_count == 1 && !subvp_disallow && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && ((uint32_t)refresh_rate < 120)) result = true; @@ -746,21 +704,24 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int struct vba_vars_st *vba = &context->bw_ctx.dml.vba; bool vblank_psr_capable = false; uint64_t
refresh_rate = 0; + bool subvp_disallow = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (resource_is_pipe_type(pipe, OPP_HEAD) && resource_is_pipe_type(pipe, DPP_PIPE)) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe_mall_type == SUBVP_MAIN) { subvp_count++; + subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); } - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (pipe_mall_type == SUBVP_NONE) { non_subvp_pipes++; vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe)); if (pipe->stream->ignore_msa_timing_param && @@ -772,9 +733,41 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int } if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable && - ((uint32_t)refresh_rate < 120) && + ((uint32_t)refresh_rate < 120) && !subvp_disallow && vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) result = true; return result; } + +void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe = NULL; + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + int odm_slice_count = 0; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + odm_slice_count = resource_get_odm_slice_count(pipe); + + if (odm_slice_count == 1) + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + else if (odm_slice_count == 2) + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + else if (odm_slice_count == 4) + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; + + pipe_cnt++; + } +} + +void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context) +{ + if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ) + context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile index 0a199c83bb5b..c195c47f58b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile @@ -10,7 +10,7 @@ # # Makefile for dcn321. -DCN321 = dcn321_resource.o dcn321_dio_link_encoder.o +DCN321 = dcn321_dio_link_encoder.o AMD_DAL_DCN321 = $(addprefix $(AMDDALPATH)/dc/dcn321/,$(DCN321)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c index 13be5f06d987..05783daa62ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c @@ -127,11 +127,6 @@ void dcn321_link_encoder_construct( * while doing the DP sink detect */ -/* if (dal_adapter_service_is_feature_supported(as, - FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) - enc10->base.features.flags.bits. 
- DP_SINK_DETECT_POLL_DATA_PIN = true;*/ - enc10->base.output_signals = SIGNAL_TYPE_DVI_SINGLE_LINK | SIGNAL_TYPE_DVI_DUAL_LINK | @@ -191,7 +186,6 @@ void dcn321_link_encoder_construct( __func__, result); } - if (enc10->base.ctx->dc->debug.hdmi20_disable) { + if (enc10->base.ctx->dc->debug.hdmi20_disable) enc10->base.features.flags.bits.HDMI_6GB_EN = 0; - } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile index 20d0eef1a13b..d5b4533d2f62 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile @@ -10,10 +10,10 @@ # # Makefile for DCN35. -DCN35 = dcn35_resource.o dcn35_init.o dcn35_dio_stream_encoder.o \ - dcn35_dio_link_encoder.o dcn35_dccg.o dcn35_optc.o \ - dcn35_dsc.o dcn35_hubp.o dcn35_hubbub.o \ - dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o +DCN35 = dcn35_dio_stream_encoder.o \ + dcn35_dio_link_encoder.o dcn35_dccg.o \ + dcn35_hubp.o dcn35_hubbub.o \ + dcn35_mmhubbub.o dcn35_opp.o dcn35_pg_cntl.o dcn35_dwb.o AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c index addedcfd1238..58dd3c5bbff0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c @@ -49,15 +49,23 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg, switch (dpp_inst) { case 0: REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable); break; case 1: REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable); break; case 2: REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable); break; case 3: REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable); break; default: break; @@ -100,6 +108,32 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } +static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg, + uint32_t dpp_inst, uint32_t enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + return; + + switch (dpp_inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable); + break; + default: + break; + } +} + static void dccg35_get_pixel_rate_div( struct dccg *dccg, uint32_t otg_inst, @@ -256,6 +290,21 @@ static void dccg35_set_dtbclk_dto( if (params->ref_dtbclk_khz && req_dtbclk_khz) { uint32_t modulo, phase; + switch (params->otg_inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 1); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 1); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, 
DTBCLK_P2_GATE_DISABLE, 1); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 1); + break; + } + // phase / modulo = dtbclk / dtbclk ref modulo = params->ref_dtbclk_khz * 1000; phase = req_dtbclk_khz * 1000; @@ -280,6 +329,21 @@ static void dccg35_set_dtbclk_dto( REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], PIPE_DTO_SRC_SEL[params->otg_inst], 2); } else { + switch (params->otg_inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 0); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 0); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 0); + break; + } + REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst], DTBCLK_DTO_ENABLE[params->otg_inst], 0, PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1); @@ -303,21 +367,116 @@ static void dccg35_set_dpstreamclk( /* enabled to select one of the DTBCLKs for pipe */ switch (dp_hpo_inst) { case 0: - REG_UPDATE_2(DPSTREAMCLK_CNTL, - DPSTREAMCLK0_EN, + REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); break; case 1: REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); break; case 2: REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); break; case 3: REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + + +static void dccg35_set_dpstreamclk_root_clock_gating( + struct dccg *dccg, + int dp_hpo_inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (dp_hpo_inst) { + case 0: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, enable ? 1 : 0); + } + break; + case 1: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, enable ? 1 : 0); + } + break; + case 2: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, enable ? 1 : 0); + } + break; + case 3: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, enable ? 
1 : 0); + } + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + + + +static void dccg35_set_physymclk_root_clock_gating( + struct dccg *dccg, + int phy_inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + return; + + switch (phy_inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYA_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYB_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYC_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYD_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); + break; + case 4: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYE_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; default: BREAK_TO_DEBUGGER(); @@ -340,16 +499,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, 1, PHYASYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYASYMCLK_ROOT_GATE_DISABLE, 1); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYA_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, 0, PHYASYMCLK_SRC_SEL, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYASYMCLK_ROOT_GATE_DISABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYA_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 1: @@ -357,16 +516,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, 1, PHYBSYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYBSYMCLK_ROOT_GATE_DISABLE, 1); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYB_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, 0, PHYBSYMCLK_SRC_SEL, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYBSYMCLK_ROOT_GATE_DISABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYB_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 2: @@ -374,16 +533,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, 1, PHYCSYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYCSYMCLK_ROOT_GATE_DISABLE, 1); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYC_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, 0, PHYCSYMCLK_SRC_SEL, 0); - if 
(dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYCSYMCLK_ROOT_GATE_DISABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYC_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 3: @@ -391,16 +550,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN, 1, PHYDSYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYDSYMCLK_ROOT_GATE_DISABLE, 1); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYD_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN, 0, PHYDSYMCLK_SRC_SEL, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYDSYMCLK_ROOT_GATE_DISABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYD_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 4: @@ -408,16 +567,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN, 1, PHYESYMCLK_SRC_SEL, clk_src); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYESYMCLK_ROOT_GATE_DISABLE, 1); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYE_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN, 0, PHYESYMCLK_SRC_SEL, 0); - if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, - PHYESYMCLK_ROOT_GATE_DISABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYE_REFCLK_ROOT_GATE_DISABLE, 1); } break; default: @@ -454,12 +613,12 @@ static void dccg35_dpp_root_clock_control( if (clock_on) { /* turn off the DTO and leave phase/modulo at max */ - dcn35_set_dppclk_enable(dccg, dpp_inst, 0); + dcn35_set_dppclk_enable(dccg, dpp_inst, 1); REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, DPPCLK0_DTO_PHASE, 0xFF, DPPCLK0_DTO_MODULO, 0xFF); } else { - dcn35_set_dppclk_enable(dccg, dpp_inst, 1); + dcn35_set_dppclk_enable(dccg, dpp_inst, 0); /* turn on the DTO to generate a 0hz clock */ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, DPPCLK0_DTO_PHASE, 0, @@ -469,6 +628,64 @@ static void dccg35_dpp_root_clock_control( dccg->dpp_clock_gated[dpp_inst] = !clock_on; } +static void dccg35_disable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* set refclk as the source for symclk32_se */ + switch (hpo_se_inst) { + case 0: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE0_SRC_SEL, 0, + SYMCLK32_SE0_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE0_GATE_DISABLE, 0); + } + break; + case 1: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE1_SRC_SEL, 0, + SYMCLK32_SE1_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE1_GATE_DISABLE, 0); + } + break; + case 2: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE2_SRC_SEL, 0, + SYMCLK32_SE2_EN, 0); + if 
(dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE2_GATE_DISABLE, 0); + } + break; + case 3: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE3_SRC_SEL, 0, + SYMCLK32_SE3_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE3_GATE_DISABLE, 0); + } + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + void dccg35_init(struct dccg *dccg) { int otg_inst; @@ -477,21 +694,35 @@ void dccg35_init(struct dccg *dccg) * will cause DCN to hang. */ for (otg_inst = 0; otg_inst < 4; otg_inst++) - dccg31_disable_symclk32_se(dccg, otg_inst); + dccg35_disable_symclk32_se(dccg, otg_inst); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - for (otg_inst = 0; otg_inst < 2; otg_inst++) + for (otg_inst = 0; otg_inst < 2; otg_inst++) { dccg31_disable_symclk32_le(dccg, otg_inst); + dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false); + } + +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// for (otg_inst = 0; otg_inst < 4; otg_inst++) +// dccg35_disable_symclk_se(dccg, otg_inst, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) - for (otg_inst = 0; otg_inst < 4; otg_inst++) - dccg314_set_dpstreamclk(dccg, REFCLK, otg_inst, + for (otg_inst = 0; otg_inst < 4; otg_inst++) { + dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst, otg_inst); + dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false); + } if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) for (otg_inst = 0; otg_inst < 5; otg_inst++) - dccg35_set_physymclk(dccg, otg_inst, - PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg35_set_physymclk_root_clock_gating(dccg, otg_inst, + false); + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + for (otg_inst = 0; otg_inst < 4; otg_inst++) + dccg35_set_dppclk_root_clock_gating(dccg, otg_inst, 0); + /* dccg35_enable_global_fgcg_rep( dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits @@ -516,24 +747,32 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) DSCCLK0_DTO_PHASE, 0, DSCCLK0_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1); break; case 1: REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, DSCCLK1_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1); break; case 2: REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, DSCCLK2_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1); break; case 3: REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, DSCCLK3_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1); break; default: BREAK_TO_DEBUGGER(); @@ -555,24 +794,32 @@ static void dccg35_disable_dscclk(struct dccg *dccg, REG_UPDATE_2(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, 0, DSCCLK0_DTO_MODULO, 1); + if 
(dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 0); break; case 1: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0); REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, DSCCLK1_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 0); break; case 2: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0); REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, DSCCLK2_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 0); break; case 3: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0); REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, DSCCLK3_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 0); break; default: return; @@ -587,22 +834,32 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, case 0: REG_UPDATE(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1); break; case 1: REG_UPDATE(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1); break; case 2: REG_UPDATE(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1); break; case 3: REG_UPDATE(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1); break; case 4: REG_UPDATE(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 1); break; } @@ -611,26 +868,36 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, 1, SYMCLKA_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 1); break; case 1: REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, 1, SYMCLKB_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 1); break; case 2: REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, 1, SYMCLKC_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 1); break; case 3: REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, 1, SYMCLKD_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 1); break; case 4: REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, 1, SYMCLKE_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 1); break; } } @@ -691,26 +958,36 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst 
REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, 0, SYMCLKA_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, 0, SYMCLKB_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 0); break; case 2: REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, 0, SYMCLKC_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 0); break; case 3: REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, 0, SYMCLKD_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 0); break; case 4: REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, 0, SYMCLKE_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 0); break; } @@ -723,22 +1000,32 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst case 0: REG_UPDATE(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 0); break; case 1: REG_UPDATE(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 0); break; case 2: REG_UPDATE(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 0); break; case 3: REG_UPDATE(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 0); break; case 4: REG_UPDATE(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 0); break; } } @@ -750,11 +1037,14 @@ static const struct dccg_funcs dccg35_funcs = { .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, .dccg_init = dccg35_init, .set_dpstreamclk = dccg35_set_dpstreamclk, + .set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating, .enable_symclk32_se = dccg31_enable_symclk32_se, - .disable_symclk32_se = dccg31_disable_symclk32_se, + .disable_symclk32_se = dccg35_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, .disable_symclk32_le = dccg31_disable_symclk32_le, + .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating, .set_physymclk = dccg35_set_physymclk, + .set_physymclk_root_clock_gating = dccg35_set_physymclk_root_clock_gating, .set_dtbclk_dto = dccg35_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h index 423feb4c2f3f..1586a45ca3bd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h @@ -34,6 +34,8 @@ #define DCCG_REG_LIST_DCN35() \ 
DCCG_REG_LIST_DCN314(),\ SR(DPPCLK_CTRL),\ + SR(DCCG_GATE_DISABLE_CNTL4),\ + SR(DCCG_GATE_DISABLE_CNTL5),\ SR(DCCG_GATE_DISABLE_CNTL6),\ SR(DCCG_GLOBAL_FGCG_REP_CNTL),\ SR(SYMCLKA_CLOCK_ENABLE),\ @@ -174,7 +176,61 @@ DCCG_SF(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_SRC_SEL, mask_sh),\ - DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh) + DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, HDMICHARCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, HDMISTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYA_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYB_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYC_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, 
PHYD_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYE_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL, DISPCLK_DCCG_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, HDMISTREAMCLK0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ struct dccg *dccg35_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index f91e08895275..20f810a6646c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -80,7 +80,6 @@ enum signal_type dcn35_get_dig_mode( default: return SIGNAL_TYPE_NONE; } - return SIGNAL_TYPE_NONE; } void dcn35_link_encoder_setup( @@ -119,7 +118,7 @@ void dcn35_link_encoder_setup( void dcn35_link_encoder_init(struct link_encoder *enc) { - enc32_hw_init(enc); + enc31_hw_init(enc); dcn35_link_encoder_set_fgcg(enc, enc->ctx->dc->debug.enable_fine_grain_clock_gating.bits.dio); } @@ -256,6 +255,10 @@ void dcn35_link_encoder_construct( enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN; enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN; enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN; + if (bp_cap_info.DP_IS_USB_C) { + /*BIOS not switch to use CONNECTOR_ID_USBC = 24 yet*/ + enc10->base.features.flags.bits.DP_IS_USB_C = 1; + } } else { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", @@ -264,4 +267,5 @@ void dcn35_link_encoder_construct( } if (enc10->base.ctx->dc->debug.hdmi20_disable) enc10->base.features.flags.bits.HDMI_6GB_EN = 0; + } diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h index e1e560732a9d..d546a3676304 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.h @@ -37,7 +37,9 @@ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_MODE, mask_sh),\ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_CLK_EN, mask_sh),\ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SOFT_RESET, mask_sh),\ + LE_SF(DIG0_DIG_BE_CLK_CNTL, HDCP_SOFT_RESET, mask_sh),\ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_CLOCK_ON, mask_sh),\ + LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_HDCP_CLOCK_ON, mask_sh),\ LE_SF(DIG0_DIG_BE_CLK_CNTL, DIG_BE_SYMCLK_G_TMDS_CLOCK_ON, mask_sh),\ LE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh),\ LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \ @@ -114,7 +116,15 @@ LE_SF(DIO_CLK_CNTL, SYMCLK_FE_G_GATE_DIS, mask_sh),\ LE_SF(DIO_CLK_CNTL, SYMCLK_R_GATE_DIS, mask_sh),\ LE_SF(DIO_CLK_CNTL, SYMCLK_G_GATE_DIS, mask_sh),\ - LE_SF(DIO_CLK_CNTL, 
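The long DCCG_SF()/LE_SF() runs above extend DC's mask/shift lists, where each entry expands into shift and mask initializers for one register field, so wiring up a new gate-disable bit is a single list line. Below is a simplified, self-contained sketch of the token-pasting idiom; MYBLOCK_SF and the field layout are invented for illustration (the real macros key off generated __SHIFT/__MASK constants via the mask_sh argument).

#include <stdint.h>
#include <stdio.h>

#define MYBLOCK_SF(field, sh, m) .field##_shift = (sh), .field##_mask = (m)

struct myblock_shift_mask {
	uint8_t  DSCCLK0_ROOT_GATE_DISABLE_shift;
	uint32_t DSCCLK0_ROOT_GATE_DISABLE_mask;
};

/* one list entry populates both members for the field */
static const struct myblock_shift_mask sm = {
	MYBLOCK_SF(DSCCLK0_ROOT_GATE_DISABLE, 4, 0x10)
};

int main(void)
{
	printf("shift=%u mask=0x%x\n",
	       sm.DSCCLK0_ROOT_GATE_DISABLE_shift,
	       sm.DSCCLK0_ROOT_GATE_DISABLE_mask);
	return 0;
}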
DIO_FGCG_REP_DIS, mask_sh) + LE_SF(DIO_CLK_CNTL, DIO_FGCG_REP_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, DISPCLK_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKA_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKB_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKC_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKD_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKE_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKF_G_HDCP_GATE_DIS, mask_sh),\ + LE_SF(DIO_CLK_CNTL, SYMCLKG_G_HDCP_GATE_DIS, mask_sh) void dcn35_link_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c index 001f9eb66920..62a8f0b56006 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.c @@ -261,12 +261,6 @@ static void enc35_stream_encoder_enable( /* invalid mode ! */ ASSERT_CRITICAL(false); } - - REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1); - REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1); - } else { - REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0); - REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0); } } @@ -436,6 +430,8 @@ static void enc35_disable_fifo(struct stream_encoder *enc) struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0); + REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0); + REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0); } static void enc35_enable_fifo(struct stream_encoder *enc) @@ -443,6 +439,8 @@ static void enc35_enable_fifo(struct stream_encoder *enc) struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); + REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1); + REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1); enc35_reset_fifo(enc, true); enc35_reset_fifo(enc, false); diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c index 339bf0c722dd..6293173ba2b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c @@ -111,7 +111,7 @@ static uint32_t convert_and_clamp( static bool hubbub35_program_stutter_z8_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -297,7 +297,7 @@ static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub, static bool hubbub35_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c index 1ed58660779e..771fcd0d3b99 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c @@ -53,11 +53,146 @@ static void hubp35_init(struct hubp *hubp) /*do nothing for now for dcn3.5 or later*/ } + +void hubp35_program_pixel_format( + struct hubp *hubp, + enum surface_pixel_format format) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + uint32_t green_bar = 1; + uint32_t red_bar = 3; + uint32_t blue_bar = 2; + + /* swap for ABGR format */ + if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS + || format == 
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) { + red_bar = 2; + blue_bar = 3; + } + + REG_UPDATE_3(HUBPRET_CONTROL, + CROSSBAR_SRC_Y_G, green_bar, + CROSSBAR_SRC_CB_B, blue_bar, + CROSSBAR_SRC_CR_R, red_bar); + + /* Mapping is same as ipp programming (cnvc) */ + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 1); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 3); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 8); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 10); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /* we use crossbar already */ + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */ + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/ + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 24); + break; + + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 65); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 64); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 67); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 66); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 12); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 112); + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 113); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 114); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 118); + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 119); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE: + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 116, + ALPHA_PLANE_EN, 0); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 116, + ALPHA_PLANE_EN, 1); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + + /* don't see the need of program the xbar in DCN 1.0 */ +} + +void hubp35_program_surface_config( + struct hubp *hubp, + enum surface_pixel_format format, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + unsigned int compat_level) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + hubp3_dcc_control_sienna_cichlid(hubp, dcc); + hubp3_program_tiling(hubp2, tiling_info, format); + hubp2_program_size(hubp, format, plane_size, dcc); + hubp2_program_rotation(hubp, rotation, horizontal_mirror); + 
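hubp35_program_pixel_format() above first points the HUBP retile crossbar at the right channels: Y/G stays on source 1, CB_B/CR_R default to 2/3, and the two swap for the ABGR surface formats, so one set of SURFACE_PIXEL_FORMAT codes serves both channel orders. A minimal sketch of just that selection step (the register programming is elided):

#include <stdbool.h>
#include <stdio.h>

struct crossbar_sel { unsigned y_g, cb_b, cr_r; };

/* mirrors the selection logic only; REG_UPDATE_3(HUBPRET_CONTROL, ...) elided */
static struct crossbar_sel pick_crossbar(bool is_abgr)
{
	struct crossbar_sel sel = { .y_g = 1, .cb_b = 2, .cr_r = 3 }; /* ARGB default */

	if (is_abgr) {		/* swap the R and B taps for ABGR surfaces */
		sel.cb_b = 3;
		sel.cr_r = 2;
	}
	return sel;
}

int main(void)
{
	struct crossbar_sel s = pick_crossbar(true);

	printf("Y_G=%u CB_B=%u CR_R=%u\n", s.y_g, s.cb_b, s.cr_r);
	return 0;
}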
hubp35_program_pixel_format(hubp, format); +} + struct hubp_funcs dcn35_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, .hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr, - .hubp_program_surface_config = hubp3_program_surface_config, + .hubp_program_surface_config = hubp35_program_surface_config, .hubp_is_flip_pending = hubp2_is_flip_pending, .hubp_setup = hubp3_setup, .hubp_setup_interdependent = hubp2_setup_interdependent, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h index 3d830f93141e..586b43aa5834 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h @@ -58,4 +58,18 @@ bool hubp35_construct( void hubp35_set_fgcg(struct hubp *hubp, bool enable); +void hubp35_program_pixel_format( + struct hubp *hubp, + enum surface_pixel_format format); + +void hubp35_program_surface_config( + struct hubp *hubp, + enum surface_pixel_format format, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + unsigned int compat_level); + #endif /* __DC_HUBP_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c index 0f60c40e1fc5..53bd0ae4bab5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c @@ -261,6 +261,7 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on) uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 0 : 2; uint32_t org_ip_request_cntl; + uint32_t power_forceon; bool block_enabled; if (pg_cntl->ctx->dc->debug.ignore_pg || @@ -277,6 +278,10 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on) return; } + REG_GET(DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon); + if (power_forceon) + return; + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); @@ -304,6 +309,7 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on) uint32_t power_gate = power_on ? 0 : 1; uint32_t pwr_status = power_on ? 
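Both pg_cntl35 hunks further down add the same guard: read DOMAIN_POWER_FORCEON for the domain and return before the DC_IP_REQUEST_CNTL handshake if firmware has pinned the domain on. A hedged sketch of that early-out, with reg_get_power_forceon() standing in for REG_GET():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for REG_GET(DOMAINxx_PG_CONFIG, DOMAIN_POWER_FORCEON, &v) */
static void reg_get_power_forceon(uint32_t *v)
{
	*v = 1;		/* pretend firmware pinned the domain on */
}

static void domain_pg_control(bool power_on)
{
	uint32_t power_forceon;

	reg_get_power_forceon(&power_forceon);
	if (power_forceon)	/* forced on: do not start the PG sequence */
		return;

	/* ... IP_REQUEST_EN handshake and DOMAIN_POWER_GATE writes go here ... */
	printf("power gating sequence runs, power_on=%d\n", power_on);
}

int main(void)
{
	domain_pg_control(false);
	return 0;
}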
0 : 2; uint32_t org_ip_request_cntl; + uint32_t power_forceon; bool block_enabled; if (pg_cntl->ctx->dc->debug.ignore_pg || @@ -319,6 +325,10 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on) return; } + REG_GET(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon); + if (power_forceon) + return; + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 4229369c57f4..f4d3f04ec857 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -26,6 +26,9 @@ #ifndef DM_CP_PSP_IF__H #define DM_CP_PSP_IF__H +/* + * Interface to CPLIB/PSP to enable ASSR + */ struct dc_link; struct cp_psp_stream_config { diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 7ce9a5b6c33b..34adae7ab6e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -36,6 +36,7 @@ struct dc_dp_mst_stream_allocation_table; struct aux_payload; enum aux_return_code_type; +enum set_config_status; /* * Allocate memory accessible by the GPU @@ -103,10 +104,16 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( /* * Sends ALLOCATE_PAYLOAD message. */ -bool dm_helpers_dp_mst_send_payload_allocation( +void dm_helpers_dp_mst_send_payload_allocation( struct dc_context *ctx, - const struct dc_stream_state *stream, - bool enable); + const struct dc_stream_state *stream); + +/* + * Update mst manager relevant variables + */ +void dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + struct dc_context *ctx, + const struct dc_stream_state *stream); bool dm_helpers_dp_mst_start_top_mgr( struct dc_context *ctx, @@ -194,7 +201,7 @@ int dm_helper_dmub_aux_transfer_sync( const struct dc_link *link, struct aux_payload *payload, enum aux_return_code_type *operation_result); -enum set_config_status; + int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, const struct dc_link *link, struct set_config_cmd_payload *payload, diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index 4440d08743aa..bd7ba0a25198 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -247,6 +247,7 @@ struct pp_smu_funcs_nv { #define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4 #define PP_SMU_NUM_DCLK_DPM_LEVELS 8 #define PP_SMU_NUM_VCLK_DPM_LEVELS 8 +#define PP_SMU_NUM_VPECLK_DPM_LEVELS 8 struct dpm_clock { uint32_t Freq; // In MHz @@ -262,6 +263,7 @@ struct dpm_clocks { struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS]; struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS]; struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS]; + struct dpm_clock VPEClocks[PP_SMU_NUM_VPECLK_DPM_LEVELS]; }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index d0eed3b4771e..9405c47ee2a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -275,6 +275,16 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc #define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX) /* + * SMU message tracing + */ +void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx); +void dm_trace_smu_delay(uint32_t delay, struct 
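The dm_services.h hunk above adds two DM-implemented hooks for tracing SMU traffic, with TRACE_SMU_MSG()/TRACE_SMU_DELAY() as thin pass-through macros. A toy stand-alone version (dc_context reduced to a stub) showing how callers would use them:

#include <stdint.h>
#include <stdio.h>

struct dc_context { int inst; };	/* toy stand-in for the real dc_context */

static void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
{
	printf("dc%d: SMU msg 0x%02x param 0x%08x\n", ctx->inst, msg_id, param_in);
}

static void dm_trace_smu_delay(uint32_t delay_us, struct dc_context *ctx)
{
	printf("dc%d: SMU response delay %u us\n", ctx->inst, delay_us);
}

#define TRACE_SMU_MSG(msg_id, param_in, ctx)	dm_trace_smu_msg(msg_id, param_in, ctx)
#define TRACE_SMU_DELAY(response_delay, ctx)	dm_trace_smu_delay(response_delay, ctx)

int main(void)
{
	struct dc_context ctx = { .inst = 0 };

	TRACE_SMU_MSG(0x4a, 0x1, &ctx);
	TRACE_SMU_DELAY(120, &ctx);
	return 0;
}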
dc_context *ctx); + +#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx) +#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx) + + +/* * DMUB Interfaces */ bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index ea7d60f9a9b4..c4a5efd2dda5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -61,18 +61,22 @@ endif endif ifneq ($(CONFIG_FRAME_WARN),0) +ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) +frame_warn_flag := -Wframe-larger-than=3072 +else frame_warn_flag := -Wframe-larger-than=2048 endif +endif CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) @@ -88,6 +92,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn35/dcn35_fpu.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn351/dcn351_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags) @@ -122,6 +127,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn35/dcn35_fpu.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn351/dcn351_fpu.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_rcflags) @@ -153,6 +159,7 @@ DML += dcn302/dcn302_fpu.o DML += dcn303/dcn303_fpu.o DML += dcn314/dcn314_fpu.o DML += dcn35/dcn35_fpu.o +DML += dcn351/dcn351_fpu.o DML += dsc/rc_calc_fpu.o DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index 
50b0434354f8..0c4a8fe8e5ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -30,7 +30,7 @@ #include "dcn_calc_auto.h" #include "dal_asic_id.h" #include "resource.h" -#include "dcn10/dcn10_resource.h" +#include "resource/dcn10/dcn10_resource.h" #include "dcn10/dcn10_hubbub.h" #include "dml/dml1_display_rq_dlg_calc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h index 2cbdd75429ff..6e669a2c5b2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h @@ -36,7 +36,7 @@ * Define the maximum amount of states supported by the ASIC. Every ASIC has a * specific number of states; this macro defines the maximum number of states. */ -#define DC__VOLTAGE_STATES 20 +#define DC__VOLTAGE_STATES 40 #define DC__NUM_DPP__4 1 #define DC__NUM_DPP__0_PRESENT 1 #define DC__NUM_DPP__1_PRESENT 1 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 7fc8b18096ba..74da9ebda016 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -33,6 +33,7 @@ #include "link.h" #include "dcn20_fpu.h" +#include "dc_state_priv.h" #define DC_LOGGER \ dc->ctx->logger @@ -440,7 +441,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { .use_urgent_burst_bw = 0 }; -struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + .dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 1069.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 1324.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 1670.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + .dscclk_mhz = 367.0, + .dram_speed_mts = 2000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 2000.0, + }, + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 2000.0, + }, + }, + + .num_states = 5, + .sr_exit_time_us = 1.9, + .sr_enter_plus_exit_time_us = 4.4, + .urgent_latency_us = 3.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + 
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 16, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.5, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 16, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 45.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 50, + .use_urgent_burst_bw = 0, +}; struct _vcs_dpi_ip_params_st dcn2_1_ip = { .odm_capable = 1, @@ -950,10 +1059,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc { int plane_count; int i; - unsigned int min_dst_y_next_start_us; plane_count = 0; - min_dst_y_next_start_us = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].plane_state) plane_count++; @@ -975,29 +1082,21 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { struct dc_link *link = context->streams[0]->sink->link; struct dc_stream_status *stream_status = &context->stream_status[0]; - struct dc_stream_state *current_stream = context->streams[0]; int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000; bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; bool is_pwrseq0 = link->link_index == 0; - bool isFreesyncVideo; - - isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max; - isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min; - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) { - min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us; - break; - } - } + bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 || + link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr); + bool is_replay = link && link->replay_settings.replay_feature_enabled; /* Don't support multi-plane configurations */ if (stream_status->plane_count > 1) return DCN_ZSTATE_SUPPORT_DISALLOW; - if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000)) + if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0) return DCN_ZSTATE_SUPPORT_ALLOW; - else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr) - return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; + else if (is_pwrseq0 && (is_psr || is_replay)) + return DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY; else return allow_z8 ? 
DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW; } else { @@ -1087,7 +1186,7 @@ void dcn20_calculate_dlg_params(struct dc *dc, pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; context->res_ctx.pipe_ctx[i].unbounded_req = false; @@ -1437,7 +1536,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc, */ if (res_ctx->pipe_ctx[i].plane_state && (res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || - res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM)) + dc_state_get_pipe_subvp_type(context, &res_ctx->pipe_ctx[i]) == SUBVP_PHANTOM)) pipes[pipe_cnt].pipe.src.num_cursors = 0; else pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors; @@ -2273,7 +2372,7 @@ validate_out: static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_limit_table *clk_table, unsigned int high_voltage_lvl) { - struct _vcs_dpi_voltage_scaling_st low_pstate_lvl; + struct _vcs_dpi_voltage_scaling_st low_pstate_lvl = {0}; int i; low_pstate_lvl.state = 1; @@ -2378,7 +2477,7 @@ void dcn201_populate_dml_writeback_from_context_fpu(struct dc *dc, int pipe_cnt, i, j; double max_calc_writeback_dispclk; double writeback_dispclk; - struct writeback_st dout_wb; + struct writeback_st dout_wb = {0}; dc_assert_fp_enabled(); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index ccb4ad78f667..81f7b90849ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -260,7 +260,7 @@ void dcn30_fpu_populate_dml_writeback_from_context( int pipe_cnt, i, j; double max_calc_writeback_dispclk; double writeback_dispclk; - struct writeback_st dout_wb; + struct writeback_st dout_wb = {0}; dc_assert_fp_enabled(); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index ad741a723c0e..e0b52db2c210 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -3535,14 +3535,13 @@ static double TruncToValidBPP( return DesiredBPP; } } - return BPP_INVALID; } void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) { struct vba_vars_st *v = &mode_lib->vba; int MinPrefetchMode, MaxPrefetchMode; - int i; + int i, start_state; unsigned int j, k, m; bool EnoughWritebackUnits = true; bool WritebackModeSupport = true; @@ -3553,6 +3552,11 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ + if (mode_lib->validate_max_state) + start_state = v->soc.num_states - 1; + else + start_state = 0; + CalculateMinAndMaxPrefetchMode( mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &MinPrefetchMode, &MaxPrefetchMode); @@ -3851,7 +3855,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->SingleDPPViewportSizeSupportPerPlane, 
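The decide_zstate_support() rework above drops the FreeSync-video/min_dst_y_next_start heuristic and keys the single-eDP branch off PSR or Panel Replay support instead. Distilled into a stand-alone function (only the branch structure of the eDP path, with the inputs precomputed as booleans):

#include <stdbool.h>
#include <stdio.h>

enum zstate_support {
	ZSTATE_DISALLOW,
	ZSTATE_ALLOW,
	ZSTATE_ALLOW_Z8_ONLY,
	ZSTATE_ALLOW_Z8_Z10_ONLY,
};

static enum zstate_support decide_zstate(int plane_count, bool is_pwrseq0,
					 bool is_psr, bool is_replay,
					 double stutter_period_us, bool allow_z8)
{
	if (plane_count > 1)			/* multi-plane: no z-states */
		return ZSTATE_DISALLOW;
	if (is_pwrseq0 && stutter_period_us > 5000.0)
		return ZSTATE_ALLOW;
	if (is_pwrseq0 && (is_psr || is_replay))
		return ZSTATE_ALLOW_Z8_Z10_ONLY;
	return allow_z8 ? ZSTATE_ALLOW_Z8_ONLY : ZSTATE_DISALLOW;
}

int main(void)
{
	/* PSR-capable eDP on pwrseq0 with a short stutter period */
	printf("%d\n", decide_zstate(1, true, true, false, 3000.0, true));
	return 0;
}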
&v->ViewportSizeSupport[0][0]); - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed); v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed); @@ -4007,7 +4011,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*Total Available Pipes Support Check*/ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) { v->TotalAvailablePipesSupport[i][j] = true; @@ -4046,7 +4050,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { v->RequiresDSC[i][k] = false; v->RequiresFEC[i][k] = false; @@ -4174,7 +4178,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { v->DIOSupport[i] = true; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi) @@ -4185,7 +4189,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; ++i) { + for (i = start_state; i < v->soc.num_states; ++i) { v->ODMCombine4To1SupportCheckOK[i] = true; for (k = 0; k < v->NumberOfActivePlanes; ++k) { if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 @@ -4197,7 +4201,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { v->NotEnoughDSCUnits[i] = false; v->TotalDSCUnitsRequired = 0.0; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { @@ -4217,7 +4221,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } /*DSC Delay per state*/ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { if (v->OutputBppPerState[i][k] == BPP_INVALID) { v->BPP = 0.0; @@ -4268,7 +4272,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Calculate Swath, DET Configuration, DCFCLKDeepSleep // - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { for (k = 0; k < v->NumberOfActivePlanes; ++k) { v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k]; @@ -4333,7 +4337,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k]; } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { v->swath_width_luma_ub_this_state[k] = 
v->swath_width_luma_ub_all_states[i][j][k]; @@ -4571,7 +4575,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Calculate Return BW - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { if (v->BlendingAndTiming[k] == k) { @@ -4630,7 +4634,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->UrgentOutOfOrderReturnPerChannelVMDataOnly); v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency); - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { v->DCFCLKState[i][j] = v->DCFCLKPerState[i]; } @@ -4641,7 +4645,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l if (v->ClampMinDCFCLK) { /* Clamp calculated values to actual minimum */ - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) { v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk; @@ -4651,7 +4655,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { v->IdealSDPPortBandwidthPerState[i][j] = dml_min3( v->ReturnBusWidth * v->DCFCLKState[i][j], @@ -4669,7 +4673,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Re-ordering Buffer Support Check - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j] > (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) { @@ -4687,7 +4691,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]; } - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min( v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100, @@ -4703,7 +4707,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l //Prefetch Check - for (i = 0; i < mode_lib->soc.num_states; ++i) { + for (i = start_state; i < mode_lib->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { int NextPrefetchModeState = MinPrefetchMode; @@ -5075,7 +5079,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*PTE Buffer Size Check*/ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { v->PTEBufferSizeNotExceeded[i][j] = true; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { @@ -5128,7 +5132,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l ViewportExceedsSurface = true; if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16 
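The long run of loop-bound changes through dml30_ModeSupportAndSystemConfigurationFull() all serve one switch: when validate_max_state is set, every per-voltage-state loop starts at num_states - 1 instead of 0, so only the top state is checked. In miniature:

#include <stdio.h>

static void validate_states(int num_states, int validate_max_state)
{
	/* same start_state selection the dml30 hunks introduce */
	int start_state = validate_max_state ? num_states - 1 : 0;

	for (int i = start_state; i < num_states; i++)
		printf("validating voltage state %d\n", i);
}

int main(void)
{
	validate_states(5, 1);	/* checks only state 4 */
	return 0;
}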
- && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) { + && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) { if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) { ViewportExceedsSurface = true; } @@ -5136,7 +5140,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } /*Mode Support, Voltage State and SOC Configuration*/ - for (i = v->soc.num_states - 1; i >= 0; i--) { + for (i = v->soc.num_states - 1; i >= start_state; i--) { for (j = 0; j < 2; j++) { if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1 && v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1 @@ -5158,7 +5162,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } { unsigned int MaximumMPCCombine = 0; - for (i = v->soc.num_states; i >= 0; i--) { + for (i = v->soc.num_states; i >= start_state; i--) { if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) { v->VoltageLevel = i; v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c index 3eb3a021ab7d..3f02bb806d42 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c @@ -266,6 +266,17 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p optimal_uclk_for_dcfclk_sta_targets[i] = bw_params->clk_table.entries[j].memclk_mhz * 16; break; + } else { + /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]): + * This is required for dcn303 because it just so happens that the memory + * bandwidth is low enough such that all the optimal DCFCLK for each UCLK + * is lower than the smallest DCFCLK STA target. In this case we need to + * populate the optimal UCLK for each DCFCLK STA target to be the max UCLK. 
+ */ + if (j == num_uclk_states - 1) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + } } } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index deb6d162a2d5..94317b2e4a85 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = { .do_urgent_latency_adjustment = false, .urgent_latency_adjustment_fabric_clock_component_us = 0, .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .dispclk_dppclk_vco_speed_mhz = 2400.0, .num_chans = 4, .dummy_pstate_latency_us = 10.0 }; @@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = { .do_urgent_latency_adjustment = false, .urgent_latency_adjustment_fabric_clock_component_us = 0, .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .dispclk_dppclk_vco_speed_mhz = 2500.0, }; void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, @@ -485,6 +487,7 @@ void dcn31_calculate_wm_and_dlg_fp( { int i, pipe_idx, total_det = 0, active_hubp_count = 0; double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; + uint32_t cstate_enter_plus_exit_z8_ns; dc_assert_fp_enabled(); @@ -504,6 +507,13 @@ void dcn31_calculate_wm_and_dlg_fp( pipes[0].clks_cfg.dcfclk_mhz = dcfclk; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + cstate_enter_plus_exit_z8_ns = + get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + if (get_stutter_period(&context->bw_ctx.dml, pipes, pipe_cnt) < dc->debug.minimum_z8_residency_time && + cstate_enter_plus_exit_z8_ns < dc->debug.minimum_z8_residency_time * 1000) + cstate_enter_plus_exit_z8_ns = dc->debug.minimum_z8_residency_time * 1000; + /* Set A: * All clocks min required * @@ -514,7 +524,7 @@ void dcn31_calculate_wm_and_dlg_fp( context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index 8f9c8faed260..d2ae43a82ba5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -30,6 +30,7 @@ #define DCN3_15_DEFAULT_DET_SIZE 192 #define DCN3_15_MIN_COMPBUF_SIZE_KB 128 #define DCN3_16_DEFAULT_DET_SIZE 192 +#define 
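The dcn303 fallback described in the comment above, restated as an equivalent stand-alone loop (preset each STA target to the max UCLK, then let the first qualifying UCLK override it); the array contents in main() are made up for the demonstration:

#include <stdio.h>

static void map_sta_targets(const unsigned int *dcfclk_sta_targets, int num_sta,
			    const unsigned int *optimal_dcfclk_for_uclk,
			    const unsigned int *memclk_mhz, int num_uclk_states,
			    unsigned int *optimal_uclk_for_dcfclk_sta_targets)
{
	for (int i = 0; i < num_sta; i++) {
		/* fallback from the comment: no UCLK qualifies, take the max */
		optimal_uclk_for_dcfclk_sta_targets[i] =
			memclk_mhz[num_uclk_states - 1] * 16;

		for (int j = 0; j < num_uclk_states; j++) {
			if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
				optimal_uclk_for_dcfclk_sta_targets[i] =
					memclk_mhz[j] * 16;
				break;
			}
		}
	}
}

int main(void)
{
	unsigned int sta[2] = { 800, 1200 };	/* made-up DCFCLK STA targets */
	unsigned int opt_dcf[2] = { 500, 700 };	/* all below the STA targets */
	unsigned int memclk[2] = { 800, 1600 };
	unsigned int out[2];

	map_sta_targets(sta, 2, opt_dcf, memclk, 2, out);
	printf("%u %u\n", out[0], out[1]);	/* both fall back to max UCLK */
	return 0;
}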
DCN3_16_MIN_COMPBUF_SIZE_KB 128 void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, int pipe_cnt); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index adea459e7d36..33cf824c5da1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3679,7 +3679,6 @@ static double TruncToValidBPP( return DesiredBPP; } } - return BPP_INVALID; } static noinline void CalculatePrefetchSchedulePerPlane( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index fb21572750e8..21f637ae4add 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -310,7 +310,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 88e56889a68c..3242957d00c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -3788,7 +3788,6 @@ static double TruncToValidBPP( return DesiredBPP; } } - return BPP_INVALID; } static noinline void CalculatePrefetchSchedulePerPlane( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 9ec4172d1c2d..f6fe0a64beac 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -32,6 +32,7 @@ #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" #include "dcn30/dcn30_resource.h" #include "link.h" +#include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) @@ -45,6 +46,14 @@ static const struct subvp_high_refresh_list subvp_high_refresh_list = { {.width = 1920, .height = 1080, }}, }; +static const struct subvp_active_margin_list subvp_active_margin_list = { + .min_refresh = 55, + .max_refresh = 65, + .res = { + {.width = 2560, .height = 1440, }, + {.width = 1920, .height = 1080, }}, +}; + struct _vcs_dpi_ip_params_st dcn3_2_ip = { .gpuvm_enable = 0, .gpuvm_max_page_table_levels = 4, @@ -171,6 +180,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, }; +static bool dcn32_apply_merge_split_flags_helper(struct dc *dc, struct dc_state *context, + bool *repopulate_pipes, int *split, bool *merge); + void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) { /* defaults */ @@ -333,7 +345,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc, if (!pipe->stream) continue; - if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vupdate_offset = @@ -613,10 +625,10 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, * to combine this with SubVP can cause issues with the scheduling). 
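dcn32_fpu.c above gains a static subvp_active_margin_list alongside the existing high-refresh list. The hunk does not show how the table is consulted, so the checker below is only a guess at the intended lookup, built from the table's visible fields:

#include <stdbool.h>
#include <stdio.h>

struct subvp_res { int width, height; };

static const struct {
	int min_refresh, max_refresh;
	struct subvp_res res[2];
} subvp_active_margin_list = {
	.min_refresh = 55,
	.max_refresh = 65,
	.res = { { 2560, 1440 }, { 1920, 1080 } },
};

static bool in_active_margin_list(int width, int height, int refresh_hz)
{
	if (refresh_hz < subvp_active_margin_list.min_refresh ||
	    refresh_hz > subvp_active_margin_list.max_refresh)
		return false;

	for (int i = 0; i < 2; i++)
		if (subvp_active_margin_list.res[i].width == width &&
		    subvp_active_margin_list.res[i].height == height)
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", in_active_margin_list(1920, 1080, 60));	/* 1 */
	return 0;
}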
* - Not TMZ surface */ - if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && + if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) && !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && - pipe->stream->mall_stream_config.type == SUBVP_NONE && + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && !pipe->plane_state->address.tmz_surface && (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || @@ -674,7 +686,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context // Find the minimum pipe split count for non SubVP pipes if (resource_is_pipe_type(pipe, OPP_HEAD) && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) { split_cnt = 0; while (pipe) { split_cnt++; @@ -711,7 +723,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context */ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context) { - struct pipe_ctx *subvp_pipes[2]; + struct pipe_ctx *subvp_pipes[2] = {0}; struct dc_stream_state *phantom = NULL; uint32_t microschedule_lines = 0; uint32_t index = 0; @@ -727,8 +739,8 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context) * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. */ if (pipe->stream && pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - phantom = pipe->stream->mall_stream_config.paired_stream; + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + phantom = dc_state_get_paired_subvp_stream(context, pipe->stream); microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) + phantom->timing.v_addressable; @@ -796,6 +808,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) int16_t stretched_drr_us = 0; int16_t drr_stretched_vblank_us = 0; int16_t max_vblank_mallregion = 0; + struct dc_stream_state *phantom_stream; + bool subvp_found = false; + bool drr_found = false; // Find SubVP pipe for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -808,8 +823,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) continue; // Find the SubVP pipe - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + subvp_found = true; break; + } } // Find the DRR pipe @@ -817,32 +834,37 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) drr_pipe = &context->res_ctx.pipe_ctx[i]; // We check for master pipe only - if (!resource_is_pipe_type(pipe, OTG_MASTER) || - !resource_is_pipe_type(pipe, DPP_PIPE)) + if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) || + !resource_is_pipe_type(drr_pipe, DPP_PIPE)) continue; - if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && - (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) + if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && + (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || 
drr_pipe->stream->vrr_active_fixed)) { + drr_found = true; break; + } } - main_timing = &pipe->stream->timing; - phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing; - drr_timing = &drr_pipe->stream->timing; - prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / - (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + - dc->caps.subvp_prefetch_end_to_mall_start_us; - subvp_active_us = main_timing->v_addressable * main_timing->h_total / - (double)(main_timing->pix_clk_100hz * 100) * 1000000; - drr_frame_us = drr_timing->v_total * drr_timing->h_total / - (double)(drr_timing->pix_clk_100hz * 100) * 1000000; - // P-State allow width and FW delays already included phantom_timing->v_addressable - mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total / - (double)(phantom_timing->pix_clk_100hz * 100) * 1000000; - stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; - drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total / - (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us); - max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us; + if (subvp_found && drr_found) { + phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream); + main_timing = &pipe->stream->timing; + phantom_timing = &phantom_stream->timing; + drr_timing = &drr_pipe->stream->timing; + prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / + (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + + dc->caps.subvp_prefetch_end_to_mall_start_us; + subvp_active_us = main_timing->v_addressable * main_timing->h_total / + (double)(main_timing->pix_clk_100hz * 100) * 1000000; + drr_frame_us = drr_timing->v_total * drr_timing->h_total / + (double)(drr_timing->pix_clk_100hz * 100) * 1000000; + // P-State allow width and FW delays already included phantom_timing->v_addressable + mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total / + (double)(phantom_timing->pix_clk_100hz * 100) * 1000000; + stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; + drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total / + (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us); + max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us; + } /* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis @@ -887,6 +909,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; struct dc_crtc_timing *vblank_timing = NULL; + struct dc_stream_state *phantom_stream; + enum mall_stream_type pipe_mall_type; /* For SubVP + VBLANK/DRR cases, we assume there can only be * a single VBLANK/DRR display. 
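For reference, the guarded timing math above reduces to a few line-time conversions. A self-contained sketch with invented numbers (SUBVP_DRR_MARGIN_US here is a stand-in constant, and the timings are illustrative, not taken from the patch):

#include <stdio.h>

#define SUBVP_DRR_MARGIN_US 100 /* stand-in; the real margin is defined elsewhere in dcn32 */

/* Microseconds covered by `lines` scanlines of a timing. */
static double lines_to_us(double lines, double h_total, double pix_clk_100hz)
{
	return lines * h_total / (pix_clk_100hz * 100.0) * 1000000.0;
}

int main(void)
{
	/* Invented timings: a 1080p240 DRR panel and a short phantom timing. */
	double drr_frame_us = lines_to_us(1125, 2200, 5940000);  /* ~4167 us */
	double mall_region_us = lines_to_us(160, 2720, 2415000); /* ~1802 us */
	double stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;

	/* The schedulability check compares this stretched frame duration
	 * against the DRR display's vblank budget and the MALL region. */
	printf("stretched DRR frame: %.0f us (max %.1f Hz)\n",
	       stretched_drr_us, 1000000.0 / stretched_drr_us);
	return 0;
}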
If DML outputs SubVP + VBLANK @@ -896,6 +920,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) @@ -903,18 +928,19 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) !resource_is_pipe_type(pipe, DPP_PIPE)) continue; - if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (!found && pipe_mall_type == SUBVP_NONE) { // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). vblank_index = i; found = true; } - if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) subvp_pipe = pipe; } if (found) { + phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); main_timing = &subvp_pipe->stream->timing; - phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + phantom_timing = &phantom_stream->timing; vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe // Also include the prefetch end to mallstart delay time @@ -969,7 +995,7 @@ static bool subvp_subvp_admissable(struct dc *dc, continue; if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); @@ -1018,23 +1044,23 @@ static bool subvp_validate_static_schedulability(struct dc *dc, for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; if (pipe->plane_state && !pipe->top_pipe) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (pipe_mall_type == SUBVP_MAIN) subvp_count++; - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (pipe_mall_type == SUBVP_NONE) non_subvp_pipes++; - } } // Count how many planes that aren't SubVP/phantom are capable of VACTIVE // switching (SubVP + VACTIVE unsupported). In situations where we force // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. 
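The rounded refresh-rate computation used in subvp_subvp_admissable() above (and repeated later in dcn32_allow_subvp_with_active_margin()) is a 64-bit ceiling division, ceil(pix_clk_hz / (v_total * h_total)). A minimal standalone sketch, with a plain C stand-in for div_u64 and illustrative timing numbers:

#include <stdint.h>
#include <stdio.h>

/* Ceiling of pix_clk_hz / (v_total * h_total); widening to 64 bits before
 * the multiply-add avoids overflow, which is what div_u64() guards against
 * on 32-bit kernels. */
static uint32_t refresh_hz(uint32_t pix_clk_100hz, uint32_t v_total, uint32_t h_total)
{
	uint64_t r = (uint64_t)pix_clk_100hz * 100 + (uint64_t)v_total * h_total - 1;

	r /= v_total; /* sequential floor divisions equal division */
	r /= h_total; /* by the product v_total * h_total          */
	return (uint32_t)r;
}

int main(void)
{
	/* Illustrative 2560x1440 timing: 241.50 MHz, v_total 1481, h_total 2720
	 * -> 59.95 Hz raw, reported as 60 after the round-up. */
	printf("%u Hz\n", (unsigned)refresh_hz(2415000, 1481, 2720));
	return 0;
}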
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + pipe_mall_type == SUBVP_NONE) { vactive_count++; } pipe_idx++; @@ -1070,7 +1096,7 @@ static void assign_subvp_index(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { pipe_ctx->subvp_index = index++; } else { pipe_ctx->subvp_index = 0; @@ -1089,7 +1115,7 @@ struct pipe_slice_table { struct pipe_ctx *pri_pipe; struct dc_plane_state *plane; int slice_count; - } mpc_combines[MAX_SURFACES]; + } mpc_combines[MAX_PLANES]; int mpc_combine_count; }; @@ -1192,13 +1218,16 @@ static bool update_pipe_slice_table_with_split_flags( */ struct pipe_ctx *pipe; bool odm; - int i; + int dc_pipe_idx, dml_pipe_idx = 0; bool updated = false; - for (i = 0; i < dc->res_pool->pipe_count; i++) { - pipe = &context->res_ctx.pipe_ctx[i]; + for (dc_pipe_idx = 0; + dc_pipe_idx < dc->res_pool->pipe_count; dc_pipe_idx++) { + pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; + if (resource_is_pipe_type(pipe, FREE_PIPE)) + continue; - if (merge[i]) { + if (merge[dc_pipe_idx]) { if (resource_is_pipe_type(pipe, OPP_HEAD)) /* merging OPP head means reducing ODM slice * count by 1 @@ -1213,17 +1242,18 @@ static bool update_pipe_slice_table_with_split_flags( updated = true; } - if (split[i]) { - odm = vba->ODMCombineEnabled[vba->pipe_plane[i]] != + if (split[dc_pipe_idx]) { + odm = vba->ODMCombineEnabled[vba->pipe_plane[dml_pipe_idx]] != dm_odm_combine_mode_disabled; if (odm && resource_is_pipe_type(pipe, OPP_HEAD)) update_slice_table_for_stream( - table, pipe->stream, split[i] - 1); + table, pipe->stream, split[dc_pipe_idx] - 1); else if (!odm && resource_is_pipe_type(pipe, DPP_PIPE)) update_slice_table_for_plane(table, pipe, - pipe->plane_state, split[i] - 1); + pipe->plane_state, split[dc_pipe_idx] - 1); updated = true; } + dml_pipe_idx++; } return updated; } @@ -1233,15 +1263,11 @@ static void update_pipes_with_slice_table(struct dc *dc, struct dc_state *contex { int i; - for (i = 0; i < table->odm_combine_count; i++) { + for (i = 0; i < table->odm_combine_count; i++) resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, table->odm_combines[i].stream, table->odm_combines[i].slice_count); - /* TODO: move this into the function above */ - dcn20_build_mapped_resource(dc, context, - table->odm_combines[i].stream); - } for (i = 0; i < table->mpc_combine_count; i++) resource_update_pipes_for_plane_with_slice_count(context, @@ -1265,7 +1291,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex return updated; } -static bool should_allow_odm_power_optimization(struct dc *dc, +static bool should_apply_odm_power_optimization(struct dc *dc, struct dc_state *context, struct vba_vars_st *v, int *split, bool *merge) { @@ -1369,9 +1395,12 @@ static void try_odm_power_optimization_and_revalidate( { int i; unsigned int new_vlevel; + unsigned int cur_policy[MAX_PIPES]; - for (i = 0; i < pipe_cnt; i++) + for (i = 0; i < pipe_cnt; i++) { + cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy; pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + } new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); @@ -1380,6 +1409,9 @@ static void 
try_odm_power_optimization_and_revalidate( memset(merge, 0, MAX_PIPES * sizeof(bool)); *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge); context->bw_ctx.dml.vba.VoltageLevel = *vlevel; + } else { + for (i = 0; i < pipe_cnt; i++) + pipes[i].pipe.dest.odm_combine_policy = cur_policy[i]; } } @@ -1396,18 +1428,20 @@ static bool is_test_pattern_enabled( return false; } -static void dcn32_full_validate_bw_helper(struct dc *dc, +static bool dcn32_full_validate_bw_helper(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *vlevel, int *split, bool *merge, - int *pipe_cnt) + int *pipe_cnt, + bool *repopulate_pipes) { struct vba_vars_st *vba = &context->bw_ctx.dml.vba; unsigned int dc_pipe_idx = 0; int i = 0; bool found_supported_config = false; + int vlevel_temp = 0; dc_assert_fp_enabled(); @@ -1431,6 +1465,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, vba->VoltageLevel = *vlevel; } + /* Apply split and merge flags before checking for subvp */ + if (!dcn32_apply_merge_split_flags_helper(dc, context, repopulate_pipes, split, merge)) + return false; + memset(split, 0, MAX_PIPES * sizeof(int)); + memset(merge, 0, MAX_PIPES * sizeof(bool)); + /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP * 2. Full update (i.e. !fast_validate) @@ -1440,22 +1480,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, */ if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) && !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled(context) && - (*vlevel == context->bw_ctx.dml.soc.num_states || + (*vlevel == context->bw_ctx.dml.soc.num_states || (vba->DRAMSpeedPerState[*vlevel] != vba->DRAMSpeedPerState[0] && + vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) || vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || dc->debug.force_subvp_mclk_switch)) { - dcn32_merge_pipes_for_subvp(dc, context); - memset(merge, 0, MAX_PIPES * sizeof(bool)); - - /* to re-initialize viewport after the pipe merge */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (!pipe_ctx->plane_state || !pipe_ctx->stream) - continue; - - resource_build_scaling_params(pipe_ctx); - } + vlevel_temp = *vlevel; while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) && dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) { @@ -1515,10 +1545,14 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, } } + if (vba->DRAMSpeedPerState[*vlevel] >= vba->DRAMSpeedPerState[vlevel_temp]) + found_supported_config = false; + // If SubVP pipe config is unsupported (or cannot be used for UCLK switching) // remove phantom pipes and repopulate dml pipes if (!found_supported_config) { - dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported; *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); @@ -1540,8 +1574,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, * add phantom pipes. If pipe split (ODM / MPC) is required, both the main * and phantom pipes will be split in the regular pipe splitting sequence. 
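The cur_policy[] additions above follow a plain try/rollback pattern: snapshot the per-pipe ODM combine policy, force 2:1, and restore the snapshot if revalidation fails. A hedged sketch of the same shape (types and the revalidate callback are stand-ins, not DC API; pipe_cnt is assumed <= MAX_PIPES):

#include <stdbool.h>

#define MAX_PIPES 6

struct pipe_cfg { int odm_combine_policy; };

static bool force_odm_and_revalidate(struct pipe_cfg *pipes, int pipe_cnt,
				     int forced_policy,
				     bool (*revalidate)(struct pipe_cfg *, int))
{
	int cur_policy[MAX_PIPES];
	int i;

	for (i = 0; i < pipe_cnt; i++) {
		cur_policy[i] = pipes[i].odm_combine_policy; /* snapshot */
		pipes[i].odm_combine_policy = forced_policy;
	}

	if (revalidate(pipes, pipe_cnt))
		return true; /* new policy sticks */

	for (i = 0; i < pipe_cnt; i++) /* failed: undo, as the else branch does */
		pipes[i].odm_combine_policy = cur_policy[i];
	return false;
}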
*/ - memset(split, 0, MAX_PIPES * sizeof(int)); - memset(merge, 0, MAX_PIPES * sizeof(bool)); *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); vba->VoltageLevel = *vlevel; // Note: We can't apply the phantom pipes to hardware at this time. We have to wait @@ -1550,10 +1582,11 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, } } - if (should_allow_odm_power_optimization(dc, context, vba, split, merge)) + if (should_apply_odm_power_optimization(dc, context, vba, split, merge)) try_odm_power_optimization_and_revalidate( dc, context, pipes, split, merge, vlevel, *pipe_cnt); + return true; } static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) @@ -1670,7 +1703,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; context->res_ctx.pipe_ctx[i].unbounded_req = false; @@ -1702,7 +1735,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) && context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { /* SS: all active surfaces stored in MALL */ - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) { context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) { @@ -1893,105 +1926,23 @@ static bool dcn32_split_stream_for_mpc_or_odm( return true; } -bool dcn32_internal_validate_bw(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *vlevel_out, - bool fast_validate) +static bool dcn32_apply_merge_split_flags_helper( + struct dc *dc, + struct dc_state *context, + bool *repopulate_pipes, + int *split, + bool *merge) { - bool out = false; - bool repopulate_pipes = false; - int split[MAX_PIPES] = { 0 }; - bool merge[MAX_PIPES] = { false }; + int i, pipe_idx; bool newly_split[MAX_PIPES] = { false }; - int pipe_cnt, i, pipe_idx; - int vlevel = context->bw_ctx.dml.soc.num_states; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; - dc_assert_fp_enabled(); - - ASSERT(pipes); - if (!pipes) - return false; - - // For each full update, remove all existing phantom pipes first - dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate); - - dc->res_pool->funcs->update_soc_for_wm_a(dc, context); - - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - - if (!pipe_cnt) { - out = true; - goto validate_out; - } - - dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); - context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); - - if (!fast_validate) - dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); - - if (fast_validate || - (dc->debug.dml_disallow_alternate_prefetch_modes && - (vlevel == context->bw_ctx.dml.soc.num_states || - 
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { - /* - * If dml_disallow_alternate_prefetch_modes is false, then we have already - * tried alternate prefetch modes during full validation. - * - * If mode is unsupported or there is no p-state support, then - * fall back to favouring voltage. - * - * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try - * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2) - */ - context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = - dm_prefetch_support_none; - - context->bw_ctx.dml.validate_max_state = fast_validate; - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - context->bw_ctx.dml.validate_max_state = false; - - if (vlevel < context->bw_ctx.dml.soc.num_states) { - memset(split, 0, sizeof(split)); - memset(merge, 0, sizeof(merge)); - vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); - // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML - vba->VoltageLevel = vlevel; - } - } - - dml_log_mode_support_params(&context->bw_ctx.dml); - - if (vlevel == context->bw_ctx.dml.soc.num_states) - goto validate_fail; - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; - - if (!pipe->stream) - continue; - - if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled - && !dc->config.enable_windowed_mpo_odm - && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_state->clip_rect, - &pipe->stream->src, - sizeof(struct rect)) != 0) { - ASSERT(mpo_pipe->plane_state != pipe->plane_state); - goto validate_fail; - } - pipe_idx++; - } - if (dc->config.enable_windowed_mpo_odm) { - repopulate_pipes = update_pipes_with_split_flags( - dc, context, vba, split, merge); + if (update_pipes_with_split_flags( + dc, context, vba, split, merge)) + *repopulate_pipes = true; } else { + /* the code below will be removed once windowed mpo odm is fully * enabled. 
*/ @@ -2048,7 +1999,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); memset(&pipe->link_res, 0, sizeof(pipe->link_res)); - repopulate_pipes = true; + *repopulate_pipes = true; } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { struct pipe_ctx *top_pipe = pipe->top_pipe; struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; @@ -2064,7 +2015,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); memset(&pipe->link_res, 0, sizeof(pipe->link_res)); - repopulate_pipes = true; + *repopulate_pipes = true; } else ASSERT(0); /* Should never try to merge master pipe */ @@ -2103,15 +2054,15 @@ bool dcn32_internal_validate_bw(struct dc *dc, hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index); ASSERT(hsplit_pipe); if (!hsplit_pipe) - goto validate_fail; + return false; if (!dcn32_split_stream_for_mpc_or_odm( dc, &context->res_ctx, pipe, hsplit_pipe, odm)) - goto validate_fail; + return false; newly_split[hsplit_pipe->pipe_idx] = true; - repopulate_pipes = true; + *repopulate_pipes = true; } if (split[i] == 4) { struct pipe_ctx *pipe_4to1; @@ -2126,11 +2077,11 @@ bool dcn32_internal_validate_bw(struct dc *dc, pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index); ASSERT(pipe_4to1); if (!pipe_4to1) - goto validate_fail; + return false; if (!dcn32_split_stream_for_mpc_or_odm( dc, &context->res_ctx, pipe, pipe_4to1, odm)) - goto validate_fail; + return false; newly_split[pipe_4to1->pipe_idx] = true; if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe @@ -2145,11 +2096,11 @@ bool dcn32_internal_validate_bw(struct dc *dc, pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index); ASSERT(pipe_4to1); if (!pipe_4to1) - goto validate_fail; + return false; if (!dcn32_split_stream_for_mpc_or_odm( dc, &context->res_ctx, hsplit_pipe, pipe_4to1, odm)) - goto validate_fail; + return false; newly_split[pipe_4to1->pipe_idx] = true; } if (odm) @@ -2161,10 +2112,121 @@ bool dcn32_internal_validate_bw(struct dc *dc, if (pipe->plane_state) { if (!resource_build_scaling_params(pipe)) - goto validate_fail; + return false; } } + + for (i = 0; i < context->stream_count; i++) { + struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx, + context->streams[i]); + + if (otg_master) + resource_build_test_pattern_params(&context->res_ctx, otg_master); + } } + return true; +} + +bool dcn32_internal_validate_bw(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate) +{ + bool out = false; + bool repopulate_pipes = false; + int split[MAX_PIPES] = { 0 }; + bool merge[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx; + int vlevel = context->bw_ctx.dml.soc.num_states; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + dc_assert_fp_enabled(); + + ASSERT(pipes); + if (!pipes) + return false; + + /* For each full update, remove all existing phantom pipes first */ + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); + + dc->res_pool->funcs->update_soc_for_wm_a(dc, context); + + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + dml_log_pipe_params(&context->bw_ctx.dml, pipes, 
pipe_cnt); + context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); + + if (!fast_validate) { + if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, + &pipe_cnt, &repopulate_pipes)) + goto validate_fail; + } + + if (fast_validate || + (dc->debug.dml_disallow_alternate_prefetch_modes && + (vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { + /* + * If dml_disallow_alternate_prefetch_modes is false, then we have already + * tried alternate prefetch modes during full validation. + * + * If mode is unsupported or there is no p-state support, then + * fall back to favouring voltage. + * + * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try + * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2) + */ + context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = + dm_prefetch_support_none; + + context->bw_ctx.dml.validate_max_state = fast_validate; + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + + context->bw_ctx.dml.validate_max_state = false; + + if (vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, sizeof(split)); + memset(merge, 0, sizeof(merge)); + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); + /* dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML */ + vba->VoltageLevel = vlevel; + } + } + + dml_log_mode_support_params(&context->bw_ctx.dml); + + if (vlevel == context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + + if (!pipe->stream) + continue; + + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && !dc->config.enable_windowed_mpo_odm + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; + } + + if (!dcn32_apply_merge_split_flags_helper(dc, context, &repopulate_pipes, split, merge)) + goto validate_fail; /* Actual dsc count per stream dsc validation*/ if (!dcn20_validate_dsc(dc, context)) { @@ -2178,6 +2240,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, int i; pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + if (!dc->config.enable_windowed_mpo_odm) + dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); /* repopulate_pipes = 1 means the pipes were either split or merged. 
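The retained fallback above has a simple shape: try validation with the default prefetch policy, and on failure retry once with prefetch support forced off, trading p-state support for a passing voltage level. A minimal sketch (get_vlevel is a stand-in for dml_get_voltage_level):

/* num_states doubles as the "unsupported" sentinel, matching the
 * vlevel == soc.num_states comparison in the code above. */
static int validate_with_fallback(int (*get_vlevel)(int prefetch_policy),
				  int default_policy, int prefetch_none,
				  int num_states)
{
	int vlevel = get_vlevel(default_policy);

	if (vlevel == num_states)
		vlevel = get_vlevel(prefetch_none);
	return vlevel; /* caller fails validation if still == num_states */
}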
In this case * we have to re-calculate the DET allocation and run through DML once more to @@ -2186,7 +2250,9 @@ bool dcn32_internal_validate_bw(struct dc *dc, * */ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_uclk_fclk_and_stutter_if_possible; + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + if (vlevel == context->bw_ctx.dml.soc.num_states) { /* failed after DET size changes */ goto validate_fail; @@ -2231,6 +2297,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, int i, pipe_idx, vlevel_temp = 0; double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz; double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; + double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed; double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation; bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_unsupported; @@ -2418,7 +2485,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, } if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) { - min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed; + min_dram_speed_mts = dram_speed_from_validation; min_dram_speed_mts_margin = 160; context->bw_ctx.dml.soc.dram_clock_change_latency_us = @@ -3294,25 +3361,24 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) { bool allow = false; uint32_t refresh_rate = 0; + uint32_t min_refresh = subvp_active_margin_list.min_refresh; + uint32_t max_refresh = subvp_active_margin_list.max_refresh; + uint32_t i; - /* Allow subvp on displays that have active margin for 2560x1440@60hz displays - * only for now. There must be no scaling as well. - * - * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs - * for p-state switching. - */ - if (pipe->stream && pipe->plane_state) { - refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + - pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) - / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); - if (pipe->stream->timing.v_addressable == 1440 && - pipe->stream->timing.h_addressable == 2560 && - refresh_rate >= 55 && refresh_rate <= 65 && - pipe->plane_state->src_rect.height == 1440 && - pipe->plane_state->src_rect.width == 2560 && - pipe->plane_state->dst_rect.height == 1440 && - pipe->plane_state->dst_rect.width == 2560) + for (i = 0; i < SUBVP_ACTIVE_MARGIN_LIST_LEN; i++) { + uint32_t width = subvp_active_margin_list.res[i].width; + uint32_t height = subvp_active_margin_list.res[i].height; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + + if (refresh_rate >= min_refresh && refresh_rate <= max_refresh && + dcn32_check_native_scaling_for_res(pipe, width, height)) { allow = true; + break; + } } return allow; } @@ -3431,7 +3497,15 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->stream) + /* In DCN32/321, FPO uses per-pipe P-State force. 
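The rewritten dcn32_allow_subvp_with_active_margin() above is now table-driven: a resolution allow-list plus a refresh window replace the hard-coded 2560x1440@60 test. A standalone sketch of the same shape (names invented; the native-scaling check is elided):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct res_entry { uint32_t width, height; };

static const struct res_entry allow_list[] = {
	{ 2560, 1440 },
	{ 1920, 1080 },
};

static bool allow_active_margin(uint32_t refresh_hz, uint32_t w, uint32_t h)
{
	size_t i;

	if (refresh_hz < 55 || refresh_hz > 65) /* min/max_refresh window */
		return false;
	for (i = 0; i < sizeof(allow_list) / sizeof(allow_list[0]); i++)
		if (allow_list[i].width == w && allow_list[i].height == h)
			return true;
	return false;
}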
+ * If there are no planes, HUBP is power gated and + * therefore programming UCLK_PSTATE_FORCE does + * nothing (P-State will always be asserted naturally + * on a pipe that has HUBP power gated). Therefore we + * only want to enable FPO if the FPO pipe has both + * a stream and a plane. + */ + if (!pipe->stream || !pipe->plane_state) continue; if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index cbdfb762c10c..6c84b0fa40f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -813,6 +813,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, + mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false, + /* Output */ &v->DSTXAfterScaler[k], &v->DSTYAfterScaler[k], @@ -3317,6 +3319,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->SwathHeightCThisState[k], v->TWait, (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ? mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, + mode_lib->vba.PrefetchModePerState[i][j] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false, /* Output */ &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k], diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index d940dfa5ae43..ba1310c8fd77 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1650,6 +1650,8 @@ double dml32_TruncToValidBPP( MaxLinkBPP = 2 * MaxLinkBPP; } + *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1); + if (DesiredBPP == 0) { if (DSCEnable) { if (MaxLinkBPP < MinDSCBPP) @@ -1676,10 +1678,6 @@ double dml32_TruncToValidBPP( else return DesiredBPP; } - - *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1); - - return BPP_INVALID; } // TruncToValidBPP double dml32_RequiredDTBCLK( @@ -1975,8 +1973,8 @@ void dml32_CalculateVMRowAndSwath( unsigned int PTEBufferSizeInRequestsForChroma[DC__NUM_DPP__MAX]; unsigned int PDEAndMetaPTEBytesFrameY; unsigned int PDEAndMetaPTEBytesFrameC; - unsigned int MetaRowByteY[DC__NUM_DPP__MAX]; - unsigned int MetaRowByteC[DC__NUM_DPP__MAX]; + unsigned int MetaRowByteY[DC__NUM_DPP__MAX] = {0}; + unsigned int MetaRowByteC[DC__NUM_DPP__MAX] = {0}; unsigned int PixelPTEBytesPerRowY[DC__NUM_DPP__MAX]; unsigned int PixelPTEBytesPerRowC[DC__NUM_DPP__MAX]; unsigned int PixelPTEBytesPerRowY_one_row_per_frame[DC__NUM_DPP__MAX]; @@ -3423,6 +3421,7 @@ bool dml32_CalculatePrefetchSchedule( unsigned int SwathHeightC, double TWait, double TPreReq, + bool ExtendPrefetchIfPossible, /* Output */ double *DSTXAfterScaler, double *DSTYAfterScaler, @@ -3892,12 +3891,32 @@ bool dml32_CalculatePrefetchSchedule( /* Clamp to oto for bandwidth calculation */ LinesForPrefetchBandwidth = dst_y_prefetch_oto; } else { -
*DestinationLinesForPrefetch = dst_y_prefetch_equ; - TimeForFetchingMetaPTE = Tvm_equ; - TimeForFetchingRowInVBlank = Tr0_equ; - *PrefetchBandwidth = prefetch_bw_equ; - /* Clamp to equ for bandwidth calculation */ - LinesForPrefetchBandwidth = dst_y_prefetch_equ; + /* For mode programming we want to extend the prefetch as much as possible + * (up to oto, or as long as we can for equ) if we're not already applying + * the 60us prefetch requirement. This is to avoid intermittent underflow + * issues during prefetch. + * + * The prefetch extension is applied under the following scenarios: + * 1. We're in prefetch mode > 0 (i.e. we don't support MCLK switch in blank) + * 2. We're using subvp or drr methods of p-state switch, in which case we + * don't care if prefetch takes up more of the blanking time + * + * Mode programming typically chooses the smallest prefetch time possible + * (i.e. highest bandwidth during prefetch) presumably to create margin between + * p-states / c-states that happen in vblank and prefetch. Therefore we only + * apply this prefetch extension when p-state in vblank is not required (UCLK + * p-states take up the most vblank time). + */ + if (ExtendPrefetchIfPossible && TPreReq == 0 && VStartup < MaxVStartup) { + MyError = true; + } else { + *DestinationLinesForPrefetch = dst_y_prefetch_equ; + TimeForFetchingMetaPTE = Tvm_equ; + TimeForFetchingRowInVBlank = Tr0_equ; + *PrefetchBandwidth = prefetch_bw_equ; + /* Clamp to equ for bandwidth calculation */ + LinesForPrefetchBandwidth = dst_y_prefetch_equ; + } } *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0; @@ -4270,7 +4289,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( unsigned int i, j, k; unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0; unsigned int DRAMClockChangeSupportNumber = 0; - unsigned int LastSurfaceWithoutMargin; + unsigned int LastSurfaceWithoutMargin = 0; unsigned int DRAMClockChangeMethod = 0; bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false; double MinActiveFCLKChangeMargin = 0.; @@ -5635,9 +5654,9 @@ void dml32_CalculateStutterEfficiency( double LastZ8StutterPeriod = 0.0; double LastStutterPeriod = 0.0; unsigned int TotalNumberOfActiveOTG = 0; - double doublePixelClock; - unsigned int doubleHTotal; - unsigned int doubleVTotal; + double doublePixelClock = 0; + unsigned int doubleHTotal = 0; + unsigned int doubleVTotal = 0; bool SameTiming = true; double DETBufferingTimeY; double SwathWidthYCriticalSurface = 0.0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h index 592d174df6c6..5d34735df83d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h @@ -747,6 +747,7 @@ bool dml32_CalculatePrefetchSchedule( unsigned int SwathHeightC, double TWait, double TPreReq, + bool ExtendPrefetchIfPossible, /* Output */ double *DSTXAfterScaler, double *DSTYAfterScaler, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index a5fe523668e9..60f251cf973b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -124,7 +124,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .phyclk_mhz = 600.0, .phyclk_d18_mhz = 667.0, .dscclk_mhz = 186.0, - .dtbclk_mhz = 625.0, + .dtbclk_mhz = 600.0, }, {
.state = 1, @@ -133,7 +133,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .phyclk_mhz = 810.0, .phyclk_d18_mhz = 667.0, .dscclk_mhz = 209.0, - .dtbclk_mhz = 625.0, + .dtbclk_mhz = 600.0, }, { .state = 2, @@ -142,7 +142,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .phyclk_mhz = 810.0, .phyclk_d18_mhz = 667.0, .dscclk_mhz = 209.0, - .dtbclk_mhz = 625.0, + .dtbclk_mhz = 600.0, }, { .state = 3, @@ -151,7 +151,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .phyclk_mhz = 810.0, .phyclk_d18_mhz = 667.0, .dscclk_mhz = 371.0, - .dtbclk_mhz = 625.0, + .dtbclk_mhz = 600.0, }, { .state = 4, @@ -160,15 +160,15 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .phyclk_mhz = 810.0, .phyclk_d18_mhz = 667.0, .dscclk_mhz = 417.0, - .dtbclk_mhz = 625.0, + .dtbclk_mhz = 600.0, }, }, .num_states = 5, - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, - .sr_exit_z8_time_us = 50.0, /*changed from 442.0*/ - .sr_enter_plus_exit_z8_time_us = 50.0,/*changed from 560.0*/ - .fclk_change_latency_us = 20.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, + .sr_exit_z8_time_us = 250.0, + .sr_enter_plus_exit_z8_time_us = 350.0, + .fclk_change_latency_us = 24.0, .usr_retraining_latency_us = 2, .writeback_latency_us = 12.0, @@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .dcn_downspread_percent = 0.5, .gpuvm_min_page_size_bytes = 4096, .hostvm_min_page_size_bytes = 4096, - .do_urgent_latency_adjustment = 0, + .do_urgent_latency_adjustment = 1, .urgent_latency_adjustment_fabric_clock_component_us = 0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, }; void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) @@ -326,9 +326,74 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc, dcn3_5_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000.0; } + + if (dc->bb_overrides.dram_clock_change_latency_ns > 0) + dcn3_5_soc.dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + + if (dc->bb_overrides.sr_exit_time_ns > 0) + dcn3_5_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0) + dcn3_5_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + + if (dc->bb_overrides.sr_exit_z8_time_ns > 0) + dcn3_5_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0) + dcn3_5_soc.sr_enter_plus_exit_z8_time_us = + dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0; + /*temp till dml2 fully work without dml1*/ dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31); + + /*copy to dml2, before dml2_create*/ + if (clk_table->num_entries > 2) { + + for (i = 0; i < clk_table->num_entries; i++) { + dc->dml2_options.bbox_overrides.clks_table.num_states = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz = + clock_limits[i].dcfclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz = + clock_limits[i].fabricclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz = + clock_limits[i].dispclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz = + clock_limits[i].dppclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz = + clock_limits[i].socclk_mhz; + 
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = + clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = + clock_limits[i].dtbclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels = + clk_table->num_entries; + } + } + + /* Update latency values */ + dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_5_soc.dram_clock_change_latency_us; + + dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_5_soc.sr_exit_time_us; + dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_5_soc.sr_enter_plus_exit_time_us; + + dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_5_soc.sr_exit_z8_time_us; + dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_5_soc.sr_enter_plus_exit_z8_time_us; } static bool is_dual_plane(enum surface_pixel_format format) @@ -374,7 +439,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; @@ -507,3 +572,45 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, return pipe_cnt; } + +void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context) +{ + enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW; + unsigned int i, plane_count = 0; + DC_LOGGER_INIT(dc->ctx->logger); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].plane_state) + plane_count++; + } + + if (context->stream_count == 0 || plane_count == 0) { + support = DCN_ZSTATE_SUPPORT_ALLOW; + } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { + struct dc_link *link = context->streams[0]->sink->link; + bool is_pwrseq0 = link && link->link_index == 0; + bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 || + link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr); + bool is_replay = link && link->replay_settings.replay_feature_enabled; + int minmum_z8_residency = + dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000; + bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; + int minmum_z10_residency = + dc->debug.minimum_z10_residency_time > 0 ? 
dc->debug.minimum_z10_residency_time : 5000; + bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency; + + /*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/ + if (is_pwrseq0 && allow_z10) + support = DCN_ZSTATE_SUPPORT_ALLOW; + else if (is_pwrseq0 && (is_psr || is_replay)) + support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY; + else if (allow_z8) + support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; + + } + + DC_LOG_SMU("zstate_support: %d, StutterPeriod: %d\n", support, + (int)context->bw_ctx.dml.vba.StutterPeriod); + + context->bw_ctx.bw.dcn.clk.zstate_support = support; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h index e8d5a170893e..067480fc3691 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h @@ -39,4 +39,6 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, display_e2e_pipe_params_st *pipes, bool fast_validate); +void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c new file mode 100644 index 000000000000..e4f333d4fb54 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -0,0 +1,638 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. */ +#include "resource.h" +#include "dcn351_fpu.h" +#include "dcn31/dcn31_resource.h" +#include "dcn32/dcn32_resource.h" +#include "dcn35/dcn35_resource.h" +#include "dcn351/dcn351_resource.h" +#include "dml/dcn31/dcn31_fpu.h" +#include "dml/dcn35/dcn35_fpu.h" +#include "dml/dml_inline_defs.h" + +#include "link.h" + +#define DC_LOGGER_INIT(logger) + +struct _vcs_dpi_ip_params_st dcn3_51_ip = { + .VBlankNomDefaultUS = 668, + .gpuvm_enable = 1, + .gpuvm_max_page_table_levels = 1, + .hostvm_enable = 1, + .hostvm_max_page_table_levels = 2, + .rob_buffer_size_kbytes = 64, + .det_buffer_size_kbytes = 1536, + .config_return_buffer_size_in_kbytes = 1792, + .compressed_buffer_segment_size_in_kbytes = 64, + .meta_fifo_size_in_kentries = 32, + .zero_size_buffer_entries = 512, + .compbuf_reserved_space_64b = 256, + .compbuf_reserved_space_zs = 64, + .dpp_output_buffer_pixels = 2560,/*not used*/ + .opp_output_buffer_lines = 1,/*not used*/ + .pixel_chunk_size_kbytes = 8, + //.alpha_pixel_chunk_size_kbytes = 4;/*new*/ + //.min_pixel_chunk_size_bytes = 1024;/*new*/ + .meta_chunk_size_kbytes = 2, + .min_meta_chunk_size_bytes = 256, + .writeback_chunk_size_kbytes = 8, + .ptoi_supported = false, + .num_dsc = 4, + .maximum_dsc_bits_per_component = 12,/*delta from 10*/ + .dsc422_native_support = true,/*delta from false*/ + .is_line_buffer_bpp_fixed = true,/*new*/ + .line_buffer_fixed_bpp = 32,/*delta from 48*/ + .line_buffer_size_bits = 986880,/*delta from 789504*/ + .max_line_buffer_lines = 32,/*delta from 12*/ + .writeback_interface_buffer_size_kbytes = 90, + .max_num_dpp = 4, + .max_num_otg = 4, + .max_num_hdmi_frl_outputs = 1, + .max_num_wb = 1, + /*.max_num_hdmi_frl_outputs = 1; new in dml2*/ + /*.max_num_dp2p0_outputs = 2; new in dml2*/ + /*.max_num_dp2p0_streams = 4; new in dml2*/ + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 6, + .max_vscl_ratio = 6, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + 
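Reduced to its inputs, the decision ladder in dcn35_decide_zstate_support() above looks like the following sketch (enum and helper names are invented; 1000 us and 5000 us are the default Z8/Z10 residency thresholds from the hunk):

#include <stdbool.h>

enum zstate { Z_DISALLOW, Z_ALLOW, Z_ALLOW_Z8_Z10_ONLY, Z_ALLOW_Z8_ONLY };

static enum zstate decide_zstate(int streams, int planes, bool edp,
				 bool pwrseq0, bool psr_or_replay,
				 double stutter_period_us)
{
	if (streams == 0 || planes == 0)
		return Z_ALLOW;                     /* display idle: deepest state */
	if (streams == 1 && edp) {
		if (pwrseq0 && stutter_period_us > 5000)
			return Z_ALLOW;             /* long stutter period: Z10 OK */
		if (pwrseq0 && psr_or_replay)
			return Z_ALLOW_Z8_Z10_ONLY; /* PSR/Replay eDP panel */
		if (stutter_period_us > 1000)
			return Z_ALLOW_Z8_ONLY;
	}
	return Z_DISALLOW;
}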
.dpte_buffer_size_in_pte_reqs_luma = 68,/*changed from 64,*/ + .dpte_buffer_size_in_pte_reqs_chroma = 36,/*changed from 34*/ + /*.dcc_meta_buffer_size_bytes = 6272; new to dml2*/ + .dispclk_ramp_margin_percent = 1.11,/*delta from 1*/ + /*.dppclk_delay_subtotal = 47; + .dppclk_delay_scl = 50; + .dppclk_delay_scl_lb_only = 16; + .dppclk_delay_cnvc_formatter = 28; + .dppclk_delay_cnvc_cursor = 6; + .dispclk_delay_subtotal = 125;*/ /*new to dml2*/ + .max_inter_dcn_tile_repeaters = 8, + .cursor_buffer_size = 16, + .cursor_chunk_size = 2, + .writeback_line_buffer_buffer_size = 0, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_max_hscl_taps = 1, + .writeback_max_vscl_taps = 1, + .dppclk_delay_subtotal = 47, /* changed from 46,*/ + .dppclk_delay_scl = 50, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_cnvc_formatter = 28,/*changed from 27,*/ + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 125, /*changed from 119,*/ + .dynamic_metadata_vm_enabled = false, + .odm_combine_4to1_supported = false, + .dcc_supported = true, +// .config_return_buffer_segment_size_in_kbytes = 64;/*required, hard coded in dml2_translate_ip_params*/ + +}; + +struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { + /*TODO: correct dispclk/dppclk voltage level determination*/ + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 400.0, + .fabricclk_mhz = 400.0, + .socclk_mhz = 600.0, + .dram_speed_mts = 3200.0, + .dispclk_mhz = 600.0, + .dppclk_mhz = 600.0, + .phyclk_mhz = 600.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 200.0, + .dtbclk_mhz = 600.0, + }, + { + .state = 1, + .dcfclk_mhz = 600.0, + .fabricclk_mhz = 1000.0, + .socclk_mhz = 733.0, + .dram_speed_mts = 6400.0, + .dispclk_mhz = 800.0, + .dppclk_mhz = 800.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 266.7, + .dtbclk_mhz = 600.0, + }, + { + .state = 2, + .dcfclk_mhz = 738.0, + .fabricclk_mhz = 1200.0, + .socclk_mhz = 880.0, + .dram_speed_mts = 7500.0, + .dispclk_mhz = 800.0, + .dppclk_mhz = 800.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 266.7, + .dtbclk_mhz = 600.0, + }, + { + .state = 3, + .dcfclk_mhz = 800.0, + .fabricclk_mhz = 1400.0, + .socclk_mhz = 978.0, + .dram_speed_mts = 7500.0, + .dispclk_mhz = 960.0, + .dppclk_mhz = 960.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 320.0, + .dtbclk_mhz = 600.0, + }, + { + .state = 4, + .dcfclk_mhz = 873.0, + .fabricclk_mhz = 1600.0, + .socclk_mhz = 1100.0, + .dram_speed_mts = 8533.0, + .dispclk_mhz = 1066.7, + .dppclk_mhz = 1066.7, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 355.6, + .dtbclk_mhz = 600.0, + }, + { + .state = 5, + .dcfclk_mhz = 960.0, + .fabricclk_mhz = 1700.0, + .socclk_mhz = 1257.0, + .dram_speed_mts = 8533.0, + .dispclk_mhz = 1200.0, + .dppclk_mhz = 1200.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 400.0, + .dtbclk_mhz = 600.0, + }, + { + .state = 6, + .dcfclk_mhz = 1067.0, + .fabricclk_mhz = 1850.0, + .socclk_mhz = 1257.0, + .dram_speed_mts = 8533.0, + .dispclk_mhz = 1371.4, + .dppclk_mhz = 1371.4, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 457.1, + .dtbclk_mhz = 600.0, + }, + { + .state = 7, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 2000.0, + .socclk_mhz = 1467.0, + .dram_speed_mts = 8533.0, + .dispclk_mhz = 1600.0, + .dppclk_mhz = 1600.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 533.3, + .dtbclk_mhz = 600.0, + }, + }, + 
.num_states = 8, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, + .sr_exit_z8_time_us = 250.0, + .sr_enter_plus_exit_z8_time_us = 350.0, + .fclk_change_latency_us = 24.0, + .usr_retraining_latency_us = 2, + .writeback_latency_us = 12.0, + + .dram_channel_width_bytes = 4,/*not exist in dml2*/ + .round_trip_ping_latency_dcfclk_cycles = 106,/*not exist in dml2*/ + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .dram_clock_change_latency_us = 11.72, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + + .pct_ideal_sdp_bw_after_urgent = 80.0, + .pct_ideal_fabric_bw_after_urgent = 80.0, /*new to dml2*/ + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 60.0, + .fabric_datapath_to_dcn_data_return_bytes = 32, + .return_bus_width_bytes = 64, + .downspread_percent = 0.38, + .dcn_downspread_percent = 0.5, + .gpuvm_min_page_size_bytes = 4096, + .hostvm_min_page_size_bytes = 4096, + .do_urgent_latency_adjustment = 0, + .urgent_latency_adjustment_fabric_clock_component_us = 0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .num_chans = 4, + .dispclk_dppclk_vco_speed_mhz = 2400.0, +}; + +/* + * dcn351_update_bw_bounding_box + * + * This would override some dcn3_51 ip_or_soc initial parameters hardcoded from + * spreadsheet with actual values as per dGPU SKU: + * - with passed few options from dc->config + * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might + * need to get it from PM FW) + * - with passed latency values (passed in ns units) in dc-> bb override for + * debugging purposes + * - with passed latencies from VBIOS (in 100_ns units) if available for + * certain dGPU SKU + * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU + * of the same ASIC) + * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM + * FW for different clocks (which might differ for certain dGPU SKU of the + * same ASIC) + */ +void dcn351_update_bw_bounding_box_fpu(struct dc *dc, + struct clk_bw_params *bw_params) +{ + unsigned int i, closest_clk_lvl; + int j; + struct clk_limit_table *clk_table = &bw_params->clk_table; + struct _vcs_dpi_voltage_scaling_st *clock_limits = + dc->scratch.update_bw_bounding_box.clock_limits; + int max_dispclk_mhz = 0, max_dppclk_mhz = 0; + + dc_assert_fp_enabled(); + + dcn3_51_ip.max_num_otg = + dc->res_pool->res_cap->num_timing_generator; + dcn3_51_ip.max_num_dpp = dc->res_pool->pipe_count; + dcn3_51_soc.num_chans = bw_params->num_channels; + + ASSERT(clk_table->num_entries); + + /* Prepass to find max clocks independent of voltage level. 
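The loop that follows maps each SMU clock-table entry onto the nearest hardcoded SOC state by walking the states from highest to lowest. A standalone sketch of that search (array and parameter names invented):

/* Returns the highest SOC state whose DCFCLK fits under the SMU entry;
 * with a single SMU DPM level the top state is used, mirroring the
 * "smu gives one DPM level" special case below. */
static int closest_clk_level(const int *soc_dcfclk_mhz, int num_states,
			     int entry_dcfclk_mhz, int num_entries)
{
	int j;

	if (num_entries == 1)
		return num_states - 1;
	for (j = num_states - 1; j >= 0; j--)
		if (soc_dcfclk_mhz[j] <= entry_dcfclk_mhz)
			return j;
	return 0;
}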
*/ + for (i = 0; i < clk_table->num_entries; ++i) { + if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = clk_table->entries[i].dispclk_mhz; + if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = clk_table->entries[i].dppclk_mhz; + } + + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_51_soc.num_states - 1; + j >= 0; j--) { + if (dcn3_51_soc.clock_limits[j].dcfclk_mhz <= + clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; + } + } + if (clk_table->num_entries == 1) { + /*smu gives one DPM level, let's take the highest one*/ + closest_clk_lvl = dcn3_51_soc.num_states - 1; + } + + clock_limits[i].state = i; + + /* Clocks dependent on voltage level. */ + clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + if (clk_table->num_entries == 1 && + clock_limits[i].dcfclk_mhz < + dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) { + /*SMU fix not released yet*/ + clock_limits[i].dcfclk_mhz = + dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz; + } + + clock_limits[i].fabricclk_mhz = + clk_table->entries[i].fclk_mhz; + clock_limits[i].socclk_mhz = + clk_table->entries[i].socclk_mhz; + + if (clk_table->entries[i].memclk_mhz && + clk_table->entries[i].wck_ratio) + clock_limits[i].dram_speed_mts = + clk_table->entries[i].memclk_mhz * 2 * + clk_table->entries[i].wck_ratio; + + /* Clocks independent of voltage level. */ + clock_limits[i].dispclk_mhz = max_dispclk_mhz ? + max_dispclk_mhz : + dcn3_51_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + + clock_limits[i].dppclk_mhz = max_dppclk_mhz ? + max_dppclk_mhz : + dcn3_51_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + + clock_limits[i].dram_bw_per_chan_gbps = + dcn3_51_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + clock_limits[i].dscclk_mhz = + dcn3_51_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + clock_limits[i].dtbclk_mhz = + dcn3_51_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + clock_limits[i].phyclk_d18_mhz = + dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + clock_limits[i].phyclk_mhz = + dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + + memcpy(dcn3_51_soc.clock_limits, clock_limits, + sizeof(dcn3_51_soc.clock_limits)); + + if (clk_table->num_entries) + dcn3_51_soc.num_states = clk_table->num_entries; + + if (max_dispclk_mhz) { + dcn3_51_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2; + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2; + } + if ((int)(dcn3_51_soc.dram_clock_change_latency_us * 1000) + != dc->debug.dram_clock_change_latency_ns + && dc->debug.dram_clock_change_latency_ns) { + dcn3_51_soc.dram_clock_change_latency_us = + dc->debug.dram_clock_change_latency_ns / 1000.0; + } + + if (dc->bb_overrides.dram_clock_change_latency_ns > 0) + dcn3_51_soc.dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + + if (dc->bb_overrides.sr_exit_time_ns > 0) + dcn3_51_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0) + dcn3_51_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + + if (dc->bb_overrides.sr_exit_z8_time_ns > 0) + dcn3_51_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0) + dcn3_51_soc.sr_enter_plus_exit_z8_time_us = + dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0; + + /*temp till dml2 fully work without 
dml1*/ + dml_init_instance(&dc->dml, &dcn3_51_soc, &dcn3_51_ip, + DML_PROJECT_DCN31); + + /*copy to dml2, before dml2_create*/ + if (clk_table->num_entries > 2) { + + for (i = 0; i < clk_table->num_entries; i++) { + dc->dml2_options.bbox_overrides.clks_table.num_states = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz = + clock_limits[i].dcfclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz = + clock_limits[i].fabricclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz = + clock_limits[i].dispclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz = + clock_limits[i].dppclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz = + clock_limits[i].socclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = + clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = + clock_limits[i].dtbclk_mhz; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = + clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels = + clk_table->num_entries; + } + } + + /* Update latency values */ + dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_51_soc.dram_clock_change_latency_us; + + dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_51_soc.sr_exit_time_us; + dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_51_soc.sr_enter_plus_exit_time_us; + + dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_51_soc.sr_exit_z8_time_us; + dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_51_soc.sr_enter_plus_exit_z8_time_us; +} + +static bool is_dual_plane(enum surface_pixel_format format) +{ + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || + format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +/* + * micro_sec_to_vert_lines () - converts time to number of vertical lines for a given timing + * + * @param: num_us: number of microseconds + * @return: number of vertical lines. 
If an exact number of vertical lines is not found, then + * it will round up to the next number of lines to guarantee num_us + */ +static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing) +{ + unsigned int num_lines = 0; + unsigned int lines_time_in_ns = 1000.0 * + (((float)timing->h_total * 1000.0) / + ((float)timing->pix_clk_100hz / 10.0)); + + num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0); + + return num_lines; +} + +static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) +{ + unsigned int v_active = 0, v_blank = 0, v_back_porch = 0; + + v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom; + v_blank = timing->v_total - v_active; + v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width; + + return v_back_porch; +} + +int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe = 0; + bool upscaled = false; + const unsigned int max_allowed_vblank_nom = 1023; + + dcn31_populate_dml_pipes_from_context(dc, context, pipes, + fast_validate); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing; + unsigned int num_lines = 0; + unsigned int v_back_porch = 0; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + + num_lines = micro_sec_to_vert_lines(dcn3_51_ip.VBlankNomDefaultUS, timing); + v_back_porch = get_vertical_back_porch(timing); + + if (pipe->stream->adjust.v_total_max == + pipe->stream->adjust.v_total_min && + pipe->stream->adjust.v_total_min > timing->v_total) { + pipes[pipe_cnt].pipe.dest.vtotal = + pipe->stream->adjust.v_total_min; + pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - + pipes[pipe_cnt].pipe.dest.vactive; + } + + pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive; + pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines); + // vblank_nom should not be smaller than (VSync (timing->v_sync_width + v_back_porch) + 2) + // + 2 is because + // 1 -> VStartup_start should be 1 line before VSync + // 1 -> always reserve 1 line between the start of vblank and the vstartup signal + pipes[pipe_cnt].pipe.dest.vblank_nom = + max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2); + pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom); + + if (pipe->plane_state && + (pipe->plane_state->src_rect.height < + pipe->plane_state->dst_rect.height || + pipe->plane_state->src_rect.width < + pipe->plane_state->dst_rect.width)) + upscaled = true; + + /* + * Immediate flip can be set dynamically after enabling the + * plane. We need to require support for immediate flip or + * underflow can be intermittently experienced depending on peak + * b/w requirements.
*/ + pipes[pipe_cnt].pipe.src.immediate_flip = true; + + pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + + DC_FP_START(); + dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); + DC_FP_END(); + + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + pipes[pipe_cnt].pipe.src.dcc_rate = 3; + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; + + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + + pipe_cnt++; + } + + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384; /* per guide */ + dc->config.enable_4to1MPC = false; + + if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 1920 && + pipe->plane_state->src_rect.height <= 1080) { + dc->config.enable_4to1MPC = true; + } else if (!is_dual_plane(pipe->plane_state->format) && + pipe->plane_state->src_rect.width <= 5120) { + /* + * Limit to 5k max to avoid forced pipe split when there + * is not enough detile for swath + */ + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + pipes[0].pipe.src.unbounded_req_mode = true; + } + } else if (context->stream_count >= + dc->debug.crb_alloc_policy_min_disp_count && + dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = + dc->debug.crb_alloc_policy * 64; + } else if (context->stream_count >= 3 && upscaled) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->stream) + continue; + + if (pipe->stream->signal == SIGNAL_TYPE_EDP && + dc->debug.seamless_boot_odm_combine && + pipe->stream->apply_seamless_boot_optimization) { + + if (pipe->stream->apply_boot_odm_mode == + dm_odm_combine_policy_2to1) { + context->bw_ctx.dml.vba.ODMCombinePolicy = + dm_odm_combine_policy_2to1; + break; + } + } + } + + return pipe_cnt; +} + +void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context) +{ + enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW; + unsigned int i, plane_count = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].plane_state) + plane_count++; + } + + /* dcn351 does not support z9/z10 */ + if (context->stream_count == 0 || plane_count == 0) { + support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; + } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { + struct dc_link *link = context->streams[0]->sink->link; + bool is_pwrseq0 = link && link->link_index == 0; + bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 || + link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr); + bool is_replay = link && link->replay_settings.replay_feature_enabled; + int minimum_z8_residency = + dc->debug.minimum_z8_residency_time > 0 ?
dc->debug.minimum_z8_residency_time : 1000; + bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minimum_z8_residency; + + /* for PSR1/PSR-SU we allow Z8 and Z10 based on latency; for replay with IPS enabled, it will enter IPS2 */ + if (is_pwrseq0 && (is_psr || is_replay)) + support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW; + } + context->bw_ctx.bw.dcn.clk.zstate_support = support; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h new file mode 100644 index 000000000000..f93efab9a668 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. */ + +#ifndef __DCN351_FPU_H__ +#define __DCN351_FPU_H__ + +#include "clk_mgr.h" + +void dcn351_update_bw_bounding_box_fpu(struct dc *dc, + struct clk_bw_params *bw_params); + +int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate); + +void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index 70ae5eba624e..1c9498a72520 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -60,9 +60,14 @@ endif endif ifneq ($(CONFIG_FRAME_WARN),0) +ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) +frame_warn_flag := -Wframe-larger-than=3072 +else frame_warn_flag := -Wframe-larger-than=2048 endif +endif +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2 CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 510be909cd75..3e919f5c00ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -31,6 +31,8 @@ #include "dml_assert.h" #define DML2_MAX_FMT_420_BUFFER_WIDTH 4096 +#define TB_BORROWED_MAX 400 + // --------------------------- // Declaration Begins // --------------------------- @@ -2782,6 +2784,8 @@ static dml_float_t TruncToValidBPP( } } + *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1)); + if (DesiredBPP == 0) { if (DSCEnable) { if (MaxLinkBPP < MinDSCBPP) { @@ -2810,10 +2814,6 @@ static dml_float_t TruncToValidBPP( return DesiredBPP; } } - - *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1)); - - return __DML_DPP_INVALID__; } // TruncToValidBPP static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( @@ -3790,9 +3790,9 @@ static void CalculateStutterEfficiency(struct display_mode_lib_scratch_st *scrat dml_bool_t FoundCriticalSurface = false; dml_uint_t TotalNumberOfActiveOTG = 0; - dml_float_t SinglePixelClock; - dml_uint_t SingleHTotal; - dml_uint_t SingleVTotal; + dml_float_t SinglePixelClock = 0; + dml_uint_t SingleHTotal = 0; + dml_uint_t SingleVTotal = 0; dml_bool_t SameTiming = true; dml_float_t LastStutterPeriod = 0.0; @@ -5420,7 +5420,7 @@ static void CalculateOutputLink( *OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots); - if (OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) { + if (*OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) { *RequiresDSC = true; LinkDSCEnable = true; *OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output, @@ -6229,7 +6229,7 @@ static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *m CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable; CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable; CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; - CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes; + CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k]; CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled; CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k]; @@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib) mode_lib->ms.NoOfDPPThisState, mode_lib->ms.dpte_group_bytes, s->HostVMInefficiencyFactor, - mode_lib->ms.soc.hostvm_min_page_size_kbytes, + mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024, mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels); s->NextMaxVStartup = s->MaxVStartupAllPlanes[j]; @@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib) mode_lib->ms.cache_display_cfg.plane.HostVMEnable, mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels, mode_lib->ms.cache_display_cfg.plane.GPUVMEnable, - mode_lib->ms.soc.hostvm_min_page_size_kbytes, + mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024, mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k], mode_lib->ms.MetaRowBytes[j][k], mode_lib->ms.DPTEBytesPerRow[j][k], @@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib) CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes; - CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes; + CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn; CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode; CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState; @@ -7957,7 +7957,7 @@ dml_bool_t 
dml_core_mode_support(struct display_mode_lib_st *mode_lib) UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable; UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes; - UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes; + UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled; UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal; @@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes; - CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes; + CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn; CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode; CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0]; @@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc mode_lib->ms.cache_display_cfg.hw.DPPPerSurface, locals->dpte_group_bytes, s->HostVMInefficiencyFactor, - mode_lib->ms.soc.hostvm_min_page_size_kbytes, + mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024, mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels); locals->TCalc = 24.0 / locals->DCFCLKDeepSleep; @@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable; CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable; CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; - CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes; + CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k]; CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled; CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k]; @@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc mode_lib->ms.cache_display_cfg.plane.HostVMEnable, mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels, mode_lib->ms.cache_display_cfg.plane.GPUVMEnable, - 
mode_lib->ms.soc.hostvm_min_page_size_kbytes, + mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024, locals->PDEAndMetaPTEBytesFrame[k], locals->MetaRowByte[k], locals->PixelPTEBytesPerRow[k], @@ -9446,13 +9446,13 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc CalculateWatermarks_params->CompressedBufferSizeInkByte = locals->CompressedBufferSizeInkByte; // Output - CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark - CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[j]; - CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0][0]; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[] - CalculateWatermarks_params->SubViewportLinesNeededInMALL = &mode_lib->ms.SubViewportLinesNeededInMALL[j]; // dml_uint_t SubViewportLinesNeededInMALL[] - CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[j]; - CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // dml_float_t *MaxActiveFCLKChangeLatencySupported - CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[j]; + CalculateWatermarks_params->Watermark = &locals->Watermark; // Watermarks *Watermark + CalculateWatermarks_params->DRAMClockChangeSupport = &locals->DRAMClockChangeSupport; + CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = locals->MaxActiveDRAMClockChangeLatencySupported; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[] + CalculateWatermarks_params->SubViewportLinesNeededInMALL = locals->SubViewportLinesNeededInMALL; // dml_uint_t SubViewportLinesNeededInMALL[] + CalculateWatermarks_params->FCLKChangeSupport = &locals->FCLKChangeSupport; + CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &locals->MaxActiveFCLKChangeLatencySupported; // dml_float_t *MaxActiveFCLKChangeLatencySupported + CalculateWatermarks_params->USRRetrainingSupport = &locals->USRRetrainingSupport; CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( &mode_lib->scratch, @@ -9460,8 +9460,10 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc /* Copy the calculated watermarks to mp.Watermark as the getter functions are * implemented by the DML team to copy the calculated values from the mp.Watermark interface. + * &mode_lib->mp.Watermark and &locals->Watermark are the same address, memcpy may lead to + * unexpected behavior. memmove should be used. 
*/ - memcpy(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks)); + memmove(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks)); for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { if (mode_lib->ms.cache_display_cfg.writeback.WritebackEnable[k] == true) { @@ -10214,6 +10216,7 @@ dml_get_var_func(fraction_of_urgent_bandwidth_imm_flip, dml_float_t, mode_lib->m dml_get_var_func(urgent_latency, dml_float_t, mode_lib->mp.UrgentLatency); dml_get_var_func(clk_dcf_deepsleep, dml_float_t, mode_lib->mp.DCFCLKDeepSleep); dml_get_var_func(wm_writeback_dram_clock_change, dml_float_t, mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark); +dml_get_var_func(wm_writeback_urgent, dml_float_t, mode_lib->mp.Watermark.WritebackUrgentWatermark); dml_get_var_func(stutter_efficiency, dml_float_t, mode_lib->mp.StutterEfficiency); dml_get_var_func(stutter_efficiency_no_vblank, dml_float_t, mode_lib->mp.StutterEfficiencyNotIncludingVBlank); dml_get_var_func(stutter_efficiency_z8, dml_float_t, mode_lib->mp.Z8StutterEfficiency); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h index 8452485684f5..3116b88e99dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h @@ -94,6 +94,7 @@ dml_get_var_decl(wm_usr_retraining, dml_float_t); dml_get_var_decl(urgent_latency, dml_float_t); dml_get_var_decl(wm_writeback_dram_clock_change, dml_float_t); +dml_get_var_decl(wm_writeback_urgent, dml_float_t); dml_get_var_decl(stutter_efficiency_no_vblank, dml_float_t); dml_get_var_decl(stutter_efficiency, dml_float_t); dml_get_var_decl(stutter_efficiency_z8, dml_float_t); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h index de63364be01d..14d389525296 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h @@ -41,6 +41,7 @@ #define DCN_DML__VM_PRESENT__1 1 #define DCN_DML__HOST_VM_PRESENT 1 #define DCN_DML__HOST_VM_PRESENT__1 1 +#define DCN_DML__DWB 1 #include "dml_depedencies.h" @@ -59,6 +60,7 @@ #define __DML_NUM_PLANES__ DCN_DML__NUM_PLANE #define __DML_NUM_CURSORS__ DCN_DML__NUM_CURSOR #define __DML_DPP_INVALID__ 0 +#define __DML_NUM_DMB__ DCN_DML__DWB #define __DML_PIPE_NO_PLANE__ 99 #define __DML_MAX_STATE_ARRAY_SIZE__ DCN_DML__NUM_PWR_STATE diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index d2046e770c50..ad2a6b4769fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -55,10 +55,11 @@ struct dc_pipe_mapping_scratch { struct dc_plane_pipe_pool pipe_pool; }; -static bool get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, - unsigned int stream_id, unsigned int *plane_id) +static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane, + unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id) { int i, j; + bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists; if (!plane_id) return false; @@ -66,7 +67,8 @@ static bool get_plane_id(const struct dc_state *state, const struct dc_plane_sta for (i = 0; i < state->stream_count; i++) { if 
(state->streams[i]->stream_id == stream_id) { for (j = 0; j < state->stream_status[i].plane_count; j++) { - if (state->stream_status[i].plane_states[j] == plane) { + if (state->stream_status[i].plane_states[j] == plane && + (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) { *plane_id = (i << 16) | j; return true; } @@ -86,7 +88,8 @@ static int find_disp_cfg_idx_by_plane_id(struct dml2_dml_to_dc_pipe_mapping *map return i; } - return -1; + ASSERT(false); + return __DML2_WRAPPER_MAX_STREAMS_PLANES__; } static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int stream_id) @@ -98,7 +101,8 @@ static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *ma return i; } - return -1; + ASSERT(false); + return __DML2_WRAPPER_MAX_STREAMS_PLANES__; } // The master pipe of a stream is defined as the top pipe in odm slice 0 @@ -123,8 +127,9 @@ static struct pipe_ctx *find_master_pipe_of_plane(struct dml2_context *ctx, unsigned int plane_id_assigned_to_pipe; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { - if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(state, state->res_ctx.pipe_ctx[i].plane_state, - state->res_ctx.pipe_ctx[i].stream->stream_id, &plane_id_assigned_to_pipe)) { + if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state, + state->res_ctx.pipe_ctx[i].stream->stream_id, + ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) { if (plane_id_assigned_to_pipe == plane_id) return &state->res_ctx.pipe_ctx[i]; } @@ -138,13 +143,33 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx, { int i; unsigned int num_found = 0; - unsigned int plane_id_assigned_to_pipe; + unsigned int plane_id_assigned_to_pipe = -1; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { - if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(state, state->res_ctx.pipe_ctx[i].plane_state, - state->res_ctx.pipe_ctx[i].stream->stream_id, &plane_id_assigned_to_pipe)) { - if (plane_id_assigned_to_pipe == plane_id) - pipes[num_found++] = i; + struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state || !pipe->stream) + continue; + + get_plane_id(ctx, state, pipe->plane_state, pipe->stream->stream_id, + ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[pipe->pipe_idx], + &plane_id_assigned_to_pipe); + if (plane_id_assigned_to_pipe == plane_id && !pipe->prev_odm_pipe + && (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) { + while (pipe) { + struct pipe_ctx *mpc_pipe = pipe; + + while (mpc_pipe) { + pipes[num_found++] = mpc_pipe->pipe_idx; + mpc_pipe = mpc_pipe->bottom_pipe; + if (!mpc_pipe) + break; + if (mpc_pipe->plane_state != pipe->plane_state) + mpc_pipe = NULL; + } + pipe = pipe->next_odm_pipe; + } + break; } } @@ -562,8 +587,14 @@ static unsigned int find_pipes_assigned_to_stream(struct dml2_context *ctx, stru unsigned int num_found = 0; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { - if (state->res_ctx.pipe_ctx[i].stream && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) { - pipes[num_found++] = i; + struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->stream->stream_id == stream_id && !pipe->top_pipe && !pipe->prev_odm_pipe) { + while (pipe) { + pipes[num_found++] = pipe->pipe_idx; + pipe = pipe->next_odm_pipe; + } + break; } } @@ -609,6 +640,7 @@ static struct 
pipe_ctx *assign_pipes_to_plane(struct dml2_context *ctx, struct d const struct dc_plane_state *plane, int odm_factor, int mpc_factor, + int plane_index, struct dc_plane_pipe_pool *pipe_pool, const struct dc_state *existing_state) { @@ -620,7 +652,7 @@ static struct pipe_ctx *assign_pipes_to_plane(struct dml2_context *ctx, struct d unsigned int next_pipe_to_assign; int odm_slice, mpc_slice; - if (!get_plane_id(state, plane, stream->stream_id, &plane_id)) { + if (!get_plane_id(ctx, state, plane, stream->stream_id, plane_index, &plane_id)) { ASSERT(false); return master_pipe; } @@ -667,12 +699,16 @@ static void free_pipe(struct pipe_ctx *pipe) } static void free_unused_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state, - const struct dc_plane_state *plane, const struct dc_plane_pipe_pool *pool, unsigned int stream_id) + const struct dc_plane_state *plane, const struct dc_plane_pipe_pool *pool, unsigned int stream_id, int plane_index) { int i; + bool is_plane_duplicate = ctx->v20.scratch.plane_duplicate_exists; + for (i = 0; i < ctx->config.dcn_pipe_count; i++) { if (state->res_ctx.pipe_ctx[i].plane_state == plane && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id && + (!is_plane_duplicate || (is_plane_duplicate && + ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx] == plane_index)) && !is_pipe_used(pool, state->res_ctx.pipe_ctx[i].pipe_idx)) { free_pipe(&state->res_ctx.pipe_ctx[i]); } @@ -717,19 +753,20 @@ static void map_pipes_for_stream(struct dml2_context *ctx, struct dc_state *stat } static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state, const struct dc_stream_state *stream, const struct dc_plane_state *plane, - struct dc_pipe_mapping_scratch *scratch, const struct dc_state *existing_state) + int plane_index, struct dc_pipe_mapping_scratch *scratch, const struct dc_state *existing_state) { int odm_slice_index; unsigned int plane_id; struct pipe_ctx *master_pipe = NULL; int i; - if (!get_plane_id(state, plane, stream->stream_id, &plane_id)) { + if (!get_plane_id(ctx, state, plane, stream->stream_id, plane_index, &plane_id)) { ASSERT(false); return; } - master_pipe = assign_pipes_to_plane(ctx, state, stream, plane, scratch->odm_info.odm_factor, scratch->mpc_info.mpc_factor, &scratch->pipe_pool, existing_state); + master_pipe = assign_pipes_to_plane(ctx, state, stream, plane, scratch->odm_info.odm_factor, + scratch->mpc_info.mpc_factor, plane_index, &scratch->pipe_pool, existing_state); sort_pipes_for_splitting(&scratch->pipe_pool); for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) { @@ -755,37 +792,50 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state } } - free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id); + free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id, plane_index); } -static unsigned int get_mpc_factor(struct dml2_context *ctx, - const struct dc_state *state, +static unsigned int get_target_mpc_factor(struct dml2_context *ctx, + struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, - const struct dc_stream_status *status, unsigned int stream_id, + const struct dc_stream_status *status, + const struct dc_stream_state *stream, int plane_idx) { unsigned int plane_id; unsigned int cfg_idx; + unsigned int mpc_factor; - get_plane_id(state, status->plane_states[plane_idx], 
stream_id, &plane_id); - cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); - if (ctx->architecture == dml2_architecture_20) - return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; - ASSERT(false); - return 1; + if (ctx->architecture == dml2_architecture_20) { + get_plane_id(ctx, state, status->plane_states[plane_idx], + stream->stream_id, plane_idx, &plane_id); + cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); + mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; + } else { + mpc_factor = 1; + ASSERT(false); + } + + /* For stereo timings, we need to pipe split */ + if (dml2_is_stereo_timing(stream)) + mpc_factor = 2; + + return mpc_factor; } -static unsigned int get_odm_factor( +static unsigned int get_target_odm_factor( const struct dml2_context *ctx, + struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_stream_state *stream) { - unsigned int cfg_idx = find_disp_cfg_idx_by_stream_id( - mapping, stream->stream_id); + unsigned int cfg_idx; - if (ctx->architecture == dml2_architecture_20) + if (ctx->architecture == dml2_architecture_20) { + cfg_idx = find_disp_cfg_idx_by_stream_id( + mapping, stream->stream_id); switch (disp_cfg->hw.ODMMode[cfg_idx]) { case dml_odm_mode_bypass: return 1; @@ -796,84 +846,122 @@ static unsigned int get_odm_factor( default: break; } + } ASSERT(false); return 1; } +static unsigned int get_source_odm_factor(const struct dml2_context *ctx, + struct dc_state *state, + const struct dc_stream_state *stream) +{ + struct pipe_ctx *otg_master = ctx->config.callbacks.get_otg_master_for_stream(&state->res_ctx, stream); + + return ctx->config.callbacks.get_odm_slice_count(otg_master); +} + +static unsigned int get_source_mpc_factor(const struct dml2_context *ctx, + struct dc_state *state, + const struct dc_plane_state *plane) +{ + struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0}; + int dpp_pipe_count = ctx->config.callbacks.get_dpp_pipes_for_plane(plane, + &state->res_ctx, dpp_pipes); + + ASSERT(dpp_pipe_count > 0); + return ctx->config.callbacks.get_mpc_slice_count(dpp_pipes[0]); +} + + static void populate_mpc_factors_for_stream( struct dml2_context *ctx, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, - const struct dc_state *state, + struct dc_state *state, unsigned int stream_idx, - unsigned int odm_factor, - unsigned int mpc_factors[MAX_PIPES]) + struct dml2_pipe_combine_factor odm_factor, + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES]) { const struct dc_stream_status *status = &state->stream_status[stream_idx]; - unsigned int stream_id = state->streams[stream_idx]->stream_id; int i; - for (i = 0; i < status->plane_count; i++) - if (odm_factor == 1) - mpc_factors[i] = get_mpc_factor( - ctx, state, disp_cfg, mapping, status, - stream_id, i); - else - mpc_factors[i] = 1; + for (i = 0; i < status->plane_count; i++) { + mpc_factors[i].source = get_source_mpc_factor(ctx, state, status->plane_states[i]); + mpc_factors[i].target = (odm_factor.target == 1) ? 
+ get_target_mpc_factor(ctx, state, disp_cfg, mapping, status, state->streams[stream_idx], i) : 1; + } } static void populate_odm_factors(const struct dml2_context *ctx, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, - const struct dc_state *state, - unsigned int odm_factors[MAX_PIPES]) + struct dc_state *state, + struct dml2_pipe_combine_factor odm_factors[MAX_PIPES]) { int i; - for (i = 0; i < state->stream_count; i++) - odm_factors[i] = get_odm_factor( - ctx, disp_cfg, mapping, state->streams[i]); + for (i = 0; i < state->stream_count; i++) { + odm_factors[i].source = get_source_odm_factor(ctx, state, state->streams[i]); + odm_factors[i].target = get_target_odm_factor( + ctx, state, disp_cfg, mapping, state->streams[i]); + } } -static bool map_dc_pipes_for_stream(struct dml2_context *ctx, +static bool unmap_dc_pipes_for_stream(struct dml2_context *ctx, struct dc_state *state, const struct dc_state *existing_state, const struct dc_stream_state *stream, const struct dc_stream_status *status, - unsigned int odm_factor, - unsigned int mpc_factors[MAX_PIPES]) + struct dml2_pipe_combine_factor odm_factor, + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES]) { int plane_idx; bool result = true; - if (odm_factor == 1) - /* - * ODM and MPC combines are by DML design mutually exclusive. - * ODM factor of 1 means MPC factors may be greater than 1. - * In this case, we want to set ODM factor to 1 first to free up - * pipe resources from previous ODM configuration before setting - * up MPC combine to acquire more pipe resources. - */ + for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++) + if (mpc_factors[plane_idx].target < mpc_factors[plane_idx].source) + result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + status->plane_states[plane_idx], + mpc_factors[plane_idx].target); + if (odm_factor.target < odm_factor.source) result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( state, existing_state, ctx->config.callbacks.dc->res_pool, stream, - odm_factor); + odm_factor.target); + return result; +} + +static bool map_dc_pipes_for_stream(struct dml2_context *ctx, + struct dc_state *state, + const struct dc_state *existing_state, + const struct dc_stream_state *stream, + const struct dc_stream_status *status, + struct dml2_pipe_combine_factor odm_factor, + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES]) +{ + int plane_idx; + bool result = true; + for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++) - result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( - state, - existing_state, - ctx->config.callbacks.dc->res_pool, - status->plane_states[plane_idx], - mpc_factors[plane_idx]); - if (odm_factor > 1) + if (mpc_factors[plane_idx].target > mpc_factors[plane_idx].source) + result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + status->plane_states[plane_idx], + mpc_factors[plane_idx].target); + if (odm_factor.target > odm_factor.source) result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( state, existing_state, ctx->config.callbacks.dc->res_pool, stream, - odm_factor); + odm_factor.target); return result; } @@ -883,20 +971,20 @@ static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state) { - unsigned int 
odm_factors[MAX_PIPES]; - unsigned int mpc_factors_for_stream[MAX_PIPES]; int i; bool result = true; - populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors); - for (i = 0; i < state->stream_count; i++) { + populate_odm_factors(ctx, disp_cfg, mapping, state, ctx->pipe_combine_scratch.odm_factors); + for (i = 0; i < state->stream_count; i++) populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state, - i, odm_factors[i], mpc_factors_for_stream); - result &= map_dc_pipes_for_stream(ctx, state, existing_state, - state->streams[i], - &state->stream_status[i], - odm_factors[i], mpc_factors_for_stream); - } + i, ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]); + for (i = 0; i < state->stream_count; i++) + result &= unmap_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i], + &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]); + for (i = 0; i < state->stream_count; i++) + result &= map_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i], + &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]); + return result; } @@ -911,26 +999,14 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s unsigned int stream_id; const unsigned int *ODMMode, *DPPPerSurface; - unsigned int odm_mode_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}, dpp_per_surface_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}; struct dc_pipe_mapping_scratch scratch; if (ctx->config.map_dc_pipes_with_callbacks) return map_dc_pipes_with_callbacks( ctx, state, disp_cfg, mapping, existing_state); - if (ctx->architecture == dml2_architecture_21) { - /* - * Extract ODM and DPP outputs from DML2.1 and map them in an array as required for pipe mapping in dml2_map_dc_pipes. - * As data cannot be directly extracted in const pointers, assign these arrays to const pointers before proceeding to - * maximize the reuse of existing code. Const pointers are required because dml2.0 dml_display_cfg_st is const. - * - */ - ODMMode = (const unsigned int *)odm_mode_array; - DPPPerSurface = (const unsigned int *)dpp_per_surface_array; - } else { - ODMMode = (unsigned int *)disp_cfg->hw.ODMMode; - DPPPerSurface = disp_cfg->hw.DPPPerSurface; - } + ODMMode = (unsigned int *)disp_cfg->hw.ODMMode; + DPPPerSurface = disp_cfg->hw.DPPPerSurface; for (stream_index = 0; stream_index < state->stream_count; stream_index++) { memset(&scratch, 0, sizeof(struct dc_pipe_mapping_scratch)); @@ -958,8 +1034,8 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s for (plane_index = 0; plane_index < state->stream_status[stream_index].plane_count; plane_index++) { // Planes are ordered top to bottom. 
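/*
 * Editor's note: a minimal standalone sketch (hypothetical names, not the
 * driver's API) of the release-then-acquire ordering that the new
 * unmap_dc_pipes_for_stream()/map_dc_pipes_for_stream() split above
 * implements. Every stream first releases pipes wherever the target
 * combine factor shrank; only afterwards does any stream acquire more,
 * so transient pipe demand never exceeds the pool.
 */
#include <stdbool.h>

struct combine_factor {
	unsigned int source;	/* slice count currently in hardware */
	unsigned int target;	/* slice count DML wants */
};

/* Stand-in for update_pipes_for_*_with_slice_count(). */
static bool set_slice_count(unsigned int idx, unsigned int count)
{
	(void)idx;
	(void)count;
	return true;
}

static bool remap_streams(const struct combine_factor *f, unsigned int n)
{
	bool ok = true;
	unsigned int i;

	for (i = 0; i < n; i++)		/* pass 1: shrink only */
		if (f[i].target < f[i].source)
			ok &= set_slice_count(i, f[i].target);
	for (i = 0; i < n; i++)		/* pass 2: grow only */
		if (f[i].target > f[i].source)
			ok &= set_slice_count(i, f[i].target);
	return ok;
}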
- if (get_plane_id(state, state->stream_status[stream_index].plane_states[plane_index], - stream_id, &plane_id)) { + if (get_plane_id(ctx, state, state->stream_status[stream_index].plane_states[plane_index], + stream_id, plane_index, &plane_id)) { plane_disp_cfg_index = find_disp_cfg_idx_by_plane_id(mapping, plane_id); // Setup mpc_info for this plane @@ -983,7 +1059,8 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s // Clear the pool assignment scratch (which is per plane) memset(&scratch.pipe_pool, 0, sizeof(struct dc_plane_pipe_pool)); - map_pipes_for_plane(ctx, state, state->streams[stream_index], state->stream_status[stream_index].plane_states[plane_index], &scratch, existing_state); + map_pipes_for_plane(ctx, state, state->streams[stream_index], + state->stream_status[stream_index].plane_states[plane_index], plane_index, &scratch, existing_state); } else { // Plane ID cannot be generated, therefore no DML mapping can be performed. ASSERT(false); @@ -1003,6 +1080,12 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s ASSERT(false); } } + + if (ctx->config.callbacks.build_test_pattern_params && + pipe->stream && + pipe->prev_odm_pipe == NULL && + pipe->top_pipe == NULL) + ctx->config.callbacks.build_test_pattern_params(&state->res_ctx, pipe); } return true; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h index 2f91244a7b01..1538b708d8be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h @@ -30,6 +30,8 @@ #include "dml2_dc_types.h" struct dml2_context; +struct dml2_dml_to_dc_pipe_mapping; +struct dml_display_cfg_st; /* * dml2_map_dc_pipes - Creates a pipe linkage in dc_state based on current display config. 
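For readers following the pipe-mapping changes above: get_plane_id() identifies a plane by packing the owning stream's index into the upper 16 bits and the plane's slot within that stream into the lower 16 bits (the `(i << 16) | j` expression), which is why threading plane_index through the call chain is enough to distinguish two slots that point at the same dc_plane_state. A self-contained sketch of that encoding (helper names are the editor's, not the driver's):

#include <assert.h>

static unsigned int make_plane_id(unsigned int stream_idx, unsigned int plane_idx)
{
	return (stream_idx << 16) | plane_idx;	/* mirrors (i << 16) | j above */
}

static unsigned int stream_of(unsigned int plane_id) { return plane_id >> 16; }
static unsigned int slot_of(unsigned int plane_id) { return plane_id & 0xffff; }

int main(void)
{
	/* Duplicate plane_state pointers in slots 0 and 1 of stream 2
	 * still yield distinct IDs, since the slot index is encoded. */
	assert(make_plane_id(2, 0) != make_plane_id(2, 1));
	assert(stream_of(make_plane_id(2, 1)) == 2);
	assert(slot_of(make_plane_id(2, 1)) == 1);
	return 0;
}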
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h index e85866db80ff..7ca7f2a743c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h @@ -38,5 +38,6 @@ #include "core_types.h" #include "dsc.h" #include "clk_mgr.h" +#include "dc_state_priv.h" #endif //__DML2_DC_TYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h index ed5b767d46e0..9dab4e43c511 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h @@ -75,6 +75,8 @@ struct dml2_dml_to_dc_pipe_mapping { bool dml_pipe_idx_to_stream_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; unsigned int dml_pipe_idx_to_plane_id[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; bool dml_pipe_idx_to_plane_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; + unsigned int dml_pipe_idx_to_plane_index[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; + bool dml_pipe_idx_to_plane_index_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; }; struct dml2_wrapper_scratch { @@ -96,6 +98,7 @@ struct dml2_wrapper_scratch { struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping; bool enable_flexible_pipe_mapping; + bool plane_duplicate_exists; }; struct dml2_helper_det_policy_scratch { @@ -104,13 +107,23 @@ struct dml2_helper_det_policy_scratch { enum dml2_architecture { dml2_architecture_20, - dml2_architecture_21 +}; + +struct dml2_pipe_combine_factor { + unsigned int source; + unsigned int target; +}; + +struct dml2_pipe_combine_scratch { + struct dml2_pipe_combine_factor odm_factors[MAX_PIPES]; + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES][MAX_PIPES]; }; struct dml2_context { enum dml2_architecture architecture; struct dml2_configuration_options config; struct dml2_helper_det_policy_scratch det_helper_scratch; + struct dml2_pipe_combine_scratch pipe_combine_scratch; union { struct { struct display_mode_lib_st dml_core_ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 32f8a43af3d6..282d70e2b18a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -51,7 +51,7 @@ unsigned int dml2_helper_calculate_num_ways_for_subvp(struct dml2_context *ctx, // Find the phantom pipes if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; mblk_width = ctx->config.mall_cfg.mblk_width_pixels; mblk_height = bytes_per_pixel == 4 ? mblk_width = ctx->config.mall_cfg.mblk_height_4bpe_pixels : ctx->config.mall_cfg.mblk_height_8bpe_pixels; @@ -253,7 +253,7 @@ static bool assign_subvp_pipe(struct dml2_context *ctx, struct dc_state *context * to combine this with SubVP can cause issues with the scheduling). 
*/ if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_NONE && refresh_rate < 120 && vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { while (pipe) { num_pipes++; @@ -317,7 +317,7 @@ static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *st // Find the minimum pipe split count for non SubVP pipes if (pipe->stream && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_NONE) { split_cnt = 0; while (pipe) { split_cnt++; @@ -372,8 +372,8 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. */ if (pipe->stream && pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - phantom = pipe->stream->mall_stream_config.paired_stream; + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + phantom = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream); microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) + phantom->timing.v_addressable; @@ -435,6 +435,7 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context struct pipe_ctx *pipe = NULL; struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; + struct dc_stream_state *phantom_stream; int16_t prefetch_us = 0; int16_t mall_region_us = 0; int16_t drr_frame_us = 0; // nominal frame time @@ -453,12 +454,13 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context continue; // Find the SubVP pipe - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) break; } + phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream); main_timing = &pipe->stream->timing; - phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing; + phantom_timing = &phantom_stream->timing; prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us; @@ -519,6 +521,8 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state * struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; struct dc_crtc_timing *vblank_timing = NULL; + struct dc_stream_state *phantom_stream; + enum mall_stream_type pipe_mall_type; /* For SubVP + VBLANK/DRR cases, we assume there can only be * a single VBLANK/DRR display. 
If DML outputs SubVP + VBLANK @@ -528,19 +532,20 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state * */ for (i = 0; i < ctx->config.dcn_pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe); // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (!found && pipe_mall_type == SUBVP_NONE) { // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). vblank_index = i; found = true; } - if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) subvp_pipe = pipe; } // Use ignore_msa_timing_param flag to identify as DRR @@ -548,8 +553,9 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state * // SUBVP + DRR case schedulable = dml2_svp_drr_schedulable(ctx, context, &context->res_ctx.pipe_ctx[vblank_index].stream->timing); } else if (found) { + phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, subvp_pipe->stream); main_timing = &subvp_pipe->stream->timing; - phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + phantom_timing = &phantom_stream->timing; vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe // Also include the prefetch end to mallstart delay time @@ -602,19 +608,20 @@ bool dml2_svp_validate_static_schedulability(struct dml2_context *ctx, struct dc for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) + pipe_mall_type == SUBVP_MAIN) subvp_count++; // Count how many planes that aren't SubVP/phantom are capable of VACTIVE // switching (SubVP + VACTIVE unsupported). In situations where we force // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. 
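/*
 * Editor's note: a standalone worked example (made-up numbers) of the
 * phantom prefetch-time formula used by dml2_svp_drr_schedulable() above:
 * (v_total - v_front_porch) lines times h_total gives pixels, dividing by
 * the pixel clock (pix_clk_100hz is in units of 100 Hz) gives seconds,
 * and the 1000000 factor converts to microseconds.
 */
#include <stdio.h>

int main(void)
{
	unsigned int v_total = 2200, v_front_porch = 8, h_total = 4400;
	unsigned int pix_clk_100hz = 5940000;	/* 594 MHz, a 4k60 timing */
	double prefetch_us;

	prefetch_us = (double)(v_total - v_front_porch) * h_total /
		      ((double)pix_clk_100hz * 100.0) * 1000000.0;
	printf("phantom prefetch time: %.1f us\n", prefetch_us);	/* ~16237.0 */
	return 0;
}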
if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + pipe_mall_type == SUBVP_NONE) { vactive_count++; } pipe_idx++; @@ -708,14 +715,10 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int dc_pipe_idx, unsigned int svp_height, unsigned int vstartup) { struct pipe_ctx *ref_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx]; - struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_stream_for_sink(ref_pipe->stream->sink); - - phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; - phantom_stream->dpms_off = true; - phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; - phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; - ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; - ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; + struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_phantom_stream( + ctx->config.svp_pstate.callbacks.dc, + state, + ref_pipe->stream); /* stream has limited viewport and small timing */ memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); @@ -723,7 +726,10 @@ static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, s memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); set_phantom_stream_timing(ctx, state, ref_pipe, phantom_stream, dc_pipe_idx, svp_height, vstartup); - ctx->config.svp_pstate.callbacks.add_stream_to_ctx(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); + ctx->config.svp_pstate.callbacks.add_phantom_stream(ctx->config.svp_pstate.callbacks.dc, + state, + phantom_stream, + ref_pipe->stream); return phantom_stream; } @@ -740,7 +746,10 @@ static void enable_phantom_plane(struct dml2_context *ctx, if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) { phantom_plane = prev_phantom_plane; } else { - phantom_plane = ctx->config.svp_pstate.callbacks.create_plane(ctx->config.svp_pstate.callbacks.dc); + phantom_plane = ctx->config.svp_pstate.callbacks.create_phantom_plane( + ctx->config.svp_pstate.callbacks.dc, + state, + curr_pipe->plane_state); } memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); @@ -763,9 +772,7 @@ static void enable_phantom_plane(struct dml2_context *ctx, phantom_plane->clip_rect.y = 0; phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable; - phantom_plane->is_phantom = true; - - ctx->config.svp_pstate.callbacks.add_plane_to_context(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state); + ctx->config.svp_pstate.callbacks.add_phantom_plane(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state); curr_pipe = curr_pipe->bottom_pipe; prev_phantom_plane = phantom_plane; @@ -790,7 +797,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_ // We determine which phantom pipes were added by comparing with // the phantom stream. 
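/*
 * Editor's note: a reduced sketch (stub types, not the driver's structs)
 * of how enable_phantom_stream() above seeds the phantom stream: the main
 * stream's timing and src/dst rects are copied wholesale, and
 * set_phantom_stream_timing() then rewrites only the vertical timing so
 * the phantom covers just the subviewport height.
 */
#include <string.h>

struct timing_stub {
	unsigned int h_total;
	unsigned int v_total;
	unsigned int v_addressable;
};

struct stream_stub {
	struct timing_stub timing;
};

static void seed_phantom(struct stream_stub *phantom,
			 const struct stream_stub *main_stream,
			 unsigned int svp_height)
{
	/* Wholesale copy first, as the memcpy() calls above do ... */
	memcpy(&phantom->timing, &main_stream->timing, sizeof(phantom->timing));
	/* ... then shrink vertical active to the subviewport height. */
	phantom->timing.v_addressable = svp_height;
}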
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { pipe->stream->use_dynamic_meta = false; pipe->plane_state->flip_immediate = false; if (!ctx->config.svp_pstate.callbacks.build_scaling_params(pipe)) { @@ -800,7 +807,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_ } } -static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context) +static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context) { int i, old_plane_count; struct dc_stream_status *stream_status = NULL; @@ -821,9 +828,11 @@ static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_str for (i = 0; i < old_plane_count; i++) del_planes[i] = stream_status->plane_states[i]; - for (i = 0; i < old_plane_count; i++) - if (!ctx->config.svp_pstate.callbacks.remove_plane_from_context(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context)) + for (i = 0; i < old_plane_count; i++) { + if (!ctx->config.svp_pstate.callbacks.remove_phantom_plane(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context)) return false; + ctx->config.svp_pstate.callbacks.release_phantom_plane(ctx->config.svp_pstate.callbacks.dc, context, del_planes[i]); + } return true; } @@ -832,35 +841,21 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state { int i; bool removed_pipe = false; - struct dc_plane_state *phantom_plane = NULL; struct dc_stream_state *phantom_stream = NULL; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; // build scaling params for phantom pipes - if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - phantom_plane = pipe->plane_state; + if (pipe->plane_state && pipe->stream && ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { phantom_stream = pipe->stream; - remove_all_planes_for_stream(ctx, pipe->stream, state); - ctx->config.svp_pstate.callbacks.remove_stream_from_ctx(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream); - - /* Ref count is incremented on allocation and also when added to the context. - * Therefore we must call release for the the phantom plane and stream once - * they are removed from the ctx to finally decrement the refcount to 0 to free. 
- */ - ctx->config.svp_pstate.callbacks.plane_state_release(phantom_plane); - ctx->config.svp_pstate.callbacks.stream_release(phantom_stream); + remove_all_phantom_planes_for_stream(ctx, phantom_stream, state); + ctx->config.svp_pstate.callbacks.remove_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); + ctx->config.svp_pstate.callbacks.release_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); removed_pipe = true; } - // Clear all phantom stream info - if (pipe->stream) { - pipe->stream->mall_stream_config.type = SUBVP_NONE; - pipe->stream->mall_stream_config.paired_stream = NULL; - } - if (pipe->plane_state) { pipe->plane_state->is_phantom = false; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 89836f175a13..a41812598ce8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -29,6 +29,7 @@ #include "dml2_translation_helper.h" #define NUM_DCFCLK_STAS 5 +#define NUM_DCFCLK_STAS_NEW 8 void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out) { @@ -228,16 +229,13 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s break; case dml_project_dcn35: + case dml_project_dcn351: out->num_chans = 4; out->round_trip_ping_latency_dcfclk_cycles = 106; out->smn_latency_us = 2; + out->dispclk_dppclk_vco_speed_mhz = 3600; break; - case dml_project_dcn351: - out->num_chans = 16; - out->round_trip_ping_latency_dcfclk_cycles = 1100; - out->smn_latency_us = 2; - break; } /* ---Overrides if available--- */ if (dml2->config.bbox_overrides.dram_num_chan) @@ -252,12 +250,21 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, { struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch; struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params; - unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS]; + unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0}; + unsigned int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW] = {0}; + unsigned int dml_project = dml2->v20.dml_core_ctx.project; + unsigned int i = 0; unsigned int transactions_per_mem_clock = 16; // project specific, depends on used Memory type - p->dcfclk_stas_mhz = dcfclk_stas_mhz; - p->num_dcfclk_stas = NUM_DCFCLK_STAS; + if (dml_project == dml_project_dcn351) { + p->dcfclk_stas_mhz = dcfclk_stas_mhz_new; + p->num_dcfclk_stas = NUM_DCFCLK_STAS_NEW; + } else { + p->dcfclk_stas_mhz = dcfclk_stas_mhz; + p->num_dcfclk_stas = NUM_DCFCLK_STAS; + } + p->in_bbox = in_bbox; p->out_states = out; p->in_states = &dml2->v20.scratch.create_scratch.in_states; @@ -340,25 +347,42 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, break; } - /* Override from passed values, mainly for debugging purposes, if available */ - if (dml2->config.bbox_overrides.sr_exit_latency_us) { - p->in_states->state_array[0].sr_exit_time_us = dml2->config.bbox_overrides.sr_exit_latency_us; - } + /* Override from passed values, if available */ + for (i = 0; i < p->in_states->num_states; i++) { + if (dml2->config.bbox_overrides.sr_exit_latency_us) { + p->in_states->state_array[i].sr_exit_time_us = + dml2->config.bbox_overrides.sr_exit_latency_us; + } - if (dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us) { - 
p->in_states->state_array[0].sr_enter_plus_exit_time_us = dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us; - } + if (dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us) { + p->in_states->state_array[i].sr_enter_plus_exit_time_us = + dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us; + } - if (dml2->config.bbox_overrides.urgent_latency_us) { - p->in_states->state_array[0].urgent_latency_pixel_data_only_us = dml2->config.bbox_overrides.urgent_latency_us; - } + if (dml2->config.bbox_overrides.sr_exit_z8_time_us) { + p->in_states->state_array[i].sr_exit_z8_time_us = + dml2->config.bbox_overrides.sr_exit_z8_time_us; + } - if (dml2->config.bbox_overrides.dram_clock_change_latency_us) { - p->in_states->state_array[0].dram_clock_change_latency_us = dml2->config.bbox_overrides.dram_clock_change_latency_us; - } + if (dml2->config.bbox_overrides.sr_enter_plus_exit_z8_time_us) { + p->in_states->state_array[i].sr_enter_plus_exit_z8_time_us = + dml2->config.bbox_overrides.sr_enter_plus_exit_z8_time_us; + } + + if (dml2->config.bbox_overrides.urgent_latency_us) { + p->in_states->state_array[i].urgent_latency_pixel_data_only_us = + dml2->config.bbox_overrides.urgent_latency_us; + } - if (dml2->config.bbox_overrides.fclk_change_latency_us) { - p->in_states->state_array[0].fclk_change_latency_us = dml2->config.bbox_overrides.fclk_change_latency_us; + if (dml2->config.bbox_overrides.dram_clock_change_latency_us) { + p->in_states->state_array[i].dram_clock_change_latency_us = + dml2->config.bbox_overrides.dram_clock_change_latency_us; + } + + if (dml2->config.bbox_overrides.fclk_change_latency_us) { + p->in_states->state_array[i].fclk_change_latency_us = + dml2->config.bbox_overrides.fclk_change_latency_us; + } } /* DCFCLK stas values are project specific */ @@ -380,7 +404,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, /* Copy clocks tables entries, if available */ if (dml2->config.bbox_overrides.clks_table.num_states) { p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states; - for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) { p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz; } @@ -405,8 +428,9 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, } for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels; i++) { - p->in_states->state_array[i].dtbclk_mhz = - dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz; + if (dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz > 0) + p->in_states->state_array[i].dtbclk_mhz = + dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz; } for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels; i++) { @@ -418,6 +442,13 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, } dml2_policy_build_synthetic_soc_states(s, p); + if (dml2->v20.dml_core_ctx.project == dml_project_dcn35) { + // Override last out_state with data from last in_state + // This will ensure that out_state contains max fclk + memcpy(&p->out_states->state_array[p->out_states->num_states - 1], + &p->in_states->state_array[p->in_states->num_states - 1], + sizeof(struct soc_state_bounding_box_st)); + } } void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out) @@ -497,8 +528,8 @@ void dml2_translate_socbb_params(const struct dc *in, struct 
soc_bounding_box_st out->do_urgent_latency_adjustment = in_soc_params->do_urgent_latency_adjustment; out->dram_channel_width_bytes = (dml_uint_t)in_soc_params->dram_channel_width_bytes; out->fabric_datapath_to_dcn_data_return_bytes = (dml_uint_t)in_soc_params->fabric_datapath_to_dcn_data_return_bytes; - out->gpuvm_min_page_size_kbytes = in_soc_params->gpuvm_min_page_size_bytes * 1024; - out->hostvm_min_page_size_kbytes = in_soc_params->hostvm_min_page_size_bytes * 1024; + out->gpuvm_min_page_size_kbytes = in_soc_params->gpuvm_min_page_size_bytes / 1024; + out->hostvm_min_page_size_kbytes = in_soc_params->hostvm_min_page_size_bytes / 1024; out->mall_allocated_for_dcn_mbytes = (dml_uint_t)in_soc_params->mall_allocated_for_dcn_mbytes; out->max_avg_dram_bw_use_normal_percent = in_soc_params->max_avg_dram_bw_use_normal_percent; out->max_avg_fabric_bw_use_normal_percent = in_soc_params->max_avg_fabric_bw_use_normal_percent; @@ -605,8 +636,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st * if (is_dp2p0_output_encoder(pipe)) out->OutputEncoder[location] = dml_dp2p0; break; - out->OutputEncoder[location] = dml_edp; case SIGNAL_TYPE_EDP: + out->OutputEncoder[location] = dml_edp; break; case SIGNAL_TYPE_HDMI_TYPE_A: case SIGNAL_TYPE_DVI_SINGLE_LINK: @@ -772,46 +803,51 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p } } -/*TODO no support for mpc combine, need rework - should calculate scaling params based on plane+stream*/ -static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, const struct dc_state *context) +static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context) { int i; - struct scaler_data data = { 0 }; + struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe; + + memset(temp_pipe, 0, sizeof(struct pipe_ctx)); for (i = 0; i < MAX_PIPES; i++) { const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->plane_state == in && !pipe->prev_odm_pipe) { - const struct pipe_ctx *next_pipe = pipe->next_odm_pipe; - - data = context->res_ctx.pipe_ctx[i].plane_res.scl_data; - while (next_pipe) { - data.h_active += next_pipe->plane_res.scl_data.h_active; - data.recout.width += next_pipe->plane_res.scl_data.recout.width; - if (in->rotation == ROTATION_ANGLE_0 || in->rotation == ROTATION_ANGLE_180) { - data.viewport.width += next_pipe->plane_res.scl_data.viewport.width; - } else { - data.viewport.height += next_pipe->plane_res.scl_data.viewport.height; - } - next_pipe = next_pipe->next_odm_pipe; - } + temp_pipe->stream = pipe->stream; + temp_pipe->plane_state = pipe->plane_state; + temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps; + + resource_build_scaling_params(temp_pipe); break; } } ASSERT(i < MAX_PIPES); - return data; + return temp_pipe->plane_res.scl_data; } static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in) { + dml_uint_t width, height; + + if (in->timing.h_addressable > 3840) + width = 3840; + else + width = in->timing.h_addressable; // 4K max + + if (in->timing.v_addressable > 2160) + height = 2160; + else + height = in->timing.v_addressable; // 4K max + out->CursorBPP[location] = dml_cur_32bit; out->CursorWidth[location] = 256; out->GPUVMMinPageSizeKBytes[location] = 256; - out->ViewportWidth[location] = in->timing.h_addressable; - out->ViewportHeight[location] = in->timing.v_addressable; + out->ViewportWidth[location] = width; + 
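/* Editorial note, not part of the patch: the open-coded clamps above cap the
 * dummy plane at a 4K viewport (3840x2160) so DML never models an oversized
 * dummy surface. A minimal equivalent using the kernel's min() helper, shown
 * only as an illustrative rewrite:
 *
 *	width  = min(in->timing.h_addressable, 3840u);
 *	height = min(in->timing.v_addressable, 2160u);
 */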
out->ViewportHeight[location] = height; out->ViewportStationary[location] = false; out->ViewportWidthChroma[location] = 0; out->ViewportHeightChroma[location] = 0; @@ -830,7 +866,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned out->HTapsChroma[location] = 0; out->VTapsChroma[location] = 0; out->SourceScan[location] = dml_rotation_0; - out->ScalerRecoutWidth[location] = in->timing.h_addressable; + out->ScalerRecoutWidth[location] = width; out->LBBitPerPixel[location] = 57; @@ -845,7 +881,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned out->ScalerEnabled[location] = false; } -static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, const struct dc_state *context) +static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context) { const struct scaler_data scaler_data = get_scaler_data_for_plane(in, context); @@ -930,10 +966,11 @@ static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml return location; } -static bool get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, - unsigned int stream_id, unsigned int *plane_id) +static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *context, const struct dc_plane_state *plane, + unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id) { int i, j; + bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists; if (!plane_id) return false; @@ -941,7 +978,8 @@ static bool get_plane_id(const struct dc_state *context, const struct dc_plane_s for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->stream_id == stream_id) { for (j = 0; j < context->stream_status[i].plane_count; j++) { - if (context->stream_status[i].plane_states[j] == plane) { + if (context->stream_status[i].plane_states[j] == plane && + (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) { *plane_id = (i << 16) | j; return true; } @@ -953,13 +991,13 @@ static bool get_plane_id(const struct dc_state *context, const struct dc_plane_s } static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2, const struct dc_plane_state *plane, - const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id) + const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id, int plane_index) { unsigned int plane_id; int i = 0; int location = -1; - if (!get_plane_id(context, plane, stream_id, &plane_id)) { + if (!get_plane_id(context->bw_ctx.dml2, context, plane, stream_id, plane_index, &plane_id)) { ASSERT(false); return -1; } @@ -990,10 +1028,85 @@ static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struc } } -void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg) +static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state) +{ + unsigned int i; + unsigned int pipe_index = 0; + unsigned int plane_index = 0; + struct dml2_dml_to_dc_pipe_mapping *dml_to_dc_pipe_mapping = &dml2->v20.scratch.dml_to_dc_pipe_mapping; + + for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) { + dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index_valid[i] = false; + 
dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index[i] = 0; + } + + for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) { + struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + + if (!pipe || !pipe->stream || !pipe->plane_state) + continue; + + while (pipe) { + pipe_index = pipe->pipe_idx; + + if (pipe->stream && dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index_valid[pipe_index] == false) { + dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index[pipe_index] = plane_index; + plane_index++; + dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index_valid[pipe_index] = true; + } + + pipe = pipe->bottom_pipe; + } + + plane_index = 0; + } +} +static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out, + unsigned int location, const struct dc_stream_state *in) +{ + if (in->num_wb_info > 0) { + for (int i = 0; i < __DML_NUM_DMB__; i++) { + const struct dc_writeback_info *wb_info = &in->writeback_info[i]; + /* current DML limitation: only 1 DWB per stream is supported */ + if (wb_info->wb_enabled) { + out->WritebackEnable[location] = wb_info->wb_enabled; + out->ActiveWritebacksPerSurface[location] = wb_info->dwb_params.cnv_params.src_width; + out->WritebackDestinationWidth[location] = wb_info->dwb_params.dest_width; + out->WritebackDestinationHeight[location] = wb_info->dwb_params.dest_height; + + out->WritebackSourceWidth[location] = wb_info->dwb_params.cnv_params.crop_en ? + wb_info->dwb_params.cnv_params.crop_width : + wb_info->dwb_params.cnv_params.src_width; + + out->WritebackSourceHeight[location] = wb_info->dwb_params.cnv_params.crop_en ? + wb_info->dwb_params.cnv_params.crop_height : + wb_info->dwb_params.cnv_params.src_height; + /* current design does not have chroma scaling; needs follow-up */ + out->WritebackHTaps[location] = wb_info->dwb_params.scaler_taps.h_taps > 0 ? + wb_info->dwb_params.scaler_taps.h_taps : 1; + out->WritebackVTaps[location] = wb_info->dwb_params.scaler_taps.v_taps > 0 ? + wb_info->dwb_params.scaler_taps.v_taps : 1; + + out->WritebackHRatio[location] = wb_info->dwb_params.cnv_params.crop_en ? + (double)wb_info->dwb_params.cnv_params.crop_width / + (double)wb_info->dwb_params.dest_width : + (double)wb_info->dwb_params.cnv_params.src_width / + (double)wb_info->dwb_params.dest_width; + out->WritebackVRatio[location] = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_height / + (double)wb_info->dwb_params.dest_height : + (double)wb_info->dwb_params.cnv_params.src_height / + (double)wb_info->dwb_params.dest_height; + } + } + } +} +void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg) { - int i = 0, j = 0; + int i = 0, j = 0, k = 0; int disp_cfg_stream_location, disp_cfg_plane_location; + enum mall_stream_type stream_mall_type; + struct pipe_ctx *current_pipe_context; for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) { dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id_valid[i] = false; @@ -1003,12 +1116,27 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d } //Generally these are set by referencing our latest BB/IP params in dcn32_resource.c file - dml_dispcfg->plane.GPUVMEnable = true; - dml_dispcfg->plane.GPUVMMaxPageTableLevels = 4; - dml_dispcfg->plane.HostVMEnable = false; + dml_dispcfg->plane.GPUVMEnable = dml2->v20.dml_core_ctx.ip.gpuvm_enable; + dml_dispcfg->plane.GPUVMMaxPageTableLevels = dml2->v20.dml_core_ctx.ip.gpuvm_max_page_table_levels; + dml_dispcfg->plane.HostVMEnable = dml2->v20.dml_core_ctx.ip.hostvm_enable; + dml_dispcfg->plane.HostVMMaxPageTableLevels = dml2->v20.dml_core_ctx.ip.hostvm_max_page_table_levels; + if (dml2->v20.dml_core_ctx.ip.hostvm_enable) + dml2->v20.dml_core_ctx.policy.AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter; + + dml2_populate_pipe_to_plane_index_mapping(dml2, context); for (i = 0; i < context->stream_count; i++) { + current_pipe_context = NULL; + for (k = 0; k < MAX_PIPES; k++) { + /* find one pipe allocated to this stream for the purpose of getting + info about the link later */ + if (context->streams[i] == context->res_ctx.pipe_ctx[k].stream) { + current_pipe_context = &context->res_ctx.pipe_ctx[k]; + break; + } + } disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg); + stream_mall_type = dc_state_get_stream_subvp_type(context, context->streams[i]); if (disp_cfg_stream_location < 0) disp_cfg_stream_location = dml_dispcfg->num_timings++; @@ -1016,7 +1144,11 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]); - populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], &context->res_ctx.pipe_ctx[i]); + populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context); + /*Call site for populate_dml_writeback_cfg_from_stream_state*/ + populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback, + disp_cfg_stream_location, context->streams[i]); + switch (context->streams[i]->debug.force_odm_combine_segments) { case 2: dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1; @@ -1043,7 +1175,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d } else { for (j = 0; j < context->stream_status[i].plane_count; j++) { disp_cfg_plane_location = map_plane_to_dml_display_cfg(dml2, - context->stream_status[i].plane_states[j], context, dml_dispcfg, context->streams[i]->stream_id); + 
context->stream_status[i].plane_states[j], context, dml_dispcfg, context->streams[i]->stream_id, j); if (disp_cfg_plane_location < 0) disp_cfg_plane_location = dml_dispcfg->num_surfaces++; @@ -1053,10 +1185,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]); populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context); - if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) { + if (stream_mall_type == SUBVP_MAIN) { dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport; dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_optimize; - } else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) { + } else if (stream_mall_type == SUBVP_PHANTOM) { dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe; dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_disable; dml2->v20.dml_core_ctx.policy.ImmediateFlipRequirement[disp_cfg_plane_location] = dml_immediate_flip_not_required; @@ -1067,13 +1199,13 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d dml_dispcfg->plane.BlendingAndTiming[disp_cfg_plane_location] = disp_cfg_stream_location; - if (get_plane_id(context, context->stream_status[i].plane_states[j], context->streams[i]->stream_id, + if (get_plane_id(dml2, context, context->stream_status[i].plane_states[j], context->streams[i]->stream_id, j, &dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true; if (j >= 1) { populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]); - populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], &context->res_ctx.pipe_ctx[i]); + populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context); switch (context->streams[i]->debug.force_odm_combine_segments) { case 2: dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1; @@ -1085,9 +1217,9 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d break; } - if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) + if (stream_mall_type == SUBVP_MAIN) dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport; - else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) + else if (stream_mall_type == SUBVP_PHANTOM) dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe; dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[disp_cfg_plane_location] = context->streams[i]->stream_id; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h index dac6d27b14cd..d764773938f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h @@ 
-34,7 +34,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, void dml2_translate_ip_params(const struct dc *in_dc, struct ip_params_st *out); void dml2_translate_socbb_params(const struct dc *in_dc, struct soc_bounding_box_st *out); void dml2_translate_soc_states(const struct dc *in_dc, struct soc_states_st *out, int num_states); -void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg); +void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg); void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs, struct _vcs_dpi_dml_display_dlg_regs_st *disp_dlg_regs, struct _vcs_dpi_dml_display_ttu_regs_st *disp_ttu_regs, struct pipe_ctx *out); bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index 69fd96f4f3b0..0f8b3336e26d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -155,8 +155,20 @@ unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, e bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx) { + if (pipe_ctx == NULL || pipe_ctx->stream == NULL) + return false; + /* If this assert is hit then we have a link encoder dynamic management issue */ ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); + + /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */ + if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) { + return (pipe_ctx->stream_res.hpo_dp_stream_enc && + pipe_ctx->link_res.hpo_dp_link_enc && + dc_is_dp_signal(pipe_ctx->stream->signal) && + (pipe_ctx->stream->link->remote_sinks[0]->sink_id == pipe_ctx->stream->sink->sink_id)); + } + return (pipe_ctx->stream_res.hpo_dp_stream_enc && pipe_ctx->link_res.hpo_dp_link_enc && dc_is_dp_signal(pipe_ctx->stream->signal)); @@ -209,10 +221,11 @@ static int find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int return -1; } -static bool get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, - unsigned int stream_id, unsigned int *plane_id) +static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane, + unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id) { - int i, j; + unsigned int i, j; + bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists; if (!plane_id) return false; @@ -220,7 +233,8 @@ static bool get_plane_id(const struct dc_state *state, const struct dc_plane_sta for (i = 0; i < state->stream_count; i++) { if (state->streams[i]->stream_id == stream_id) { for (j = 0; j < state->stream_status[i].plane_count; j++) { - if (state->stream_status[i].plane_states[j] == plane) { + if (state->stream_status[i].plane_states[j] == plane && + (!is_plane_duplicate || (j == plane_index))) { *plane_id = (i << 16) | j; return true; } @@ -273,6 +287,7 @@ static void populate_pipe_ctx_dlg_params_from_dml(struct pipe_ctx *pipe_ctx, str void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt) { unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id; + enum mall_stream_type pipe_mall_type; 
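/* Editorial sketch, not part of the patch: get_plane_id() above packs its
 * result as (stream index << 16) | plane slot. Because the same
 * dc_plane_state pointer can now appear in more than one slot of a stream
 * (scratch.plane_duplicate_exists), the pointer comparison alone is
 * ambiguous, so the new plane_index argument selects the intended slot.
 * The packing, written out as a hypothetical helper:
 *
 *	static inline unsigned int pack_plane_id(unsigned int stream_idx,
 *						 unsigned int plane_slot)
 *	{
 *		return (stream_idx << 16) | plane_slot;
 *	}
 */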
bool unbounded_req_enabled = false; struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch; @@ -304,13 +319,16 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont * there is a need to know which DML pipe index maps to which DC pipe. The code below * finds a dml_pipe_index from the plane id if a plane is valid. If a plane is not valid then * it finds a dml_pipe_index from the stream id. */ - if (get_plane_id(context, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state, - context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id, &plane_id)) { + if (get_plane_id(in_ctx, context, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state, + context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id, + in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[context->res_ctx.pipe_ctx[dc_pipe_ctx_index].pipe_idx], &plane_id)) { dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id); } else { dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id); } + if (dml_pipe_idx == 0xFFFFFFFF) + continue; ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[dml_pipe_idx]); ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_pipe_idx] == context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id); @@ -319,7 +337,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont */ populate_pipe_ctx_dlg_params_from_dml(&context->res_ctx.pipe_ctx[dc_pipe_ctx_index], &context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx); - if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type == SUBVP_PHANTOM) { + pipe_mall_type = dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[dc_pipe_ctx_index]); + if (pipe_mall_type == SUBVP_PHANTOM) { // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = 0; context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false; @@ -346,7 +365,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state != context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe->plane_state) && context->res_ctx.pipe_ctx[dc_pipe_ctx_index].prev_odm_pipe == NULL) { /* SS: all active surfaces stored in MALL */ - if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe_mall_type != SUBVP_PHANTOM) { context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes; } else { /* SUBVP: phantom surfaces only stored in MALL */ @@ -357,10 +376,16 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dppclk_mhz * 1000; context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dispclk_mhz * 1000; + + if (dc->config.forced_clocks || dc->debug.max_disp_clk) { + 
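/* Editorial note, not part of the patch: when the forced_clocks config or the
 * max_disp_clk debug override is set, the branch below pins the reported
 * DISPCLK/DPPCLK bandwidth clocks to the max_supported_* values derived from
 * the lowest supported DML state above, instead of the DML-computed minimums. */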
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz; + context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; + } } void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx) @@ -379,6 +404,71 @@ void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display watermark->cstate_pstate.cstate_exit_z8_ns = dml_get_wm_z8_stutter(dml_core_ctx) * 1000; } +unsigned int dml2_calc_max_scaled_time( + unsigned int time_per_pixel, + enum mmhubbub_wbif_mode mode, + unsigned int urgent_watermark) +{ + unsigned int time_per_byte = 0; + unsigned int total_free_entry = 0xb40; + unsigned int buf_lh_capability; + unsigned int max_scaled_time; + + if (mode == PACKED_444) /* packed mode 32 bpp */ + time_per_byte = time_per_pixel/4; + else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */ + time_per_byte = time_per_pixel/8; + + if (time_per_byte == 0) + time_per_byte = 1; + + buf_lh_capability = (total_free_entry*time_per_byte*32) >> 6; /* time_per_byte is in u6.6 format */ + max_scaled_time = buf_lh_capability - urgent_watermark; + return max_scaled_time; +} + +void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx) +{ + int i, j = 0; + struct mcif_arb_params *wb_arb_params = NULL; + struct dcn_bw_writeback *bw_writeback = NULL; + enum mmhubbub_wbif_mode wbif_mode = PACKED_444_FP16; /* for now */ + + if (context->stream_count != 0) { + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->num_wb_info != 0) + j++; + } + } + if (j == 0) /* no DWB */ + return; + for (i = 0; i < __DML_NUM_DMB__; i++) { + bw_writeback = &context->bw_ctx.bw.dcn.bw_writeback; + wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[i]; + + for (j = 0; j < 4; j++) { + /* current DML only has one set of watermarks, need to follow up */ + bw_writeback->mcif_wb_arb[i].cli_watermark[j] = + dml_get_wm_writeback_urgent(dml_core_ctx) * 1000; + bw_writeback->mcif_wb_arb[i].pstate_watermark[j] = + dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000; + } + if (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk != 0) { + /* time_per_pixel should be in u6.6 format */ + bw_writeback->mcif_wb_arb[i].time_per_pixel = + (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; + } + bw_writeback->mcif_wb_arb[i].slice_lines = 32; + bw_writeback->mcif_wb_arb[i].arbitration_slice = 2; + bw_writeback->mcif_wb_arb[i].max_scaled_time = + dml2_calc_max_scaled_time(wb_arb_params->time_per_pixel, + wbif_mode, wb_arb_params->cli_watermark[0]); + /* not required anymore */ + bw_writeback->mcif_wb_arb[i].dram_speed_change_duration = + dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000; + + } +} void dml2_initialize_det_scratch(struct dml2_context *in_ctx) { int i; @@ -445,11 +535,15 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc for (i = 0; i < MAX_PIPES; i++) { if (!display_state->res_ctx.pipe_ctx[i].stream) continue; - if (get_plane_id(display_state, display_state->res_ctx.pipe_ctx[i].plane_state, - display_state->res_ctx.pipe_ctx[i].stream->stream_id, &plane_id)) + if (get_plane_id(in_ctx, display_state, display_state->res_ctx.pipe_ctx[i].plane_state, + display_state->res_ctx.pipe_ctx[i].stream->stream_id, + in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[display_state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id)) dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx,
plane_id); else dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id); + + if (dml_pipe_idx == 0xFFFFFFFF) + continue; total_det_allocated += dml_get_det_buffer_size_kbytes(&in_ctx->v20.dml_core_ctx, dml_pipe_idx); if (total_det_allocated > max_det_size) { need_recalculation = true; @@ -464,7 +558,7 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc return need_recalculation; } -bool dml2_is_stereo_timing(struct dc_stream_state *stream) +bool dml2_is_stereo_timing(const struct dc_stream_state *stream) { bool is_stereo = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h index 23b9028337d4..04fcfe637119 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h @@ -40,9 +40,14 @@ void dml2_util_copy_dml_output(struct dml_output_cfg_st *dml_output_array, unsig unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, enum dml_output_encoder_class encoder, bool dsc_enabled); void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_state *context); void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx); +void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx); int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id); bool is_dtbclk_required(const struct dc *dc, struct dc_state *context); -bool dml2_is_stereo_timing(struct dc_stream_state *stream); +bool dml2_is_stereo_timing(const struct dc_stream_state *stream); +unsigned int dml2_calc_max_scaled_time( + unsigned int time_per_pixel, + enum mmhubbub_wbif_mode mode, + unsigned int urgent_watermark); /* * dml2_dc_construct_pipes - This function will determine if we need additional pipes based diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 0a06bf3b135a..9412d5384a41 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2, in_out_display_cfg->hw.DLGRefClkFreqMHz = 50; } for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) { + if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) { + dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n", + __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__); + break; + } dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i]; dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true; dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i]; @@ -418,7 +423,7 @@ static int find_drr_eligible_stream(struct dc_state *display_state) int i; for (i = 0; i < display_state->stream_count; i++) { - if (display_state->streams[i]->mall_stream_config.type == SUBVP_NONE + if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE && display_state->streams[i]->ignore_msa_timing_param) { // Use ignore_msa_timing_param flag to identify as DRR return i; @@ -565,6 +570,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s struct 
dml2_dcn_clocks out_clks; unsigned int result = 0; bool need_recalculation = false; + uint32_t cstate_enter_plus_exit_z8_ns; if (!context || context->stream_count == 0) return true; @@ -634,12 +640,23 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx); memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c)); dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx); + dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx); + //copy for deciding zstate use + context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod; + + cstate_enter_plus_exit_z8_ns = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns; + + if (context->bw_ctx.dml.vba.StutterPeriod < in_dc->debug.minimum_z8_residency_time && + cstate_enter_plus_exit_z8_ns < in_dc->debug.minimum_z8_residency_time * 1000) + cstate_enter_plus_exit_z8_ns = in_dc->debug.minimum_z8_residency_time * 1000; + + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns; } return result; } -static bool dml2_validate_only(const struct dc_state *context) +static bool dml2_validate_only(struct dc_state *context) { struct dml2_context *dml2 = context->bw_ctx.dml2; unsigned int result = 0; @@ -674,13 +691,13 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d } } -bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate) +bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate) { bool out = false; - if (!(context->bw_ctx.dml2)) + if (!dml2) return false; - dml2_apply_debug_options(in_dc, context->bw_ctx.dml2); + dml2_apply_debug_options(in_dc, dml2); /* Use dml_validate_only for fast_validate path */ @@ -691,13 +708,13 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_v return out; } -bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) +static inline struct dml2_context *dml2_allocate_memory(void) { - // Allocate Mode Lib Ctx - *dml2 = (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL); + return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL); +} - if (!(*dml2)) - return false; +static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) +{ // Store config options (*dml2)->config = *config; @@ -725,9 +742,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc); initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states); +} + +bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) +{ + // Allocate Mode Lib Ctx + *dml2 = dml2_allocate_memory(); + + if (!(*dml2)) + return false; + + dml2_init(in_dc, config, dml2); - /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/ - //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init); return true; } @@ -745,3 +771,33 @@ void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2, 
*fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0]; *dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0]; } + +void dml2_copy(struct dml2_context *dst_dml2, + struct dml2_context *src_dml2) +{ + /* copy Mode Lib Ctx */ + memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context)); +} + +bool dml2_create_copy(struct dml2_context **dst_dml2, + struct dml2_context *src_dml2) +{ + /* Allocate Mode Lib Ctx */ + *dst_dml2 = dml2_allocate_memory(); + + if (!(*dst_dml2)) + return false; + + /* copy Mode Lib Ctx */ + dml2_copy(*dst_dml2, src_dml2); + + return true; +} + +void dml2_reinit(const struct dc *in_dc, + const struct dml2_configuration_options *config, + struct dml2_context **dml2) +{ + + dml2_init(in_dc, config, dml2); +} diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 317f90776d97..4a8bd2f4195e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -71,6 +71,7 @@ struct dml2_dcn_clocks { struct dml2_dc_callbacks { struct dc *dc; bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx); + void (*build_test_pattern_params)(struct resource_context *res_ctx, struct pipe_ctx *otg_master); bool (*can_support_mclk_switch_using_fw_based_vblank_stretch)(struct dc *dc, struct dc_state *context); bool (*acquire_secondary_pipe_for_mpc_odm)(const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm); bool (*update_pipes_for_stream_with_slice_count)( @@ -86,22 +87,65 @@ struct dml2_dc_callbacks { const struct dc_plane_state *plane, int slice_count); int (*get_odm_slice_index)(const struct pipe_ctx *opp_head); + int (*get_odm_slice_count)(const struct pipe_ctx *opp_head); int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe); + int (*get_mpc_slice_count)(const struct pipe_ctx *dpp_pipe); struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx); + struct pipe_ctx *(*get_otg_master_for_stream)( + struct resource_context *res_ctx, + const struct dc_stream_state *stream); + int (*get_opp_heads_for_otg_master)(const struct pipe_ctx *otg_master, + struct resource_context *res_ctx, + struct pipe_ctx *opp_heads[MAX_PIPES]); + int (*get_dpp_pipes_for_plane)(const struct dc_plane_state *plane, + struct resource_context *res_ctx, + struct pipe_ctx *dpp_pipes[MAX_PIPES]); + struct dc_stream_status *(*get_stream_status)( + struct dc_state *state, + const struct dc_stream_state *stream); + struct dc_stream_state *(*get_stream_from_id)(const struct dc_state *state, unsigned int id); }; struct dml2_dc_svp_callbacks { struct dc *dc; bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx); - struct dc_stream_state* (*create_stream_for_sink)(struct dc_sink *dc_sink_data); - struct dc_plane_state* (*create_plane)(struct dc *dc); - enum dc_status (*add_stream_to_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); - bool (*add_plane_to_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); - bool (*remove_plane_from_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); - enum dc_status (*remove_stream_from_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream); - void (*plane_state_release)(struct dc_plane_state *plane_state); - void 
(*stream_release)(struct dc_stream_state *stream); + struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *main_stream); + struct dc_plane_state* (*create_phantom_plane)(const struct dc *dc, + struct dc_state *state, + struct dc_plane_state *main_plane); + enum dc_status (*add_phantom_stream)(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream, + struct dc_stream_state *main_stream); + bool (*add_phantom_plane)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); + bool (*remove_phantom_plane)(const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *context); + enum dc_status (*remove_phantom_stream)(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); + void (*release_phantom_plane)(const struct dc *dc, + struct dc_state *state, + struct dc_plane_state *plane); + void (*release_phantom_stream)(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); void (*release_dsc)(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc); + enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx); + enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream); + struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream); + bool (*remove_phantom_streams_and_planes)( + const struct dc *dc, + struct dc_state *state); + void (*release_phantom_streams_and_planes)( + const struct dc *dc, + struct dc_state *state); + unsigned int (*calculate_mall_ways_from_bytes)( + const struct dc *dc, + unsigned int total_size_in_mall_bytes); }; struct dml2_clks_table_entry { @@ -139,6 +183,8 @@ struct dml2_soc_bbox_overrides { double urgent_latency_us; double sr_exit_latency_us; double sr_enter_plus_exit_latency_us; + double sr_exit_z8_time_us; + double sr_enter_plus_exit_z8_time_us; double dram_clock_change_latency_us; double fclk_change_latency_us; unsigned int dram_num_chan; @@ -170,6 +216,8 @@ struct dml2_configuration_options { unsigned int max_segments_per_hubp; unsigned int det_segment_size; bool map_dc_pipes_with_callbacks; + + bool use_clock_dc_limits; }; /* @@ -189,6 +237,13 @@ bool dml2_create(const struct dc *in_dc, struct dml2_context **dml2); void dml2_destroy(struct dml2_context *dml2); +void dml2_copy(struct dml2_context *dst_dml2, + struct dml2_context *src_dml2); +bool dml2_create_copy(struct dml2_context **dst_dml2, + struct dml2_context *src_dml2); +void dml2_reinit(const struct dc *in_dc, + const struct dml2_configuration_options *config, + struct dml2_context **dml2); /* * dml2_validate - Determines if a display configuration is supported or not. @@ -216,6 +271,7 @@ void dml2_destroy(struct dml2_context *dml2); */ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, + struct dml2_context *dml2, bool fast_validate); /* diff --git a/drivers/gpu/drm/amd/display/dc/dpp/Makefile b/drivers/gpu/drm/amd/display/dc/dpp/Makefile new file mode 100644 index 000000000000..99bd36073561 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/Makefile @@ -0,0 +1,77 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'dpp' sub-component of DAL. +# +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +DPP_DCN10 = dcn10_dpp.o dcn10_dpp_dscl.o dcn10_dpp_cm.o + +AMD_DAL_DPP_DCN10 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn10/,$(DPP_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN10) + +############################################################################### + +DPP_DCN20 = dcn20_dpp.o dcn20_dpp_cm.o + +AMD_DAL_DPP_DCN20 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn20/,$(DPP_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN20) + +############################################################################### + +DPP_DCN201 = dcn201_dpp.o + +AMD_DAL_DPP_DCN201 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn201/,$(DPP_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN201) + +############################################################################### + +DPP_DCN30 = dcn30_dpp.o dcn30_dpp_cm.o + +AMD_DAL_DPP_DCN30 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn30/,$(DPP_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN30) + +############################################################################### + +DPP_DCN32 = dcn32_dpp.o + +AMD_DAL_DPP_DCN32 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn32/,$(DPP_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN32) + +############################################################################### + +DPP_DCN35 = dcn35_dpp.o + +AMD_DAL_DPP_DCN35 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn35/,$(DPP_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN35) + +############################################################################### + +endif
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt new file mode 100644 index 000000000000..1318c6fba3e7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt @@ -0,0 +1,6 @@ +dal3_subdirectory_sources( + dcn10_dpp.c + dcn10_dpp_cm.c + dcn10_dpp_dscl.c + dcn10_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c index ef52e6b6eccf..e1da48b05d00 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c @@ -28,7 +28,7 @@ #include "core_types.h" #include "reg_helper.h" -#include "dcn10_dpp.h" +#include "dcn10/dcn10_dpp.h" #include "basics/conversion.h" #define NUM_PHASES 64 @@ -543,7 +543,8 @@ static const struct dpp_funcs dcn10_dpp_funcs = { .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, .dpp_program_blnd_lut = NULL, .dpp_program_shaper_lut = NULL, - .dpp_program_3dlut = NULL + .dpp_program_3dlut = NULL, + .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap, }; static struct dpp_caps dcn10_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h index c9e045666dcc..c48139bed11f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h @@ -1090,7 +1090,8 @@ type DPP_CLOCK_ENABLE; \ type CM_HDR_MULT_COEF; \ type CUR0_FP_BIAS; \ - type CUR0_FP_SCALE; + type CUR0_FP_SCALE;\ + type DISPCLK_R_GATE_DISABLE; struct dcn_dpp_shift { TF_REG_FIELD_LIST(uint8_t) @@ -1521,4 +1522,7 @@ void dpp1_construct(struct dcn10_dpp *dpp1, const struct dcn_dpp_registers *tf_regs, const struct dcn_dpp_shift *tf_shift, const struct dcn_dpp_mask *tf_mask); + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c index 904c2d278998..006e23842016 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c @@ -28,9 +28,9 @@ #include "core_types.h" #include "reg_helper.h" -#include "dcn10_dpp.h" +#include "dcn10/dcn10_dpp.h" #include "basics/conversion.h" -#include "dcn10_cm_common.h" +#include "dcn10/dcn10_cm_common.h" #define NUM_PHASES 64 #define HORZ_MAX_TAPS 8 @@ -98,7 +98,7 @@ static void program_gamut_remap( if (regval == NULL || select == GAMUT_REMAP_BYPASS) { REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); + CM_GAMUT_REMAP_MODE, 0); return; } switch (select) { @@ -181,6 +181,74 @@ void dpp1_cm_set_gamut_remap( } } +static void read_gamut_remap(struct dcn10_dpp *dpp, + uint16_t *regval, + enum gamut_remap_select *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + REG_GET(CM_GAMUT_REMAP_CONTROL, + CM_GAMUT_REMAP_MODE, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == GAMUT_REMAP_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == 
GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMB_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + uint16_t arr_reg_val[12] = {0}; + enum gamut_remap_select select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + static void dpp1_cm_program_color_matrix( struct dcn10_dpp *dpp, const uint16_t *regval) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c index 5ca9ab8a76e8..808bca9fb804 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c @@ -28,7 +28,7 @@ #include "core_types.h" #include "reg_helper.h" -#include "dcn10_dpp.h" +#include "dcn10/dcn10_dpp.h" #include "basics/conversion.h" diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt new file mode 100644 index 000000000000..9c2d7096348e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt @@ -0,0 +1,5 @@ +dal3_subdirectory_sources( + dcn20_dpp.c + dcn20_dpp_cm.c + dcn20_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c index eaa7032f0f1a..56ebd7164dd7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c @@ -28,7 +28,7 @@ #include "core_types.h" #include "reg_helper.h" -#include "dcn20_dpp.h" +#include "dcn20/dcn20_dpp.h" #include "basics/conversion.h" #define NUM_PHASES 64 @@ -55,21 +55,23 @@ void dpp20_read_state(struct dpp *dpp_base, REG_GET(DPP_CONTROL, DPP_CLOCK_ENABLE, &s->is_enabled); + + // Degamma LUT (RAM) REG_GET(CM_DGAM_CONTROL, - CM_DGAM_LUT_MODE, &s->dgam_lut_mode); - // BGAM has no ROM, and definition is different, can't reuse same dump - //REG_GET(CM_BLNDGAM_CONTROL, - // CM_BLNDGAM_LUT_MODE, &s->rgam_lut_mode); - REG_GET(CM_GAMUT_REMAP_CONTROL, - CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode); - if (s->gamut_remap_mode) { - s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); - s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); - s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); - s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24); - s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32); - s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); - } + CM_DGAM_LUT_MODE, &s->dgam_lut_mode); + + // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) + REG_GET(CM_SHAPER_CONTROL, + CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); + REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode, + CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_SIZE, &s->lut3d_size); + + // Blend/Out Gamma (RAM) + 
REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, + CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode); } void dpp2_power_on_obuf( @@ -393,6 +395,7 @@ static struct dpp_funcs dcn20_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, }; static struct dpp_caps dcn20_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h index e735363d0051..49cb25c9cb36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h @@ -736,7 +736,7 @@ bool dpp20_program_shaper( bool dpp20_program_3dlut( struct dpp *dpp_base, - struct tetrahedral_params *params); + const struct tetrahedral_params *params); void dpp2_cnv_set_alpha_keyer( struct dpp *dpp_base, @@ -775,4 +775,7 @@ bool dpp2_construct(struct dcn20_dpp *dpp2, void dpp2_power_on_obuf( struct dpp *dpp_base, bool power_on); + +void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c index 598caa508d43..31613372e214 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c @@ -28,7 +28,7 @@ #include "core_types.h" #include "reg_helper.h" -#include "dcn20_dpp.h" +#include "dcn20/dcn20_dpp.h" #include "basics/conversion.h" #include "dcn10/dcn10_cm_common.h" @@ -234,6 +234,61 @@ void dpp2_cm_set_gamut_remap( } } +static void read_gamut_remap(struct dcn20_dpp *dpp, + uint16_t *regval, + enum dcn20_gamut_remap_select *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == DCN2_GAMUT_REMAP_COEF_A) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == DCN2_GAMUT_REMAP_COEF_B) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + uint16_t arr_reg_val[12] = {0}; + enum dcn20_gamut_remap_select select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == DCN2_GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + void dpp2_program_input_csc( struct dpp *dpp_base, enum dc_color_space color_space, @@ -1059,15 +1114,15 @@ static void dpp20_select_3dlut_ram_mask( bool dpp20_program_3dlut( struct dpp *dpp_base, - struct 
tetrahedral_params *params) + const struct tetrahedral_params *params) { enum dc_lut_mode mode; bool is_17x17x17; bool is_12bits_color_channel; - struct dc_rgb *lut0; - struct dc_rgb *lut1; - struct dc_rgb *lut2; - struct dc_rgb *lut3; + const struct dc_rgb *lut0; + const struct dc_rgb *lut1; + const struct dc_rgb *lut2; + const struct dc_rgb *lut3; int lut_size0; int lut_size; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt new file mode 100644 index 000000000000..7711cd3c47a7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn201_dpp.c + dcn201_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c index a7268027a472..345202fee40f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c @@ -28,7 +28,7 @@ #include "core_types.h" #include "reg_helper.h" -#include "dcn201_dpp.h" +#include "dcn201/dcn201_dpp.h" #include "basics/conversion.h" #define REG(reg)\ @@ -275,6 +275,7 @@ static struct dpp_funcs dcn201_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, }; static struct dpp_caps dcn201_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h index cbd5b47b4acf..cbd5b47b4acf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt new file mode 100644 index 000000000000..0faee2a1e32b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt @@ -0,0 +1,5 @@ +dal3_subdirectory_sources( + dcn30_dpp.c + dcn30_dpp_cm.c + dcn30_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 50dc83404644..f8c0cee34080 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -26,9 +26,9 @@ #include "dm_services.h" #include "core_types.h" #include "reg_helper.h" -#include "dcn30_dpp.h" +#include "dcn30/dcn30_dpp.h" #include "basics/conversion.h" -#include "dcn30_cm_common.h" +#include "dcn30/dcn30_cm_common.h" #define REG(reg)\ dpp->tf_regs->reg @@ -44,12 +44,45 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) { struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t gamcor_lut_mode, rgam_lut_mode; REG_GET(DPP_CONTROL, - DPP_CLOCK_ENABLE, &s->is_enabled); + DPP_CLOCK_ENABLE, &s->is_enabled); + + // Pre-degamma (ROM) + REG_GET_2(PRE_DEGAM, + PRE_DEGAM_MODE, &s->pre_dgam_mode, + PRE_DEGAM_SELECT, &s->pre_dgam_select); + + // Gamma Correction (RAM) + REG_GET(CM_GAMCOR_CONTROL, + CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode); + if (s->gamcor_mode) { + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode); + if (!gamcor_lut_mode) + s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B + } - // TODO: Implement for DCN3 + // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) + REG_GET(CM_SHAPER_CONTROL, + CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); + REG_GET(CM_3DLUT_MODE, + 
CM_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_SIZE, &s->lut3d_size); + + // Blend/Out Gamma (RAM) + REG_GET(CM_BLNDGAM_CONTROL, + CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode); + if (s->rgam_lut_mode){ + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode); + if (!rgam_lut_mode) + s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B + } } + /*program post scaler scs block in dpp CM*/ void dpp3_program_post_csc( struct dpp *dpp_base, @@ -260,9 +293,11 @@ void dpp3_cnv_setup ( break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: pixel_format = 112; + alpha_en = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: pixel_format = 113; + alpha_en = 0; break; case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: pixel_format = 114; @@ -286,9 +321,11 @@ void dpp3_cnv_setup ( break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: pixel_format = 118; + alpha_en = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: pixel_format = 119; + alpha_en = 0; break; default: break; @@ -613,16 +650,19 @@ static void dpp3_program_blnd_pwl( REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); } else { + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4); for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2); for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green); + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1); for (i = 0 ; i < num; i++) REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); @@ -1348,15 +1388,15 @@ static void dpp3_select_3dlut_ram_mask( } static bool dpp3_program_3dlut(struct dpp *dpp_base, - struct tetrahedral_params *params) + const struct tetrahedral_params *params) { enum dc_lut_mode mode; bool is_17x17x17; bool is_12bits_color_channel; - struct dc_rgb *lut0; - struct dc_rgb *lut1; - struct dc_rgb *lut2; - struct dc_rgb *lut3; + const struct dc_rgb *lut0; + const struct dc_rgb *lut1; + const struct dc_rgb *lut2; + const struct dc_rgb *lut3; int lut_size0; int lut_size; @@ -1459,6 +1499,7 @@ static struct dpp_funcs dcn30_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h index cea3208e4ab1..269f437c1633 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -132,6 +132,8 @@ SRI(CM_POST_CSC_B_C33_C34, CM, id), \ SRI(CM_MEM_PWR_CTRL, CM, id), \ SRI(CM_CONTROL, CM, id), \ + SRI(CM_TEST_DEBUG_INDEX, CM, id), \ + SRI(CM_TEST_DEBUG_DATA, CM, id), \ SRI(FORMAT_CONTROL, CNVC_CFG, id), \ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ @@ 
-294,6 +296,7 @@ TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \ TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \ TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \ + TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ @@ -426,6 +429,7 @@ type CM_GAMCOR_LUT_DATA; \ type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \ type CM_GAMCOR_LUT_READ_COLOR_SEL; \ + type CM_GAMCOR_LUT_READ_DBG; \ type CM_GAMCOR_LUT_HOST_SEL; \ type CM_GAMCOR_LUT_CONFIG_MODE; \ type CM_GAMCOR_LUT_STATUS; \ @@ -637,4 +641,6 @@ void dpp3_program_cm_dealpha( struct dpp *dpp_base, uint32_t enable, uint32_t additive_blending); +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c index e43f77c11c00..82eca0e7b7d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c @@ -26,9 +26,9 @@ #include "dm_services.h" #include "core_types.h" #include "reg_helper.h" -#include "dcn30_dpp.h" +#include "dcn30/dcn30_dpp.h" #include "basics/conversion.h" -#include "dcn30_cm_common.h" +#include "dcn30/dcn30_cm_common.h" #define REG(reg)\ dpp->tf_regs->reg @@ -56,16 +56,13 @@ static void dpp3_enable_cm_block( static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) { - enum dc_lut_mode mode; + enum dc_lut_mode mode = LUT_BYPASS; uint32_t state_mode; uint32_t lut_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); - if (state_mode == 0) - mode = LUT_BYPASS; - if (state_mode == 2) {//Programmable RAM LUT REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); if (lut_mode == 0) @@ -408,3 +405,57 @@ void dpp3_cm_set_gamut_remap( program_gamut_remap(dpp, arr_reg_val, gamut_mode); } } + +static void read_gamut_remap(struct dcn3_dpp *dpp, + uint16_t *regval, + int *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + //current coefficient set in use + REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint16_t arr_reg_val[12] = {0}; + int select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 
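(The readback finishes just below with convert_hw_matrix(), which unpacks the twelve 16-bit register words into the fixed31_32 entries of temperature_matrix.) DCN1, DCN2 and DCN3 now all expose the same query through the new dpp_get_gamut_remap hook. A minimal consumer sketch, assuming only what this series adds, namely the hook, the BYPASS/SW adjust types, and a row-major 3x4 temperature_matrix; the helper name and the logging calls are illustrative, not part of the patch:

/* Illustrative sketch, not part of this patch: dump whichever gamut
 * remap (CTM) the DPP currently has live, via the new readback hook.
 * Assumes temperature_matrix holds C11..C34 in row-major order. */
static void dump_live_gamut_remap(struct dpp *dpp)
{
	struct dpp_grph_csc_adjustment adjust = {0};
	int i;

	dpp->funcs->dpp_get_gamut_remap(dpp, &adjust);

	if (adjust.gamut_adjust_type == GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS) {
		DC_LOG_DEBUG("DPP%d gamut remap: bypass\n", dpp->inst);
		return;
	}

	for (i = 0; i < 12; i++)
		DC_LOG_DEBUG("C%d%d = %lld\n", i / 4 + 1, i % 4 + 1,
			     adjust.temperature_matrix[i].value);
}

dcn10_log_color_state() later in this diff consumes the hook in exactly this way when it prints the per-DPP "GAMUT adjust" matrix columns.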
+ convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt new file mode 100644 index 000000000000..7743edc4599f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn32_dpp.c + dcn32_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c index dcf12a0b031c..41679997b44d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c @@ -26,7 +26,7 @@ #include "dm_services.h" #include "core_types.h" #include "reg_helper.h" -#include "dcn32_dpp.h" +#include "dcn32/dcn32_dpp.h" #include "basics/conversion.h" #include "dcn30/dcn30_cm_common.h" @@ -133,6 +133,7 @@ static struct dpp_funcs dcn32_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h index 572958d287eb..572958d287eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt new file mode 100644 index 000000000000..91df5db26435 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn35_dpp.c + dcn35_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c new file mode 100644 index 000000000000..e16274fee31d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dcn35/dcn35_dpp.h" +#include "reg_helper.h" + +#define REG(reg) dpp->tf_regs->reg + +#define CTX dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \ + ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name + +void dpp35_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (enable) { + if (dpp->tf_mask->DPPCLK_RATE_CONTROL) + REG_UPDATE_2(DPP_CONTROL, + DPPCLK_RATE_CONTROL, dppclk_div, + DPP_CLOCK_ENABLE, 1); + else + REG_UPDATE_2(DPP_CONTROL, + DPP_CLOCK_ENABLE, 1, + DISPCLK_R_GATE_DISABLE, 1); + } else + REG_UPDATE_2(DPP_CONTROL, + DPP_CLOCK_ENABLE, 0, + DISPCLK_R_GATE_DISABLE, 0); +} + +static struct dpp_funcs dcn35_dpp_funcs = { + .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, + .dpp_read_state = dpp30_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = NULL, + .dpp_set_pre_degam = dpp3_set_pre_degam, + .dpp_program_input_lut = NULL, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp3_cnv_setup, + .dpp_program_degamma_pwl = NULL, + .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, + .dpp_program_cm_bias = dpp3_program_cm_bias, + + .dpp_program_blnd_lut = NULL, // BLNDGAM is removed completely in DCN3.2 DPP + .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp3_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp35_dppclk_control, + .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, +}; + + +bool dpp35_construct( + struct dcn3_dpp *dpp, struct dc_context *ctx, + uint32_t inst, const struct dcn3_dpp_registers *tf_regs, + const struct dcn35_dpp_shift *tf_shift, + const struct dcn35_dpp_mask *tf_mask) +{ + bool ret = dpp32_construct(dpp, ctx, inst, tf_regs, + (const struct dcn3_dpp_shift *)(tf_shift), + (const struct dcn3_dpp_mask *)(tf_mask)); + + dpp->base.funcs = &dcn35_dpp_funcs; + return ret; +} + +void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable) +{ + REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h index 09b84307cd9e..135872d88219 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h @@ -31,7 +31,9 @@ #define DPP_REG_LIST_SH_MASK_DCN35(mask_sh) \ DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh) + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DISPCLK_R_GATE_DISABLE, mask_sh) #define DPP_REG_FIELD_LIST_DCN35(type) \ struct { \ @@ -47,6 +49,11 @@ struct 
dcn35_dpp_mask { DPP_REG_FIELD_LIST_DCN35(uint32_t); }; +void dpp35_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable); + bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx, uint32_t inst, const struct dcn3_dpp_registers *tf_regs, const struct dcn35_dpp_shift *tf_shift, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index a2537229ee88..b183ba5a692e 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,8 +1,34 @@ # SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. + +ifdef CONFIG_DRM_AMD_DC_FP + +############################################################################### +# DCN20 +############################################################################### +DSC_DCN20 = dcn20_dsc.o + +AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn20/,$(DSC_DCN20)) + + + + +############################################################################### +# DCN35 +############################################################################### + +DSC_DCN35 = dcn35_dsc.o + +AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn35/,$(DSC_DCN35)) + + + +endif + DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC)) AMD_DISPLAY_FILES += $(AMD_DAL_DSC) + diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 3966845c7694..150ef23440a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -137,6 +137,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) kbps = apply_128b_132b_stream_overhead(timing, kbps); + if (link_encoding == DC_LINK_ENCODING_HDMI_FRL && + timing->vic == 0 && timing->hdmi_vic == 0 && + timing->frl_uncompressed_video_bandwidth_in_kbps != 0) + kbps = timing->frl_uncompressed_video_bandwidth_in_kbps; + return kbps; } @@ -331,8 +336,9 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, int buff_block_size; int buff_size; - if (!dsc_buff_block_size_from_dpcd(dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT], - &buff_block_size)) + if (!dsc_buff_block_size_from_dpcd( + dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] & 0x03, + &buff_block_size)) return false; buff_size = dpcd_dsc_basic_data[DP_DSC_RC_BUF_SIZE - DP_DSC_SUPPORT] + 1; @@ -357,10 +363,15 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, { int dpcd_throughput = dpcd_dsc_basic_data[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT]; + int dsc_throughput_granular_delta; + + dsc_throughput_granular_delta = dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] >> 3; + dsc_throughput_granular_delta *= 2; if (!dsc_throughput_from_dpcd(dpcd_throughput & DP_DSC_THROUGHPUT_MODE_0_MASK, &dsc_sink_caps->throughput_mode_0_mps)) return false; + dsc_sink_caps->throughput_mode_0_mps += dsc_throughput_granular_delta; dpcd_throughput = (dpcd_throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> DP_DSC_THROUGHPUT_MODE_1_SHIFT; if (!dsc_throughput_from_dpcd(dpcd_throughput, &dsc_sink_caps->throughput_mode_1_mps)) @@ -447,7 +458,7 @@ bool dc_dsc_compute_bandwidth_range( bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; struct dsc_enc_caps dsc_common_caps; - struct dc_dsc_config config; + struct dc_dsc_config config = {0}; struct dc_dsc_config_options options = {0}; options.dsc_min_slice_height_override = dsc_min_slice_height_override; @@ 
-512,6 +523,11 @@ static bool intersect_dsc_caps( dsc_sink_caps->slice_caps1.bits.NUM_SLICES_4 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; dsc_common_caps->slice_caps.bits.NUM_SLICES_8 = dsc_sink_caps->slice_caps1.bits.NUM_SLICES_8 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_8; + dsc_common_caps->slice_caps.bits.NUM_SLICES_12 = + dsc_sink_caps->slice_caps1.bits.NUM_SLICES_12 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_12; + dsc_common_caps->slice_caps.bits.NUM_SLICES_16 = + dsc_sink_caps->slice_caps2.bits.NUM_SLICES_16 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_16; + if (!dsc_common_caps->slice_caps.raw) return false; @@ -703,6 +719,12 @@ static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *av if (slice_caps.bits.NUM_SLICES_8) available_slices[idx++] = 8; + if (slice_caps.bits.NUM_SLICES_12) + available_slices[idx++] = 12; + + if (slice_caps.bits.NUM_SLICES_16) + available_slices[idx++] = 16; + return idx; } @@ -846,9 +868,9 @@ static bool setup_dsc_config( struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; - int max_slices_h; - int min_slices_h; - int num_slices_h; + int max_slices_h = 0; + int min_slices_h = 0; + int num_slices_h = 0; int pic_width; int slice_width; int target_bpp; @@ -1033,7 +1055,12 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - dsc_cfg->num_slices_v = pic_height/slice_height; + if (slice_height > 0) { + dsc_cfg->num_slices_v = pic_height / slice_height; + } else { + is_dsc_possible = false; + goto done; + } if (target_bandwidth_kbps > 0) { is_dsc_possible = decide_dsc_target_bpp_x16( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c index 5eebe7f03ddc..c9ae2d8f0096 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c @@ -137,7 +137,15 @@ void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 2; } - // TODO DSC: This is actually image width limitation, not a slice width. This should be added to the criteria to use ODM. 
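The replacement below resolves this TODO by scaling the encoder slice caps with ODM instead. Together with the branch ending just above (which doubles max_total_throughput_mps once the pixel clock exceeds the single-pipe limit), the selection reduces to a threshold ladder; a sketch only, where needed_odm_factor() is an illustrative name rather than a DC function:

/* Sketch of how dsc2_get_enc_caps() scales with ODM: one DSC engine
 * covers pixel clocks up to DCN20_MAX_PIXEL_CLOCK_Mhz; past 1x the caps
 * double (ODM 2:1, the existing branch above), and past 2x the new
 * branch quadruples them (ODM 4:1, enabling 12/16 slices). */
static int needed_odm_factor(int pixel_clock_100hz)
{
	if (pixel_clock_100hz > DCN20_MAX_PIXEL_CLOCK_Mhz * 10000 * 2)
		return 4;	/* NUM_SLICES_12/16, 4x total throughput */
	if (pixel_clock_100hz > DCN20_MAX_PIXEL_CLOCK_Mhz * 10000)
		return 2;	/* 2x total throughput */
	return 1;
}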
+ /* Pixel clocks above twice the single-pipe limit need four DSC engines (ODM 4:1), which then + * quadruples our throughput and number of slices + */ + if (pixel_clock_100Hz > DCN20_MAX_PIXEL_CLOCK_Mhz*10000*2) { + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1; + dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 4; + } + dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h index ba869387c3c5..ba869387c3c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c index 71d2dff9986d..71d2dff9986d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h index 133ad38842cc..133ad38842cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index d7b8d586b523..4b27f29d0d80 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h @@ -76,6 +76,8 @@ union dsc_enc_slice_caps { uint8_t NUM_SLICES_3 : 1; /* This one is not per DSC spec, but our encoder supports it */ uint8_t NUM_SLICES_4 : 1; uint8_t NUM_SLICES_8 : 1; + uint8_t NUM_SLICES_12 : 1; + uint8_t NUM_SLICES_16 : 1; } bits; uint8_t raw; }; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 36d6c1646a51..59864130cf83 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -101,7 +101,6 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, { int ret; struct drm_dsc_config dsc_cfg; - unsigned long long tmp; dsc_params->pps = *pps; dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset); @@ -112,9 +111,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ?
48 : 64; ret = drm_dsc_compute_rc_parameters(&dsc_cfg); - tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1); - do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP - dsc_params->bytes_per_pixel = (uint32_t)tmp; + dsc_params->bytes_per_pixel = + (uint32_t)(div_u64(((uint64_t)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1)), + (uint32_t)dsc_cfg.slice_width)); /* Round-up */ copy_pps_fields(&dsc_params->pps, &dsc_cfg); dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c index d734e3a134d1..2840ed5c57d8 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c @@ -95,10 +95,6 @@ static bool offset_to_id( return true; default: ASSERT_CRITICAL(false); -#ifdef PALLADIUM_SUPPORTED - *en = GPIO_DDC_LINE_DDC1; - return true; -#endif return false; } break; @@ -184,11 +180,6 @@ static bool offset_to_id( /* UNEXPECTED */ default: /* case REG(DC_GPIO_SYNCA_A): not exista */ -#ifdef PALLADIUM_SUPPORTED - *id = GPIO_ID_HPD; - *en = GPIO_DDC_LINE_DDC1; - return true; -#endif ASSERT_CRITICAL(false); return false; } @@ -308,10 +299,6 @@ static bool id_to_offset( break; default: ASSERT_CRITICAL(false); -#ifdef PALLADIUM_SUPPORTED - info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK; - result = true; -#endif result = false; } break; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index 3ede6e02c3a7..663c17f52779 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -128,7 +128,7 @@ struct gpio *dal_gpio_service_create_irq( uint32_t offset, uint32_t mask) { - enum gpio_id id; + enum gpio_id id = 0; uint32_t en; if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) { @@ -144,7 +144,7 @@ struct gpio *dal_gpio_service_create_generic_mux( uint32_t offset, uint32_t mask) { - enum gpio_id id; + enum gpio_id id = 0; uint32_t en; struct gpio *generic; @@ -178,7 +178,7 @@ struct gpio_pin_info dal_gpio_get_generic_pin_info( enum gpio_id id, uint32_t en) { - struct gpio_pin_info pin; + struct gpio_pin_info pin = {0}; if (service->translate.funcs->id_to_offset) { service->translate.funcs->id_to_offset(id, en, &pin); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index 279020535af7..8f1a95b77830 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -110,6 +110,7 @@ bool dal_hw_factory_init( case DCN_VERSION_3_2: case DCN_VERSION_3_21: case DCN_VERSION_3_5: + case DCN_VERSION_3_51: dal_hw_factory_dcn32_init(factory); return true; default: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index d6b0a1af7d3e..37166b2b3fee 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -111,6 +111,7 @@ bool dal_hw_translate_init( case DCN_VERSION_3_2: case DCN_VERSION_3_21: case DCN_VERSION_3_5: + case DCN_VERSION_3_51: dal_hw_translate_dcn32_init(translate); return true; default: diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 25ffc052d53b..99e17c164ce7 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -23,8 +23,6 @@ * */ -#include <linux/slab.h> - #include "dm_services.h" #include "dm_helpers.h" #include "include/hdcp_msg_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile index bccd46bd1815..cf8aa23b4415 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile @@ -78,7 +78,7 @@ ifdef CONFIG_DRM_AMD_DC_FP # DCN ############################################################################### -HWSS_DCN10 = dcn10_hwseq.o +HWSS_DCN10 = dcn10_hwseq.o dcn10_init.o AMD_DAL_HWSS_DCN10 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn10/,$(HWSS_DCN10)) @@ -86,7 +86,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN10) ############################################################################### -HWSS_DCN20 = dcn20_hwseq.o +HWSS_DCN20 = dcn20_hwseq.o dcn20_init.o AMD_DAL_HWSS_DCN20 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn20/,$(HWSS_DCN20)) @@ -94,7 +94,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN20) ############################################################################### -HWSS_DCN201 = dcn201_hwseq.o +HWSS_DCN201 = dcn201_hwseq.o dcn201_init.o AMD_DAL_HWSS_DCN201 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn201/,$(HWSS_DCN201)) @@ -102,7 +102,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN201) ############################################################################### -HWSS_DCN21 = dcn21_hwseq.o +HWSS_DCN21 = dcn21_hwseq.o dcn21_init.o AMD_DAL_HWSS_DCN21 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn21/,$(HWSS_DCN21)) @@ -114,7 +114,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN21) ############################################################################### -HWSS_DCN30 = dcn30_hwseq.o +HWSS_DCN30 = dcn30_hwseq.o dcn30_init.o AMD_DAL_HWSS_DCN30 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn30/,$(HWSS_DCN30)) @@ -122,7 +122,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN30) ############################################################################### -HWSS_DCN301 = dcn301_hwseq.o +HWSS_DCN301 = dcn301_hwseq.o dcn301_init.o AMD_DAL_HWSS_DCN301 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn301/,$(HWSS_DCN301)) @@ -130,15 +130,17 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN301) ############################################################################### -HWSS_DCN302 = dcn302_hwseq.o +HWSS_DCN302 = dcn302_hwseq.o dcn302_init.o AMD_DAL_HWSS_DCN302 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn302/,$(HWSS_DCN302)) AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN302) + + ############################################################################### -HWSS_DCN303 = dcn303_hwseq.o +HWSS_DCN303 = dcn303_hwseq.o dcn303_init.o AMD_DAL_HWSS_DCN303 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn303/,$(HWSS_DCN303)) @@ -146,7 +148,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN303) ############################################################################### -HWSS_DCN31 = dcn31_hwseq.o +HWSS_DCN31 = dcn31_hwseq.o dcn31_init.o AMD_DAL_HWSS_DCN31 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn31/,$(HWSS_DCN31)) @@ -154,7 +156,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN31) ############################################################################### -HWSS_DCN314 = dcn314_hwseq.o +HWSS_DCN314 = dcn314_hwseq.o dcn314_init.o AMD_DAL_HWSS_DCN314 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn314/,$(HWSS_DCN314)) @@ -162,7 +164,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN314) ############################################################################### -HWSS_DCN32 = 
dcn32_hwseq.o +HWSS_DCN32 = dcn32_hwseq.o dcn32_init.o AMD_DAL_HWSS_DCN32 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn32/,$(HWSS_DCN32)) @@ -170,7 +172,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN32) ############################################################################### -HWSS_DCN35 = dcn35_hwseq.o +HWSS_DCN35 = dcn35_hwseq.o dcn35_init.o AMD_DAL_HWSS_DCN35 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn35/,$(HWSS_DCN35)) @@ -178,6 +180,14 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35) ############################################################################### +HWSS_DCN351 = dcn351_hwseq.o dcn351_init.o + +AMD_DAL_HWSS_DCN351 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn351/,$(HWSS_DCN351)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN351) + +############################################################################### + ############################################################################### -endif
\ No newline at end of file +endif diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h index 2fefdf40612d..52f045cfd52a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h @@ -682,6 +682,7 @@ struct dce_hwseq_registers { uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; uint32_t HPO_TOP_HW_CONTROL; uint32_t DMU_CLK_CNTL; + uint32_t DCCG_GATE_DISABLE_CNTL4; uint32_t DCCG_GATE_DISABLE_CNTL5; }; /* set field name */ @@ -1183,7 +1184,35 @@ struct dce_hwseq_registers { type LONO_FGCG_REP_DIS;\ type LONO_DISPCLK_GATE_DISABLE;\ type LONO_SOCCLK_GATE_DISABLE;\ - type LONO_DMCUBCLK_GATE_DISABLE; + type LONO_DMCUBCLK_GATE_DISABLE;\ + type SYMCLKA_FE_GATE_DISABLE;\ + type SYMCLKB_FE_GATE_DISABLE;\ + type SYMCLKC_FE_GATE_DISABLE;\ + type SYMCLKD_FE_GATE_DISABLE;\ + type SYMCLKE_FE_GATE_DISABLE;\ + type HDMICHARCLK0_GATE_DISABLE;\ + type SYMCLKA_GATE_DISABLE;\ + type SYMCLKB_GATE_DISABLE;\ + type SYMCLKC_GATE_DISABLE;\ + type SYMCLKD_GATE_DISABLE;\ + type SYMCLKE_GATE_DISABLE;\ + type PHYASYMCLK_ROOT_GATE_DISABLE;\ + type PHYBSYMCLK_ROOT_GATE_DISABLE;\ + type PHYCSYMCLK_ROOT_GATE_DISABLE;\ + type PHYDSYMCLK_ROOT_GATE_DISABLE;\ + type PHYESYMCLK_ROOT_GATE_DISABLE;\ + type DTBCLK_P0_GATE_DISABLE;\ + type DTBCLK_P1_GATE_DISABLE;\ + type DTBCLK_P2_GATE_DISABLE;\ + type DTBCLK_P3_GATE_DISABLE;\ + type DPSTREAMCLK0_GATE_DISABLE;\ + type DPSTREAMCLK1_GATE_DISABLE;\ + type DPSTREAMCLK2_GATE_DISABLE;\ + type DPSTREAMCLK3_GATE_DISABLE;\ + type DPIASYMCLK0_GATE_DISABLE;\ + type DPIASYMCLK1_GATE_DISABLE;\ + type DPIASYMCLK2_GATE_DISABLE;\ + type DPIASYMCLK3_GATE_DISABLE; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 960a55e06375..0d3ea291eeee 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -55,6 +55,7 @@ #include "audio.h" #include "reg_helper.h" #include "panel_cntl.h" +#include "dc_state_priv.h" #include "dpcd_defs.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" @@ -248,7 +249,7 @@ static bool dce110_enable_display_power_gating( return false; } -static void build_prescale_params(struct ipp_prescale_params *prescale_params, +static void dce110_prescale_params(struct ipp_prescale_params *prescale_params, const struct dc_plane_state *plane_state) { prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED; @@ -288,16 +289,14 @@ dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, if (ipp == NULL) return false; - if (plane_state->in_transfer_func) - tf = plane_state->in_transfer_func; + tf = &plane_state->in_transfer_func; - build_prescale_params(&prescale_params, plane_state); + dce110_prescale_params(&prescale_params, plane_state); ipp->funcs->ipp_program_prescale(ipp, &prescale_params); - if (plane_state->gamma_correction && - !plane_state->gamma_correction->is_identity && + if (!plane_state->gamma_correction.is_identity && dce_use_lut(plane_state->format)) - ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction); + ipp->funcs->ipp_program_input_lut(ipp, &plane_state->gamma_correction); if (tf == NULL) { /* Default case if no input transfer function specified */ @@ -613,11 +612,10 @@ dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, xfm->funcs->opp_power_on_regamma_lut(xfm, 
true); xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM; - if (stream->out_transfer_func && - stream->out_transfer_func->type == TF_TYPE_PREDEFINED && - stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) { + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED && + stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB) { xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB); - } else if (dce110_translate_regamma_to_hw_format(stream->out_transfer_func, + } else if (dce110_translate_regamma_to_hw_format(&stream->out_transfer_func, &xfm->regamma_params)) { xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params); xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER); @@ -790,7 +788,7 @@ void dce110_edp_power_control( struct dc_context *ctx = link->ctx; struct bp_transmitter_control cntl = { 0 }; enum bp_result bp_result; - uint8_t panel_instance; + uint8_t pwrseq_instance; if (dal_graphics_object_id_get_connector_id(link->link_enc->connector) @@ -873,7 +871,7 @@ void dce110_edp_power_control( cntl.coherent = false; cntl.lanes_number = LANE_COUNT_FOUR; cntl.hpd_sel = link->link_enc->hpd_source; - panel_instance = link->panel_cntl->inst; + pwrseq_instance = link->panel_cntl->pwrseq_inst; if (ctx->dc->ctx->dmub_srv && ctx->dc->debug.dmub_command_table) { @@ -881,11 +879,11 @@ void dce110_edp_power_control( if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) { bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_POWER_ON, - panel_instance, link->link_powered_externally); + pwrseq_instance, link->link_powered_externally); } else { bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_POWER_OFF, - panel_instance, link->link_powered_externally); + pwrseq_instance, link->link_powered_externally); } } @@ -956,7 +954,7 @@ void dce110_edp_backlight_control( { struct dc_context *ctx = link->ctx; struct bp_transmitter_control cntl = { 0 }; - uint8_t panel_instance; + uint8_t pwrseq_instance; unsigned int pre_T11_delay = OLED_PRE_T11_DELAY; unsigned int post_T7_delay = OLED_POST_T7_DELAY; @@ -1009,7 +1007,7 @@ void dce110_edp_backlight_control( */ /* dc_service_sleep_in_milliseconds(50); */ /*edp 1.2*/ - panel_instance = link->panel_cntl->inst; + pwrseq_instance = link->panel_cntl->pwrseq_inst; if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) { if (!link->dc->config.edp_no_power_sequencing) @@ -1034,11 +1032,11 @@ void dce110_edp_backlight_control( if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_LCD_BLON, - panel_instance, link->link_powered_externally); + pwrseq_instance, link->link_powered_externally); else ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, LVTMA_CONTROL_LCD_BLOFF, - panel_instance, link->link_powered_externally); + pwrseq_instance, link->link_powered_externally); } link_transmitter_control(ctx->dc_bios, &cntl); @@ -1182,24 +1180,15 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dto_params.timing = &pipe_ctx->stream->timing; dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; if (dccg) { - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); + if (dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); } } else if (dccg && dccg->funcs->disable_symclk_se) { dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - 
TRANSMITTER_UNIPHY_A); } - - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - /* TODO: This looks like a bug to me as we are disabling HPO IO when - * we are just disabling a single HPO stream. Shouldn't we disable HPO - * HW control only when HPOs for all streams are disabled? - */ - if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control) - pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control( - pipe_ctx->stream->ctx->dc->hwseq, false); - } } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1290,6 +1279,46 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id) } } +static void populate_audio_dp_link_info( + const struct pipe_ctx *pipe_ctx, + struct audio_dp_link_info *dp_link_info) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + const struct dc_link *link = stream->link; + struct fixed31_32 link_bw_kbps; + + dp_link_info->encoding = link->dc->link_srv->dp_get_encoding_format( + &pipe_ctx->link_config.dp_link_settings); + dp_link_info->is_mst = (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST); + dp_link_info->lane_count = pipe_ctx->link_config.dp_link_settings.lane_count; + dp_link_info->link_rate = pipe_ctx->link_config.dp_link_settings.link_rate; + + link_bw_kbps = dc_fixpt_from_int(dc_link_bandwidth_kbps(link, + &pipe_ctx->link_config.dp_link_settings)); + + /* For audio stream calculations, the video stream should not include FEC or SSC + * in order to get the most pessimistic values. + */ + if (dp_link_info->encoding == DP_8b_10b_ENCODING && + link->dc->link_srv->dp_is_fec_supported(link)) { + link_bw_kbps = dc_fixpt_mul(link_bw_kbps, + dc_fixpt_from_fraction(100, DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100)); + } else if (dp_link_info->encoding == DP_128b_132b_ENCODING) { + link_bw_kbps = dc_fixpt_mul(link_bw_kbps, + dc_fixpt_from_fraction(10000, 9975)); /* 99.75% SSC overhead*/ + } + + dp_link_info->link_bandwidth_kbps = dc_fixpt_floor(link_bw_kbps); + + /* HW minimum for 128b/132b HBlank is 4 frame symbols. + * TODO: Plumb the actual programmed HBlank min symbol width to here. 
+ */ + if (dp_link_info->encoding == DP_128b_132b_ENCODING) + dp_link_info->hblank_min_symbol_width = 4; + else + dp_link_info->hblank_min_symbol_width = 0; +} + static void build_audio_output( struct dc_state *state, const struct pipe_ctx *pipe_ctx, @@ -1337,6 +1366,15 @@ static void build_audio_output( audio_output->crtc_info.calculated_pixel_clock_100Hz = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz; + audio_output->crtc_info.pixel_encoding = + stream->timing.pixel_encoding; + + audio_output->crtc_info.dsc_bits_per_pixel = + stream->timing.dsc_cfg.bits_per_pixel; + + audio_output->crtc_info.dsc_num_slices = + stream->timing.dsc_cfg.num_slices_h; + /*for HDMI, audio ACR is with deep color ratio factor*/ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) && audio_output->crtc_info.requested_pixel_clock_100Hz == @@ -1353,7 +1391,7 @@ static void build_audio_output( if (state->clk_mgr && (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) { - audio_output->pll_info.dp_dto_source_clock_in_khz = + audio_output->pll_info.audio_dto_source_clock_in_khz = state->clk_mgr->funcs->get_dp_ref_clk_frequency( state->clk_mgr); } @@ -1370,6 +1408,10 @@ static void build_audio_output( audio_output->pll_info.ss_percentage = pipe_ctx->pll_settings.ss_percentage; + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) { + populate_audio_dp_link_info(pipe_ctx, &audio_output->dp_link_info); + } } static void program_scaler(const struct dc *dc, @@ -1475,7 +1517,7 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } -static enum dc_status apply_single_controller_ctx_to_hw( +enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -1495,7 +1537,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( } if (pipe_ctx->stream_res.audio != NULL) { - struct audio_output audio_output; + struct audio_output audio_output = {0}; build_audio_output(context, pipe_ctx, &audio_output); @@ -1506,7 +1548,8 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.audio, pipe_ctx->stream->signal, &audio_output.crtc_info, - &pipe_ctx->stream->audio_info); + &pipe_ctx->stream->audio_info, + &audio_output.dp_link_info); } /* make sure no pipes syncd to the pipe being enabled */ @@ -1596,7 +1639,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( * is constructed with the same sink). Make sure not to override * and link programming on the main. 
*/ - if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false; } @@ -1684,7 +1727,7 @@ static void disable_vga_and_power_gate_all_controllers( true); dc->current_state->res_ctx.pipe_ctx[i].pipe_idx = i; - dc->hwss.disable_plane(dc, + dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); } } @@ -2124,7 +2167,8 @@ static void dce110_reset_hw_ctx_wrap( BREAK_TO_DEBUGGER(); } pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg); - pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0; + if (dc_is_hdmi_tmds_signal(pipe_ctx_old->stream->signal)) + pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0; pipe_ctx_old->plane_res.mi->funcs->free_mem_input( pipe_ctx_old->plane_res.mi, dc->current_state->stream_count); @@ -2133,7 +2177,7 @@ static void dce110_reset_hw_ctx_wrap( old_clk)) old_clk->funcs->cs_power_down(old_clk); - dc->hwss.disable_plane(dc, pipe_ctx_old); + dc->hwss.disable_plane(dc, dc->current_state, pipe_ctx_old); pipe_ctx_old->stream = NULL; } @@ -2144,7 +2188,7 @@ static void dce110_setup_audio_dto( struct dc *dc, struct dc_state *context) { - int i; + unsigned int i; /* program audio wall clock. use HDMI as clock source if HDMI * audio active. Otherwise, use DP as clock source @@ -2216,7 +2260,7 @@ static void dce110_setup_audio_dto( continue; if (pipe_ctx->stream_res.audio != NULL) { - struct audio_output audio_output; + struct audio_output audio_output = {0}; build_audio_output(context, pipe_ctx, &audio_output); @@ -2231,6 +2275,19 @@ static void dce110_setup_audio_dto( } } +static bool dce110_is_hpo_enabled(struct dc_state *context) +{ + int i; + + for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) { + if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) { + return true; + } + } + + return false; +} + enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context) @@ -2239,6 +2296,8 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status status; int i; + bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state); + bool is_hpo_enabled = dce110_is_hpo_enabled(context); /* reset syncd pipes from disabled pipes */ if (dc->config.use_pipe_ctx_sync_logic) @@ -2281,6 +2340,10 @@ enum dc_status dce110_apply_ctx_to_hw( dce110_setup_audio_dto(dc, context); + if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled) { + dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled); + } + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -2300,7 +2363,7 @@ enum dc_status dce110_apply_ctx_to_hw( if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) continue; - status = apply_single_controller_ctx_to_hw( + status = dce110_apply_single_controller_ctx_to_hw( pipe_ctx, context, dc); @@ -2497,6 +2560,7 @@ static bool wait_for_reset_trigger_to_occur( /* Enable timing synchronization for a group of Timing Generators. 
*/ static void dce110_enable_timing_synchronization( struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]) @@ -2590,6 +2654,7 @@ static void init_hw(struct dc *dc) struct dmcu *dmcu; struct dce_hwseq *hws = dc->hwseq; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; bp = dc->ctx->dc_bios; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2639,13 +2704,15 @@ static void init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } abm = dc->res_pool->abm; if (abm != NULL) - abm->funcs->abm_init(abm, backlight); + abm->funcs->abm_init(abm, backlight, user_level); dmcu = dc->res_pool->dmcu; if (dmcu != NULL && abm != NULL) @@ -2842,7 +2909,7 @@ static void dce110_post_unlock_program_front_end( { } -static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) +static void dce110_power_down_fe(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; int fe_idx = pipe_ctx->plane_res.mi ? @@ -3115,7 +3182,8 @@ void dce110_disable_link_output(struct dc_link *link, struct dmcu *dmcu = dc->res_pool->dmcu; if (signal == SIGNAL_TYPE_EDP && - link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control && + !link->skip_implict_edp_power_control) link->dc->hwss.edp_backlight_control(link, false); else if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 08028a1779ae..ed3cc3648e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_single_controller_ctx_to_hw( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); void dce110_enable_stream(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 2b8b8366538e..0c4aef8ffe2c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -56,6 +56,7 @@ #include "dc_trace.h" #include "dce/dmub_outbox.h" #include "link.h" +#include "dc_state_priv.h" #define DC_LOGGER \ dc_logger @@ -115,7 +116,7 @@ void dcn10_lock_all_pipes(struct dc *dc, !pipe_ctx->stream || (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) || !tg->funcs->is_tg_enabled(tg) || - pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) continue; if (lock) @@ -282,33 +283,33 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) DTN_INFO("\n"); } -void dcn10_log_hw_state(struct dc *dc, - struct dc_log_buffer_ctx *log_ctx) +static void dcn10_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; int i; - DTN_INFO_BEGIN(); - - dcn10_log_hubbub_state(dc, log_ctx); - - dcn10_log_hubp_states(dc, log_ctx); - - DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode" - " GAMUT 
mode C11 C12 C13 C14 C21 C22 C23 C24 " - "C31 C32 C33 C34\n"); + DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode" + " GAMUT adjust " + "C11 C12 C13 C14 " + "C21 C22 C23 C24 " + "C31 C32 C33 C34 \n"); for (i = 0; i < pool->pipe_count; i++) { struct dpp *dpp = pool->dpps[i]; struct dcn_dpp_state s = {0}; dpp->funcs->dpp_read_state(dpp, &s); + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); if (!s.is_enabled) continue; - DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s" - "%8x %08xh %08xh %08xh %08xh %08xh %08xh", + DTN_INFO("[%2d]: %11xh %11s %9s %9s" + " %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", dpp->inst, s.igam_input_format, (s.igam_lut_mode == 0) ? "BypassFixed" : @@ -328,19 +329,45 @@ void dcn10_log_hw_state(struct dc *dc, ((s.rgam_lut_mode == 3) ? "RAM" : ((s.rgam_lut_mode == 4) ? "RAM" : "Unknown")))), - s.gamut_remap_mode, - s.gamut_remap_c11_c12, - s.gamut_remap_c13_c14, - s.gamut_remap_c21_c22, - s.gamut_remap_c23_c24, - s.gamut_remap_c31_c32, - s.gamut_remap_c33_c34); + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : + "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); DTN_INFO("\n"); } DTN_INFO("\n"); + DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d" + " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" + " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d" + " blnd_lut:%d oscs:%d\n\n", + dc->caps.color.dpp.input_lut_shared, + dc->caps.color.dpp.icsc, + dc->caps.color.dpp.dgam_ram, + dc->caps.color.dpp.dgam_rom_caps.srgb, + dc->caps.color.dpp.dgam_rom_caps.bt2020, + dc->caps.color.dpp.dgam_rom_caps.gamma2_2, + dc->caps.color.dpp.dgam_rom_caps.pq, + dc->caps.color.dpp.dgam_rom_caps.hlg, + dc->caps.color.dpp.post_csc, + dc->caps.color.dpp.gamma_corr, + dc->caps.color.dpp.dgam_rom_for_yuv, + dc->caps.color.dpp.hw_3d_lut, + dc->caps.color.dpp.ogam_ram, + dc->caps.color.dpp.ocsc); DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n"); - for (i = 0; i < pool->pipe_count; i++) { + for (i = 0; i < pool->mpcc_count; i++) { struct mpcc_state s = {0}; pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); @@ -351,6 +378,30 @@ void dcn10_log_hw_state(struct dc *dc, s.idle); } DTN_INFO("\n"); + DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", + dc->caps.color.mpc.gamut_remap, + dc->caps.color.mpc.num_3dluts, + dc->caps.color.mpc.ogam_ram, + dc->caps.color.mpc.ocsc); +} + +void dcn10_log_hw_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) +{ + struct dc_context *dc_ctx = dc->ctx; + struct resource_pool *pool = dc->res_pool; + int i; + + DTN_INFO_BEGIN(); + + dcn10_log_hubbub_state(dc, log_ctx); + + dcn10_log_hubp_states(dc, log_ctx); + + if (dc->hwss.log_color_state) + dc->hwss.log_color_state(dc, log_ctx); + else + dcn10_log_color_state(dc, log_ctx); DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n"); @@ 
-1057,7 +1108,8 @@ static void dcn10_reset_back_end_for_pipe( if (pipe_ctx->stream_res.tg->funcs->set_drr) pipe_ctx->stream_res.tg->funcs->set_drr( pipe_ctx->stream_res.tg, NULL); - pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0; + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0; } for (i = 0; i < dc->res_pool->pipe_count; i++) @@ -1180,7 +1232,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disconnect(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -1200,7 +1254,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle, // so don't wait for MPCC_IDLE in the programming sequence - if (opp != NULL && !pipe_ctx->plane_state->is_phantom) + if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; dc->optimized_required = true; @@ -1290,7 +1344,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state = NULL; } -void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); @@ -1312,6 +1366,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) struct dce_hwseq *hws = dc->hwseq; struct hubbub *hubbub = dc->res_pool->hubbub; bool can_apply_seamless_boot = false; + bool tg_enabled[MAX_PIPES] = {false}; for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) { @@ -1393,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) // requesting data while in PSR. 
tg->funcs->tg_init(tg); hubp->power_gated = true; + tg_enabled[i] = true; continue; } @@ -1416,12 +1472,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; @@ -1434,6 +1490,20 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) tg->funcs->tg_init(tg); } + /* Clean up MPC tree */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (tg_enabled[i]) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) { + int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id; + + if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id])) + dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; + } + } + } + } + /* Power gate DSCs */ if (hws->funcs.dsc_pg_control != NULL) { uint32_t num_opps = 0; @@ -1486,6 +1556,7 @@ void dcn10_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; bool is_optimized_init_done = false; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) @@ -1583,12 +1654,14 @@ void dcn10_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } if (abm != NULL) - abm->funcs->abm_init(abm, backlight); + abm->funcs->abm_init(abm, backlight, user_level); if (dmcu != NULL && !dmcu->auto_load_dmcu) dmcu->funcs->dmcu_init(dmcu); @@ -1756,14 +1829,12 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, if (dpp_base == NULL) return false; - if (plane_state->in_transfer_func) - tf = plane_state->in_transfer_func; + tf = &plane_state->in_transfer_func; - if (plane_state->gamma_correction && - !dpp_base->ctx->dc->debug.always_use_regamma - && !plane_state->gamma_correction->is_identity + if (!dpp_base->ctx->dc->debug.always_use_regamma + && !plane_state->gamma_correction.is_identity && dce_use_lut(plane_state->format)) - dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); + dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction); if (tf == NULL) dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS); @@ -1804,7 +1875,7 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, #define MAX_NUM_HW_POINTS 0x200 static void log_tf(struct dc_context *ctx, - struct dc_transfer_func *tf, uint32_t hw_points_num) + const struct dc_transfer_func *tf, uint32_t hw_points_num) { // DC_LOG_GAMMA is default logging of all hw points // DC_LOG_ALL_GAMMA logs all points, not only hw points @@ -1833,21 +1904,23 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, { struct dpp *dpp = pipe_ctx->plane_res.dpp; + if (!stream) + return false; + if (dpp == NULL) return false; dpp->regamma_params.hw_points_num = 
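The init_pipes() changes above first record, in tg_enabled[], which timing generators stay up for seamless boot, then prune MPC tree links whose bottom MPCC belongs to a pipe that did not keep its TG enabled, so later MPC programming starts from a consistent tree. A condensed reading of that cleanup loop, assuming mpc_tree_params is the struct mpc_tree the OPP exposes (comments added; bounds checks as in the hunk):

/* Condensed from the "Clean up MPC tree" hunk above. */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
	struct mpc_tree *tree = &dc->res_pool->opps[i]->mpc_tree_params;
	int bot_id;

	if (!tg_enabled[i] || !tree->opp_list || !tree->opp_list->mpcc_bot)
		continue;

	bot_id = tree->opp_list->mpcc_bot->mpcc_id;
	/* guard the tg_enabled[] index before consulting it */
	if (bot_id < MAX_MPCC && bot_id < MAX_PIPES && !tg_enabled[bot_id])
		tree->opp_list = NULL;	/* drop the stale link */
}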
GAMMA_HW_POINTS_NUM; - if (stream->out_transfer_func && - stream->out_transfer_func->type == TF_TYPE_PREDEFINED && - stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED && + stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB) dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB); /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full * update. */ else if (cm_helper_translate_curve_to_hw_format(dc->ctx, - stream->out_transfer_func, + &stream->out_transfer_func, &dpp->regamma_params, false)) { dpp->funcs->dpp_program_regamma_pwl( dpp, @@ -1855,10 +1928,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, } else dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS); - if (stream != NULL && stream->ctx != NULL && - stream->out_transfer_func != NULL) { + if (stream->ctx) { log_tf(stream->ctx, - stream->out_transfer_func, + &stream->out_transfer_func, dpp->regamma_params.hw_points_num); } @@ -2113,7 +2185,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, struct dc_crtc_timing *hw_crtc_timing; uint64_t phase[MAX_PIPES]; uint64_t modulo[MAX_PIPES]; - unsigned int pclk; + unsigned int pclk = 0; uint32_t embedded_pix_clk_100hz; uint16_t embedded_h_total; @@ -2204,7 +2276,7 @@ void dcn10_enable_vblanks_synchronization( struct dc_context *dc_ctx = dc->ctx; struct output_pixel_processor *opp; struct timing_generator *tg; - int i, width, height, master; + int i, width = 0, height = 0, master; DC_LOGGER_INIT(dc_ctx->logger); @@ -2262,6 +2334,7 @@ void dcn10_enable_vblanks_synchronization( void dcn10_enable_timing_synchronization( struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]) @@ -2269,14 +2342,14 @@ void dcn10_enable_timing_synchronization( struct dc_context *dc_ctx = dc->ctx; struct output_pixel_processor *opp; struct timing_generator *tg; - int i, width, height; + int i, width = 0, height = 0; DC_LOGGER_INIT(dc_ctx->logger); DC_SYNC_INFO("Setting up OTG reset trigger\n"); for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; opp = grouped_pipes[i]->stream_res.opp; @@ -2296,14 +2369,14 @@ void dcn10_enable_timing_synchronization( if (grouped_pipes[i]->stream == NULL) continue; - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; grouped_pipes[i]->stream->vblank_synchronized = false; } for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger( @@ -2317,11 +2390,11 @@ void dcn10_enable_timing_synchronization( * synchronized. Look at last pipe programmed to reset. 
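Another theme of this diff, visible in the hunk above and again in the DCN20/DCN30 files below: dc_transfer_func objects move from separately allocated pointers to members embedded by value in dc_stream_state and dc_plane_state. The NULL checks disappear and call sites take the member's address wherever a helper still expects a pointer. The before/after shape, taken from this hunk (bodies elided):

/* Before: heap pointer, NULL-checked on every use */
if (stream->out_transfer_func &&
    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
	/* ... */;

/* After: embedded by value, always present; take the address when a
 * helper still wants a pointer */
if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
    stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
	/* ... */;
else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
						&stream->out_transfer_func,
						&dpp->regamma_params, false))
	/* ... */;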
*/ - if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM) + if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM) wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg); for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger( @@ -2329,7 +2402,7 @@ void dcn10_enable_timing_synchronization( } for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; opp = grouped_pipes[i]->stream_res.opp; @@ -3021,7 +3094,7 @@ void dcn10_post_unlock_program_front_end( for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) - dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) { @@ -3417,7 +3490,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz, .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert, .rotation = pipe_ctx->plane_state->rotation, - .mirror = pipe_ctx->plane_state->horizontal_mirror + .mirror = pipe_ctx->plane_state->horizontal_mirror, + .stream = pipe_ctx->stream, }; bool pipe_split_on = false; bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) || diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h index ef6d56da417c..bc5dd68a2408 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h @@ -75,7 +75,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn10_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context); -void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn10_lock_all_pipes( struct dc *dc, struct dc_state *context, @@ -108,13 +108,16 @@ void dcn10_power_down_on_boot(struct dc *dc); enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context); -void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_plane_atomic_disconnect(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx); void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data); void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx); void dce110_power_down(struct dc *dc); void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context); void dcn10_enable_timing_synchronization( struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c index a5bdac79a744..a5bdac79a744 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h index 8c6fd7b844a4..8c6fd7b844a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 608221b0dd5d..7d833fa6dd77 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -55,6 +55,7 @@ #include "inc/link_enc_cfg.h" #include "link_hwss.h" #include "link.h" +#include "dc_state_priv.h" #define DC_LOGGER \ dc_logger @@ -70,6 +71,112 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name +void dcn20_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) +{ + struct dc_context *dc_ctx = dc->ctx; + struct resource_pool *pool = dc->res_pool; + int i; + + DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth" + " 3DLUT size RGAM mode GAMUT adjust " + "C11 C12 C13 C14 " + "C21 C22 C23 C24 " + "C31 C32 C33 C34 \n"); + + for (i = 0; i < pool->pipe_count; i++) { + struct dpp *dpp = pool->dpps[i]; + struct dcn_dpp_state s = {0}; + + dpp->funcs->dpp_read_state(dpp, &s); + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + + if (!s.is_enabled) + continue; + + DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", + dpp->inst, + (s.dgam_lut_mode == 0) ? "Bypass" : + ((s.dgam_lut_mode == 1) ? "sRGB" : + ((s.dgam_lut_mode == 2) ? "Ycc" : + ((s.dgam_lut_mode == 3) ? "RAM" : + ((s.dgam_lut_mode == 4) ? "RAM" : + "Unknown")))), + (s.shaper_lut_mode == 1) ? "RAM A" : + ((s.shaper_lut_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_mode == 1) ? "RAM A" : + ((s.lut3d_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", + (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", + (s.rgam_lut_mode == 1) ? "RAM A" : + ((s.rgam_lut_mode == 2) ? "RAM B" : "Bypass"), + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ?
"HW" : + "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); + DTN_INFO("\n"); + } + DTN_INFO("\n"); + DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d" + " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" + " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d" + " blnd_lut:%d oscs:%d\n\n", + dc->caps.color.dpp.input_lut_shared, + dc->caps.color.dpp.icsc, + dc->caps.color.dpp.dgam_ram, + dc->caps.color.dpp.dgam_rom_caps.srgb, + dc->caps.color.dpp.dgam_rom_caps.bt2020, + dc->caps.color.dpp.dgam_rom_caps.gamma2_2, + dc->caps.color.dpp.dgam_rom_caps.pq, + dc->caps.color.dpp.dgam_rom_caps.hlg, + dc->caps.color.dpp.post_csc, + dc->caps.color.dpp.gamma_corr, + dc->caps.color.dpp.dgam_rom_for_yuv, + dc->caps.color.dpp.hw_3d_lut, + dc->caps.color.dpp.ogam_ram, + dc->caps.color.dpp.ocsc); + + DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE" + " OGAM mode\n"); + + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %9s\n", + i, s.opp_id, s.dpp_id, s.bot_mpcc_id, + s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, + s.idle, + (s.rgam_mode == 1) ? "RAM A" : + ((s.rgam_mode == 2) ? "RAM B" : + "Bypass")); + } + DTN_INFO("\n"); + DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", + dc->caps.color.mpc.gamut_remap, + dc->caps.color.mpc.num_3dluts, + dc->caps.color.mpc.ogam_ram, + dc->caps.color.mpc.ocsc); +} + + static int find_free_gsl_group(const struct dc *dc) { if (dc->res_pool->gsl_groups.gsl_0 == 0) @@ -296,7 +403,7 @@ void dcn20_init_blank( struct output_pixel_processor *opp = NULL; struct output_pixel_processor *bottom_opp = NULL; uint32_t num_opps, opp_id_src0, opp_id_src1; - uint32_t otg_active_width, otg_active_height; + uint32_t otg_active_width = 0, otg_active_height = 0; /* program opp dpg blank color */ color_space = COLOR_SPACE_SRGB; @@ -623,9 +730,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) } -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { - bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; + bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM; struct timing_generator *tg = is_phantom ? 
pipe_ctx->stream_res.tg : NULL; DC_LOGGER_INIT(dc->ctx->logger); @@ -766,6 +873,22 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } + if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { + struct dccg *dccg = dc->res_pool->dccg; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct dtbclk_dto_params dto_params = {0}; + + if (dccg->funcs->set_dtbclk_p_src) + dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst); + + dto_params.otg_inst = tg->inst; + dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; + dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); + dto_params.timing = &pipe_ctx->stream->timing; + dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } + if (dc_is_hdmi_tmds_signal(stream->signal)) { stream->link->phy_state.symclk_ref_cnts.otg = 1; if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) @@ -847,27 +970,11 @@ enum dc_status dcn20_enable_stream_timing( /* TODO enable stream if timing changed */ /* TODO unblank stream if DP */ - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) { if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable) pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg); } - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - struct dccg *dccg = dc->res_pool->dccg; - struct timing_generator *tg = pipe_ctx->stream_res.tg; - struct dtbclk_dto_params dto_params = {0}; - - if (dccg->funcs->set_dtbclk_p_src) - dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst); - - dto_params.otg_inst = tg->inst; - dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; - dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); - dto_params.timing = &pipe_ctx->stream->timing; - dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); - } - return DC_OK; } @@ -904,7 +1011,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; /* * program OGAM only for the top pipe * if there is a pipe split then fix diagnostic is required: @@ -915,19 +1022,19 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, if (mpc->funcs->power_on_mpc_mem_pwr) mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true); if (pipe_ctx->top_pipe == NULL - && mpc->funcs->set_output_gamma && stream->out_transfer_func) { - if (stream->out_transfer_func->type == TF_TYPE_HWPWL) - params = &stream->out_transfer_func->pwl; - else if (pipe_ctx->stream->out_transfer_func->type == + && mpc->funcs->set_output_gamma) { + if (stream->out_transfer_func.type == TF_TYPE_HWPWL) + params = &stream->out_transfer_func.pwl; + else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && cm_helper_translate_curve_to_hw_format(dc->ctx, - stream->out_transfer_func, + &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* * there is no ROM */ - if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } /* 
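The first hunk in dcn20_enable_stream_timing() above moves DTB-clock DTO programming for DP 128b/132b signals to the top of the function, so the DTO and its OTG source are configured before the timing generator is brought up rather than after the phantom-CRTC handling; the same block is similarly reordered in dcn20_enable_stream() further down. The parameter setup, reproduced from the hunk with comments added:

struct dtbclk_dto_params dto_params = {0};

if (dccg->funcs->set_dtbclk_p_src)
	/* route DTBCLK0 to the OTG instance that drives this stream */
	dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst);

dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; /* 100 Hz units -> kHz */
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz =
	dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);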
@@ -943,17 +1050,15 @@ bool dcn20_set_blend_lut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; bool result = true; - struct pwl_params *blend_lut = NULL; - - if (plane_state->blend_tf) { - if (plane_state->blend_tf->type == TF_TYPE_HWPWL) - blend_lut = &plane_state->blend_tf->pwl; - else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - plane_state->blend_tf, - &dpp_base->regamma_params, false); - blend_lut = &dpp_base->regamma_params; - } + const struct pwl_params *blend_lut = NULL; + + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + blend_lut = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->blend_tf, + &dpp_base->regamma_params, false); + blend_lut = &dpp_base->regamma_params; } result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); @@ -965,24 +1070,21 @@ bool dcn20_set_shaper_3dlut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; bool result = true; - struct pwl_params *shaper_lut = NULL; - - if (plane_state->in_shaper_func) { - if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) - shaper_lut = &plane_state->in_shaper_func->pwl; - else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - plane_state->in_shaper_func, - &dpp_base->shaper_params, true); - shaper_lut = &dpp_base->shaper_params; - } + const struct pwl_params *shaper_lut = NULL; + + if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL) + shaper_lut = &plane_state->in_shaper_func.pwl; + else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->in_shaper_func, + &dpp_base->shaper_params, true); + shaper_lut = &dpp_base->shaper_params; } result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut); - if (plane_state->lut3d_func && - plane_state->lut3d_func->state.bits.initialized == 1) + if (plane_state->lut3d_func.state.bits.initialized == 1) result = dpp_base->funcs->dpp_program_3dlut(dpp_base, - &plane_state->lut3d_func->lut_3d); + &plane_state->lut3d_func.lut_3d); else result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL); @@ -1005,15 +1107,7 @@ bool dcn20_set_input_transfer_func(struct dc *dc, hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state); hws->funcs.set_blend_lut(pipe_ctx, plane_state); - if (plane_state->in_transfer_func) - tf = plane_state->in_transfer_func; - - - if (tf == NULL) { - dpp_base->funcs->dpp_set_degamma(dpp_base, - IPP_DEGAMMA_MODE_BYPASS); - return true; - } + tf = &plane_state->in_transfer_func; if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS) use_degamma_ram = true; @@ -1368,8 +1462,14 @@ void dcn20_pipe_control_lock( } } -static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) +static void dcn20_detect_pipe_changes(struct dc_state *old_state, + struct dc_state *new_state, + struct pipe_ctx *old_pipe, + struct pipe_ctx *new_pipe) { + bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM; + bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM; + new_pipe->update_flags.raw = 0; /* If non-phantom pipe is being transitioned to a phantom pipe, @@ -1379,12 +1479,17 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx * be different). 
The post_unlock sequence will set the correct * update flags to enable the phantom pipe. */ - if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom && - new_pipe->plane_state && new_pipe->plane_state->is_phantom) { + if (old_pipe->plane_state && !old_is_phantom && + new_pipe->plane_state && new_is_phantom) { new_pipe->update_flags.bits.disable = 1; return; } + if (resource_is_pipe_type(new_pipe, OTG_MASTER) && + resource_is_odm_topology_changed(new_pipe, old_pipe)) + /* Detect odm changes */ + new_pipe->update_flags.bits.odm = 1; + /* Exit on unchanged, unused pipe */ if (!old_pipe->plane_state && !new_pipe->plane_state) return; @@ -1400,6 +1505,10 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx new_pipe->update_flags.bits.scaler = 1; new_pipe->update_flags.bits.viewport = 1; new_pipe->update_flags.bits.det_size = 1; + if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE && + new_pipe->stream_res.test_pattern_params.width != 0 && + new_pipe->stream_res.test_pattern_params.height != 0) + new_pipe->update_flags.bits.test_pattern_changed = 1; if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { new_pipe->update_flags.bits.odm = 1; new_pipe->update_flags.bits.global_sync = 1; @@ -1412,14 +1521,14 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx * The remove-add sequence of the phantom pipe always results in the pipe * being blanked in enable_stream_timing (DPG). */ - if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM) new_pipe->update_flags.bits.enable = 1; /* Phantom pipes are effectively disabled, if the pipe was previously phantom * we have to enable */ - if (old_pipe->plane_state && old_pipe->plane_state->is_phantom && - new_pipe->plane_state && !new_pipe->plane_state->is_phantom) + if (old_pipe->plane_state && old_is_phantom && + new_pipe->plane_state && !new_is_phantom) new_pipe->update_flags.bits.enable = 1; if (old_pipe->plane_state && !new_pipe->plane_state) { @@ -1434,10 +1543,6 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx /* Detect top pipe only changes */ if (resource_is_pipe_type(new_pipe, OTG_MASTER)) { - /* Detect odm changes */ - if (resource_is_odm_topology_changed(new_pipe, old_pipe)) - new_pipe->update_flags.bits.odm = 1; - /* Detect global sync changes */ if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start @@ -1556,6 +1661,7 @@ static void dcn20_update_dchubp_dpp( struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dccg *dccg = dc->res_pool->dccg; bool viewport_changed = false; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx); if (pipe_ctx->update_flags.bits.dppclk) dpp->funcs->dpp_dppclk_control(dpp, false, true); @@ -1621,6 +1727,7 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.scaler || plane_state->update_flags.bits.scaling_change || plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.clip_size_change || plane_state->update_flags.bits.per_pixel_alpha_change || pipe_ctx->stream->update_flags.bits.scaling) { pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; @@ -1633,6 +1740,7 @@ static void dcn20_update_dchubp_dpp( if 
(pipe_ctx->update_flags.bits.viewport || (context == dc->current_state && plane_state->update_flags.bits.position_change) || (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) || (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { hubp->funcs->mem_program_viewport( @@ -1701,7 +1809,7 @@ static void dcn20_update_dchubp_dpp( pipe_ctx->update_flags.bits.plane_changed || plane_state->update_flags.bits.addr_update) { if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { + pipe_mall_type == SUBVP_MAIN) { union block_sequence_params params; params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv; @@ -1715,7 +1823,7 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.enable) hubp->funcs->set_blank(hubp, false); /* If the stream paired with this plane is phantom, the plane is also phantom */ - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM + if (pipe_ctx->stream && pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable) hubp->funcs->phantom_hubp_post_enable(hubp); } @@ -1773,7 +1881,7 @@ static void dcn20_program_pipe( pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width); - if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); pipe_ctx->stream_res.tg->funcs->set_vtg_params( @@ -1796,9 +1904,11 @@ static void dcn20_program_pipe( dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub); } - if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size) - dc->res_pool->hubbub->funcs->program_det_size( - dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); + if (pipe_ctx->update_flags.bits.det_size) { + if (dc->res_pool->hubbub->funcs->program_det_size) + dc->res_pool->hubbub->funcs->program_det_size( + dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); + } if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) dcn20_update_dchubp_dpp(dc, pipe_ctx, context); @@ -1877,26 +1987,43 @@ void dcn20_program_front_end_for_ctx( int i; struct dce_hwseq *hws = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); + unsigned int prev_hubp_count = 0; + unsigned int hubp_count = 0; + struct pipe_ctx *pipe; if (resource_is_pipe_topology_changed(dc->current_state, context)) resource_log_pipe_topology_update(dc, context); if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) { - ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); + if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) { + ASSERT(!pipe->plane_state->triplebuffer_flips); /*turn off triple buffer for full update*/ dc->hwss.program_triplebuffer( - dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); + dc, pipe, pipe->plane_state->triplebuffer_flips); } } } + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if 
(dc->current_state->res_ctx.pipe_ctx[i].plane_state) + prev_hubp_count++; + if (context->res_ctx.pipe_ctx[i].plane_state) + hubp_count++; + } + + if (prev_hubp_count == 0 && hubp_count > 0) { + if (dc->res_pool->hubbub->funcs->force_pstate_change_control) + dc->res_pool->hubbub->funcs->force_pstate_change_control( + dc->res_pool->hubbub, true, false); + udelay(500); + } + /* Set pipe update flags and lock pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) - dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], + dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i], &context->res_ctx.pipe_ctx[i]); /* When disabling phantom pipes, turn on phantom OTG first (so we can get double @@ -1906,15 +2033,16 @@ void dcn20_program_front_end_for_ctx( struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && - dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; if (tg->funcs->enable_crtc) { if (dc->hwss.blank_phantom) { int main_pipe_width, main_pipe_height; + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream); - main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width; - main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height; + main_pipe_width = phantom_stream->dst.width; + main_pipe_height = phantom_stream->dst.height; dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); } tg->funcs->enable_crtc(tg); @@ -1929,7 +2057,6 @@ void dcn20_program_front_end_for_ctx( && context->res_ctx.pipe_ctx[i].stream) hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); - /* Disconnect mpcc */ for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable @@ -1942,19 +2069,31 @@ void dcn20_program_front_end_for_ctx( * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with * DET allocation. 
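The counting pass above compares how many HUBPs carry a plane in the current state versus the new one; on the zero-to-nonzero transition (first plane lighting up) it asks HUBBUB to force p-state change control before the pipes are locked and programmed, with a 500 us settle delay, and the matching release (false, false) is added in dcn20_post_unlock_program_front_end() below. Condensed from the hunk; the exact semantics of the two boolean flags live in the per-ASIC hubbub implementation:

if (prev_hubp_count == 0 && hubp_count > 0) {
	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, true, false);
	udelay(500);	/* give the forced state time to take effect */
}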
*/ - if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable || - (context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom))) - hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); - hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable || + (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) { + if (hubbub->funcs->program_det_size) + hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); + } + hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } + /* update ODM for blanked OTG master pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + if (resource_is_pipe_type(pipe, OTG_MASTER) && + !resource_is_pipe_type(pipe, DPP_PIPE) && + pipe->update_flags.bits.odm && + hws->funcs.update_odm) + hws->funcs.update_odm(dc, context, pipe); + } + /* * Program all updated pipes, order matters for mpcc setup. Start with * top pipe and program all pipes that follow in order */ for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + pipe = &context->res_ctx.pipe_ctx[i]; if (pipe->plane_state && !pipe->top_pipe) { while (pipe) { @@ -1968,7 +2107,7 @@ void dcn20_program_front_end_for_ctx( * but the MPO still exists until the double buffered update of the main pipe so we * will get a frame of underflow if the phantom pipe is programmed here. */ - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) dcn20_program_pipe(dc, pipe, context); } @@ -1993,17 +2132,6 @@ void dcn20_program_front_end_for_ctx( context->stream_status[0].plane_count > 1) { pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp); } - - /* when dynamic ODM is active, pipes must be reconfigured when all planes are - * disabled, as some transitions will leave software and hardware state - * mismatched. 
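Because pipe-change detection now flags ODM topology changes before the "unchanged, unused pipe" early-out (see the dcn20_detect_pipe_changes() hunk above), an OTG master with no plane attached can still carry update_flags.bits.odm. The new pass above programs ODM for exactly those blanked masters, replacing the narrower dynamic-ODM workaround removed a little further down. From the hunk, with a comment on the pipe test:

for (i = 0; i < dc->res_pool->pipe_count; i++) {
	pipe = &context->res_ctx.pipe_ctx[i];
	if (resource_is_pipe_type(pipe, OTG_MASTER) &&
	    !resource_is_pipe_type(pipe, DPP_PIPE) &&	/* blanked: no DPP */
	    pipe->update_flags.bits.odm &&
	    hws->funcs.update_odm)
		hws->funcs.update_odm(dc, context, pipe);
}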
- */ - if (dc->debug.enable_single_display_2to1_odm_policy && - pipe->stream && - pipe->update_flags.bits.disable && - !pipe->prev_odm_pipe && - hws->funcs.update_odm) - hws->funcs.update_odm(dc, context, pipe); } } @@ -2018,7 +2146,7 @@ void dcn20_post_unlock_program_front_end( for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) - dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); /* * If we are enabling a pipe, we need to wait for pending clear as this is a critical @@ -2030,7 +2158,7 @@ void dcn20_post_unlock_program_front_end( struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // Don't check flip pending on phantom pipes if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable && - pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) { struct hubp *hubp = pipe->plane_res.hubp; int j = 0; for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us @@ -2039,6 +2167,10 @@ void dcn20_post_unlock_program_front_end( } } + if (dc->res_pool->hubbub->funcs->force_pstate_change_control) + dc->res_pool->hubbub->funcs->force_pstate_change_control( + dc->res_pool->hubbub, false, false); + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -2049,7 +2181,7 @@ void dcn20_post_unlock_program_front_end( * programming sequence). */ while (pipe) { - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { /* When turning on the phantom pipe we want to run through the * entire enable sequence, so apply all the "enable" flags. 
*/ @@ -2119,7 +2251,7 @@ void dcn20_prepare_bandwidth( struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // At optimize don't restore the original watermark value - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; break; } @@ -2163,7 +2295,7 @@ void dcn20_optimize_bandwidth( struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // At optimize don't need to restore the original watermark value - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; break; } @@ -2197,7 +2329,8 @@ void dcn20_optimize_bandwidth( dc->clk_mgr, context, true); - if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) { + if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW && + !dc->debug.disable_extblankadj) { for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -2310,7 +2443,7 @@ bool dcn20_wait_for_blank_complete( int counter; for (counter = 0; counter < 1000; counter++) { - if (opp->funcs->dpg_is_blanked(opp)) + if (!opp->funcs->dpg_is_pending(opp)) break; udelay(100); @@ -2321,7 +2454,7 @@ bool dcn20_wait_for_blank_complete( return false; } - return true; + return opp->funcs->dpg_is_blanked(opp); } bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) @@ -2527,7 +2660,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) tg->funcs->setup_vertical_interrupt2(tg, start_line); } -static void dcn20_reset_back_end_for_pipe( +void dcn20_reset_back_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) @@ -2590,7 +2723,8 @@ static void dcn20_reset_back_end_for_pipe( * the case where the same symclk is shared across multiple otg * instances */ - link->phy_state.symclk_ref_cnts.otg = 0; + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link->phy_state.symclk_ref_cnts.otg = 0; if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) { link_hwss->disable_link_output(link, &pipe_ctx->link_res, pipe_ctx->stream->signal); @@ -2750,23 +2884,17 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - if (dc->hwseq->funcs.setup_hpo_hw_control) - dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true); - } - - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; - dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); - - phyd32clk = get_phyd32clk_src(link); - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); - dto_params.otg_inst = tg->inst; dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); dto_params.timing = &pipe_ctx->stream->timing; dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; + dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); + + phyd32clk = get_phyd32clk_src(link); + 
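The dcn20_wait_for_blank_complete() rewrite above separates "the DPG update is still pending" from "the DPG is actually blanked": the polling loop now waits for the double-buffered register update to latch (dpg_is_pending() going false), and the function finally reports the real blank status instead of returning true unconditionally. Condensed:

for (counter = 0; counter < 1000; counter++) {
	if (!opp->funcs->dpg_is_pending(opp))
		break;			/* double-buffered update has latched */
	udelay(100);
}
if (counter == 1000)
	return false;			/* timed out waiting for the latch */

return opp->funcs->dpg_is_blanked(opp);	/* report the actual blank state */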
dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); } else { if (dccg->funcs->enable_symclk_se) dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, @@ -2923,7 +3051,7 @@ void dcn20_fpga_init_hw(struct dc *dc) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; /*to do*/ - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ @@ -2940,7 +3068,7 @@ void dcn20_fpga_init_hw(struct dc *dc) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h index ab02e4e9c8c2..5c874f7b0683 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h @@ -28,6 +28,8 @@ #include "hw_sequencer_private.h" +void dcn20_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); bool dcn20_set_blend_lut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); bool dcn20_set_shaper_3dlut( @@ -52,7 +54,7 @@ void dcn20_program_output_csc(struct dc *dc, void dcn20_enable_stream(struct pipe_ctx *pipe_ctx); void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn20_disable_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -84,6 +86,10 @@ enum dc_status dcn20_enable_stream_timing( void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); void dcn20_init_blank( struct dc *dc, struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c index 884e3e323338..ef6488165b8f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c @@ -67,6 +67,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .setup_stereo = dcn10_setup_stereo, .set_avmute = dce110_set_avmute, .log_hw_state = dcn10_log_hw_state, + .log_color_state = dcn20_log_color_state, .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h index 12277797cd71..12277797cd71 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c index d3fe6092f50e..6be846635a79 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c @@ -167,7 
+167,7 @@ void dcn201_init_blank( struct tg_color black_color = {0}; struct output_pixel_processor *opp = NULL; uint32_t num_opps, opp_id_src0, opp_id_src1; - uint32_t otg_active_width, otg_active_height; + uint32_t otg_active_width = 0, otg_active_height = 0; /* program opp dpg blank color */ color_space = COLOR_SPACE_SRGB; @@ -320,7 +320,7 @@ void dcn201_init_hw(struct dc *dc) res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = res_pool->opps[i]; /*To do: number of MPCC != number of opp*/ - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ @@ -337,7 +337,7 @@ void dcn201_init_hw(struct dc *dc) for (i = 0; i < res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; @@ -369,7 +369,9 @@ void dcn201_init_hw(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn201_plane_atomic_disconnect(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h index 26cd62be6418..6a50a9894be6 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h @@ -33,7 +33,7 @@ void dcn201_init_hw(struct dc *dc); void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); -void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn201_plane_atomic_disconnect(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx); void dcn201_pipe_control_lock( diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c index a13bf6c9386e..a13bf6c9386e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h index 1168887b033d..1168887b033d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 467812cf3368..804be977ea47 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -66,7 +66,7 @@ static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *c int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { - struct dcn_hubbub_phys_addr_config config; + struct dcn_hubbub_phys_addr_config config = {0}; config.system_aperture.fb_top = pa_config->system_aperture.fb_top; config.system_aperture.fb_offset = 
pa_config->system_aperture.fb_offset; @@ -137,7 +137,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx) pipe_ctx->stream->dpms_off = true; } -static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst) +static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, + uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; @@ -147,12 +148,13 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio cmd.abm_set_pipe.header.type = DMUB_CMD__ABM; cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE; cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst; + cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst; cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option; cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst; cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -171,7 +173,7 @@ static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) @@ -179,7 +181,6 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) struct abm *abm = pipe_ctx->stream_res.abm; uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; - struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu; if (dmcu) { @@ -190,9 +191,13 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) if (abm && panel_cntl) { if (abm->funcs && abm->funcs->set_pipe_ex) { abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, - panel_cntl->inst); + panel_cntl->inst, panel_cntl->pwrseq_inst); } else { - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst); + dmub_abm_set_pipe(abm, + otg_inst, + SET_ABM_PIPE_IMMEDIATELY_DISABLE, + panel_cntl->inst, + panel_cntl->pwrseq_inst); } panel_cntl->funcs->store_backlight_level(panel_cntl); } @@ -201,21 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx) void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) { struct abm *abm = pipe_ctx->stream_res.abm; - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; + struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return; + + otg_inst = tg->inst; if (dmcu) { dce110_set_pipe(pipe_ctx); return; } - if (abm && panel_cntl) { - if (abm->funcs && abm->funcs->set_pipe_ex) { - abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); - } else { - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); - } + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); + } else { + 
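Two related ABM changes run through the dcn21 hunks here: the set-pipe command now carries the panel's power-sequencer instance (pwrseq_inst) alongside panel_inst all the way down to DMUB, and submission switches from dm_execute_dmub_cmd() to dc_wake_and_execute_dmub_cmd(), which brings DMCUB out of idle before touching the mailbox. The resulting command setup, reproduced from the hunk with comments added:

union dmub_rb_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
cmd.abm_set_pipe.abm_set_pipe_data.pwrseq_inst = pwrseq_inst;	/* new */
cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
cmd.abm_set_pipe.header.payload_bytes =
		sizeof(struct dmub_cmd_abm_set_pipe_data);

/* wakes DMCUB from idle before queueing, then waits for completion */
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);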
dmub_abm_set_pipe(abm, otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); } } @@ -225,26 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, { struct dc_context *dc = pipe_ctx->stream->ctx; struct abm *abm = pipe_ctx->stream_res.abm; + struct timing_generator *tg = pipe_ctx->stream_res.tg; struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; + uint32_t otg_inst; + + if (!abm || !tg || !panel_cntl) + return false; + + otg_inst = tg->inst; if (dc->dc->res_pool->dmcu) { dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp); return true; } - if (abm != NULL) { - uint32_t otg_inst = pipe_ctx->stream_res.tg->inst; - - if (abm && panel_cntl) { - if (abm->funcs && abm->funcs->set_pipe_ex) { - abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); - } else { - dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst); - } - } + if (abm->funcs && abm->funcs->set_pipe_ex) { + abm->funcs->set_pipe_ex(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); + } else { + dmub_abm_set_pipe(abm, + otg_inst, + SET_ABM_PIPE_NORMAL, + panel_cntl->inst, + panel_cntl->pwrseq_inst); } - if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm) + if (abm->funcs && abm->funcs->set_backlight_level_pwm) abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16, frame_ramp, 0, panel_cntl->inst); else diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c index 18249c6b6d81..18249c6b6d81 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h index 3ed24292648a..3ed24292648a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index d71faf2ecd41..ed9141a67db3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -51,7 +51,7 @@ #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_resource.h" #include "link.h" - +#include "dc_state_priv.h" @@ -69,21 +69,168 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name +void dcn30_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) +{ + struct dc_context *dc_ctx = dc->ctx; + struct resource_pool *pool = dc->res_pool; + int i; + + DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode" + " 3DLUT mode 3DLUT bit depth 3DLUT size RGAM mode" + " GAMUT adjust " + "C11 C12 C13 C14 " + "C21 C22 C23 C24 " + "C31 C32 C33 C34 \n"); + + for (i = 0; i < pool->pipe_count; i++) { + struct dpp *dpp = pool->dpps[i]; + struct dcn_dpp_state s = {0}; + + dpp->funcs->dpp_read_state(dpp, &s); + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + + if (!s.is_enabled) + continue; + + DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s" + " %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", + dpp->inst, + s.pre_dgam_mode, + (s.pre_dgam_select == 0) ? "sRGB" : + ((s.pre_dgam_select == 1) ? "Gamma 2.2" : + ((s.pre_dgam_select == 2) ? "Gamma 2.4" : + ((s.pre_dgam_select == 3) ? 
"Gamma 2.6" : + ((s.pre_dgam_select == 4) ? "BT.709" : + ((s.pre_dgam_select == 5) ? "PQ" : + ((s.pre_dgam_select == 6) ? "HLG" : + "Unknown")))))), + (s.gamcor_mode == 0) ? "Bypass" : + ((s.gamcor_mode == 1) ? "RAM A" : + "RAM B"), + (s.shaper_lut_mode == 1) ? "RAM A" : + ((s.shaper_lut_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_mode == 1) ? "RAM A" : + ((s.lut3d_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", + (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", + (s.rgam_lut_mode == 0) ? "Bypass" : + ((s.rgam_lut_mode == 1) ? "RAM A" : + "RAM B"), + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : + "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); + DTN_INFO("\n"); + } + DTN_INFO("\n"); + DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d" + " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" + " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d" + " blnd_lut:%d oscs:%d\n\n", + dc->caps.color.dpp.input_lut_shared, + dc->caps.color.dpp.icsc, + dc->caps.color.dpp.dgam_ram, + dc->caps.color.dpp.dgam_rom_caps.srgb, + dc->caps.color.dpp.dgam_rom_caps.bt2020, + dc->caps.color.dpp.dgam_rom_caps.gamma2_2, + dc->caps.color.dpp.dgam_rom_caps.pq, + dc->caps.color.dpp.dgam_rom_caps.hlg, + dc->caps.color.dpp.post_csc, + dc->caps.color.dpp.gamma_corr, + dc->caps.color.dpp.dgam_rom_for_yuv, + dc->caps.color.dpp.hw_3d_lut, + dc->caps.color.dpp.ogam_ram, + dc->caps.color.dpp.ocsc); + + DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE" + " SHAPER mode 3DLUT mode 3DLUT bit-depth 3DLUT size OGAM mode OGAM LUT" + " GAMUT adjust " + "C11 C12 C13 C14 " + "C21 C22 C23 C24 " + "C31 C32 C33 C34 \n"); + + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + mpc3_get_gamut_remap(pool->mpc, i, &s.gamut_remap); + + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %11s %11s %16s %11s %10s %9s" + " %-12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld\n", + i, s.opp_id, s.dpp_id, s.bot_mpcc_id, + s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, + s.idle, + (s.shaper_lut_mode == 1) ? "RAM A" : + ((s.shaper_lut_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_mode == 1) ? "RAM A" : + ((s.lut3d_mode == 2) ? "RAM B" : + "Bypass"), + (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", + (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", + (s.rgam_mode == 0) ? "Bypass" : + ((s.rgam_mode == 2) ? "RAM" : + "Unknown"), + (s.rgam_mode == 1) ? "B" : "A", + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ? 
"HW" : + "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); + + } + DTN_INFO("\n"); + DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", + dc->caps.color.mpc.gamut_remap, + dc->caps.color.mpc.num_3dluts, + dc->caps.color.mpc.ogam_ram, + dc->caps.color.mpc.ocsc); +} + bool dcn30_set_blend_lut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; bool result = true; - struct pwl_params *blend_lut = NULL; - - if (plane_state->blend_tf) { - if (plane_state->blend_tf->type == TF_TYPE_HWPWL) - blend_lut = &plane_state->blend_tf->pwl; - else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm3_helper_translate_curve_to_hw_format( - plane_state->blend_tf, &dpp_base->regamma_params, false); - blend_lut = &dpp_base->regamma_params; - } + const struct pwl_params *blend_lut = NULL; + + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + blend_lut = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm3_helper_translate_curve_to_hw_format( + &plane_state->blend_tf, &dpp_base->regamma_params, false); + blend_lut = &dpp_base->regamma_params; } result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); @@ -151,27 +298,24 @@ bool dcn30_set_input_transfer_func(struct dc *dc, struct dpp *dpp_base = pipe_ctx->plane_res.dpp; enum dc_transfer_func_predefined tf; bool result = true; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; if (dpp_base == NULL || plane_state == NULL) return false; tf = TRANSFER_FUNCTION_UNITY; - if (plane_state->in_transfer_func && - plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED) - tf = plane_state->in_transfer_func->tf; + if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED) + tf = plane_state->in_transfer_func.tf; dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf); - if (plane_state->in_transfer_func) { - if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL) - params = &plane_state->in_transfer_func->pwl; - else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func, - &dpp_base->degamma_params, false)) - params = &dpp_base->degamma_params; - } + if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) + params = &plane_state->in_transfer_func.pwl; + else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && + cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, + &dpp_base->degamma_params, false)) + params = &dpp_base->degamma_params; result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); @@ -229,24 +373,24 @@ bool dcn30_set_output_transfer_func(struct dc *dc, { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; bool ret = false; /* program OGAM or 3DLUT only for the top pipe*/ if (pipe_ctx->top_pipe 
== NULL) { /*program rmu shaper and 3dlut in MPC*/ ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream); - if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { - if (stream->out_transfer_func->type == TF_TYPE_HWPWL) - params = &stream->out_transfer_func->pwl; - else if (pipe_ctx->stream->out_transfer_func->type == + if (ret == false && mpc->funcs->set_output_gamma) { + if (stream->out_transfer_func.type == TF_TYPE_HWPWL) + params = &stream->out_transfer_func.pwl; + else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && cm3_helper_translate_curve_to_hw_format( - stream->out_transfer_func, + &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* there are no ROM LUTs in OUTGAM */ - if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } } @@ -367,6 +511,10 @@ void dcn30_enable_writeback( DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\ __func__, wb_info->dwb_pipe_inst,\ wb_info->mpcc_inst); + + /* Warmup interface */ + dcn30_mmhubbub_warmup(dc, 1, wb_info); + /* Update writeback pipe */ dcn30_set_writeback(dc, wb_info, context); @@ -472,6 +620,7 @@ void dcn30_init_hw(struct dc *dc) int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -608,13 +757,15 @@ void dcn30_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ @@ -648,7 +799,7 @@ void dcn30_init_hw(struct dc *dc) // Get DMCUB capabilities dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; } void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) @@ -656,10 +807,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) if (pipe_ctx == NULL) return; - if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) + if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) { pipe_ctx->stream_res.stream_enc->funcs->set_avmute( pipe_ctx->stream_res.stream_enc, enable); + + /* Wait for two frames to make sure AV mute is sent out */ + if (enable && pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) { + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + } + } } void
dcn30_update_info_frame(struct pipe_ctx *pipe_ctx) @@ -724,7 +885,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) { union dmub_rb_cmd cmd; uint32_t tmr_delay = 0, tmr_scale = 0; - struct dc_cursor_attributes cursor_attr; + struct dc_cursor_attributes cursor_attr = {0}; bool cursor_cache_enable = false; struct dc_stream_state *stream = NULL; struct dc_plane_state *plane = NULL; @@ -750,7 +911,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ; cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header); - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -780,7 +941,8 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 && plane->address.page_table_base.quad_part == 0 && dc->hwss.does_plane_fit_in_mall && - dc->hwss.does_plane_fit_in_mall(dc, plane, + dc->hwss.does_plane_fit_in_mall(dc, plane->plane_size.surface_pitch, + plane->plane_size.surface_size.height, plane->format, cursor_cache_enable ? &cursor_attr : NULL)) { unsigned int v_total = stream->adjust.v_total_max ? stream->adjust.v_total_max : stream->timing.v_total; @@ -872,7 +1034,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.cursor_height = cursor_attr.height; cmd.mall.cursor_pitch = cursor_attr.pitch; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Use copied cursor, and it's okay to not switch back */ cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part; @@ -888,7 +1050,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.tmr_scale = tmr_scale; cmd.mall.debug_bits = dc->debug.mall_error_as_fatal; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -905,16 +1067,20 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header); - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } -bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr) +bool dcn30_does_plane_fit_in_mall(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, + struct dc_cursor_attributes *cursor_attr) { // add meta size? - unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height * - (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4); + unsigned int surface_size = pitch * height * + (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 
8 : 4); unsigned int mall_size = dc->caps.mall_size_total; unsigned int cursor_size = 0; @@ -962,7 +1128,7 @@ void dcn30_hardware_release(struct dc *dc) if (!pipe->stream) continue; - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_MAIN) { subvp_in_use = true; break; } @@ -1008,21 +1174,3 @@ void dcn30_prepare_bandwidth(struct dc *dc, if (!dc->clk_mgr->clks.fw_based_mclk_switching) dc_dmub_srv_p_state_delegate(dc, false, context); } - -void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_params *params) -{ - unsigned int i; - unsigned int triggers = 0; - - if (params->triggers.surface_update) - triggers |= 0x100; - if (params->triggers.cursor_update) - triggers |= 0x8; - if (params->triggers.force_trigger) - triggers |= 0x1; - - for (i = 0; i < num_pipes; i++) - pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, - triggers, params->num_frames); -} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h index e557e2b98618..76b16839486a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h @@ -52,6 +52,9 @@ bool dcn30_mmhubbub_warmup( unsigned int num_dwb, struct dc_writeback_info *wb_info); +void dcn30_log_color_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); + bool dcn30_set_blend_lut(struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); @@ -68,7 +71,10 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx); void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx); -bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, +bool dcn30_does_plane_fit_in_mall(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, struct dc_cursor_attributes *cursor_attr); bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable); @@ -90,7 +96,4 @@ void dcn30_set_hubp_blank(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context); -void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_params *params); - #endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c index 9894caedffed..ef913445a795 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c @@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn10_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h index c280ff90bfa3..c280ff90bfa3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c index 
6477009ce065..6477009ce065 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h index 0bca48ccbfa2..0bca48ccbfa2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c index 637f9514d37b..637f9514d37b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h index 899587b93aa1..899587b93aa1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c index edb4d68b8187..edb4d68b8187 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h index 4949981126d7..4949981126d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 97798cee876e..1c8abb417b6e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -96,11 +96,10 @@ static void enable_memory_low_power(struct dc *dc) if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { // Power down VPGs for (i = 0; i < dc->res_pool->stream_enc_count; i++) - dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); -#if defined(CONFIG_DRM_AMD_DC_FP) + if (dc->res_pool->stream_enc[i]->vpg) + dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); -#endif } } @@ -112,6 +111,7 @@ void dcn31_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; int i; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) @@ -223,13 +223,15 @@ void dcn31_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ @@ -271,7 +273,7 @@ void dcn31_init_hw(struct dc *dc) // Get DMCUB capabilities dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); 
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; } void dcn31_dsc_pg_control( @@ -415,7 +417,7 @@ void dcn31_z10_save_init(struct dc *dc) cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_z10_restore(const struct dc *dc) @@ -433,7 +435,7 @@ void dcn31_z10_restore(const struct dc *dc) cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) @@ -477,7 +479,7 @@ void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool p int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { - struct dcn_hubbub_phys_addr_config config; + struct dcn_hubbub_phys_addr_config config = {0}; config.system_aperture.fb_top = pa_config->system_aperture.fb_top; config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; @@ -523,7 +525,8 @@ static void dcn31_reset_back_end_for_pipe( if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0; + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0; if (pipe_ctx->stream_res.tg->funcs->set_drr) pipe_ctx->stream_res.tg->funcs->set_drr( @@ -612,3 +615,21 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable) if (hws->ctx->dc->debug.hpo_optimization) REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable); } + +void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params) +{ + unsigned int i; + unsigned int triggers = 0; + + if (params->triggers.surface_update) + triggers |= 0x100; + if (params->triggers.cursor_update) + triggers |= 0x8; + if (params->triggers.force_trigger) + triggers |= 0x1; + + for (i = 0; i < num_pipes; i++) + pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg, + triggers, params->num_frames); +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h index edfc01d6ad73..b8bc939da155 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h @@ -56,4 +56,8 @@ bool dcn31_is_abm_supported(struct dc *dc, void dcn31_init_pipes(struct dc *dc, struct dc_state *context); void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); +void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params); + + #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c index 669f524bd064..c06cc2c5da92 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c @@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn31_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h index a3db08c8bd35..a3db08c8bd35 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index 3a9cc8ac0c07..0d8a05cf8b1a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -69,29 +69,6 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name -static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, - int opp_cnt) -{ - bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing); - int flow_ctrl_cnt; - - if (opp_cnt >= 2) - hblank_halved = true; - - flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - - stream->timing.h_border_left - - stream->timing.h_border_right; - - if (hblank_halved) - flow_ctrl_cnt /= 2; - - /* ODM combine 4:1 case */ - if (opp_cnt == 4) - flow_ctrl_cnt /= 2; - - return flow_ctrl_cnt; -} - static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; @@ -105,7 +82,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) if (enable) { struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; + struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ @@ -183,10 +160,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx struct pipe_ctx *odm_pipe; int opp_cnt = 0; int opp_inst[MAX_PIPES] = {0}; - bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing)); - struct mpc_dwb_flow_control flow_control; - struct mpc *mpc = dc->res_pool->mpc; - int i; opp_cnt = get_odm_config(pipe_ctx, opp_inst); @@ -199,20 +172,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; - flow_control.flow_ctrl_mode = 0; - flow_control.flow_ctrl_cnt0 = 0x80; - flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt); - if (mpc->funcs->set_out_rate_control) { - for (i = 0; i < opp_cnt; ++i) { - mpc->funcs->set_out_rate_control( - mpc, opp_inst[i], - true, - rate_control_2x_pclk, - &flow_control); - } - } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( odm_pipe->stream_res.opp, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c index ccb7e317e86a..542ce3b7f9e4 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c @@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = { .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn31_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h index 8f92e66577cf..8f92e66577cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 1b9f21fd4f17..b8e884368dc6 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -51,6 +51,7 @@ #include "dcn32/dcn32_resource.h" #include "link.h" #include "../dcn20/dcn20_hwseq.h" +#include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) @@ -238,8 +239,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c // Convert number of cache lines required to number of ways if (dc->debug.force_mall_ss_num_ways > 0) { num_ways = dc->debug.force_mall_ss_num_ways; + } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) { + num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes); } else { - num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes); + num_ways = 0; } return num_ways; @@ -260,7 +263,9 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) for (i = 0; i < dc->current_state->stream_count; i++) { /* MALL SS messaging is not supported with PSR at this time */ if (dc->current_state->streams[i] != NULL && - dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) + dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && + (dc->current_state->stream_count > 1 || (!dc->current_state->streams[i]->dpms_off && + dc->current_state->stream_status[i].plane_count > 0))) return false; } @@ -277,7 +282,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ; cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -311,7 +316,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); cmd.cab.cab_alloc_ways = (uint8_t)ways; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); return true; } @@ -327,7 +332,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -348,8 +353,7 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context) for (i = 0; i < 
dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream && - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { // There is at least 1 SubVP pipe, so enable SubVP enable_subvp = true; break; @@ -375,18 +379,20 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, bool subvp_immediate_flip = false; bool subvp_in_use = false; struct pipe_ctx *pipe; + enum mall_stream_type pipe_mall_type = SUBVP_NONE; for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN) { subvp_in_use = true; break; } } if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) { - if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN && + if (dc_state_get_pipe_subvp_type(context, top_pipe_to_program) == SUBVP_MAIN && top_pipe_to_program->plane_state->flip_immediate) subvp_immediate_flip = true; } @@ -398,7 +404,7 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, if (!lock) { for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN && + if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN && should_lock_all_pipes) pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); } @@ -416,14 +422,7 @@ void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params) { struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc; bool lock = params->subvp_pipe_control_lock_fast_params.lock; - struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx; - bool subvp_immediate_flip = false; - - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) { - if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN && - pipe_ctx->plane_state->flip_immediate) - subvp_immediate_flip = true; - } + bool subvp_immediate_flip = params->subvp_pipe_control_lock_fast_params.subvp_immediate_flip; // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. 
if (subvp_immediate_flip) { @@ -480,41 +479,35 @@ bool dcn32_set_mcm_luts( int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = true; - struct pwl_params *lut_params = NULL; + const struct pwl_params *lut_params = NULL; // 1D LUT - if (plane_state->blend_tf) { - if (plane_state->blend_tf->type == TF_TYPE_HWPWL) - lut_params = &plane_state->blend_tf->pwl; - else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - plane_state->blend_tf, - &dpp_base->regamma_params, false); - lut_params = &dpp_base->regamma_params; - } + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + lut_params = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, + &dpp_base->regamma_params, false); + lut_params = &dpp_base->regamma_params; } result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); lut_params = NULL; // Shaper - if (plane_state->in_shaper_func) { - if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) - lut_params = &plane_state->in_shaper_func->pwl; - else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - // TODO: dpp_base replace - ASSERT(false); - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - plane_state->in_shaper_func, - &dpp_base->shaper_params, true); - lut_params = &dpp_base->shaper_params; - } + if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL) + lut_params = &plane_state->in_shaper_func.pwl; + else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { + // TODO: dpp_base replace + ASSERT(false); + cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, + &dpp_base->shaper_params, true); + lut_params = &dpp_base->shaper_params; } result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); // 3D - if (plane_state->lut3d_func && plane_state->lut3d_func->state.bits.initialized == 1) - result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func->lut_3d, mpcc_id); + if (plane_state->lut3d_func.state.bits.initialized == 1) + result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id); else result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id); @@ -531,27 +524,24 @@ bool dcn32_set_input_transfer_func(struct dc *dc, enum dc_transfer_func_predefined tf; bool result = true; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; if (mpc == NULL || plane_state == NULL) return false; tf = TRANSFER_FUNCTION_UNITY; - if (plane_state->in_transfer_func && - plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED) - tf = plane_state->in_transfer_func->tf; + if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED) + tf = plane_state->in_transfer_func.tf; dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf); - if (plane_state->in_transfer_func) { - if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL) - params = &plane_state->in_transfer_func->pwl; - else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func, - &dpp_base->degamma_params, false)) - params = &dpp_base->degamma_params; - } + if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) + params = &plane_state->in_transfer_func.pwl; + else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && + 
cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, + &dpp_base->degamma_params, false)) + params = &dpp_base->degamma_params; dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); @@ -569,24 +559,24 @@ bool dcn32_set_output_transfer_func(struct dc *dc, { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; bool ret = false; /* program OGAM or 3DLUT only for the top pipe*/ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) { /*program shaper and 3dlut in MPC*/ ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); - if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { - if (stream->out_transfer_func->type == TF_TYPE_HWPWL) - params = &stream->out_transfer_func->pwl; - else if (pipe_ctx->stream->out_transfer_func->type == + if (ret == false && mpc->funcs->set_output_gamma) { + if (stream->out_transfer_func.type == TF_TYPE_HWPWL) + params = &stream->out_transfer_func.pwl; + else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && cm3_helper_translate_curve_to_hw_format( - stream->out_transfer_func, + &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* there are no ROM LUTs in OUTGAM */ - if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } } @@ -611,16 +601,10 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; - if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN || + if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); - } - - /* Today only FPO uses cursor P-State force. Only clear cursor P-State force - * if it's not FPO. 
- */ - if (!pipe->stream || !pipe->stream->fpo_in_use) { if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, false); } @@ -632,17 +616,10 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) - hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); - } - - if (pipe->stream && pipe->stream->fpo_in_use) { + if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || + pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); - /* For now only force cursor p-state disallow for FPO - * Needs to be added for subvp once FW side gets updated - */ if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow) hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, true); } @@ -686,8 +663,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context) if (cursor_size > 16384) cache_cursor = true; - if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - hubp->funcs->hubp_update_mall_sel(hubp, 1, false); + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + hubp->funcs->hubp_update_mall_sel(hubp, 1, false); } else { // MALL not supported with Stereo3D hubp->funcs->hubp_update_mall_sel(hubp, @@ -729,9 +706,8 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context) * see if CURSOR_REQ_MODE will be back to 1 for SubVP * when it should be 0 for MPO */ - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) hubp->funcs->hubp_prepare_subvp_buffering(hubp, true); - } } } } @@ -774,6 +750,7 @@ void dcn32_init_hw(struct dc *dc) int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -928,13 +905,15 @@ void dcn32_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL && abms[i]->funcs != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ @@ -974,39 +953,35 @@ void dcn32_init_hw(struct dc *dc) dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support; dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; - } -} + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; -static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, - int opp_cnt) -{ - bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing); - int flow_ctrl_cnt; - - if (opp_cnt >= 2) - 
hblank_halved = true; - - flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - - stream->timing.h_border_left - - stream->timing.h_border_right; - - if (hblank_halved) - flow_ctrl_cnt /= 2; - - /* ODM combine 4:1 case */ - if (opp_cnt == 4) - flow_ctrl_cnt /= 2; - - return flow_ctrl_cnt; + if (dc->ctx->dmub_srv->dmub->fw_version < + DMUB_FW_VERSION(7, 0, 35)) { + dc->debug.force_disable_subvp = true; + dc->debug.disable_fpo_optimizations = true; + } + } } static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; + struct dccg *dccg = dc->res_pool->dccg; + /* It has been found that when DSCCLK is lower than 16MHz, DCN register + * accesses can hang. When DSCCLK is based on refclk, DSCCLK is always a + * fixed value higher than 16MHz so the issue doesn't occur. When DSCCLK is + * generated by DTO, DSCCLK would be based on 1/3 dispclk. For small timings + * with DSC such as 480p60Hz, the dispclk could be low enough to trigger + * this problem. We are implementing a workaround here to keep using dscclk + * based on the fixed-value refclk whenever the pixel clock is smaller than + * 3x16MHz (i.e., 48MHz), to avoid hitting this problem. + */ + bool should_use_dto_dscclk = (dccg->funcs->set_dto_dscclk != NULL) && + stream->timing.pix_clk_100hz > 480000; ASSERT(dsc); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) @@ -1014,7 +989,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) if (enable) { struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; + struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ @@ -1029,12 +1004,16 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); + if (should_use_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, dsc->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; ASSERT(odm_dsc); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); + if (should_use_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); } dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; dsc_cfg.pic_width *= opp_cnt; @@ -1054,9 +1033,13 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) OPTC_DSC_DISABLED, 0, 0); /* disable DSC block */ + if (dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst); dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { ASSERT(odm_pipe->stream_res.dsc); + if (dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst); odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); } } @@ -1094,10 +1077,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * struct pipe_ctx *odm_pipe; int opp_cnt = 0; int opp_inst[MAX_PIPES] = {0}; - bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || 
optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing)); - struct mpc_dwb_flow_control flow_control; - struct mpc *mpc = dc->res_pool->mpc; - int i; opp_cnt = get_odm_config(pipe_ctx, opp_inst); @@ -1110,20 +1089,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; - flow_control.flow_ctrl_mode = 0; - flow_control.flow_ctrl_cnt0 = 0x80; - flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt); - if (mpc->funcs->set_out_rate_control) { - for (i = 0; i < opp_cnt; ++i) { - mpc->funcs->set_out_rate_control( - mpc, opp_inst[i], - true, - rate_control_2x_pclk, - &flow_control); - } - } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( odm_pipe->stream_res.opp, @@ -1139,10 +1104,21 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe->stream_res.dsc) { struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc; + struct dccg *dccg = dc->res_pool->dccg; + + if (dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, dsc->inst); /* disconnect DSC block from stream */ dsc->funcs->dsc_disconnect(dsc); } } + + if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE)) + /* + * blank pattern is generated by OPP, reprogram blank pattern + * due to OPP count change + */ + dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true); } unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) @@ -1212,7 +1188,7 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_ continue; if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) - && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) { pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); otg_disabled[i] = true; @@ -1363,8 +1339,8 @@ void dcn32_update_phantom_vp_position(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN && - pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN && + dc_state_get_paired_subvp_stream(context, pipe->stream) == phantom_pipe->stream) { if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) { phantom_plane->src_rect.x = pipe->plane_state->src_rect.x; @@ -1389,21 +1365,19 @@ void dcn32_update_phantom_vp_position(struct dc *dc, void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) { phantom_pipe->update_flags.raw = 0; - if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { - phantom_pipe->update_flags.bits.enable = 1; - phantom_pipe->update_flags.bits.mpcc = 1; - phantom_pipe->update_flags.bits.dppclk = 1; - phantom_pipe->update_flags.bits.hubp_interdependent = 1; - phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; - 
phantom_pipe->update_flags.bits.gamut_remap = 1; - phantom_pipe->update_flags.bits.scaler = 1; - phantom_pipe->update_flags.bits.viewport = 1; - phantom_pipe->update_flags.bits.det_size = 1; - if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { - phantom_pipe->update_flags.bits.odm = 1; - phantom_pipe->update_flags.bits.global_sync = 1; - } + if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { + phantom_pipe->update_flags.bits.enable = 1; + phantom_pipe->update_flags.bits.mpcc = 1; + phantom_pipe->update_flags.bits.dppclk = 1; + phantom_pipe->update_flags.bits.hubp_interdependent = 1; + phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + phantom_pipe->update_flags.bits.gamut_remap = 1; + phantom_pipe->update_flags.bits.scaler = 1; + phantom_pipe->update_flags.bits.viewport = 1; + phantom_pipe->update_flags.bits.det_size = 1; + if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { + phantom_pipe->update_flags.bits.odm = 1; + phantom_pipe->update_flags.bits.global_sync = 1; } } } @@ -1463,9 +1437,44 @@ void dcn32_update_dsc_pg(struct dc *dc, } } +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) || + (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if (hws->funcs.reset_back_end_for_pipe) + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} + void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) { unsigned int i; + enum dc_status status = DC_OK; + struct dce_hwseq *hws = dc->hwseq; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1475,8 +1484,8 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) * pipe, wait for the double buffer update to complete first before we do * ANY phantom pipe programming. */ - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && - old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM && + old_pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_PHANTOM) { old_pipe->stream_res.tg->funcs->wait_for_state( old_pipe->stream_res.tg, CRTC_STATE_VBLANK); @@ -1486,16 +1495,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) } } for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - - if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - // If old context or new context has phantom pipes, apply - // the phantom timings now. We can't change the phantom - // pipe configuration safely without driver acquiring - // the DMCUB lock first. 
- dc->hwss.apply_ctx_to_hw(dc, context); - break; + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream == NULL) + continue; + + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx->stream == pipe_ctx_old->stream && + pipe_ctx->stream->link->link_state_valid) { + continue; } + + if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) + continue; + + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) + continue; + + if (hws->funcs.apply_single_controller_ctx_to_hw) + status = hws->funcs.apply_single_controller_ctx_to_hw( + pipe_ctx, + context, + dc); + + ASSERT(status == DC_OK); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (hws->funcs.resync_fifo_dccg_dio) + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); +#endif } } @@ -1510,7 +1542,7 @@ void dcn32_init_blank( struct output_pixel_processor *opp = NULL; struct output_pixel_processor *bottom_opp = NULL; uint32_t num_opps, opp_id_src0, opp_id_src1; - uint32_t otg_active_width, otg_active_height; + uint32_t otg_active_width = 0, otg_active_height = 0; uint32_t i; /* program opp dpg blank color */ @@ -1709,3 +1741,26 @@ void dcn32_prepare_bandwidth(struct dc *dc, context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; } } + +void dcn32_interdependent_update_lock(struct dc *dc, + struct dc_state *context, bool lock) +{ + unsigned int i; + struct pipe_ctx *pipe; + struct timing_generator *tg; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) + continue; + + if (lock) + dc->hwss.pipe_control_lock(dc, pipe, true); + else + dc->hwss.pipe_control_lock(dc, pipe, false); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index cecf7f0f5671..f55c11fc56ec 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc, void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context); + void dcn32_init_blank( struct dc *dc, struct timing_generator *tg); @@ -127,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, void dcn32_prepare_bandwidth(struct dc *dc, struct dc_state *context); +void dcn32_interdependent_update_lock(struct dc *dc, + struct dc_state *context, bool lock); #endif /* __DC_HWSS_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c index 427cfc8c24a4..67d661dbd5b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -58,14 +58,14 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .disable_plane = dcn20_disable_plane, .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, + .interdependent_update_lock = dcn32_interdependent_update_lock, .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn32_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, 
.update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn31_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, @@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .get_dcc_en_bits = dcn10_get_dcc_en_bits, .commit_subvp_config = dcn32_commit_subvp_config, .enable_phantom_streams = dcn32_enable_phantom_streams, + .disable_phantom_streams = dcn32_disable_phantom_streams, .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, .update_visual_confirm_color = dcn10_update_visual_confirm_color, .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, @@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = { .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio, .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, + .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, }; void dcn32_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h index 89a591eb2c23..89a591eb2c23 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 34737d60b965..5295f52e4fc8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -56,6 +56,7 @@ #include "dcn30/dcn30_cm_common.h" #include "dcn31/dcn31_hwseq.h" #include "dcn20/dcn20_hwseq.h" +#include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) \ struct dal_logger *dc_logger = logger @@ -133,21 +134,50 @@ void dcn35_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; int i; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0x3F000000); - REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0x1f7c3fcf); - //dcn35_set_dmu_fgcg(hws, dc->debug.enable_fine_grain_clock_gating.bits.dmu); if (!dcb->funcs->is_accelerated_mode(dcb)) { /*this calls into dmubfw to do the init*/ hws->funcs.bios_golden_init(dc); } + + if (!dc->debug.disable_clock_gate) { + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + + /* Disable gating for PHYASYMCLK. 
This will be enabled in dccg if needed */ + REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1, + PHYBSYMCLK_ROOT_GATE_DISABLE, 1, + PHYCSYMCLK_ROOT_GATE_DISABLE, 1, + PHYDSYMCLK_ROOT_GATE_DISABLE, 1, + PHYESYMCLK_ROOT_GATE_DISABLE, 1); + + REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4, + DPIASYMCLK0_GATE_DISABLE, 0, + DPIASYMCLK1_GATE_DISABLE, 0, + DPIASYMCLK2_GATE_DISABLE, 0, + DPIASYMCLK3_GATE_DISABLE, 0); + + REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF); + REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5, + DTBCLK_P0_GATE_DISABLE, 0, + DTBCLK_P1_GATE_DISABLE, 0, + DTBCLK_P2_GATE_DISABLE, 0, + DTBCLK_P3_GATE_DISABLE, 0); + REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5, + DPSTREAMCLK0_GATE_DISABLE, 0, + DPSTREAMCLK1_GATE_DISABLE, 0, + DPSTREAMCLK2_GATE_DISABLE, 0, + DPSTREAMCLK3_GATE_DISABLE, 0); + + } + // Initialize the dccg if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); @@ -251,13 +281,15 @@ void dcn35_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } if (dc->ctx->dmub_srv) { for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL && abms[i]->funcs != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } } @@ -274,7 +306,19 @@ void dcn35_init_hw(struct dc *dc) if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + + REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, 0, + SYMCLKB_FE_GATE_DISABLE, 0, + SYMCLKC_FE_GATE_DISABLE, 0, + SYMCLKD_FE_GATE_DISABLE, 0, + SYMCLKE_FE_GATE_DISABLE, 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, 0); + REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, 0, + SYMCLKB_GATE_DISABLE, 0, + SYMCLKC_GATE_DISABLE, 0, + SYMCLKD_GATE_DISABLE, 0, + SYMCLKE_GATE_DISABLE, 0); + REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } @@ -305,7 +349,7 @@ void dcn35_init_hw(struct dc *dc) if (dc->ctx->dmub_srv) { dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; } if (dc->res_pool->pg_cntl) { @@ -314,29 +358,6 @@ void dcn35_init_hw(struct dc *dc) } } -static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, - int opp_cnt) -{ - bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing); - int flow_ctrl_cnt; - - if (opp_cnt >= 2) - hblank_halved = true; - - flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - - stream->timing.h_border_left - - stream->timing.h_border_right; - - if (hblank_halved) - flow_ctrl_cnt /= 2; - - /* ODM combine 4:1 case */ - if (opp_cnt == 4) - flow_ctrl_cnt /= 2; - - return flow_ctrl_cnt; -} - static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; @@ -352,7 +373,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) if (enable) { struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; + struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode 
optc_dsc_mode; /* Enable DSC hw block */ @@ -430,10 +451,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * struct pipe_ctx *odm_pipe; int opp_cnt = 0; int opp_inst[MAX_PIPES] = {0}; - bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing)); - struct mpc_dwb_flow_control flow_control; - struct mpc *mpc = dc->res_pool->mpc; - int i; opp_cnt = get_odm_config(pipe_ctx, opp_inst); @@ -446,20 +463,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; - flow_control.flow_ctrl_mode = 0; - flow_control.flow_ctrl_cnt0 = 0x80; - flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt); - if (mpc->funcs->set_out_rate_control) { - for (i = 0; i < opp_cnt; ++i) { - mpc->funcs->set_out_rate_control( - mpc, opp_inst[i], - true, - rate_control_2x_pclk, - &flow_control); - } - } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( odm_pipe->stream_res.opp, @@ -492,6 +495,17 @@ void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, } } +void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on) +{ + if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream) + return; + + if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) { + hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating( + hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on); + } +} + void dcn35_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, @@ -635,23 +649,47 @@ void dcn35_power_down_on_boot(struct dc *dc) bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) { - struct dc_link *edp_links[MAX_NUM_EDP]; - int edp_num; if (dc->debug.dmcub_emulation) return true; if (enable) { - dc_get_edp_links(dc, edp_links, &edp_num); - if (edp_num == 0 || edp_num > 1) + uint32_t num_active_edp = 0; + int i; + + for (i = 0; i < dc->current_state->stream_count; ++i) { + struct dc_stream_state *stream = dc->current_state->streams[i]; + struct dc_link *link = stream->link; + bool is_psr = link && !link->panel_config.psr.disable_psr && + (link->psr_settings.psr_version == DC_PSR_VERSION_1 || + link->psr_settings.psr_version == DC_PSR_VERSION_SU_1); + bool is_replay = link && link->replay_settings.replay_feature_enabled; + + /* Ignore streams that are disabled. */ + if (stream->dpms_off) + continue; + + /* Active external displays block idle optimizations. */ + if (!dc_is_embedded_signal(stream->signal)) + return false; + + /* If the panel is not on PWRSEQ0, we can't enter idle optimizations */ + if (link && link->link_index != 0) + return false; + + /* Check for panel power features required for idle optimizations. */ + if (!is_psr && !is_replay) + return false; + + num_active_edp += 1; + } + + /* If more than one eDP is active, disallow idle optimizations. 
*/ + if (num_active_edp > 1) return false; } // TODO: review other cases when idle optimization is allowed - - if (!enable) - dc_dmub_srv_exit_low_power_state(dc); - else - dc_dmub_srv_notify_idle(dc, enable); + dc_dmub_srv_apply_idle_power_optimizations(dc, enable); return true; } @@ -661,7 +699,7 @@ void dcn35_z10_restore(const struct dc *dc) if (dc->debug.disable_z10) return; - dc_dmub_srv_exit_low_power_state(dc); + dc_dmub_srv_apply_idle_power_optimizations(dc, false); dcn31_z10_restore(dc); } @@ -673,6 +711,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) struct hubbub *hubbub = dc->res_pool->hubbub; struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; bool can_apply_seamless_boot = false; + bool tg_enabled[MAX_PIPES] = {false}; for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) { @@ -754,6 +793,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) // requesting data while in PSR. tg->funcs->tg_init(tg); hubp->power_gated = true; + tg_enabled[i] = true; continue; } @@ -777,12 +817,12 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; @@ -795,6 +835,20 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) tg->funcs->tg_init(tg); } + /* Clean up MPC tree */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (tg_enabled[i]) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) { + int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id; + + if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id])) + dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; + } + } + } + } + if (pg_cntl != NULL) { if (pg_cntl->funcs->dsc_pg_control != NULL) { uint32_t num_opps = 0; @@ -909,10 +963,10 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state = NULL; } -void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; - bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; + bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM; struct timing_generator *tg = is_phantom ? 
pipe_ctx->stream_res.tg : NULL; DC_LOGGER_INIT(dc->ctx->logger); @@ -939,6 +993,8 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, bool hpo_frl_stream_enc_acquired = false; bool hpo_dp_stream_enc_acquired = false; int i = 0, j = 0; + int edp_num = 0; + struct dc_link *edp_links[MAX_NUM_EDP] = { NULL }; memset(update_state, 0, sizeof(struct pg_block_update)); @@ -953,6 +1009,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired) update_state->pg_res_update[PG_HPO] = true; + if (hpo_frl_stream_enc_acquired) + update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; + update_state->pg_res_update[PG_DWB] = true; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -970,8 +1029,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->plane_res.dpp) update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false; - if ((pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) && - pipe_ctx->plane_res.mpcc_inst >= 0) + if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false; if (pipe_ctx->stream_res.dsc) @@ -980,9 +1038,26 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false; - if (pipe_ctx->stream_res.tg) - update_state->pg_pipe_res_update[PG_OPTC][pipe_ctx->stream_res.tg->inst] = false; + if (pipe_ctx->stream_res.hpo_dp_stream_enc) + update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false; + } + /* Domain24 controls all OTG, MPC and OPP instances; as long as one OTG is still up, avoid enabling OTG PG */ + for (i = 0; i < dc->res_pool->timing_generator_count; i++) { + struct timing_generator *tg = dc->res_pool->timing_generators[i]; + if (tg && tg->funcs->is_tg_enabled(tg)) { + update_state->pg_pipe_res_update[PG_OPTC][i] = false; + break; + } + } + + dc_get_edp_links(dc, edp_links, &edp_num); + if (edp_num == 0 || + ((!edp_links[0] || !edp_links[0]->edp_sink_present) && + (!edp_links[1] || !edp_links[1]->edp_sink_present))) { + /* eDP does not exist on this config; keep Domain24 powered on. For S0i3 this will be handled in dmubfw */ + update_state->pg_pipe_res_update[PG_OPTC][0] = false; } + } void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state 
*context, if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired) update_state->pg_res_update[PG_HPO] = true; + if (hpo_frl_stream_enc_acquired) + update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; + } -void dcn35_block_power_control(struct dc *dc, - struct pg_block_update *update_state, bool power_on) +/** + * dcn35_hw_block_power_down() - power down sequence + * + * The following sequence describes the ON-OFF (ONO) for power down: + * + * ONO Region 3, DCPG 25: hpo - SKIPPED + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 11, DCPG 19: dsc3 + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn35_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state) { int i = 0; struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; @@ -1078,64 +1185,111 @@ return; if (dc->debug.ignore_pg) return; + if (update_state->pg_res_update[PG_HPO]) { if (pg_cntl->funcs->hpo_pg_control) - pg_cntl->funcs->hpo_pg_control(pg_cntl, power_on); + pg_cntl->funcs->hpo_pg_control(pg_cntl, false); } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { if (pg_cntl->funcs->hubp_dpp_pg_control) - pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, power_on); + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false); } - + } + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) if (update_state->pg_pipe_res_update[PG_DSC][i]) { if (pg_cntl->funcs->dsc_pg_control) - pg_cntl->funcs->dsc_pg_control(pg_cntl, i, power_on); - } - - if (update_state->pg_pipe_res_update[PG_MPCC][i]) { - if (pg_cntl->funcs->mpcc_pg_control) - pg_cntl->funcs->mpcc_pg_control(pg_cntl, i, power_on); + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false); } - if (update_state->pg_pipe_res_update[PG_OPP][i]) { - if (pg_cntl->funcs->opp_pg_control) - pg_cntl->funcs->opp_pg_control(pg_cntl, i, power_on); - } - if (update_state->pg_pipe_res_update[PG_OPTC][i]) { - if (pg_cntl->funcs->optc_pg_control) - pg_cntl->funcs->optc_pg_control(pg_cntl, i, power_on); - } - } + /*this will need all the clients to unregister optc interrupts, let dmubfw handle this*/ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false); - if (update_state->pg_res_update[PG_DWB]) { - if (pg_cntl->funcs->dwb_pg_control) - pg_cntl->funcs->dwb_pg_control(pg_cntl, power_on); - } + //domain22, 23, 25 currently always on. 
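For reference, the ONO power-down ordering in the kernel-doc above can be restated as data. The sketch below is illustrative only: the struct and the table are hypothetical, since the driver encodes this order purely by statement sequence inside dcn35_hw_block_power_down(); the region/DCPG values are copied from the comment.

struct ono_step {
	int region;		/* ONO region number */
	int dcpg;		/* DCPG domain */
	const char *blocks;
	bool skipped;		/* handled by PMFW or the LONO timer instead */
};

static const struct ono_step dcn35_power_down_order[] = {
	{  3, 25, "hpo",                        true  },
	{  4,  0, "dchubp0, dpp0",              false },
	{  6,  1, "dchubp1, dpp1",              false },
	{  8,  2, "dchubp2, dpp2",              false },
	{ 10,  3, "dchubp3, dpp3",              false },
	{  1, 23, "dchubbub dchvm dchubbubmem", true  }, /* PMFW at IPS2 entry */
	{  5, 16, "dsc0",                       false },
	{  7, 17, "dsc1",                       false },
	{  9, 18, "dsc2",                       false },
	{ 11, 19, "dsc3",                       false },
	{  2, 24, "mpc opp optc dwb",           false },
	{  0, 22, "dccg dio dcio",              true  }, /* after LONO timer is armed */
};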
- if (pg_cntl->funcs->plane_otg_pg_control) - pg_cntl->funcs->plane_otg_pg_control(pg_cntl, power_on); } -void dcn35_root_clock_control(struct dc *dc, - struct pg_block_update *update_state, bool power_on) +/** + * dcn35_hw_block_power_up() - power up sequence + * + * The following sequence describes the ON-OFF (ONO) for power up: + * + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 11, DCPG 19: dsc3 + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 3, DCPG 25: hpo - SKIPPED + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn35_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state) { int i = 0; struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; if (!pg_cntl) return; + if (dc->debug.ignore_pg) + return; + //domain22, 23, 25 currently always on. + /*this will need all the clients to unregister optc interrupts, let dmubfw handle this*/ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true); + + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) + if (update_state->pg_pipe_res_update[PG_DSC][i]) { + if (pg_cntl->funcs->dsc_pg_control) + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true); + } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { - if (dc->hwseq->funcs.dpp_root_clock_control) - dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); + if (pg_cntl->funcs->hubp_dpp_pg_control) + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true); + } + } + if (update_state->pg_res_update[PG_HPO]) { + if (pg_cntl->funcs->hpo_pg_control) + pg_cntl->funcs->hpo_pg_control(pg_cntl, true); + } +} +void dcn35_root_clock_control(struct dc *dc, + struct pg_block_update *update_state, bool power_on) +{ + int i = 0; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + + if (!pg_cntl) + return; + /*enable root clocks first when powering up*/ + if (power_on) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (dc->hwseq->funcs.dpp_root_clock_control) + dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); + } + if (update_state->pg_pipe_res_update[PG_DPSTREAM][i]) + if (dc->hwseq->funcs.dpstream_root_clock_control) + dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on); } + } + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { if (update_state->pg_pipe_res_update[PG_DSC][i]) { if (power_on) { if (dc->res_pool->dccg->funcs->enable_dsc) @@ -1146,6 +1300,20 @@ void dcn35_root_clock_control, } } } + /*disable root clocks first when powering down*/ + if (!power_on) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (dc->hwseq->funcs.dpp_root_clock_control) + dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); + } + if (update_state->pg_pipe_res_update[PG_DPSTREAM][i]) + if (dc->hwseq->funcs.dpstream_root_clock_control) + 
dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on); + } + + } } void dcn35_prepare_bandwidth( @@ -1159,9 +1327,9 @@ void dcn35_prepare_bandwidth( if (dc->hwss.root_clock_control) dc->hwss.root_clock_control(dc, &pg_update_state, true); - - if (dc->hwss.block_power_control) - dc->hwss.block_power_control(dc, &pg_update_state, true); + /*power up required HW blocks*/ + if (dc->hwss.hw_block_power_up) + dc->hwss.hw_block_power_up(dc, &pg_update_state); } dcn20_prepare_bandwidth(dc, context); @@ -1177,27 +1345,97 @@ void dcn35_optimize_bandwidth( if (dc->hwss.calc_blocks_to_gate) { dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state); - - if (dc->hwss.block_power_control) - dc->hwss.block_power_control(dc, &pg_update_state, false); + /*try to power down unused blocks*/ + if (dc->hwss.hw_block_power_down) + dc->hwss.hw_block_power_down(dc, &pg_update_state); if (dc->hwss.root_clock_control) dc->hwss.root_clock_control(dc, &pg_update_state, false); } } -void dcn35_set_idle_state(const struct dc *dc, bool allow_idle) +void dcn35_set_drr(struct pipe_ctx **pipe_ctx, + int num_pipes, struct dc_crtc_timing_adjust adjust) { - // TODO: Find a more suitable communcation - if (dc->clk_mgr->funcs->set_idle_state) - dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle); + int i = 0; + struct drr_params params = {0}; + // DRR set trigger event mapped to OTG_TRIG_A + unsigned int event_triggers = 0x2;//Bit[1]: OTG_TRIG_A + // Note: DRR trigger events are generated regardless of whether num_frames has been met. + unsigned int num_frames = 2; + + params.vertical_total_max = adjust.v_total_max; + params.vertical_total_min = adjust.v_total_min; + params.vertical_total_mid = adjust.v_total_mid; + params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num; + + for (i = 0; i < num_pipes; i++) { + if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) { + struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing; + struct dc *dc = pipe_ctx[i]->stream->ctx->dc; + + if (dc->debug.static_screen_wait_frames) { + unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total); + + if (frame_rate >= 120 && dc->caps.ips_support && + dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) { + /*IPS enabled case*/ + num_frames = 2 * (frame_rate % 60); + } + } + if (pipe_ctx[i]->stream_res.tg->funcs->set_drr) + pipe_ctx[i]->stream_res.tg->funcs->set_drr( + pipe_ctx[i]->stream_res.tg, &params); + if (adjust.v_total_max != 0 && adjust.v_total_min != 0) + if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control) + pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( + pipe_ctx[i]->stream_res.tg, + event_triggers, num_frames); + } + } +} +void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params) +{ + unsigned int i; + unsigned int triggers = 0; + + if (params->triggers.surface_update) + triggers |= 0x200;/*bit 9 : 10 0000 0000*/ + if (params->triggers.cursor_update) + triggers |= 0x8;/*bit3*/ + if (params->triggers.force_trigger) + triggers |= 0x1; + for (i = 0; i < num_pipes; i++) + pipe_ctx[i]->stream_res.tg->funcs-> + set_static_screen_control(pipe_ctx[i]->stream_res.tg, + triggers, params->num_frames); } -uint32_t dcn35_get_idle_state(const struct dc *dc) +void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx, + int num_pipes, uint32_t v_total_min, uint32_t v_total_max) { - // TODO: Find a more suitable communcation - if (dc->clk_mgr->funcs->get_idle_state) - return 
dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr); + int i = 0; + struct long_vtotal_params params = {0}; - return 0; + params.vertical_total_max = v_total_max; + params.vertical_total_min = v_total_min; + + for (i = 0; i < num_pipes; i++) { + if (!pipe_ctx[i]) + continue; + + if (pipe_ctx[i]->stream) { + struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing; + + if (timing) + params.vertical_blank_start = timing->v_total - timing->v_front_porch; + else + params.vertical_blank_start = 0; + + if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs && + pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal) + pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, &params); + } + } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 0dff10d179b8..a731c8880d60 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -37,6 +37,8 @@ void dcn35_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool pow void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); +void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on); + void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable); void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable); @@ -57,14 +59,16 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context); void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); -void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state); void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state); -void dcn35_block_power_control(struct dc *dc, - struct pg_block_update *update_state, bool power_on); +void dcn35_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state); +void dcn35_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state); void dcn35_root_clock_control(struct dc *dc, struct pg_block_update *update_state, bool power_on); @@ -82,6 +86,13 @@ void dcn35_dsc_pg_control( unsigned int dsc_inst, bool power_on); -void dcn35_set_idle_state(const struct dc *dc, bool allow_idle); -uint32_t dcn35_get_idle_state(const struct dc *dc); +void dcn35_set_drr(struct pipe_ctx **pipe_ctx, + int num_pipes, struct dc_crtc_timing_adjust adjust); + +void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_params *params); + +void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx, + int num_pipes, uint32_t v_total_min, uint32_t v_total_max); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index 296bf3a38cb9..df3bf77f3fb4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -68,9 +68,9 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .prepare_bandwidth = dcn35_prepare_bandwidth, .optimize_bandwidth = dcn35_optimize_bandwidth, .update_bandwidth = 
dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, + .set_drr = dcn35_set_drr, .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, + .set_static_screen_control = dcn35_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, .set_avmute = dcn30_set_avmute, .log_hw_state = dcn10_log_hw_state, @@ -118,10 +118,10 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .update_dsc_pg = dcn32_update_dsc_pg, .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, - .block_power_control = dcn35_block_power_control, + .hw_block_power_up = dcn35_hw_block_power_up, + .hw_block_power_down = dcn35_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state + .set_long_vtotal = dcn35_set_long_vblank, }; static const struct hwseq_private_funcs dcn35_private_funcs = { @@ -147,6 +147,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = { //.hubp_pg_control = dcn35_hubp_pg_control, .enable_power_gating_plane = dcn35_enable_power_gating_plane, .dpp_root_clock_control = dcn35_dpp_root_clock_control, + .dpstream_root_clock_control = dcn35_dpstream_root_clock_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn35_update_odm, .set_hdr_multiplier = dcn10_set_hdr_multiplier, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h index b67015032c35..b67015032c35 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile index 59381d24800b..a4b3c1e99ec6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile @@ -1,5 +1,5 @@ # -# Copyright © 2021 Advanced Micro Devices, Inc. +# Copyright (c) 2022-2024 Advanced Micro Devices, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -19,12 +19,10 @@ # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # -# Authors: AMD -# -# Makefile for dcn315. +# Makefile for DCN351. -DCN315 = dcn315_resource.o +DCN351 = dcn351_hwseq.o dcn351_init.o -AMD_DAL_DCN315 = $(addprefix $(AMDDALPATH)/dc/dcn315/,$(DCN315)) +AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351)) -AMD_DISPLAY_FILES += $(AMD_DAL_DCN315) +AMD_DISPLAY_FILES += $(AMD_DAL_DCN351) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c new file mode 100644 index 000000000000..93fe5b262a3d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "resource.h" +#include "dcn351_hwseq.h" +#include "dcn35/dcn35_hwseq.h" + +#define DC_LOGGER_INIT(logger) \ + struct dal_logger *dc_logger = logger + +#define DC_LOGGER \ + dc_logger + +void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state) +{ + int i, j; + + dcn35_calc_blocks_to_gate(dc, context, update_state); + + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (!update_state->pg_pipe_res_update[PG_HUBP][i] && + !update_state->pg_pipe_res_update[PG_DPP][i]) { + for (j = i - 1; j >= 0; j--) { + update_state->pg_pipe_res_update[PG_HUBP][j] = false; + update_state->pg_pipe_res_update[PG_DPP][j] = false; + } + + break; + } + } +} + +void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state) +{ + int i, j; + + dcn35_calc_blocks_to_ungate(dc, context, update_state); + + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + for (j = i - 1; j >= 0; j--) { + update_state->pg_pipe_res_update[PG_HUBP][j] = true; + update_state->pg_pipe_res_update[PG_DPP][j] = true; + } + + break; + } + } +} + +/** + * dcn351_hw_block_power_down() - power down sequence + * + * The following sequence describes the ON-OFF (ONO) for power down: + * + * ONO Region 11, DCPG 19: dsc3 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 3, DCPG 25: hpo - SKIPPED. Should be kept on + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. 
will be pwr dwn after lono timer is armed + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn351_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state) +{ + int i = 0; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + + if (!pg_cntl || dc->debug.ignore_pg) + return; + + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (update_state->pg_pipe_res_update[PG_DSC][i]) { + if (pg_cntl->funcs->dsc_pg_control) + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false); + } + + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (pg_cntl->funcs->hubp_dpp_pg_control) + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false); + } + } + + // domain25 currently always on. + + /* this will need all the clients to unregister optc interrupts, let dmubfw handle this */ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false); + + // domain23 currently always on. + // domain22 currently always on. +} + +/** + * dcn351_hw_block_power_up() - power up sequence + * + * The following sequence describes the ON-OFF (ONO) for power up: + * + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 3, DCPG 25: hpo - SKIPPED + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 11, DCPG 19: dsc3 + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn351_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state) +{ + int i = 0; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + + if (!pg_cntl || dc->debug.ignore_pg) + return; + + // domain22 currently always on. + // domain23 currently always on. + + /* this will need all the clients to unregister optc interrupts, let dmubfw handle this */ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true); + + // domain25 currently always on. + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (pg_cntl->funcs->hubp_dpp_pg_control) + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true); + } + + if (update_state->pg_pipe_res_update[PG_DSC][i]) { + if (pg_cntl->funcs->dsc_pg_control) + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h new file mode 100644 index 000000000000..6d8f3bfb668e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. 
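Aside on dcn351_calc_blocks_to_gate()/dcn351_calc_blocks_to_ungate() above: both post-process the dcn35 result so that HUBP/DPP gating is contiguous from pipe 0 up to the highest in-use pipe. A standalone worked sketch (hypothetical values; the chaining rationale is inferred from the pipe-indexed ONO regions in the kernel-doc above, not stated in the patch):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* gate[i] == true means pipe i's HUBP/DPP may be power gated */
	bool gate[4] = { true, true, false, true }; /* only pipe 2 in use */
	int i, j;

	/* same pass as dcn351_calc_blocks_to_gate(): once the highest
	 * in-use pipe is found, keep every lower-numbered pipe on too */
	for (i = 4 - 1; i >= 0; i--) {
		if (!gate[i]) {
			for (j = i - 1; j >= 0; j--)
				gate[j] = false;
			break;
		}
	}

	for (i = 0; i < 4; i++)
		printf("pipe %d: %s\n", i, gate[i] ? "gate" : "keep on");
	return 0; /* pipes 0-2 stay on, only pipe 3 is gated */
}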
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN351_H__ +#define __DC_HWSS_DCN351_H__ + +#include "hw_sequencer_private.h" + +void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state); +void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state); +void dcn351_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state); +void dcn351_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state); + +#endif /* __DC_HWSS_DCN351_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c new file mode 100644 index 000000000000..a53092cd619b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -0,0 +1,171 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dcn301/dcn301_hwseq.h" +#include "dcn31/dcn31_hwseq.h" +#include "dcn32/dcn32_hwseq.h" +#include "dcn35/dcn35_hwseq.h" +#include "dcn351/dcn351_hwseq.h" + +#include "dcn351_init.h" + +static const struct hw_sequencer_funcs dcn351_funcs = { + .program_gamut_remap = dcn30_program_gamut_remap, + .init_hw = dcn35_init_hw, + .power_down_on_boot = dcn35_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dcn31_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn32_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn35_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn35_prepare_bandwidth, + .optimize_bandwidth = dcn35_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn35_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn35_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dcn30_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_T12 = dce110_edp_wait_for_T12, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn30_enable_writeback, + .disable_writeback = dcn30_disable_writeback, + .update_writeback = dcn30_update_writeback, + .mmhubbub_warmup = dcn30_mmhubbub_warmup, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn30_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn31_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = 
dcn10_calc_vupdate_position, + .power_down = dce110_power_down, + .set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dcn32_disable_link_output, + .z10_restore = dcn35_z10_restore, + .z10_save_init = dcn31_z10_save_init, + .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, + .update_dsc_pg = dcn32_update_dsc_pg, + .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, + .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, + .hw_block_power_up = dcn35_hw_block_power_up, + .hw_block_power_down = dcn35_hw_block_power_down, + .root_clock_control = dcn35_root_clock_control, +}; + +static const struct hwseq_private_funcs dcn351_private_funcs = { + .init_pipes = dcn35_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn32_set_input_transfer_func, + .set_output_transfer_func = dcn32_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = NULL, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn35_plane_atomic_disable, + //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/ + //.hubp_pg_control = dcn35_hubp_pg_control, + .enable_power_gating_plane = dcn35_enable_power_gating_plane, + .dpp_root_clock_control = dcn35_dpp_root_clock_control, + .dpstream_root_clock_control = dcn35_dpstream_root_clock_control, + .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .update_odm = dcn35_update_odm, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_mcm_luts = dcn32_set_mcm_luts, + .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, + .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, + .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, + .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .dsc_pg_control = dcn35_dsc_pg_control, + .dsc_pg_status = dcn32_dsc_pg_status, + .enable_plane = dcn35_enable_plane, +}; + +void dcn351_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn351_funcs; + dc->hwseq->funcs = dcn351_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h new file mode 100644 index 000000000000..970b01008b23 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h @@ -0,0 +1,33 @@ +/* + * 
Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN351_INIT_H__ +#define __DC_DCN351_INIT_H__ + +struct dc; + +void dcn351_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN351_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 452680fe9aab..7c339e7e7117 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -50,7 +50,7 @@ struct pg_block_update; struct subvp_pipe_control_lock_fast_params { struct dc *dc; bool lock; - struct pipe_ctx *pipe_ctx; + bool subvp_immediate_flip; }; struct pipe_control_lock_params { @@ -200,7 +200,7 @@ struct hw_sequencer_funcs { struct dc_state *context); enum dc_status (*apply_ctx_to_hw)(struct dc *dc, struct dc_state *context); - void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, @@ -248,6 +248,7 @@ struct hw_sequencer_funcs { void (*enable_per_frame_crtc_position_reset)(struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]); void (*enable_timing_synchronization)(struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); void (*enable_vblanks_synchronization)(struct dc *dc, @@ -338,6 +339,8 @@ struct hw_sequencer_funcs { /* HW State Logging Related */ void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx); + void (*log_color_state)(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); void (*clear_status_bits)(struct dc *dc, unsigned int mask); @@ -374,10 +377,14 @@ struct hw_sequencer_funcs { /* Idle Optimization Related */ bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable); - bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane, + bool (*does_plane_fit_in_mall)(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, struct dc_cursor_attributes *cursor_attr); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); void (*enable_phantom_streams)(struct dc *dc, struct 
dc_state *context); + void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context); void (*subvp_pipe_control_lock)(struct dc *dc, struct dc_state *context, bool lock, @@ -414,15 +421,16 @@ struct hw_sequencer_funcs { struct pg_block_update *update_state); void (*calc_blocks_to_ungate)(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state); - void (*block_power_control)(struct dc *dc, - struct pg_block_update *update_state, bool power_on); + void (*hw_block_power_up)(struct dc *dc, + struct pg_block_update *update_state); + void (*hw_block_power_down)(struct dc *dc, + struct pg_block_update *update_state); void (*root_clock_control)(struct dc *dc, struct pg_block_update *update_state, bool power_on); - void (*set_idle_state)(const struct dc *dc, bool allow_idle); - uint32_t (*get_idle_state)(const struct dc *dc); bool (*is_pipe_topology_transition_seamless)(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); + void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max); }; void color_space_to_black_color( @@ -452,17 +460,18 @@ void get_mpctree_visual_confirm_color( struct tg_color *color); void get_subvp_visual_confirm_color( - struct dc *dc, - struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); void get_mclk_switch_visual_confirm_color( - struct dc *dc, - struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); +void set_p_state_switch_method( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx); + void hwss_execute_sequence(struct dc *dc, struct block_sequence block_sequence[], int num_steps); @@ -471,8 +480,10 @@ void hwss_build_fast_sequence(struct dc *dc, struct dc_dmub_cmd *dc_dmub_cmd, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], - int *num_steps, - struct pipe_ctx *pipe_ctx); + unsigned int *num_steps, + struct pipe_ctx *pipe_ctx, + struct dc_stream_status *stream_status, + struct dc_state *context); void hwss_send_dmcub_cmd(union block_sequence_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 82c592166875..341219cf4144 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -79,6 +79,7 @@ struct hwseq_private_funcs { void (*update_plane_addr)(const struct dc *dc, struct pipe_ctx *pipe_ctx); void (*plane_atomic_disconnect)(struct dc *dc, + struct dc_state *state, struct pipe_ctx *pipe_ctx); void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); bool (*set_input_transfer_func)(struct dc *dc, @@ -119,6 +120,10 @@ struct hwseq_private_funcs { struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); + void (*dpstream_root_clock_control)( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool clock_on); void (*dpp_pg_control)(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on); @@ -154,7 +159,6 @@ struct hwseq_private_funcs { void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); -#ifdef CONFIG_DRM_AMD_DC_FP void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context); void (*update_force_pstate)(struct dc *dc, struct dc_state *context); void (*update_mall_sel)(struct dc *dc, struct dc_state *context); @@ -164,8 +168,14 @@ struct hwseq_private_funcs { 
void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); + enum dc_status (*apply_single_controller_ctx_to_hw)( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); -#endif + void (*reset_back_end_for_pipe)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index bac1420b1de8..028b2f971e36 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -90,6 +90,9 @@ struct resource_funcs { void (*update_soc_for_wm_a)( struct dc *dc, struct dc_state *context); + unsigned int (*calculate_mall_ways_from_bytes)( + const struct dc *dc, + unsigned int total_size_in_mall_bytes); /** * @populate_dml_pipes - Populate pipe data struct * @@ -200,11 +203,8 @@ struct resource_funcs { unsigned int pipe_cnt, unsigned int index); - bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context, bool fast_update); - void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context); void (*get_panel_config_defaults)(struct dc_panel_config *panel_config); - void (*save_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); - void (*restore_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); + void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx); }; struct audio_support{ @@ -339,7 +339,9 @@ struct stream_resource { }; struct plane_resource { + /* scl_data is scratch space required to program a plane */ struct scaler_data scl_data; + /* Below pointers to hw objects are required to enable the plane */ struct hubp *hubp; struct mem_input *mi; struct input_pixel_processor *ipp; @@ -384,6 +386,16 @@ union pipe_update_flags { uint32_t raw; }; +enum p_state_switch_method { + P_STATE_UNKNOWN = 0, + P_STATE_V_BLANK = 1, + P_STATE_FPO, + P_STATE_V_ACTIVE, + P_STATE_SUB_VP, + P_STATE_DRR_SUB_VP, + P_STATE_V_BLANK_SUB_VP +}; + struct pipe_ctx { struct dc_plane_state *plane_state; struct dc_stream_state *stream; @@ -432,6 +444,7 @@ struct pipe_ctx { struct dwbc *dwbc; struct mcif_wb *mcif_wb; union pipe_update_flags update_flags; + enum p_state_switch_method p_state_type; struct tg_color visual_confirm_color; bool has_vactive_margin; /* subvp_index: only valid if the pipe is a SUBVP_MAIN*/ @@ -461,6 +474,8 @@ struct resource_context { unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; bool is_mpc_3dlut_acquired[MAX_PIPES]; + /* solely used to build scaler data in dml2 */ + struct pipe_ctx temp_pipe; }; struct dce_bw_output { @@ -486,7 +501,7 @@ struct dcn_bw_writeback { struct dcn_bw_output { struct dc_clocks clk; - struct dcn_watermark_set watermarks; + union dcn_watermark_set watermarks; struct dcn_bw_writeback bw_writeback; int compbuf_size_kb; unsigned int mall_ss_size_bytes; @@ -505,6 +520,7 @@ struct bw_context { union bw_output bw; struct display_mode_lib dml; struct dml2_context *dml2; + struct dml2_context *dml2_dc_power_source; }; struct dc_dmub_cmd { @@ -525,6 +541,14 @@ struct dc_state { * @stream_status: Planes status on a given stream */ struct dc_stream_status stream_status[MAX_PIPES]; + /** + * @phantom_streams: Stream state properties for phantoms + 
*/ + struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES]; + /** + * @phantom_planes: Planes state properties for phantoms + */ + struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES]; /** * @stream_count: Total of streams in use @@ -533,6 +557,14 @@ struct dc_state { uint8_t stream_mask; /** + * @phantom_stream_count: Total phantom streams in use + */ + uint8_t phantom_stream_count; + /** + * @phantom_plane_count: Total phantom planes in use + */ + uint8_t phantom_plane_count; + /** * @res_ctx: Persistent state of resources */ struct resource_context res_ctx; @@ -578,16 +610,7 @@ struct dc_state { unsigned int stutter_period_us; } perf_params; - struct { - /* used to temporarily backup plane states of a stream during - * dc update. The reason is that plane states are overwritten - * with surface updates in dc update. Once they are overwritten - * current state is no longer valid. We want to temporarily - * store current value in plane states so we can still recover - * a valid current state during dc update. - */ - struct dc_plane_state plane_states[MAX_SURFACE_NUM]; - } scratch; + enum dc_power_source_type power_source; }; struct replay_context { diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index 9e4ddc985240..55529c5f471c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -31,7 +31,7 @@ #define __DCN_CALCS_H__ #include "bw_fixed.h" -#include "../dml/display_mode_lib.h" +#include "dml/display_mode_lib.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index 33db15d69f23..3f0161d64675 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -36,7 +36,7 @@ struct abm { }; struct abm_funcs { - void (*abm_init)(struct abm *abm, uint32_t back_light); + void (*abm_init)(struct abm *abm, uint32_t back_light, uint32_t user_level); bool (*set_abm_level)(struct abm *abm, unsigned int abm_level); bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst); bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst); @@ -64,7 +64,8 @@ struct abm_funcs { bool (*set_pipe_ex)(struct abm *abm, unsigned int otg_inst, unsigned int option, - unsigned int panel_inst); + unsigned int panel_inst, + unsigned int pwrseq_inst); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h index 6ed1fb8c9300..b6203253111c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h @@ -43,7 +43,8 @@ struct audio_funcs { void (*az_configure)(struct audio *audio, enum signal_type signal, const struct audio_crtc_info *crtc_info, - const struct audio_info *audio_info); + const struct audio_info *audio_info, + const struct audio_dp_link_info *dp_link_info); void (*wall_dto_setup)(struct audio *audio, enum signal_type signal, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index fa9614bcb160..4f7480f60c85 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -62,6 +62,25 @@ struct dcn3_clk_internal { uint32_t CLK4_CLK0_CURRENT_CNT; //fclk }; +struct dcn35_clk_internal { + int dummy; + uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk + uint32_t CLK1_CLK1_CURRENT_CNT; //dppclk + uint32_t CLK1_CLK2_CURRENT_CNT; //dprefclk 
+ uint32_t CLK1_CLK3_CURRENT_CNT; //dcfclk + uint32_t CLK1_CLK4_CURRENT_CNT; //dtbclk + //uint32_t CLK1_CLK5_CURRENT_CNT; //dpiaclk + //uint32_t CLK1_CLK6_CURRENT_CNT; //srdbgclk + uint32_t CLK1_CLK3_DS_CNTL; //dcf_deep_sleep_divider + uint32_t CLK1_CLK3_ALLOW_DS; //dcf_deep_sleep_allow + + uint32_t CLK1_CLK0_BYPASS_CNTL; //dispclk bypass + uint32_t CLK1_CLK1_BYPASS_CNTL; //dppclk bypass + uint32_t CLK1_CLK2_BYPASS_CNTL; //dprefclk bypass + uint32_t CLK1_CLK3_BYPASS_CNTL; //dcfclk bypass + uint32_t CLK1_CLK4_BYPASS_CNTL; //dtbclk bypass +}; + struct dcn301_clk_internal { int dummy; uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk @@ -262,8 +281,6 @@ struct clk_mgr_funcs { void (*set_low_power_state)(struct clk_mgr *clk_mgr); void (*exit_low_power_state)(struct clk_mgr *clk_mgr); bool (*is_ips_supported)(struct clk_mgr *clk_mgr); - void (*set_idle_state)(struct clk_mgr *clk_mgr, bool allow_idle); - uint32_t (*get_idle_state)(struct clk_mgr *clk_mgr); void (*init_clocks)(struct clk_mgr *clk_mgr); @@ -314,6 +331,7 @@ struct clk_mgr { bool force_smu_not_present; bool dc_mode_softmax_enabled; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes + int dp_dto_source_clock_in_khz; // Used to program DP DTO with ss adjustment on DCN314 int dentist_vco_freq_khz; struct clk_state_registers_and_bypass boot_snapshot; struct clk_bw_params *bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 6f4c97543c14..4ba18ea57aad 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -349,13 +349,14 @@ struct clk_mgr_internal { enum dm_pp_clocks_state cur_min_clks_state; bool periodic_retraining_disabled; - unsigned int cur_phyclk_req_table[MAX_PIPES * 2]; + unsigned int cur_phyclk_req_table[MAX_LINKS]; bool smu_present; void *wm_range_table; long long wm_range_table_addr; bool dpm_present; + bool pme_trigger_pending; }; struct clk_mgr_internal_funcs { @@ -393,6 +394,11 @@ static inline int khz_to_mhz_ceil(int khz) return (khz + 999) / 1000; } +static inline int khz_to_mhz_floor(int khz) +{ + return khz / 1000; +} + int clk_mgr_helper_get_active_display_cnt( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 13f12f2a3f81..d4c7885fc916 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -59,8 +59,9 @@ enum dentist_dispclk_change_mode { struct dp_dto_params { int otg_inst; enum signal_type signal; - long long pixclk_hz; - long long refclk_hz; + enum streamclk_source clk_src; + uint64_t pixclk_hz; + uint64_t refclk_hz; }; enum pixel_rate_div { @@ -105,6 +106,10 @@ struct dccg_funcs { void (*otg_drop_pixel)(struct dccg *dccg, uint32_t otg_inst); void (*dccg_init)(struct dccg *dccg); + void (*set_dpstreamclk_root_clock_gating)( + struct dccg *dccg, + int dp_hpo_inst, + bool enable); void (*set_dpstreamclk)( struct dccg *dccg, @@ -141,6 +146,11 @@ struct dccg_funcs { enum physymclk_clock_source clk_src, bool force_enable); + void (*set_physymclk_root_clock_gating)( + struct dccg *dccg, + int phy_inst, + bool enable); + void (*set_dtbclk_dto)( struct dccg *dccg, const struct dtbclk_dto_params *params); @@ -196,6 +206,10 @@ struct dccg_funcs { struct dccg *dccg, enum streamclk_source src, uint32_t otg_inst); + void (*set_dto_dscclk)( + struct dccg 
*dccg, + uint32_t dsc_inst); + + void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); }; #endif //__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 901891316dfb..305fdc127bfc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -26,6 +26,12 @@ #ifndef __DAL_DCHUBBUB_H__ #define __DAL_DCHUBBUB_H__ +/** + * DOC: overview + * + * There is only one common DCHUBBUB. It contains the common request and return + * blocks for the Data Fabric Interface that are not clock/power gated. + */ enum dcc_control { dcc_control__256_256_xxx, @@ -154,7 +160,7 @@ struct hubbub_funcs { bool (*program_watermarks)( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index f4aa76e02518..ca8de345d039 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -27,6 +27,31 @@ #ifndef __DAL_DPP_H__ #define __DAL_DPP_H__ +/** + * DOC: overview + * + * The DPP (Display Pipe and Plane) block is the unified display data + * processing engine in DCN for processing graphic or video data on a per-DPP + * rectangle basis. This rectangle can be a part of an SLS (Single Large + * Surface), a layer to be blended with other DPPs, or a rectangle associated + * with a display tile. + * + * It provides various functions, including: + * - graphic color keyer + * - graphic cursor compositing + * - graphic or video image source to destination scaling + * - image sharpening + * - video format conversion from 4:2:0 or 4:2:2 to 4:4:4 + * - Color Space Conversion + * - Host LUT gamma adjustment + * - Color Gamut Remap + * - brightness and contrast adjustment. + * + * The DPP pipe consists of the Converter and Cursor (CNVC), Scaler (DSCL), + * Color Management (CM), Output Buffer (OBUF) and Digital Bypass (DPB) modules + * connected in a video/graphics pipeline.
+ */ + #include "transform.h" #include "cursor_reg_cache.h" @@ -141,6 +166,7 @@ struct dcn_dpp_state { uint32_t igam_input_format; uint32_t dgam_lut_mode; uint32_t rgam_lut_mode; + // gamut_remap data for dcn10_get_cm_states() uint32_t gamut_remap_mode; uint32_t gamut_remap_c11_c12; uint32_t gamut_remap_c13_c14; @@ -148,6 +174,16 @@ struct dcn_dpp_state { uint32_t gamut_remap_c23_c24; uint32_t gamut_remap_c31_c32; uint32_t gamut_remap_c33_c34; + // gamut_remap data for dcn*_log_color_state() + struct dpp_grph_csc_adjustment gamut_remap; + uint32_t shaper_lut_mode; + uint32_t lut3d_mode; + uint32_t lut3d_bit_depth; + uint32_t lut3d_size; + uint32_t blnd_lut_mode; + uint32_t pre_dgam_mode; + uint32_t pre_dgam_select; + uint32_t gamcor_mode; }; struct CM_bias_params { @@ -286,10 +322,13 @@ struct dpp_funcs { const struct pwl_params *params); bool (*dpp_program_3dlut)( struct dpp *dpp, - struct tetrahedral_params *params); + const struct tetrahedral_params *params); void (*dpp_cnv_set_alpha_keyer)( struct dpp *dpp_base, struct cnv_color_keyer_params *color_keyer); + + void (*dpp_get_gamut_remap)(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index 86b711dcc785..063efc8128a7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -147,9 +147,10 @@ struct dwb_caps { unsigned int support_ogam :1; unsigned int support_wbscl :1; unsigned int support_ocsc :1; - unsigned int support_stereo :1; + unsigned int support_stereo :1; + unsigned int support_4k_120p :1; } caps; - unsigned int reserved2[9]; /* Reserved for future use, MUST BE 0. */ + unsigned int reserved2[10]; /* Reserved for future use, MUST BE 0. */ }; struct dwbc { @@ -166,8 +167,9 @@ struct dwbc { bool dwb_is_drc; int wb_src_plane_inst;/*hubp, mpcc, inst*/ uint32_t mask_id; - int otg_inst; - bool mvc_cfg; + int otg_inst; + bool mvc_cfg; + struct dc_dwb_params params; }; struct dwbc_funcs { @@ -188,6 +190,14 @@ struct dwbc_funcs { bool (*is_enabled)( struct dwbc *dwbc); + void (*set_fc_enable)( + struct dwbc *dwbc, + enum dwb_frame_capture_enable enable); + + void (*dwb_set_scaler)( + struct dwbc *dwbc, + struct dc_dwb_params *params); + void (*set_stereo)( struct dwbc *dwbc, struct dwb_stereo_params *stereo_params); @@ -201,9 +211,11 @@ struct dwbc_funcs { struct dwbc *dwbc, struct dwb_warmup_params *warmup_params); - + bool (*dwb_get_mcifbuf_line)( + struct dwbc *dwbc, unsigned int *buf_idx, + unsigned int *cur_line, + unsigned int *over_run); #if defined(CONFIG_DRM_AMD_DC_FP) - void (*dwb_program_output_csc)( struct dwbc *dwbc, enum dc_color_space color_space, @@ -212,17 +224,17 @@ struct dwbc_funcs { bool (*dwb_ogam_set_output_transfer_func)( struct dwbc *dwbc, const struct dc_transfer_func *in_transfer_func_dwb_ogam); - +#endif //TODO: merge with output_transfer_func? 
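The dpp_get_gamut_remap hook added to dpp_funcs above is the read-back counterpart of the existing gamut-remap programming, and it feeds the new gamut_remap field of dcn_dpp_state for dcn*_log_color_state(). A hedged sketch of a caller (the wrapper name and NULL check are illustrative, not part of this patch):

static void capture_gamut_remap(struct dpp *dpp, struct dcn_dpp_state *s)
{
	/* The hook is optional; older DCN families may leave it unset. */
	if (dpp->funcs->dpp_get_gamut_remap)
		dpp->funcs->dpp_get_gamut_remap(dpp, &s->gamut_remap);
}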
bool (*dwb_ogam_set_input_transfer_func)( struct dwbc *dwbc, const struct dc_transfer_func *in_transfer_func_dwb_ogam); -#endif + + void (*get_drr_time_stamp)( + struct dwbc *dwbc, uint32_t *time_stamp); + bool (*get_dwb_status)( struct dwbc *dwbc); - void (*dwb_set_scaler)( - struct dwbc *dwbc, - struct dc_dwb_params *params); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 7f3f9b69e903..72610cd7eae0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -26,13 +26,24 @@ #ifndef __DAL_HUBP_H__ #define __DAL_HUBP_H__ +/** + * DOC: overview + * + * Display Controller Hub (DCHUB) is the gateway between the Scalable Data Port + * (SDP) and DCN. This component has multiple features, such as memory + * arbitration, rotation, and cursor manipulation. + * + * There is one HUBP allocated per pipe, which fetches data and converts + * different pixel formats (e.g. ARGB8888, NV12) into linear, interleaved, + * and fixed-depth streams of pixel data. + */ + #include "mem_input.h" #include "cursor_reg_cache.h" #define OPP_ID_INVALID 0xf #define MAX_TTU 0xffffff - enum cursor_pitch { CURSOR_PITCH_64_PIXELS = 0, CURSOR_PITCH_128_PIXELS, @@ -146,9 +157,7 @@ struct hubp_funcs { void (*set_blank)(struct hubp *hubp, bool blank); void (*set_blank_regs)(struct hubp *hubp, bool blank); -#ifdef CONFIG_DRM_AMD_DC_FP void (*phantom_hubp_post_enable)(struct hubp *hubp); -#endif void (*set_hubp_blank_en)(struct hubp *hubp, bool blank); void (*set_cursor_attributes)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index b95ae9596c3b..c80ebb407add 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -43,10 +43,12 @@ * to be used inside loops and for determining array sizes.
*/ #define MAX_PIPES 6 +#define MAX_PHANTOM_PIPES (MAX_PIPES / 2) +#define MAX_LINKS (MAX_PIPES * 2) #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 -#define MAX_HPO_DP2_LINK_ENCODERS 2 +#define MAX_HPO_DP2_LINK_ENCODERS 4 struct gamma_curve { uint32_t offset; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index dbe7afa9d3a2..af9183f5d69b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -163,12 +163,11 @@ struct link_encoder_funcs { enum signal_type (*get_dig_mode)( struct link_encoder *enc); + void (*set_dio_phy_mux)( struct link_encoder *enc, enum encoder_type_select sel, uint32_t hpo_inst); - void (*set_dig_output_mode)( - struct link_encoder *enc, uint8_t pix_per_container); }; /* diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index b72fb314d804..86c12cd6f47d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -50,11 +50,13 @@ struct dcn_watermarks { uint32_t usr_retraining_ns; }; -struct dcn_watermark_set { - struct dcn_watermarks a; - struct dcn_watermarks b; - struct dcn_watermarks c; - struct dcn_watermarks d; +union dcn_watermark_set { + struct { + struct dcn_watermarks a; + struct dcn_watermarks b; + struct dcn_watermarks c; + struct dcn_watermarks d; + }; // legacy }; struct dce_watermarks { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 61a2406dcc53..34a398f23fc6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -23,13 +23,28 @@ */ /** - * DOC: mpc-overview + * DOC: overview * - * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline + * Multiple Pipe/Plane Combiner (MPC) is a component in the hardware pipeline * that performs blending of multiple planes, using global and per-pixel alpha. * It also performs post-blending color correction operations according to the * hardware capabilities, such as color transformation matrix and gamma 1D and * 3D LUT. + * + * MPC receives output from all DPP pipes and combines them to multiple outputs + * supporting "M MPC inputs -> N MPC outputs" flexible composition + * architecture. It features: + * + * - Programmable blending structure to allow software controlled blending and + * cascading; + * - Programmable window location of each DPP in active region of display; + * - Combining multiple DPP pipes in one active region when a single DPP pipe + * cannot process very large surface; + * - Combining multiple DPP from different SLS with blending; + * - Stereo formats from single DPP in top-bottom or side-by-side modes; + * - Stereo formats from 2 DPPs; + * - Alpha blending of multiple layers from different DPP pipes; + * - Programmable background color; */ #ifndef __DC_MPCC_H__ @@ -83,34 +98,65 @@ enum mpcc_alpha_blend_mode { /** * struct mpcc_blnd_cfg - MPCC blending configuration - * - * @black_color: background color - * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE) - * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the - * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE) - * @global_gain: used when blend mode considers both pixel alpha and plane - * alpha value and assumes the global alpha value. 
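Worth noting about the MAX_LINKS define introduced above: with MAX_PIPES at 6 it evaluates to 12, exactly the MAX_PIPES * 2 sizing that arrays such as cur_phyclk_req_table used before, so the rename changes intent, not storage. A compile-time guard along these lines (illustrative, not part of this patch) would pin the invariant down:

#include <linux/build_bug.h>

/* The link count is derived from the pipe count; keep them in lockstep. */
static_assert(MAX_LINKS == MAX_PIPES * 2, "MAX_LINKS must track MAX_PIPES");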
- * @global_alpha: plane alpha value - * @overlap_only: whether overlapping of different planes is allowed - * @bottom_gain_mode: blend mode for bottom gain setting - * @background_color_bpc: background color for bpc - * @top_gain: top gain setting - * @bottom_inside_gain: blend mode for bottom inside - * @bottom_outside_gain: blend mode for bottom outside */ struct mpcc_blnd_cfg { - struct tg_color black_color; /* background color */ - enum mpcc_alpha_blend_mode alpha_mode; /* alpha blend mode */ - bool pre_multiplied_alpha; /* alpha pre-multiplied mode flag */ + /** + * @black_color: background color. + */ + struct tg_color black_color; + + /** + * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE). + */ + enum mpcc_alpha_blend_mode alpha_mode; + + /** + * @pre_multiplied_alpha: + * Whether pixel color values were pre-multiplied by the alpha channel + * (MPCC_ALPHA_MULTIPLIED_MODE). + */ + bool pre_multiplied_alpha; + + /** + * @global_gain: Used when blend mode considers both pixel alpha and plane. + */ int global_gain; + + /** + * @global_alpha: Plane alpha value. + */ int global_alpha; + + /** + * @overlap_only: Whether overlapping of different planes is allowed. + */ bool overlap_only; /* MPCC top/bottom gain settings */ + + /** + * @bottom_gain_mode: Blend mode for bottom gain setting. + */ int bottom_gain_mode; + + /** + * @background_color_bpc: Background color for bpc. + */ int background_color_bpc; + + /** + * @top_gain: Top gain setting. + */ int top_gain; + + /** + * @bottom_inside_gain: Blend mode for bottom inside. + */ int bottom_inside_gain; + + /** + * @bottom_outside_gain: Blend mode for bottom outside. + */ int bottom_outside_gain; }; @@ -150,34 +196,58 @@ struct mpc_dwb_flow_control { /** * struct mpcc - MPCC connection and blending configuration for a single MPCC instance. - * @mpcc_id: MPCC physical instance - * @dpp_id: DPP input to this MPCC - * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected. - * @blnd_cfg: the blending configuration for this MPCC - * @sm_cfg: stereo mix setting for this MPCC - * @shared_bottom: if MPCC output to both OPP and DWB endpoints, true. Otherwise, false. * * This struct is used as a node in an MPC tree. */ struct mpcc { - int mpcc_id; /* MPCC physical instance */ - int dpp_id; /* DPP input to this MPCC */ - struct mpcc *mpcc_bot; /* pointer to bottom layer MPCC. NULL when not connected */ - struct mpcc_blnd_cfg blnd_cfg; /* The blending configuration for this MPCC */ - struct mpcc_sm_cfg sm_cfg; /* stereo mix setting for this MPCC */ - bool shared_bottom; /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */ + /** + * @mpcc_id: MPCC physical instance. + */ + int mpcc_id; + + /** + * @dpp_id: DPP input to this MPCC + */ + int dpp_id; + + /** + * @mpcc_bot: Pointer to bottom layer MPCC. NULL when not connected. + */ + struct mpcc *mpcc_bot; + + /** + * @blnd_cfg: The blending configuration for this MPCC. + */ + struct mpcc_blnd_cfg blnd_cfg; + + /** + * @sm_cfg: stereo mix setting for this MPCC + */ + struct mpcc_sm_cfg sm_cfg; + + /** + * @shared_bottom: + * + * If MPCC output to both OPP and DWB endpoints, true. Otherwise, false. + */ + bool shared_bottom; }; /** * struct mpc_tree - MPC tree represents all MPCC connections for a pipe. 
* - * @opp_id: the OPP instance that owns this MPC tree - * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint * */ struct mpc_tree { - int opp_id; /* The OPP instance that owns this MPC tree */ - struct mpcc *opp_list; /* The top MPCC layer of the MPC tree that outputs to OPP endpoint */ + /** + * @opp_id: The OPP instance that owns this MPC tree. + */ + int opp_id; + + /** + * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint + */ + struct mpcc *opp_list; }; struct mpc { @@ -199,6 +269,13 @@ struct mpcc_state { uint32_t overlap_only; uint32_t idle; uint32_t busy; + uint32_t shaper_lut_mode; + uint32_t lut3d_mode; + uint32_t lut3d_bit_depth; + uint32_t lut3d_size; + uint32_t rgam_mode; + uint32_t rgam_lut; + struct mpc_grph_gamut_adjustment gamut_remap; }; /** @@ -217,16 +294,20 @@ struct mpc_funcs { * Only used for planes that are part of blending chain for OPP output * * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be added to. - * [in] blnd_cfg - MPCC blending configuration for the new blending layer. - * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. - * stereo mix must disable for the very bottom layer of the tree config. - * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane. - * [in] dpp_id - DPP instance for the plane to be added. - * [in] mpcc_id - The MPCC physical instance to use for blending. - * - * Return: struct mpcc* - MPCC that was added. + * + * - [in/out] mpc - MPC context. + * - [in/out] tree - MPC tree structure that plane will be added to. + * - [in] blnd_cfg - MPCC blending configuration for the new blending layer. + * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. + * stereo mix must disable for the very bottom layer of the tree config. + * - [in] insert_above_mpcc - Insert new plane above this MPCC. + * If NULL, insert as bottom plane. + * - [in] dpp_id - DPP instance for the plane to be added. + * - [in] mpcc_id - The MPCC physical instance to use for blending. + * + * Return: + * + * struct mpcc* - MPCC that was added. */ struct mpcc* (*insert_plane)( struct mpc *mpc, @@ -243,11 +324,14 @@ struct mpc_funcs { * Remove a specified MPCC from the MPC tree. * * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be removed from. - * [in/out] mpcc - MPCC to be removed from tree. * - * Return: void + * - [in/out] mpc - MPC context. + * - [in/out] tree - MPC tree structure that plane will be removed from. + * - [in/out] mpcc - MPCC to be removed from tree. + * + * Return: + * + * void */ void (*remove_mpcc)( struct mpc *mpc, @@ -260,9 +344,12 @@ struct mpc_funcs { * Reset the MPCC HW status by disconnecting all muxes. * * Parameters: - * [in/out] mpc - MPC context. * - * Return: void + * - [in/out] mpc - MPC context. + * + * Return: + * + * void */ void (*mpc_init)(struct mpc *mpc); void (*mpc_init_single_inst)( @@ -275,11 +362,14 @@ struct mpc_funcs { * Update the blending configuration for a specified MPCC. * * Parameters: - * [in/out] mpc - MPC context. - * [in] blnd_cfg - MPCC blending configuration. - * [in] mpcc_id - The MPCC physical instance. * - * Return: void + * - [in/out] mpc - MPC context. + * - [in] blnd_cfg - MPCC blending configuration. + * - [in] mpcc_id - The MPCC physical instance. 
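The mpcc/mpc_tree layout above means each OPP's blending chain is a singly linked list: enter at opp_list and follow mpcc_bot toward the bottom layer. A hedged sketch of walking it (the helper and the log call are illustrative, not part of this patch):

static void dump_mpc_tree(const struct mpc_tree *tree)
{
	const struct mpcc *node;

	/* opp_list is the top-most blending layer; mpcc_bot walks downward. */
	for (node = tree->opp_list; node; node = node->mpcc_bot)
		DC_LOG_DEBUG("OPP%d: MPCC%d <- DPP%d",
			     tree->opp_id, node->mpcc_id, node->dpp_id);
}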
+ * + * Return: + * + * void */ void (*update_blending)( struct mpc *mpc, @@ -289,15 +379,18 @@ struct mpc_funcs { /** * @cursor_lock: * - * Lock cursor updates for the specified OPP. - * OPP defines the set of MPCC that are locked together for cursor. + * Lock cursor updates for the specified OPP. OPP defines the set of + * MPCC that are locked together for cursor. * * Parameters: - * [in] mpc - MPC context. - * [in] opp_id - The OPP to lock cursor updates on - * [in] lock - lock/unlock the OPP * - * Return: void + * - [in] mpc - MPC context. + * - [in] opp_id - The OPP to lock cursor updates on + * - [in] lock - lock/unlock the OPP + * + * Return: + * + * void */ void (*cursor_lock)( struct mpc *mpc, @@ -307,20 +400,25 @@ struct mpc_funcs { /** * @insert_plane_to_secondary: * - * Add DPP into secondary MPC tree based on specified blending position. - * Only used for planes that are part of blending chain for DWB output + * Add DPP into secondary MPC tree based on specified blending + * position. Only used for planes that are part of blending chain for + * DWB output * * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be added to. - * [in] blnd_cfg - MPCC blending configuration for the new blending layer. - * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. - * stereo mix must disable for the very bottom layer of the tree config. - * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane. - * [in] dpp_id - DPP instance for the plane to be added. - * [in] mpcc_id - The MPCC physical instance to use for blending. - * - * Return: struct mpcc* - MPCC that was added. + * + * - [in/out] mpc - MPC context. + * - [in/out] tree - MPC tree structure that plane will be added to. + * - [in] blnd_cfg - MPCC blending configuration for the new blending layer. + * - [in] sm_cfg - MPCC stereo mix configuration for the new blending layer. + * stereo mix must disable for the very bottom layer of the tree config. + * - [in] insert_above_mpcc - Insert new plane above this MPCC. If + * NULL, insert as bottom plane. + * - [in] dpp_id - DPP instance for the plane to be added. + * - [in] mpcc_id - The MPCC physical instance to use for blending. + * + * Return: + * + * struct mpcc* - MPCC that was added. */ struct mpcc* (*insert_plane_to_secondary)( struct mpc *mpc, @@ -337,10 +435,14 @@ struct mpc_funcs { * Remove a specified DPP from the 'secondary' MPC tree. * * Parameters: - * [in/out] mpc - MPC context. - * [in/out] tree - MPC tree structure that plane will be removed from. - * [in] mpcc - MPCC to be removed from tree. - * Return: void + * + * - [in/out] mpc - MPC context. + * - [in/out] tree - MPC tree structure that plane will be removed from. + * - [in] mpcc - MPCC to be removed from tree. + * + * Return: + * + * void */ void (*remove_mpcc_from_secondary)( struct mpc *mpc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 7617fabbd16e..d89c92370d5b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -23,6 +23,22 @@ * */ +/** + * DOC: overview + * + * The Output Plane Processor (OPP) block groups have functions that format + * pixel streams such that they are suitable for display at the display device. 
+ The key functions contained in the OPP are: + * + * - Adaptive Backlight Modulation (ABM) + * - Formatter (FMT), which provides pixel-by-pixel operations to format the + * incoming pixel stream. + * - Output Buffer, which provides pixel replication and overlapping. + * - Interface between MPC and OPTC. + * - Clock and reset generation. + * - CRC generation. + */ + #ifndef __DAL_OPP_H__ #define __DAL_OPP_H__ @@ -321,6 +337,9 @@ struct opp_funcs { bool (*dpg_is_blanked)( struct output_pixel_processor *opp); + bool (*dpg_is_pending)(struct output_pixel_processor *opp); + + void (*opp_dpg_set_blank_color)( struct output_pixel_processor *opp, const struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h new file mode 100644 index 000000000000..8d32e525f05a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/** + * DOC: overview + * + * Output Pipe Timing Combiner (OPTC) includes two major functional blocks: + * Output Data Mapper (ODM) and Output Timing Generator (OTG). + * + * - ODM: the Output Data Mapping block. It can combine input data from + * multiple OPP data pipes into one single data stream, split data from one + * OPP data pipe into multiple data streams, or simply bypass OPP data to DIO. + * - OTG: the Output Timing Generator. It generates the display timing signals + * that drive the display output.
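 *
 * As a worked example (an illustration, not from the hardware docs): a 4-to-1
 * ODM combine on a 7680-pixel-wide timing gives each OPP slice a nominal
 * 7680 / 4 = 1920 pixels per line; real slice sizing may additionally account
 * for scaler overlap and DSC slice alignment.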
+ */ + +#ifndef __DC_OPTC_H__ +#define __DC_OPTC_H__ + +#include "timing_generator.h" + +struct optc { + struct timing_generator base; + + const struct dcn_optc_registers *tg_regs; + const struct dcn_optc_shift *tg_shift; + const struct dcn_optc_mask *tg_mask; + + int opp_count; + + uint32_t max_h_total; + uint32_t max_v_total; + + uint32_t min_h_blank; + + uint32_t min_h_sync_width; + uint32_t min_v_sync_width; + uint32_t min_v_blank; + uint32_t min_v_blank_interlace; + + int vstartup_start; + int vupdate_offset; + int vupdate_width; + int vready_offset; + struct dc_crtc_timing orginal_patched_timing; + enum signal_type signal; +}; + +struct dcn_otg_state { + uint32_t v_blank_start; + uint32_t v_blank_end; + uint32_t v_sync_a_pol; + uint32_t v_total; + uint32_t v_total_max; + uint32_t v_total_min; + uint32_t v_total_min_sel; + uint32_t v_total_max_sel; + uint32_t v_sync_a_start; + uint32_t v_sync_a_end; + uint32_t h_blank_start; + uint32_t h_blank_end; + uint32_t h_sync_a_start; + uint32_t h_sync_a_end; + uint32_t h_sync_a_pol; + uint32_t h_total; + uint32_t underflow_occurred_status; + uint32_t otg_enabled; + uint32_t blank_enabled; + uint32_t vertical_interrupt1_en; + uint32_t vertical_interrupt1_line; + uint32_t vertical_interrupt2_en; + uint32_t vertical_interrupt2_line; + uint32_t otg_master_update_lock; + uint32_t otg_double_buffer_control; +}; + +void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s); + +bool optc1_get_hw_timing(struct timing_generator *tg, struct dc_crtc_timing *hw_crtc_timing); + +bool optc1_validate_timing(struct timing_generator *optc, + const struct dc_crtc_timing *timing); + +void optc1_program_timing(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing, + int vready_offset, + int vstartup_start, + int vupdate_offset, + int vupdate_width, + const enum signal_type signal, + bool use_vbios); + +void optc1_setup_vertical_interrupt0(struct timing_generator *optc, + uint32_t start_line, + uint32_t end_line); + +void optc1_setup_vertical_interrupt1(struct timing_generator *optc, + uint32_t start_line); + +void optc1_setup_vertical_interrupt2(struct timing_generator *optc, + uint32_t start_line); + +void optc1_program_global_sync(struct timing_generator *optc, + int vready_offset, + int vstartup_start, + int vupdate_offset, + int vupdate_width); + +bool optc1_disable_crtc(struct timing_generator *optc); + +bool optc1_is_counter_moving(struct timing_generator *optc); + +void optc1_get_position(struct timing_generator *optc, + struct crtc_position *position); + +uint32_t optc1_get_vblank_counter(struct timing_generator *optc); + +void optc1_get_crtc_scanoutpos(struct timing_generator *optc, + uint32_t *v_blank_start, + uint32_t *v_blank_end, + uint32_t *h_position, + uint32_t *v_position); + +void optc1_set_early_control(struct timing_generator *optc, + uint32_t early_cntl); + +void optc1_wait_for_state(struct timing_generator *optc, + enum crtc_state state); + +void optc1_set_blank(struct timing_generator *optc, + bool enable_blanking); + +bool optc1_is_blanked(struct timing_generator *optc); + +void optc1_program_blank_color(struct timing_generator *optc, + const struct tg_color *black_color); + +bool optc1_did_triggered_reset_occur(struct timing_generator *optc); + +void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst); + +void optc1_disable_reset_trigger(struct timing_generator *optc); + +void optc1_lock(struct timing_generator *optc); + +void optc1_unlock(struct timing_generator *optc); + +void 
optc1_enable_optc_clock(struct timing_generator *optc, bool enable); + +void optc1_set_drr(struct timing_generator *optc, + const struct drr_params *params); + +void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); + +void optc1_set_static_screen_control(struct timing_generator *optc, + uint32_t event_triggers, + uint32_t num_frames); + +void optc1_program_stereo(struct timing_generator *optc, + const struct dc_crtc_timing *timing, + struct crtc_stereo_flags *flags); + +bool optc1_is_stereo_left_eye(struct timing_generator *optc); + +void optc1_clear_optc_underflow(struct timing_generator *optc); + +void optc1_tg_init(struct timing_generator *optc); + +bool optc1_is_tg_enabled(struct timing_generator *optc); + +bool optc1_is_optc_underflow_occurred(struct timing_generator *optc); + +void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable); + +void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable); + +bool optc1_get_otg_active_size(struct timing_generator *optc, + uint32_t *otg_active_width, + uint32_t *otg_active_height); + +void optc1_enable_crtc_reset(struct timing_generator *optc, + int source_tg_inst, + struct crtc_trigger_info *crtc_tp); + +bool optc1_configure_crc(struct timing_generator *optc, const struct crc_params *params); + +bool optc1_get_crc(struct timing_generator *optc, + uint32_t *r_cr, + uint32_t *g_y, + uint32_t *b_cb); + +bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); + +void optc1_set_vtg_params(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing, + bool program_fp2); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h index 24af9d80b937..e97d964a1791 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h @@ -40,6 +40,7 @@ struct panel_cntl_backlight_registers { unsigned int BL_PWM_PERIOD_CNTL; unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; unsigned int PANEL_PWRSEQ_REF_DIV2; + unsigned int USER_LEVEL; }; struct panel_cntl_funcs { @@ -56,12 +57,14 @@ struct panel_cntl_funcs { struct panel_cntl_init_data { struct dc_context *ctx; uint32_t inst; + uint32_t eng_id; }; struct panel_cntl { const struct panel_cntl_funcs *funcs; struct dc_context *ctx; uint32_t inst; + uint32_t pwrseq_inst; /* registers setting needs to be saved and restored at InitBacklight */ struct panel_cntl_backlight_registers stored_backlight_registers; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index a15efadb9183..75b9ec21f297 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -178,10 +178,6 @@ struct stream_encoder_funcs { void (*stop_dp_info_packets)( struct stream_encoder *enc); - void (*reset_fifo)( - struct stream_encoder *enc - ); - void (*dp_blank)( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 9a00a99317b2..cd68ecc242c1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -64,6 +64,12 @@ struct drr_params { bool immediate_flip; }; +struct long_vtotal_params { + uint32_t vertical_total_min; + uint32_t vertical_total_max; + 
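/* Presumably the start of the stretched blanking region used when
 * set_long_vtotal() (added below) extends VTOTAL for DRR; the units and
 * constraints are not documented in this hunk. */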
uint32_t vertical_blank_start; +}; + #define LEFT_EYE_3D_PRIMARY_SURFACE 1 #define RIGHT_EYE_3D_PRIMARY_SURFACE 0 @@ -182,9 +188,7 @@ struct timing_generator_funcs { bool (*enable_crtc)(struct timing_generator *tg); bool (*disable_crtc)(struct timing_generator *tg); -#ifdef CONFIG_DRM_AMD_DC_FP void (*phantom_crtc_post_enable)(struct timing_generator *tg); -#endif void (*disable_phantom_crtc)(struct timing_generator *tg); bool (*immediate_disable_crtc)(struct timing_generator *tg); bool (*is_counter_moving)(struct timing_generator *tg); @@ -333,6 +337,8 @@ struct timing_generator_funcs { void (*init_odm)(struct timing_generator *tg); void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); + void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params); + void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h new file mode 100644 index 000000000000..51da368f5c3e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h @@ -0,0 +1,53 @@ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + */ + +#ifndef __DC_VPG_H__ +#define __DC_VPG_H__ + +struct dc_context; +struct dc_info_packet; + +struct vpg; + +struct vpg_funcs { + void (*update_generic_info_packet)( + struct vpg *vpg, + uint32_t packet_index, + const struct dc_info_packet *info_packet, + bool immediate_update); + + void (*vpg_poweron)( + struct vpg *vpg); + + void (*vpg_powerdown)( + struct vpg *vpg); +}; + +struct vpg { + const struct vpg_funcs *funcs; + struct dc_context *ctx; + int inst; +}; + +#endif /* DC_INC_VPG_H_ */
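The vpg_funcs table above keeps info-packet handling generation-agnostic: consumers go through the hooks instead of touching VPG registers directly. A minimal hedged sketch of such a consumer (the wrapper name and packet index 0 are illustrative, not part of this patch):

static void vpg_send_info_packet(struct vpg *vpg,
				 const struct dc_info_packet *packet)
{
	/* Power the generator up first; the hook may be absent on some HW. */
	if (vpg->funcs->vpg_poweron)
		vpg->funcs->vpg_poweron(vpg);

	/* Queue the SDP into slot 0 and latch it immediately. */
	vpg->funcs->update_generic_info_packet(vpg, 0, packet, true);
}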
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index d7685368140a..7ab8ba5e23ed 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -281,11 +281,16 @@ struct link_service { const unsigned int *power_opts); bool (*edp_setup_replay)(struct dc_link *link, const struct dc_stream_state *stream); + bool (*edp_send_replay_cmd)(struct dc_link *link, + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data); bool (*edp_set_coasting_vtotal)( - struct dc_link *link, uint16_t coasting_vtotal); + struct dc_link *link, uint32_t coasting_vtotal); bool (*edp_replay_residency)(const struct dc_link *link, unsigned int *residency, const bool is_start, - const bool is_alpm); + const enum pr_residency_mode mode); + bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, + const unsigned int *power_opts, uint32_t coasting_vtotal); bool (*edp_wait_for_t12)(struct dc_link *link); bool (*edp_is_ilr_optimization_required)(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 06ca8bfb91e7..361ad6b16b96 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe); int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe); /* - * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice - * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it - * will have 4 pieces of slice. - * return - 0 if pipe is not used for a plane with MPCC combine. otherwise - * the number of MPC "cuts" for the plane. + * Get the number of MPC slices associated with the pipe. + * The function returns 0 if the pipe is not associated with an MPC combine + * pipe topology. */ -int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head); +int resource_get_mpc_slice_count(const struct pipe_ctx *pipe); /* - * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice - * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it - * will have 4 pieces of slice. - * return - 0 if pipe is not used for ODM combine. otherwise - * the number of ODM "cuts" for the timing. + * Get the number of ODM slices associated with the pipe. + * The function returns 0 if the pipe is not associated with an ODM combine + * pipe topology. */ -int resource_get_odm_slice_count(const struct pipe_ctx *otg_master); +int resource_get_odm_slice_count(const struct pipe_ctx *pipe); /* Get the ODM slice index counting from 0 from left most slice */ int resource_get_odm_slice_index(const struct pipe_ctx *opp_head); @@ -501,6 +497,29 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx( const struct resource_pool *pool); /* + * Look for a free pipe in new resource context that is used in current resource + * context as an OTG master pipe. + * + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + +/* + * Look for a free pipe in new resource context that is used as a secondary DPP + * pipe in current resource context. 
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int resource_find_free_pipe_used_as_cur_sec_dpp( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + +/* * Look for a free pipe in new resource context that is used as a secondary DPP * pipe in any MPCC combine in current resource context. * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise @@ -561,20 +580,10 @@ void update_audio_usage( unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); -void get_audio_check(struct audio_info *aud_modes, - struct audio_check *aud_chk); - bool get_temp_dp_link_res(struct dc_link *link, struct link_resource *link_res, struct dc_link_settings *link_settings); -#if defined(CONFIG_DRM_AMD_DC_FP) -struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( - const struct resource_context *res_ctx, - const struct resource_pool *pool, - const struct dc_link *link); -#endif - void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, struct dc_state *context); @@ -611,4 +620,9 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); +/* Setup dc callbacks for dml2 + * @dc: the display core structure + * @dml2_options: struct to hold callbacks + */ +void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options); #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index 076f667a82f6..2d4378780c1a 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -170,4 +170,13 @@ IRQ_DCN35 = irq_service_dcn35.o AMD_DAL_IRQ_DCN35= $(addprefix $(AMDDALPATH)/dc/irq/dcn35/,$(IRQ_DCN35)) -AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN35)
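The resource_find_free_pipe_* helpers introduced above all follow one convention: they return a pipe index on success and FREE_PIPE_INDEX_NOT_FOUND otherwise, so callers can chain them into a preference order. A hedged sketch of such a chain (the wrapper and its ordering are illustrative, not part of this patch):

static int pick_free_pipe(const struct dc_state *cur_ctx,
			  struct dc_state *new_ctx,
			  const struct resource_pool *pool)
{
	/* Prefer a pipe that already served as a secondary DPP pipe. */
	int idx = resource_find_free_pipe_used_as_cur_sec_dpp(
			&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);

	/* Otherwise take any pipe unused in the current context. */
	if (idx == FREE_PIPE_INDEX_NOT_FOUND)
		idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
				&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);

	return idx;
}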
\ No newline at end of file +AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN35) + +############################################################################### +# DCN 351 +############################################################################### +IRQ_DCN351 = irq_service_dcn351.o + +AMD_DAL_IRQ_DCN351= $(addprefix $(AMDDALPATH)/dc/irq/dcn351/,$(IRQ_DCN351)) + +AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN351) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index 1c0d89e675da..bb576a9c5fdb 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service, info->ext_id); uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK; - struct timing_generator *tg = - dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; + struct timing_generator *tg; + + if (pipe_offset >= MAX_PIPES) + return false; + + tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; if (enable) { if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index e8baafa02443..916f0c974637 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -23,8 +23,6 @@ * */ -#include <linux/slab.h> - #include "dm_services.h" #include "include/logger_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 03c5e8ff8cbd..42cdfe6c3538 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -23,8 +23,6 @@ * */ -#include <linux/slab.h> - #include "dm_services.h" #include "include/logger_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c new file mode 100644 index 000000000000..7ec8e0de2f01 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c @@ -0,0 +1,409 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. 
*/ + +#include "dm_services.h" +#include "include/logger_interface.h" +#include "../dce110/irq_service_dce110.h" + + +#include "dcn/dcn_3_5_1_offset.h" +#include "dcn/dcn_3_5_1_sh_mask.h" + +#include "irq_service_dcn351.h" + +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" + +static enum dc_irq_source to_dal_irq_source_dcn351( + struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) +{ + switch (src_id) { + case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK1; + case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK2; + case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK3; + case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK4; + case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK5; + case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK6; + case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC1_VLINE0; + case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC2_VLINE0; + case DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC3_VLINE0; + case DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC4_VLINE0; + case DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC5_VLINE0; + case DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC6_VLINE0; + case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP1; + case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP2; + case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP3; + case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP4; + case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP5; + case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP6; + case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE1; + case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE2; + case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE3; + case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE4; + case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE5; + case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE6; + case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT: + return DC_IRQ_SOURCE_DMCUB_OUTBOX; + case DCN_1_0__SRCID__DC_HPD1_INT: + /* generic src_id for all HPD and HPDRX interrupts */ + switch (ext_id) { + case DCN_1_0__CTXID__DC_HPD1_INT: + return DC_IRQ_SOURCE_HPD1; + case DCN_1_0__CTXID__DC_HPD2_INT: + return DC_IRQ_SOURCE_HPD2; + case DCN_1_0__CTXID__DC_HPD3_INT: + return DC_IRQ_SOURCE_HPD3; + case DCN_1_0__CTXID__DC_HPD4_INT: + return DC_IRQ_SOURCE_HPD4; + case DCN_1_0__CTXID__DC_HPD5_INT: + return DC_IRQ_SOURCE_HPD5; + case DCN_1_0__CTXID__DC_HPD6_INT: + return DC_IRQ_SOURCE_HPD6; + case DCN_1_0__CTXID__DC_HPD1_RX_INT: + return DC_IRQ_SOURCE_HPD1RX; + case DCN_1_0__CTXID__DC_HPD2_RX_INT: + return DC_IRQ_SOURCE_HPD2RX; + case DCN_1_0__CTXID__DC_HPD3_RX_INT: + return DC_IRQ_SOURCE_HPD3RX; + case DCN_1_0__CTXID__DC_HPD4_RX_INT: + return DC_IRQ_SOURCE_HPD4RX; + case DCN_1_0__CTXID__DC_HPD5_RX_INT: + return DC_IRQ_SOURCE_HPD5RX; + case DCN_1_0__CTXID__DC_HPD6_RX_INT: + return DC_IRQ_SOURCE_HPD6RX; + default: + return DC_IRQ_SOURCE_INVALID; + } + break; + + default: + return DC_IRQ_SOURCE_INVALID; 
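/* Two-level decode: src_id selects the interrupt class, and only the shared
 * HPD source (DC_HPD1_INT) consults ext_id to separate HPD1..6 from the
 * corresponding HPD-RX sources. */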
+ } +} + +static bool hpd_ack( + struct irq_service *irq_service, + const struct irq_source_info *info) +{ + uint32_t addr = info->status_reg; + uint32_t value = dm_read_reg(irq_service->ctx, addr); + uint32_t current_status = + get_reg_field_value( + value, + HPD0_DC_HPD_INT_STATUS, + DC_HPD_SENSE_DELAYED); + + dal_irq_service_ack_generic(irq_service, info); + + value = dm_read_reg(irq_service->ctx, info->enable_reg); + + set_reg_field_value( + value, + current_status ? 0 : 1, + HPD0_DC_HPD_INT_CONTROL, + DC_HPD_INT_POLARITY); + + dm_write_reg(irq_service->ctx, info->enable_reg, value); + + return true; +} + +static struct irq_source_info_funcs hpd_irq_info_funcs = { + .set = NULL, + .ack = hpd_ack +}; + +static struct irq_source_info_funcs hpd_rx_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static struct irq_source_info_funcs pflip_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static struct irq_source_info_funcs vblank_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static struct irq_source_info_funcs outbox_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static struct irq_source_info_funcs vline0_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +#undef BASE_INNER +#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] + +/* compile time expand base address. */ +#define BASE(seg) \ + BASE_INNER(seg) + +#define SRI(reg_name, block, id)\ + BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_DMUB(reg_name)\ + BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define IRQ_REG_ENTRY(base, block, reg_num, reg1, mask1, reg2, mask2)\ + REG_STRUCT[base + reg_num].enable_reg = SRI(reg1, block, reg_num),\ + REG_STRUCT[base + reg_num].enable_mask = \ + block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ + REG_STRUCT[base + reg_num].enable_value[0] = \ + block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ + REG_STRUCT[base + reg_num].enable_value[1] = \ + ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \ + REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\ + REG_STRUCT[base + reg_num].ack_mask = \ + block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ + REG_STRUCT[base + reg_num].ack_value = \ + block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \ + +#define IRQ_REG_ENTRY_DMUB(base, reg1, mask1, reg2, mask2)\ + REG_STRUCT[base].enable_reg = SRI_DMUB(reg1),\ + REG_STRUCT[base].enable_mask = \ + reg1 ## __ ## mask1 ## _MASK,\ + REG_STRUCT[base].enable_value[0] = \ + reg1 ## __ ## mask1 ## _MASK,\ + REG_STRUCT[base].enable_value[1] = \ + ~reg1 ## __ ## mask1 ## _MASK, \ + REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\ + REG_STRUCT[base].ack_mask = \ + reg2 ## __ ## mask2 ## _MASK,\ + REG_STRUCT[base].ack_value = \ + reg2 ## __ ## mask2 ## _MASK \ + +#define hpd_int_entry(reg_num)\ + IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1, HPD, reg_num,\ + DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\ + DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\ + REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].funcs = &hpd_irq_info_funcs;\ + REG_STRUCT[DC_IRQ_SOURCE_HPD1 + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\ + +#define hpd_rx_int_entry(reg_num)\ + IRQ_REG_ENTRY(DC_IRQ_SOURCE_HPD1RX, HPD, reg_num,\ + DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\ + DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\ + REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num);\ + 
REG_STRUCT[DC_IRQ_SOURCE_HPD1RX + reg_num].funcs = &hpd_rx_irq_info_funcs;\ + +#define pflip_int_entry(reg_num)\ + IRQ_REG_ENTRY(DC_IRQ_SOURCE_PFLIP1, HUBPREQ, reg_num,\ + DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\ + DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\ + REG_STRUCT[DC_IRQ_SOURCE_PFLIP1 + reg_num].funcs = &pflip_irq_info_funcs\ + +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic + * of DCE's DC_IRQ_SOURCE_VUPDATEx. + */ +#define vupdate_no_lock_int_entry(reg_num)\ + IRQ_REG_ENTRY(DC_IRQ_SOURCE_VUPDATE1, OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ + REG_STRUCT[DC_IRQ_SOURCE_VUPDATE1 + reg_num].funcs = &vupdate_no_lock_irq_info_funcs\ + +#define vblank_int_entry(reg_num)\ + IRQ_REG_ENTRY(DC_IRQ_SOURCE_VBLANK1, OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ + REG_STRUCT[DC_IRQ_SOURCE_VBLANK1 + reg_num].funcs = &vblank_irq_info_funcs\ + +#define vline0_int_entry(reg_num)\ + IRQ_REG_ENTRY(DC_IRQ_SOURCE_DC1_VLINE0, OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ + REG_STRUCT[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num].funcs = &vline0_irq_info_funcs\ + +#define dmub_outbox_int_entry()\ + IRQ_REG_ENTRY_DMUB(DC_IRQ_SOURCE_DMCUB_OUTBOX, \ + DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\ + DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\ + REG_STRUCT[DC_IRQ_SOURCE_DMCUB_OUTBOX].funcs = &outbox_irq_info_funcs + +#define dummy_irq_entry(irqno) \ + REG_STRUCT[irqno].funcs = &dummy_irq_info_funcs\ + +#define i2c_int_entry(reg_num) \ + dummy_irq_entry(DC_IRQ_SOURCE_I2C_DDC ## reg_num) + +#define dp_sink_int_entry(reg_num) \ + dummy_irq_entry(DC_IRQ_SOURCE_DPSINK ## reg_num) + +#define gpio_pad_int_entry(reg_num) \ + dummy_irq_entry(DC_IRQ_SOURCE_GPIOPAD ## reg_num) + +#define dc_underflow_int_entry(reg_num) \ + dummy_irq_entry(DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW) + +static struct irq_source_info_funcs dummy_irq_info_funcs = { + .set = dal_irq_service_dummy_set, + .ack = dal_irq_service_dummy_ack +}; + +#define dcn351_irq_init_part_1() {\ + dummy_irq_entry(DC_IRQ_SOURCE_INVALID); \ + hpd_int_entry(0); \ + hpd_int_entry(1); \ + hpd_int_entry(2); \ + hpd_int_entry(3); \ + hpd_int_entry(4); \ + hpd_rx_int_entry(0); \ + hpd_rx_int_entry(1); \ + hpd_rx_int_entry(2); \ + hpd_rx_int_entry(3); \ + hpd_rx_int_entry(4); \ + i2c_int_entry(1); \ + i2c_int_entry(2); \ + i2c_int_entry(3); \ + i2c_int_entry(4); \ + i2c_int_entry(5); \ + i2c_int_entry(6); \ + dp_sink_int_entry(1); \ + dp_sink_int_entry(2); \ + dp_sink_int_entry(3); \ + dp_sink_int_entry(4); \ + dp_sink_int_entry(5); \ + dp_sink_int_entry(6); \ + dummy_irq_entry(DC_IRQ_SOURCE_TIMER); \ + pflip_int_entry(0); \ + pflip_int_entry(1); \ + pflip_int_entry(2); \ + pflip_int_entry(3); \ + dummy_irq_entry(DC_IRQ_SOURCE_PFLIP5); \ + dummy_irq_entry(DC_IRQ_SOURCE_PFLIP6); \ + dummy_irq_entry(DC_IRQ_SOURCE_PFLIP_UNDERLAY0); \ + gpio_pad_int_entry(0); \ + gpio_pad_int_entry(1); \ + gpio_pad_int_entry(2); \ + gpio_pad_int_entry(3); \ + gpio_pad_int_entry(4); \ + gpio_pad_int_entry(5); \ + gpio_pad_int_entry(6); \ + gpio_pad_int_entry(7); \ + gpio_pad_int_entry(8); \ + gpio_pad_int_entry(9); \ + gpio_pad_int_entry(10); \ + gpio_pad_int_entry(11); \ + gpio_pad_int_entry(12); \ + gpio_pad_int_entry(13); \ + gpio_pad_int_entry(14); \ + gpio_pad_int_entry(15); \ + 
gpio_pad_int_entry(16); \ + gpio_pad_int_entry(17); \ + gpio_pad_int_entry(18); \ + gpio_pad_int_entry(19); \ + gpio_pad_int_entry(20); \ + gpio_pad_int_entry(21); \ + gpio_pad_int_entry(22); \ + gpio_pad_int_entry(23); \ + gpio_pad_int_entry(24); \ + gpio_pad_int_entry(25); \ + gpio_pad_int_entry(26); \ + gpio_pad_int_entry(27); \ + gpio_pad_int_entry(28); \ + gpio_pad_int_entry(29); \ + gpio_pad_int_entry(30); \ + dc_underflow_int_entry(1); \ + dc_underflow_int_entry(2); \ + dc_underflow_int_entry(3); \ + dc_underflow_int_entry(4); \ + dc_underflow_int_entry(5); \ + dc_underflow_int_entry(6); \ + dummy_irq_entry(DC_IRQ_SOURCE_DMCU_SCP); \ + dummy_irq_entry(DC_IRQ_SOURCE_VBIOS_SW); \ +} + +#define dcn351_irq_init_part_2() {\ + vupdate_no_lock_int_entry(0); \ + vupdate_no_lock_int_entry(1); \ + vupdate_no_lock_int_entry(2); \ + vupdate_no_lock_int_entry(3); \ + vblank_int_entry(0); \ + vblank_int_entry(1); \ + vblank_int_entry(2); \ + vblank_int_entry(3); \ + vline0_int_entry(0); \ + vline0_int_entry(1); \ + vline0_int_entry(2); \ + vline0_int_entry(3); \ + dummy_irq_entry(DC_IRQ_SOURCE_DC5_VLINE1); \ + dummy_irq_entry(DC_IRQ_SOURCE_DC6_VLINE1); \ + dmub_outbox_int_entry(); \ +} + +#define dcn351_irq_init() {\ + dcn351_irq_init_part_1(); \ + dcn351_irq_init_part_2(); \ +} + +static struct irq_source_info irq_source_info_dcn351[DAL_IRQ_SOURCES_NUMBER] = {0}; + +static struct irq_service_funcs irq_service_funcs_dcn351 = { + .to_dal_irq_source = to_dal_irq_source_dcn351 +}; + +static void dcn351_irq_construct( + struct irq_service *irq_service, + struct irq_service_init_data *init_data) +{ + struct dc_context *ctx = init_data->ctx; + +#define REG_STRUCT irq_source_info_dcn351 + dcn351_irq_init(); + + dal_irq_service_construct(irq_service, init_data); + + irq_service->info = irq_source_info_dcn351; + irq_service->funcs = &irq_service_funcs_dcn351; +} + +struct irq_service *dal_irq_service_dcn351_create( + struct irq_service_init_data *init_data) +{ + struct irq_service *irq_service = kzalloc(sizeof(*irq_service), + GFP_KERNEL); + + if (!irq_service) + return NULL; + + dcn351_irq_construct(irq_service, init_data); + return irq_service; +} diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h new file mode 100644 index 000000000000..4094631ffec6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2021 Advanced Micro Devices, Inc. 
*/ + +#ifndef __DAL_IRQ_SERVICE_DCN351_H__ +#define __DAL_IRQ_SERVICE_DCN351_H__ + +#include "../irq_service.h" + +struct irq_service *dal_irq_service_dcn351_create( + struct irq_service_init_data *init_data); + +#endif /* __DAL_IRQ_SERVICE_DCN351_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 21a39afd274b..8d1a1cc94a8b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -53,6 +53,7 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate) return LINK_RATE_UHBR10; case DP_TEST_LINK_RATE_UHBR20: return LINK_RATE_UHBR20; + case DP_TEST_LINK_RATE_UHBR13_5_LEGACY: case DP_TEST_LINK_RATE_UHBR13_5: return LINK_RATE_UHBR13_5; default: @@ -60,22 +61,6 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate) } } -static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern) -{ - return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern && - test_pattern <= DP_TEST_PATTERN_SQUARE_END); -} - -static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) -{ - if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern && - test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) || - test_pattern == DP_TEST_PATTERN_VIDEO_MODE) - return true; - else - return false; -} - static void dp_retrain_link_dp_test(struct dc_link *link, struct dc_link_settings *link_setting, bool skip_video_pattern) @@ -119,6 +104,11 @@ static void dp_test_send_link_training(struct dc_link *link) 1); link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate); + if (link_settings.link_rate == LINK_RATE_UNKNOWN) { + DC_LOG_ERROR("%s: Invalid test link rate.", __func__); + ASSERT(0); + } + /* Set preferred link settings */ link->verified_link_cap.lane_count = link_settings.lane_count; link->verified_link_cap.link_rate = link_settings.link_rate; @@ -355,7 +345,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) test_pattern_size); } - if (is_dp_phy_sqaure_pattern(test_pattern)) { + if (IS_DP_PHY_SQUARE_PATTERN(test_pattern)) { test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) core_link_read_dpcd( link, @@ -457,7 +447,7 @@ static void set_crtc_test_pattern(struct dc_link *link, controller_color_space = pipe_ctx->stream_res.test_pattern_params.color_space; if (controller_color_space == CONTROLLER_DP_COLOR_SPACE_UDEFINED) { - DC_LOG_WARNING("%s: Color space must be defined for test pattern", __func__); + DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__); ASSERT(0); } @@ -592,6 +582,7 @@ bool dp_set_test_pattern( const unsigned char *p_custom_pattern, unsigned int cust_pattern_size) { + const struct link_hwss *link_hwss; struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; struct pipe_ctx *pipe_ctx = NULL; unsigned int lane; @@ -616,6 +607,8 @@ bool dp_set_test_pattern( if (pipe_ctx == NULL) return false; + link->pending_test_pattern = test_pattern; + /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */ if (link->test_pattern_enabled && test_pattern == DP_TEST_PATTERN_VIDEO_MODE) { @@ -636,12 +629,13 @@ bool dp_set_test_pattern( /* Reset Test Pattern state */ link->test_pattern_enabled = false; link->current_test_pattern = test_pattern; + link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED; return true; } /* Check for PHY Test Patterns */ - if (is_dp_phy_pattern(test_pattern)) { + if 
(IS_DP_PHY_PATTERN(test_pattern)) { /* Set DPCD Lane Settings before running test pattern */ if (p_link_settings != NULL) { if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && @@ -674,6 +668,7 @@ bool dp_set_test_pattern( /* Set Test Pattern state */ link->test_pattern_enabled = true; link->current_test_pattern = test_pattern; + link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED; if (p_link_settings != NULL) dpcd_set_link_settings(link, p_link_settings); @@ -749,7 +744,7 @@ bool dp_set_test_pattern( return false; if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - if (is_dp_phy_sqaure_pattern(test_pattern)) + if (IS_DP_PHY_SQUARE_PATTERN(test_pattern)) core_link_write_dpcd(link, DP_LINK_SQUARE_PATTERN, p_custom_pattern, @@ -828,11 +823,9 @@ bool dp_set_test_pattern( pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); /* update MSA to requested color space */ - pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, - &pipe_ctx->stream->timing, - color_space, - pipe_ctx->stream->use_vsc_sdp_for_colorimetry, - link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + pipe_ctx->stream->output_color_space = color_space; + link_hwss->setup_stream_attribute(pipe_ctx); if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) @@ -879,6 +872,7 @@ bool dp_set_test_pattern( /* Set Test Pattern state */ link->test_pattern_enabled = true; link->current_test_pattern = test_pattern; + link->pending_test_pattern = DP_TEST_PATTERN_UNSUPPORTED; } return true; @@ -890,7 +884,7 @@ void dp_set_preferred_link_settings(struct dc *dc, { int i; struct pipe_ctx *pipe; - struct dc_stream_state *link_stream; + struct dc_stream_state *link_stream = 0; struct dc_link_settings store_settings = *link_setting; link->preferred_link_setting = store_settings; diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c index fbcd8fb58ea8..c8c55f196f8d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c @@ -24,7 +24,6 @@ */ #include "link_dp_trace.h" #include "link/protocols/link_dpcd.h" -#include "link.h" void dp_trace_init(struct dc_link *link) { diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h index f4633d3cf9b9..a1f72fe378ee 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -22,6 +22,16 @@ * Authors: AMD * */ + +/** + * DOC: overview + * + * Display Input Output (DIO) is the display input and output unit in DCN. It + * includes output encoders to support different display outputs, like + * DisplayPort, HDMI, DVI, and others. It also includes the control and + * status channels for these interfaces.
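+ * + * A DIO-backed link is programmed through the generic link_hwss vtable rather + * than by touching DIO registers directly; for instance, the dp_set_test_pattern() + * hunk in this patch resolves the hwss for the link and then programs the + * stream through it: + * + * const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + * link_hwss->setup_stream_attribute(pipe_ctx);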
+ */ + #ifndef __LINK_HWSS_DIO_H__ #define __LINK_HWSS_DIO_H__ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c index b659baa23147..348ea4cb832d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c @@ -80,21 +80,23 @@ static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; if (tp_params == NULL) return false; - if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && - link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) { + if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern)) // Deprogram overrides from previous test pattern dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); - } switch (tp_params->dp_phy_pattern) { case DP_TEST_PATTERN_80BIT_CUSTOM: if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern, pltpat_custom, tp_params->custom_pattern_size) != 0) return false; + hw_tp_params.custom_pattern = tp_params->custom_pattern; + hw_tp_params.custom_pattern_size = tp_params->custom_pattern_size; break; case DP_TEST_PATTERN_D102: break; @@ -185,13 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = { bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link) { - if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) - return false; - - if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) - return false; - - return true; + return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN); } const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void) diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c index b621b97711b6..3e6c7be7e278 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -74,13 +74,16 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link, static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link, struct encoder_set_dp_phy_pattern_param *tp_params) { + uint8_t clk_src = 0x4C; + uint8_t pattern = 0x4F; /* SQ128 */ + const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; - const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0}; - const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0}; + const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, clk_src}; + const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, clk_src}; const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21}; const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21}; - const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F}; - const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F}; + const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, pattern}; + const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, pattern}; const uint8_t 
vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20}; const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20}; const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20}; @@ -123,18 +126,20 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 }; const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; + if (tp_params == NULL) return false; - if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN || - tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) { + if (!IS_DP_PHY_SQUARE_PATTERN(tp_params->dp_phy_pattern)) { // Deprogram overrides from previously set square wave override if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM || link->current_test_pattern == DP_TEST_PATTERN_D102) link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_exit_manual_automation_0[0], sizeof(vendor_lttpr_exit_manual_automation_0)); - else + else if (IS_DP_PHY_SQUARE_PATTERN(link->current_test_pattern)) dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); return false; @@ -148,8 +153,6 @@ static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params); - dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]); - return true; } @@ -170,16 +173,18 @@ static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link, const struct dc_link_settings *link_settings, const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) { - link_res->hpo_dp_link_enc->funcs->set_ffe( - link_res->hpo_dp_link_enc, - link_settings, - lane_settings[0].FFE_PRESET.raw); - - // FFE is programmed when retimer is programmed for SQ128, but explicit - // programming needed here as well in case FFE-only update is requested - if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && - link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) - dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]); + // Don't update our HW FFE when outputting phy test patterns + if (IS_DP_PHY_PATTERN(link->pending_test_pattern)) { + // Directly program FIXED_VS retimer FFE for SQ128 override + if (IS_DP_PHY_SQUARE_PATTERN(link->pending_test_pattern)) { + dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]); + } + } else { + link_res->hpo_dp_link_enc->funcs->set_ffe( + link_res->hpo_dp_link_enc, + link_settings, + lane_settings[0].FFE_PRESET.raw); + } } static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link, @@ -214,13 +219,7 @@ static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = { bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link) { - if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) - return false; - - if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) - return false; - - return true; + return requires_fixed_vs_pe_retimer_dio_link_hwss(link); } const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index d6f0f857c05a..0d523dc43d02 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -41,6 +41,7 @@ #include 
"protocols/link_dp_dpia.h" #include "protocols/link_dp_phy.h" #include "protocols/link_dp_training.h" +#include "protocols/link_dp_dpia_bw.h" #include "accessories/link_dp_trace.h" #include "link_enc_cfg.h" @@ -515,8 +516,8 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) static void read_current_link_settings_on_detect(struct dc_link *link) { union lane_count_set lane_count_set = {0}; - uint8_t link_bw_set; - uint8_t link_rate_set; + uint8_t link_bw_set = 0; + uint8_t link_rate_set = 0; uint32_t read_dpcd_retry_cnt = 10; enum dc_status status = DC_ERROR_UNEXPECTED; int i; @@ -879,7 +880,7 @@ static bool detect_link_and_local_sink(struct dc_link *link, (link->dpcd_sink_ext_caps.bits.oled == 1)) { dpcd_set_source_specific_data(link); msleep(post_oui_delay); - set_cached_brightness_aux(link); + set_default_brightness_aux(link); } return true; @@ -991,6 +992,23 @@ static bool detect_link_and_local_sink(struct dc_link *link, if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->reported_link_cap.link_rate > LINK_RATE_HIGH3) link->reported_link_cap.link_rate = LINK_RATE_HIGH3; + + /* + * If this is DP over USB4 link then we need to: + * - Enable BW ALLOC support on DPtx if applicable + */ + if (dc->config.usb4_bw_alloc_support) { + if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) { + /* update with non reduced link cap if bw allocation mode is supported */ + if (link->dpia_bw_alloc_config.nrd_max_link_rate && + link->dpia_bw_alloc_config.nrd_max_lane_count) { + link->reported_link_cap.link_rate = + link->dpia_bw_alloc_config.nrd_max_link_rate; + link->reported_link_cap.lane_count = + link->dpia_bw_alloc_config.nrd_max_lane_count; + } + } + } break; } @@ -1088,6 +1106,9 @@ static bool detect_link_and_local_sink(struct dc_link *link, if (sink->edid_caps.panel_patch.skip_scdc_overwrite) link->ctx->dc->debug.hdmi20_disable = true; + if (sink->edid_caps.panel_patch.remove_sink_ext_caps) + link->dpcd_sink_ext_caps.raw = 0; + if (dc_is_hdmi_signal(link->connector_signal)) read_scdc_caps(link->ddc, link->local_sink); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 34a4a8c0e18c..b53ad18dbfbc 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -55,6 +55,8 @@ #include "dccg.h" #include "clk_mgr.h" #include "atomfirmware.h" +#include "vpg.h" + #define DC_LOGGER \ dc_logger #define DC_LOGGER_INIT(logger) \ @@ -67,7 +69,6 @@ #define RETIMER_REDRIVER_INFO(...) 
\ DC_LOG_RETIMER_REDRIVER( \ __VA_ARGS__) -#include "dc/dcn30/dcn30_vpg.h" #define MAX_MTP_SLOT_COUNT 64 #define LINK_TRAINING_ATTEMPTS 4 @@ -127,7 +128,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) if (link->ep_type == DISPLAY_ENDPOINT_PHY && link->link_enc->funcs->get_dig_frontend && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { - unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); + int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (fe != ENGINE_ID_UNKNOWN) for (j = 0; j < dc->res_pool->stream_enc_count; j++) { @@ -725,7 +726,7 @@ static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable) static void enable_mst_on_sink(struct dc_link *link, bool enable) { - unsigned char mstmCntl; + unsigned char mstmCntl = 0; core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); if (enable) @@ -776,10 +777,26 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) */ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { + /* TODO: Move this to HWSS as this is a hardware programming sequence, not a + * link layer sequence + */ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; + struct dccg *dccg = dc->res_pool->dccg; + /* It has been found that when DSCCLK is lower than 16MHz, DCN register + * accesses hang. When DSCCLK is based on refclk, DSCCLK is always a + * fixed value higher than 16MHz, so the issue doesn't occur. When DSCCLK is + * generated by DTO, DSCCLK is based on 1/3 dispclk. For small timings + * with DSC such as 480p60Hz, the dispclk could be low enough to trigger + * this problem. We are implementing a workaround here to keep using dscclk + * based on the fixed-value refclk when the timing's pixel clock is smaller + * than 3x16MHz (i.e. 48MHz), to avoid hitting this problem.
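+ * (stream->timing.pix_clk_100hz is expressed in units of 100Hz, so the literal + * 480000 tested just below corresponds exactly to this 3x16MHz = 48MHz cut-off.)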
+ */ + bool should_use_dto_dscclk = (dccg->funcs->set_dto_dscclk != NULL) && + stream->timing.pix_clk_100hz > 480000; DC_LOGGER_INIT(dsc->ctx->logger); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) @@ -787,7 +804,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) if (enable) { struct dsc_config dsc_cfg; - struct dsc_optc_config dsc_optc_cfg; + struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; /* Enable DSC hw block */ @@ -802,11 +819,15 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); + if (should_use_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, dsc->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); + if (should_use_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); } dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; dsc_cfg.pic_width *= opp_cnt; @@ -856,9 +877,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) } /* disable DSC block */ + if (dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, pipe_ctx->stream_res.dsc->inst); pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + if (dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, odm_pipe->stream_res.dsc->inst); odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); + } } } @@ -875,11 +901,15 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; struct dc_stream_state *stream = pipe_ctx->stream; - DC_LOGGER_INIT(dsc->ctx->logger); - if (!pipe_ctx->stream->timing.flags.DSC || !dsc) + if (!pipe_ctx->stream->timing.flags.DSC) return false; + if (!dsc) + return false; + + DC_LOGGER_INIT(dsc->ctx->logger); + if (enable) { struct dsc_config dsc_cfg; uint8_t dsc_packed_pps[128]; @@ -1057,18 +1087,21 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) uint32_t denominator = 1; /* - * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 + * The 1.006 factor (margin 5300ppm + 300ppm ~ 0.6% as per spec) is not + * required when determining PBN/time slot utilization on the link between + * us and the branch, since that overhead is already accounted for in + * the get_pbn_per_slot function. 
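+ * + * Worked example with an illustrative (hypothetical) figure: a stream needing + * 4,320,000 kbps maps to 4320000 * 64 / (54 * 8 * 1000) = 640 PBN under the + * conversion below.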
+ * * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on * common multiplier to render an integer PBN for all link rate/lane * counts combinations * calculate - * peak_kbps *= (1006/1000) * peak_kbps *= (64/54) - * peak_kbps *= 8 convert to bytes + * peak_kbps /= (8 * 1000) convert to bytes */ - numerator = 64 * PEAK_FACTOR_X1000; - denominator = 54 * 8 * 1000 * 1000; + numerator = 64; + denominator = 54 * 8 * 1000; kbps *= numerator; peak_kbps = dc_fixpt_from_fraction(kbps, denominator); @@ -1247,86 +1280,6 @@ static void remove_stream_from_alloc_table( } } -static enum dc_status deallocate_mst_payload_with_temp_drm_wa( - struct pipe_ctx *pipe_ctx) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - struct dc_dp_mst_stream_allocation_table proposed_table = {0}; - struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); - int i; - bool mst_mode = (link->type == dc_connection_mst_branch); - /* adjust for drm changes*/ - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - const struct dc_link_settings empty_link_settings = {0}; - DC_LOGGER_INIT(link->ctx->logger); - - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); - if (link_hwss->ext.set_hblank_min_symbol_width) - link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, - &empty_link_settings, - avg_time_slots_per_mtp); - - if (dm_helpers_dp_mst_write_payload_allocation_table( - stream->ctx, - stream, - &proposed_table, - false)) - update_mst_stream_alloc_table( - link, - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc, - &proposed_table); - else - DC_LOG_WARNING("Failed to update" - "MST allocation table for" - "pipe idx:%d\n", - pipe_ctx->pipe_idx); - - DC_LOG_MST("%s" - "stream_count: %d: ", - __func__, - link->mst_stream_alloc_table.stream_count); - - for (i = 0; i < MAX_CONTROLLER_NUM; i++) { - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].hpo_dp_stream_enc: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); - } - - if (link_hwss->ext.update_stream_allocation_table == NULL || - link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { - DC_LOG_DEBUG("Unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } - - link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, - &link->mst_stream_alloc_table); - - if (mst_mode) { - dm_helpers_dp_mst_poll_for_allocation_change_trigger( - stream->ctx, - stream); - } - - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - false); - - return DC_OK; -} - static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; @@ -1339,9 +1292,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) const struct dc_link_settings empty_link_settings = {0}; DC_LOGGER_INIT(link->ctx->logger); - if (link->dc->debug.temp_mst_deallocation_sequence) - return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx); - /* deallocate_mst_payload is called before disable link. When mode or * disable/enable monitor, new stream is created which is not in link * stream[] yet. 
For this, payload is not allocated yet, so de-alloc @@ -1414,16 +1364,14 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, &link->mst_stream_alloc_table); - if (mst_mode) { + if (mst_mode) dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - false); - } + dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + stream->ctx, + stream); return DC_OK; } @@ -1504,12 +1452,10 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) stream->ctx, stream); - if (ret != ACT_LINK_LOST) { + if (ret != ACT_LINK_LOST) dm_helpers_dp_mst_send_payload_allocation( stream->ctx, - stream, - true); - } + stream); /* slot X.Y for only current stream */ pbn_per_slot = get_pbn_per_slot(stream); @@ -1630,7 +1576,7 @@ static bool write_128b_132b_sst_payload_allocation_table( break; } } else { - union dpcd_rev dpcdRev; + union dpcd_rev dpcdRev = {0}; if (core_link_read_dpcd( link, @@ -1769,8 +1715,7 @@ enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ dm_helpers_dp_mst_send_payload_allocation( stream->ctx, - stream, - true); + stream); /* notify immediate branch device table update */ if (dm_helpers_dp_mst_write_payload_allocation_table( @@ -1899,8 +1844,7 @@ enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ dm_helpers_dp_mst_send_payload_allocation( stream->ctx, - stream, - true); + stream); } /* increase throttled vcp size */ @@ -2066,17 +2010,11 @@ static enum dc_status enable_link_dp(struct dc_state *state, } } - /* - * If the link is DP-over-USB4 do the following: - * - Train with fallback when enabling DPIA link. Conventional links are + /* Train with fallback when enabling DPIA link. Conventional links are * trained with fallback during sink detection. - * - Allocate only what the stream needs for bw in Gbps. Inform the CM - * in case stream needs more or less bw from what has been allocated - * earlier at plug time. */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) do_fallback = true; - } /* * Temporary w/a to get DP2.0 link rates to work with SST. @@ -2142,8 +2080,7 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->dpcd_sink_ext_caps.bits.oled == 1 || link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { - set_cached_brightness_aux(link); - + set_default_brightness_aux(link); if (link->dpcd_sink_ext_caps.bits.oled == 1) msleep(bl_oled_enable_delay); edp_backlight_enable_aux(link, true); @@ -2183,7 +2120,7 @@ static enum dc_status enable_link_dp_mst( struct pipe_ctx *pipe_ctx) { struct dc_link *link = pipe_ctx->stream->link; - unsigned char mstm_cntl; + unsigned char mstm_cntl = 0; /* sink signal type after MST branch is MST. Multiple MST sinks * share one link. Link DP PHY is enable or training only once. 
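The MST payload hunks above also reshape the DM helper interface: dm_helpers_dp_mst_send_payload_allocation() drops its boolean allocate/deallocate parameter, and deallocation now goes through a dedicated helper. A minimal sketch of the resulting call pattern, using only the signatures visible in this patch:

	/* allocation: push the payload once the allocation change trigger completes */
	if (ret != ACT_LINK_LOST)
		dm_helpers_dp_mst_send_payload_allocation(stream->ctx, stream);

	/* deallocation: let the MST manager drop the VC payload for this stream */
	dm_helpers_dp_mst_update_mst_mgr_for_deallocation(stream->ctx, stream);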
@@ -2259,12 +2196,97 @@ static enum dc_status enable_link( return status; } +static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw) +{ + struct dc_link *link = stream->sink->link; + int req_bw = bw; + + DC_LOGGER_INIT(link->ctx->logger); + + if (!link->dpia_bw_alloc_config.bw_alloc_enabled) + return false; + + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + int sink_index = 0; + int i = 0; + + for (i = 0; i < link->sink_count; i++) { + if (link->remote_sinks[i] == NULL) + continue; + + if (stream->sink->sink_id != link->remote_sinks[i]->sink_id) + req_bw += link->dpia_bw_alloc_config.remote_sink_req_bw[i]; + else + sink_index = i; + } + + link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw; + } + + /* get dp overhead for dp tunneling */ + link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link); + req_bw += link->dpia_bw_alloc_config.dp_overhead; + + if (link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw)) { + if (req_bw <= link->dpia_bw_alloc_config.allocated_bw) { + DC_LOG_DEBUG("%s, Success in allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n", + __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw, + link->dpia_bw_alloc_config.dp_overhead); + } else { + // Cannot get the required bandwidth. + DC_LOG_ERROR("%s, Failed to allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n", + __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw, + link->dpia_bw_alloc_config.dp_overhead); + return false; + } + } else { + DC_LOG_DEBUG("%s, usb4 request bw timeout\n", __func__); + return false; + } + + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + int i = 0; + + for (i = 0; i < link->sink_count; i++) { + if (link->remote_sinks[i] == NULL) + continue; + DC_LOG_DEBUG("%s, remote_sink=%s, request_bw=%d\n", __func__, + (const char *)(&link->remote_sinks[i]->edid_caps.display_name[0]), + link->dpia_bw_alloc_config.remote_sink_req_bw[i]); + } + } + + return true; +} + +static bool allocate_usb4_bandwidth(struct dc_stream_state *stream) +{ + bool ret; + + int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(stream->sink->link)); + + ret = allocate_usb4_bandwidth_for_stream(stream, bw); + + return ret; +} + +static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream) +{ + bool ret; + + ret = allocate_usb4_bandwidth_for_stream(stream, 0); + + return ret; +} + void link_set_dpms_off(struct pipe_ctx *pipe_ctx) { struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + enum dp_panel_mode panel_mode_dp = dp_get_panel_mode(link); DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -2291,9 +2313,14 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) dc->hwss.disable_audio_stream(pipe_ctx); + edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false); + update_psp_stream_config(pipe_ctx, true); dc->hwss.blank_stream(pipe_ctx); + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + deallocate_usb4_bandwidth(pipe_ctx->stream); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && @@ -2536,6 +2563,9 @@ void link_set_dpms_on( } } + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + allocate_usb4_bandwidth(pipe_ctx->stream); + if 
(pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) allocate_mst_payload(pipe_ctx); else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 7abfc67d10a6..cf22b8f28ba6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -213,8 +213,10 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s link_srv->edp_get_replay_state = edp_get_replay_state; link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active; link_srv->edp_setup_replay = edp_setup_replay; + link_srv->edp_send_replay_cmd = edp_send_replay_cmd; link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal; link_srv->edp_replay_residency = edp_replay_residency; + link_srv->edp_set_replay_power_opt_and_coasting_vtotal = edp_set_replay_power_opt_and_coasting_vtotal; link_srv->edp_wait_for_t12 = edp_wait_for_t12; link_srv->edp_is_ilr_optimization_required = @@ -595,24 +597,6 @@ static bool construct_phy(struct dc_link *link, link->ddc_hw_inst = dal_ddc_get_line(get_ddc_pin(link->ddc)); - - if (link->dc->res_pool->funcs->panel_cntl_create && - (link->link_id.id == CONNECTOR_ID_EDP || - link->link_id.id == CONNECTOR_ID_LVDS)) { - panel_cntl_init_data.ctx = dc_ctx; - panel_cntl_init_data.inst = - panel_cntl_init_data.ctx->dc_edp_id_count; - link->panel_cntl = - link->dc->res_pool->funcs->panel_cntl_create( - &panel_cntl_init_data); - panel_cntl_init_data.ctx->dc_edp_id_count++; - - if (link->panel_cntl == NULL) { - DC_ERROR("Failed to create link panel_cntl!\n"); - goto panel_cntl_create_fail; - } - } - enc_init_data.ctx = dc_ctx; bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder); @@ -643,6 +627,23 @@ static bool construct_phy(struct dc_link *link, link->dc->res_pool->dig_link_enc_count++; link->link_enc_hw_inst = link->link_enc->transmitter; + + if (link->dc->res_pool->funcs->panel_cntl_create && + (link->link_id.id == CONNECTOR_ID_EDP || + link->link_id.id == CONNECTOR_ID_LVDS)) { + panel_cntl_init_data.ctx = dc_ctx; + panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count; + panel_cntl_init_data.eng_id = link->eng_id; + link->panel_cntl = + link->dc->res_pool->funcs->panel_cntl_create( + &panel_cntl_init_data); + panel_cntl_init_data.ctx->dc_edp_id_count++; + + if (link->panel_cntl == NULL) { + DC_ERROR("Failed to create link panel_cntl!\n"); + goto panel_cntl_create_fail; + } + } for (i = 0; i < 4; i++) { if (bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index b45fda96eaf6..1aed55b0ab6a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -125,11 +125,9 @@ static bool dp_active_dongle_validate_timing( if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter struct dc_crtc_timing outputTiming = *timing; -#if defined(CONFIG_DRM_AMD_DC_FP) if (timing->flags.DSC && !timing->dsc_cfg.is_frl) /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ outputTiming.flags.DSC = 0; -#endif if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) return false; @@ -346,23 +344,61 @@ enum dc_status link_validate_mode_timing( 
return DC_OK; } +/* + * This function calculates the bandwidth required for the stream timing + * and aggregates the stream bandwidth for the respective dpia link + * + * @stream: pointer to the dc_stream_state struct instance + * @num_streams: number of streams to be validated + * + * return: true if validation succeeded + */ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams) { - bool ret = true; - int bw_needed[MAX_DPIA_NUM]; - struct dc_link *link[MAX_DPIA_NUM]; + int bw_needed[MAX_DPIA_NUM] = {0}; + struct dc_link *dpia_link[MAX_DPIA_NUM] = {0}; + int num_dpias = 0; + + for (unsigned int i = 0; i < num_streams; ++i) { + if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) { + /* new dpia sst stream, check whether it exceeds max dpia */ + if (num_dpias >= MAX_DPIA_NUM) + return false; - if (!num_streams || num_streams > MAX_DPIA_NUM) - return ret; + dpia_link[num_dpias] = stream[i].link; + bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, + dc_link_get_highest_encoding_format(dpia_link[num_dpias])); + num_dpias++; + } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + uint8_t j = 0; + /* check whether it's a known dpia link */ + for (; j < num_dpias; ++j) { + if (dpia_link[j] == stream[i].link) + break; + } + + if (j == num_dpias) { + /* new dpia mst stream, check whether it exceeds max dpia */ + if (num_dpias >= MAX_DPIA_NUM) + return false; + else { + dpia_link[j] = stream[i].link; + num_dpias++; + } + } + + bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing, + dc_link_get_highest_encoding_format(dpia_link[j])); + } + } - for (uint8_t i = 0; i < num_streams; ++i) { + /* Include dp overheads */ + for (uint8_t i = 0; i < num_dpias; ++i) { + int dp_overhead = 0; - link[i] = stream[i].link; - bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, - dc_link_get_highest_encoding_format(link[i])); + dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]); + bw_needed[i] += dp_overhead; } - ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); - - return ret; + return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias); } diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h index 4a954317d0da..595fb05946e9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -25,6 +25,7 @@ #ifndef __LINK_VALIDATION_H__ #define __LINK_VALIDATION_H__ #include "link.h" + enum dc_status link_validate_mode_timing( const struct dc_stream_state *stream, struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index db87aa7b5c90..a01d0842bf8e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -412,12 +412,18 @@ static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) { enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN; - if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) + if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) { cable_max_link_rate = LINK_RATE_UHBR20; - else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) + } else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) { cable_max_link_rate = LINK_RATE_UHBR13_5; - else if
(link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) - cable_max_link_rate = LINK_RATE_UHBR10; + } else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) { + // allow DP40 cables to do UHBR13.5 for passive or unknown cable type + if (link->dpcd_caps.cable_id.bits.CABLE_TYPE < 2) { + cable_max_link_rate = LINK_RATE_UHBR13_5; + } else { + cable_max_link_rate = LINK_RATE_UHBR10; + } + } return cable_max_link_rate; } @@ -986,7 +992,7 @@ enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link static void read_dp_device_vendor_id(struct dc_link *link) { - struct dp_device_vendor_id dp_id; + struct dp_device_vendor_id dp_id = {0}; /* read IEEE branch device id */ core_link_read_dpcd( @@ -1081,7 +1087,7 @@ static void get_active_converter_info( } if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) { - uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ + uint8_t det_caps[16] = {0}; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ union dwnstream_port_caps_byte0 *port_caps = (union dwnstream_port_caps_byte0 *)det_caps; if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0, @@ -1166,7 +1172,7 @@ static void get_active_converter_info( set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); { - struct dp_sink_hw_fw_revision dp_hw_fw_revision; + struct dp_sink_hw_fw_revision dp_hw_fw_revision = {0}; core_link_read_dpcd( link, @@ -1236,7 +1242,7 @@ static void apply_usbc_combo_phy_reset_wa(struct dc_link *link, bool dp_overwrite_extended_receiver_cap(struct dc_link *link) { - uint8_t dpcd_data[16]; + uint8_t dpcd_data[16] = {0}; uint32_t read_dpcd_retry_cnt = 3; enum dc_status status = DC_ERROR_UNEXPECTED; union dp_downstream_port_present ds_port = { 0 }; @@ -1392,7 +1398,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( link->dc, link->link_enc->transmitter); - if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && + if (dc_wake_and_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.cable_id.header.ret_status == 1) { cable_id->raw = cmd.cable_id.data.output_raw; DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); @@ -1402,7 +1408,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) static void retrieve_cable_id(struct dc_link *link) { - union dp_cable_id usbc_cable_id; + union dp_cable_id usbc_cable_id = {0}; link->dpcd_caps.cable_id.raw = 0; core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, @@ -1469,7 +1475,7 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) { - uint8_t lttpr_dpcd_data[8]; + uint8_t lttpr_dpcd_data[8] = {0}; enum dc_status status; bool is_lttpr_present; @@ -1925,8 +1931,8 @@ void detect_edp_sink_caps(struct dc_link *link) uint32_t entry; uint32_t link_rate_in_khz; enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; - uint8_t backlight_adj_cap; - uint8_t general_edp_cap; + uint8_t backlight_adj_cap = 0; + uint8_t general_edp_cap = 0; retrieve_link_cap(link); link->dpcd_caps.edp_supported_link_rates_count = 0; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 0bb749133909..6af42ba9885c 100644 --- 
a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -82,24 +82,33 @@ bool dpia_query_hpd_status(struct dc_link *link) { union dmub_rb_cmd cmd = {0}; struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; - bool is_hpd_high = false; /* prepare QUERY_HPD command */ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; - /* Return HPD status reported by DMUB if query successfully executed. */ - if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) - is_hpd_high = cmd.query_hpd.data.result; - - DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", - __func__, - link->link_index, - link->link_id.enum_id - ENUM_ID_1, - cmd.query_hpd.data.status, - cmd.query_hpd.data.result); - - return is_hpd_high; + /* Query dpia hpd status from dmub */ + if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && + cmd.query_hpd.data.status == AUX_RET_SUCCESS) { + DC_LOG_DEBUG("%s: for link(%d) dpia(%d) success, current_hpd_status(%d) new_hpd_status(%d)\n", + __func__, + link->link_index, + link->link_id.enum_id - ENUM_ID_1, + link->hpd_status, + cmd.query_hpd.data.result); + link->hpd_status = cmd.query_hpd.data.result; + } else { + DC_LOG_ERROR("%s: for link(%d) dpia(%d) failed with status(%d), current_hpd_status(%d) new_hpd_status(0)\n", + __func__, + link->link_index, + link->link_id.enum_id - ENUM_ID_1, + cmd.query_hpd.data.status, + link->hpd_status); + link->hpd_status = false; + } + + return link->hpd_status; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 7581023daa47..0f1c411523a2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -50,15 +50,28 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp) && tmp->hpd_status && tmp->dpia_bw_alloc_config.bw_alloc_enabled); } + static void reset_bw_alloc_struct(struct dc_link *link) { link->dpia_bw_alloc_config.bw_alloc_enabled = false; - link->dpia_bw_alloc_config.sink_verified_bw = 0; - link->dpia_bw_alloc_config.sink_max_bw = 0; + link->dpia_bw_alloc_config.link_verified_bw = 0; + link->dpia_bw_alloc_config.link_max_bw = 0; + link->dpia_bw_alloc_config.allocated_bw = 0; link->dpia_bw_alloc_config.estimated_bw = 0; link->dpia_bw_alloc_config.bw_granularity = 0; + link->dpia_bw_alloc_config.dp_overhead = 0; link->dpia_bw_alloc_config.response_ready = false; + link->dpia_bw_alloc_config.nrd_max_lane_count = 0; + link->dpia_bw_alloc_config.nrd_max_link_rate = 0; + for (int i = 0; i < MAX_SINKS_PER_LINK; i++) + link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0; + DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index); } + +#define BW_GRANULARITY_0 4 // 0.25 Gbps +#define BW_GRANULARITY_1 2 // 0.5 Gbps +#define BW_GRANULARITY_2 1 // 1 Gbps + static uint8_t get_bw_granularity(struct dc_link *link) { uint8_t bw_granularity = 0; @@ -71,16 +84,20 @@ static uint8_t get_bw_granularity(struct dc_link *link) switch (bw_granularity & 0x3) { case 0: - bw_granularity = 4; + bw_granularity = BW_GRANULARITY_0; break; case 1: + bw_granularity = BW_GRANULARITY_1; + break; + case 2: default: - bw_granularity = 2; + bw_granularity = BW_GRANULARITY_2; 
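+ /* granularity code 2 maps to 1 Gbps units; the remaining 2-bit value (3) + * falls through to the same default */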
break; } return bw_granularity; } + static int get_estimated_bw(struct dc_link *link) { uint8_t bw_estimated_bw = 0; @@ -93,31 +110,33 @@ static int get_estimated_bw(struct dc_link *link) return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); } -static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link) + +static int get_non_reduced_max_link_rate(struct dc_link *link) { - if (bw_needed > 0) - *stream_allocated_bw += bw_needed; + uint8_t nrd_max_link_rate = 0; - return true; + core_link_read_dpcd( + link, + DP_TUNNELING_MAX_LINK_RATE, + &nrd_max_link_rate, + sizeof(uint8_t)); + + return nrd_max_link_rate; } -static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link) -{ - bool ret = false; - if (*stream_allocated_bw > 0) { - *stream_allocated_bw -= bw_to_dealloc; - ret = true; - } else { - //Do nothing for now - ret = true; - } +static int get_non_reduced_max_lane_count(struct dc_link *link) +{ + uint8_t nrd_max_lane_count = 0; - // Unplug so reset values - if (!link->hpd_status) - reset_bw_alloc_struct(link); + core_link_read_dpcd( + link, + DP_TUNNELING_MAX_LANE_COUNT, + &nrd_max_lane_count, + sizeof(uint8_t)); - return ret; + return nrd_max_lane_count; } + /* * Read all New BW alloc configuration ex: estimated_bw, allocated_bw, * granuality, Driver_ID, CM_Group, & populate the BW allocation structs @@ -125,67 +144,94 @@ static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, stru */ static void init_usb4_bw_struct(struct dc_link *link) { - // Init the known values + reset_bw_alloc_struct(link); + + /* init the known values */ link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link); link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link); + link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link); + + DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n", + __func__, link->dpia_bw_alloc_config.bw_granularity, + link->dpia_bw_alloc_config.estimated_bw); + DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n", + __func__, link->dpia_bw_alloc_config.nrd_max_link_rate, + link->dpia_bw_alloc_config.nrd_max_lane_count); } + static uint8_t get_lowest_dpia_index(struct dc_link *link) { const struct dc *dc_struct = link->dc; uint8_t idx = 0xFF; int i; - for (i = 0; i < MAX_PIPES * 2; ++i) { + for (i = 0; i < MAX_LINKS; ++i) { if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) continue; - if (idx > dc_struct->links[i]->link_index) + if (idx > dc_struct->links[i]->link_index) { idx = dc_struct->links[i]->link_index; + break; + } } return idx; } + /* - * Get the Max Available BW or Max Estimated BW for each Host Router + * Get the maximum dp tunnel bandwidth of host router * - * @link: pointer to the dc_link struct instance - * @type: ESTIMATD BW or MAX AVAILABLE BW + * @dc: pointer to the dc struct instance + * @hr_index: host router index * - * return: response_ready flag from dc_link struct + * return: host router maximum dp tunnel bandwidth */ -static int get_host_router_total_bw(struct dc_link *link, uint8_t type) +static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index) { - const struct dc *dc_struct = link->dc; - uint8_t lowest_dpia_index = get_lowest_dpia_index(link); - uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0; - struct dc_link
*link_temp; + uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]); + uint8_t hr_index_temp = 0; + struct dc_link *link_dpia_primary, *link_dpia_secondary; int total_bw = 0; - int i; - - for (i = 0; i < MAX_PIPES * 2; ++i) { - if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) - continue; + for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) { - link_temp = dc_struct->links[i]; - if (!link_temp || !link_temp->hpd_status) + if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) continue; - idx_temp = (link_temp->link_index - lowest_dpia_index) / 2; - - if (idx_temp == idx) { - - if (type == HOST_ROUTER_BW_ESTIMATED) - total_bw += link_temp->dpia_bw_alloc_config.estimated_bw; - else if (type == HOST_ROUTER_BW_ALLOCATED) - total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw; + hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2; + + if (hr_index_temp == hr_index) { + link_dpia_primary = dc->links[i]; + link_dpia_secondary = dc->links[i + 1]; + + /** + * If BW allocation enabled on both DPIAs, then + * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary) + * otherwise HR BW = Estimated(bw alloc enabled dpia) + */ + if ((link_dpia_primary->hpd_status && + link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) && + (link_dpia_secondary->hpd_status && + link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) { + total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw + + link_dpia_secondary->dpia_bw_alloc_config.allocated_bw; + } else if (link_dpia_primary->hpd_status && + link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) { + total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw; + } else if (link_dpia_secondary->hpd_status && + link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) { + total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw; + } + break; } } return total_bw; } + /* * Cleanup function for when the dpia is unplugged to reset struct * and perform any required clean up @@ -194,42 +240,49 @@ static int get_host_router_total_bw(struct dc_link *link, uint8_t type) * * return: none */ -static bool dpia_bw_alloc_unplug(struct dc_link *link) +static void dpia_bw_alloc_unplug(struct dc_link *link) { - if (!link) - return true; - - return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, - link->dpia_bw_alloc_config.sink_allocated_bw, link); + if (link) { + DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n", + __func__, link->link_index); + reset_bw_alloc_struct(link); + } } + static void set_usb4_req_bw_req(struct dc_link *link, int req_bw) { uint8_t requested_bw; uint32_t temp; - // 1. Add check for this corner case #1 - if (req_bw > link->dpia_bw_alloc_config.estimated_bw) + /* Error check whether the requested bw is greater than the estimated bw */ + if (req_bw > link->dpia_bw_alloc_config.estimated_bw) { + DC_LOG_ERROR("%s: Request bw greater than estimated bw for link(%d)\n", + __func__, link->link_index); req_bw = link->dpia_bw_alloc_config.estimated_bw; + } temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; requested_bw = temp / Kbps_TO_Gbps; - // Always make sure to add more to account for floating points + /* Always make sure to add more to account for floating points */ if (temp % Kbps_TO_Gbps) ++requested_bw; - // 2. 
Add check for this corner case #2 + /* Error check whether requested and allocated are equal */ req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); - if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) - return; + if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) { + DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n", + __func__, link->link_index); + } - if (core_link_write_dpcd( + link->dpia_bw_alloc_config.response_ready = false; // Reset flag + core_link_write_dpcd( link, REQUESTED_BW, &requested_bw, - sizeof(uint8_t)) == DC_OK) - link->dpia_bw_alloc_config.response_ready = false; // Reset flag + sizeof(uint8_t)); } + /* * Return the response_ready flag from dc_link struct * @@ -241,6 +294,7 @@ static bool get_cm_response_ready_flag(struct dc_link *link) { return link->dpia_bw_alloc_config.response_ready; } + // ------------------------------------------------------------------ // PUBLIC FUNCTIONS // ------------------------------------------------------------------ @@ -277,27 +331,35 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link) DPTX_BW_ALLOCATION_MODE_CONTROL, &response, sizeof(uint8_t)) != DC_OK) { - DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n", - __func__); + DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n", + __func__, link->link_index); } else { // SUCCESS Enabled DPtx BW Allocation Mode Support - link->dpia_bw_alloc_config.bw_alloc_enabled = true; - DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n", - __func__); + DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n", + __func__, link->link_index); ret = true; init_usb4_bw_struct(link); + link->dpia_bw_alloc_config.bw_alloc_enabled = true; + + /* + * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other + * DPIA. CM release preallocation only when allocation is complete. 
Do zero alloc + * to make the CM to release preallocation and update estimated BW correctly for + * all DPIAs per host router + */ + link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); } } out: return ret; } + void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result) { int bw_needed = 0; int estimated = 0; - int host_router_total_estimated_bw = 0; if (!get_bw_alloc_proceed_flag((link))) return; @@ -306,14 +368,22 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res case DPIA_BW_REQ_FAILED: - DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__); + /* + * Ideally, we shouldn't run into this case as we always validate available + * bandwidth and request within that limit + */ + estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + + DC_LOG_ERROR("%s: BW REQ FAILURE for DP-TX Request for link(%d)\n", + __func__, link->link_index); + DC_LOG_ERROR("%s: current estimated_bw(%d), new estimated_bw(%d)\n", + __func__, link->dpia_bw_alloc_config.estimated_bw, estimated); - // Update the new Estimated BW value updated by CM - link->dpia_bw_alloc_config.estimated_bw = - bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + /* Update the new Estimated BW value updated by CM */ + link->dpia_bw_alloc_config.estimated_bw = estimated; + /* Allocate the previously requested bandwidth */ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw); - link->dpia_bw_alloc_config.response_ready = false; /* * If FAIL then it is either: @@ -326,68 +396,34 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res case DPIA_BW_REQ_SUCCESS: - DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__); - - // 1. SUCCESS 1st time before any Pruning is done - // 2. SUCCESS after prev. FAIL before any Pruning is done - // 3. SUCCESS after Pruning is done but before enabling link - bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); - // 1. - if (!link->dpia_bw_alloc_config.sink_allocated_bw) { - - allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link); - link->dpia_bw_alloc_config.sink_verified_bw = - link->dpia_bw_alloc_config.sink_allocated_bw; - - // SUCCESS from first attempt - if (link->dpia_bw_alloc_config.sink_allocated_bw > - link->dpia_bw_alloc_config.sink_max_bw) - link->dpia_bw_alloc_config.sink_verified_bw = - link->dpia_bw_alloc_config.sink_max_bw; - } - // 3. - else if (link->dpia_bw_alloc_config.sink_allocated_bw) { - - // Find out how much do we need to de-alloc - if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed) - deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, - link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link); - else - allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, - bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link); - } + DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n", + __func__, link->link_index); + DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n", + __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed); - // 4. 
If this is the 2nd sink then any unused bw will be reallocated to master DPIA - // => check if estimated_bw changed + link->dpia_bw_alloc_config.allocated_bw = bw_needed; link->dpia_bw_alloc_config.response_ready = true; break; case DPIA_EST_BW_CHANGED: - DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__); - estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); - host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED); - // 1. If due to unplug of other sink - if (estimated == host_router_total_estimated_bw) { - // First update the estimated & max_bw fields - if (link->dpia_bw_alloc_config.estimated_bw < estimated) - link->dpia_bw_alloc_config.estimated_bw = estimated; - } - // 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw - else { - // We lost estimated bw usually due to plug event of other dpia - link->dpia_bw_alloc_config.estimated_bw = estimated; - } + DC_LOG_DEBUG("%s: ESTIMATED BW CHANGED for link(%d)\n", + __func__, link->link_index); + DC_LOG_DEBUG("%s: current estimated_bw(%d), new estimated_bw(%d)\n", + __func__, link->dpia_bw_alloc_config.estimated_bw, estimated); + + link->dpia_bw_alloc_config.estimated_bw = estimated; break; case DPIA_BW_ALLOC_CAPS_CHANGED: - DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__); + DC_LOG_ERROR("%s: BW ALLOC CAPABILITY CHANGED to Disabled for link(%d)\n", + __func__, link->link_index); link->dpia_bw_alloc_config.bw_alloc_enabled = false; break; } @@ -405,21 +441,21 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea if (link->hpd_status && peak_bw > 0) { // If DP over USB4 then we need to check BW allocation - link->dpia_bw_alloc_config.sink_max_bw = peak_bw; - set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw); + link->dpia_bw_alloc_config.link_max_bw = peak_bw; + set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw); do { - if (!(timeout > 0)) + if (timeout > 0) timeout--; else break; - fsleep(10 * 1000); + msleep(10); } while (!get_cm_response_ready_flag(link)); if (!timeout) ret = 0;// ERROR TIMEOUT waiting for response for allocating bw - else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0) - ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED); + else if (link->dpia_bw_alloc_config.allocated_bw > 0) + ret = link->dpia_bw_alloc_config.allocated_bw; } //2. 
Cold Unplug else if (!link->hpd_status) @@ -428,65 +464,102 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea out: return ret; } -int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw) +bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw) { - int ret = 0; + bool ret = false; uint8_t timeout = 10; + DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n", + __func__, link->link_index, link->hpd_status, + link->dpia_bw_alloc_config.allocated_bw, req_bw); + if (!get_bw_alloc_proceed_flag(link)) goto out; - /* - * Sometimes stream uses same timing parameters as the already - * allocated max sink bw so no need to re-alloc - */ - if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) { - set_usb4_req_bw_req(link, req_bw); - do { - if (!(timeout > 0)) - timeout--; - else - break; - udelay(10 * 1000); - } while (!get_cm_response_ready_flag(link)); + set_usb4_req_bw_req(link, req_bw); + do { + if (timeout > 0) + timeout--; + else + break; + msleep(10); + } while (!get_cm_response_ready_flag(link)); - if (!timeout) - ret = 0;// ERROR TIMEOUT waiting for response for allocating bw - else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0) - ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED); - } + if (timeout) + ret = true; out: + DC_LOG_DEBUG("%s: EXIT: timeout(%d), ret(%d)\n", __func__, timeout, ret); return ret; } + bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias) { bool ret = true; - int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }; - uint8_t lowest_dpia_index = 0, dpia_index = 0; - uint8_t i; + int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0; + uint8_t lowest_dpia_index, i, hr_index; if (!num_dpias || num_dpias > MAX_DPIA_NUM) return ret; - //Get total Host Router BW & Validate against each Host Router max BW + lowest_dpia_index = get_lowest_dpia_index(link[0]); + + /* get total Host Router BW with granularity for the given modes */ for (i = 0; i < num_dpias; ++i) { + int granularity_Gbps = 0; + int bw_granularity = 0; if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled) continue; - lowest_dpia_index = get_lowest_dpia_index(link[i]); if (link[i]->link_index < lowest_dpia_index) continue; - dpia_index = (link[i]->link_index - lowest_dpia_index) / 2; - bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i]; - if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) { + granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity); + bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps + + ((bw_needed_per_dpia[i] % granularity_Gbps) ? 
granularity_Gbps : 0); - ret = false; - break; + hr_index = (link[i]->link_index - lowest_dpia_index) / 2; + bw_needed_per_hr[hr_index] += bw_granularity; + } + + /* validate against each Host Router max BW */ + for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) { + if (bw_needed_per_hr[hr_index]) { + host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index); + if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) { + ret = false; + break; + } } } return ret; } + +int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link) +{ + int dp_overhead = 0, link_mst_overhead = 0; + + if (!get_bw_alloc_proceed_flag((link))) + return dp_overhead; + + /* if it's an MST link, add MTPH overhead */ + if ((link->type == dc_connection_mst_branch) && + !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH + * MST overhead is 1/64 of link bandwidth (excluding any overhead) + */ + const struct dc_link_settings *link_cap = + dc_link_get_link_cap(link); + uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate * + (uint32_t)link_cap->lane_count * + LINK_RATE_REF_FREQ_IN_KHZ * 8; + link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0); + } + + /* add all the overheads */ + dp_overhead = link_mst_overhead; + + return dp_overhead; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h index 7292690383ae..3b6d8494f9d5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -59,9 +59,9 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link); * @link: pointer to the dc_link struct instance * @req_bw: Bw requested by the stream * - * return: allocated bw else return 0 + * return: true if allocated successfully */ -int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw); +bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw); /* * Handle the USB4 BW Allocation related functionality here: @@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res */ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias); +/* + * Obtain all the DP overheads in DP tunneling for the DPIA link + * + * @link: pointer to the dc_link struct instance + * + * return: DP overheads in DP tunneling + */ +int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link); + #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 34bf8a9ef738..0fcf0b8530ac 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -120,7 +120,7 @@ bool dp_parse_link_loss_status( static bool handle_hpd_irq_psr_sink(struct dc_link *link) { - union dpcd_psr_configuration psr_configuration; + union dpcd_psr_configuration psr_configuration = {0}; if (!link->psr_settings.psr_feature_enabled) return false; @@ -184,14 +184,14 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) return false; } -static bool handle_hpd_irq_replay_sink(struct dc_link *link) +static void handle_hpd_irq_replay_sink(struct dc_link *link) { - union
dpcd_replay_configuration replay_configuration; + union dpcd_replay_configuration replay_configuration = {0}; /*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/ - union psr_error_status replay_error_status; + union psr_error_status replay_error_status = {0}; if (!link->replay_settings.replay_feature_enabled) - return false; + return; dm_helpers_dp_read_dpcd( link->ctx, @@ -219,6 +219,12 @@ static bool handle_hpd_irq_replay_sink(struct dc_link *link) link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) { bool allow_active; + if (link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR) + link->replay_settings.config.received_desync_error_hpd = 1; + + if (link->replay_settings.config.force_disable_desync_error_check) + return; + /* Acknowledge and clear configuration bits */ dm_helpers_dp_write_dpcd( link->ctx, @@ -243,7 +249,6 @@ static bool handle_hpd_irq_replay_sink(struct dc_link *link) edp_set_replay_allow_active(link, &allow_active, true, false, NULL); } } - return true; } void dp_handle_link_loss(struct dc_link *link) @@ -260,7 +265,7 @@ void dp_handle_link_loss(struct dc_link *link) for (i = count - 1; i >= 0; i--) { // Always use max settings here for DP 1.4a LL Compliance CTS - if (link->is_automated) { + if (link->skip_fallback_on_link_loss) { pipes[i]->link_config.dp_link_settings.lane_count = link->verified_link_cap.lane_count; pipes[i]->link_config.dp_link_settings.link_rate = @@ -275,7 +280,7 @@ void dp_handle_link_loss(struct dc_link *link) static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data) { enum dc_status retval; - union lane_align_status_updated dpcd_lane_status_updated; + union lane_align_status_updated dpcd_lane_status_updated = {0}; retval = core_link_read_dpcd( link, @@ -315,7 +320,7 @@ enum dc_status dp_read_hpd_rx_irq_data( /* Read 14 bytes in a single read and then copy only the required fields. * This is more efficient than doing it in two separate AUX reads. */ - uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; + uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1] = {0}; retval = core_link_read_dpcd( link, @@ -399,7 +404,9 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { // Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC - link->is_automated = true; + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->skip_fallback_on_link_loss = true; + device_service_clear.bits.AUTOMATED_TEST = 1; core_link_write_dpcd( link, @@ -424,9 +431,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, /* PSR-related error was detected and handled */ return true; - if (handle_hpd_irq_replay_sink(link)) - /* Replay-related error was detected and handled */ - return true; + handle_hpd_irq_replay_sink(link); /* If PSR-related error handled, Main link may be off, * so do not handle as a normal sink status change interrupt. 
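
The hunks above share one defensive pattern: every DPCD union or scratch buffer that is parsed after an AUX read is now zero-initialized at declaration, so a NAKed or short read decodes as all-zero ("nothing pending") rather than as stale stack contents, and dp_read_hpd_rx_irq_data() keeps pulling DP_SINK_COUNT_ESI through DP_SINK_STATUS_ESI as a single 14-byte burst instead of two AUX transactions. A minimal sketch of the combined idea outside of DC follows; the aux_read() helper and the hpd_irq_snapshot struct are hypothetical stand-ins, only the DPCD addresses come from the DisplayPort spec:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DP_SINK_COUNT_ESI      0x2002 /* first byte of the ESI block */
#define DP_LANE0_1_STATUS_ESI  0x200c
#define DP_SINK_STATUS_ESI     0x200f /* last byte of the ESI block */

/* Hypothetical AUX helper: returns false on NAK/timeout and may leave
 * buf partially written on a short transaction. Stubbed out here so the
 * sketch compiles stand-alone; pretend the sink NAKed the read.
 */
static bool aux_read(uint32_t addr, uint8_t *buf, size_t len)
{
	(void)addr; (void)buf; (void)len;
	return false;
}

struct hpd_irq_snapshot { /* hypothetical stand-in for union hpd_irq_data */
	uint8_t sink_count;
	uint8_t device_service_irq;
	uint8_t lane01_status;
};

static bool read_hpd_irq_snapshot(struct hpd_irq_snapshot *out)
{
	/* One 14-byte burst instead of several 1-byte AUX reads; the = {0}
	 * initializer guarantees a failed read parses as "nothing pending".
	 */
	uint8_t esi[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1] = {0};
	bool ok = aux_read(DP_SINK_COUNT_ESI, esi, sizeof(esi));

	out->sink_count = esi[0];         /* 0x2002 */
	out->device_service_irq = esi[1]; /* 0x2003 */
	out->lane01_status = esi[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
	return ok;
}

The same reasoning motivates the = {0} changes in handle_hpd_irq_psr_sink() and handle_hpd_irq_replay_sink(): the error bits tested after the read must never come from uninitialized memory when the read itself fails.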
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c index 0050e0a06cbc..2fa4e64e2430 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c @@ -37,6 +37,7 @@ #include "clk_mgr.h" #include "resource.h" #include "link_enc_cfg.h" +#include "atomfirmware.h" #define DC_LOGGER \ link->ctx->logger @@ -100,8 +101,11 @@ void dp_set_hw_lane_settings( { const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + // Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && - !is_immediate_downstream(link, offset)) + !is_immediate_downstream(link, offset) && + (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) || + link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING)) return; if (link_hwss->ext.set_dp_lane_settings) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 90339c2dfd84..1818970b8eaf 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status( { enum link_training_result status = LINK_TRAINING_SUCCESS; union lane_status lane_status; + union lane_align_status_updated dpcd_lane_status_updated; uint8_t dpcd_buf[6] = {0}; uint32_t lane; @@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status( * check lanes status */ lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane); + dpcd_lane_status_updated.raw = dpcd_buf[4]; if (!lane_status.bits.CHANNEL_EQ_DONE_0 || !lane_status.bits.CR_DONE_0 || - !lane_status.bits.SYMBOL_LOCKED_0) { + !lane_status.bits.SYMBOL_LOCKED_0 || + !dp_is_interlane_aligned(dpcd_lane_status_updated)) { /* if one of the channel equalization, clock * recovery or symbol lock is dropped * consider it as (link has been @@ -807,7 +810,7 @@ void dp_decide_lane_settings( const struct link_training_settings *lt_settings, const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) + union dpcd_training_lane *dpcd_lane_settings) { uint32_t lane; @@ -1068,7 +1071,7 @@ enum dc_status dpcd_set_link_settings( * MUX chip gets link rate set back before link training. */ if (link->connector_signal == SIGNAL_TYPE_EDP) { - uint8_t supported_link_rates[16]; + uint8_t supported_link_rates[16] = {0}; core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, supported_link_rates, sizeof(supported_link_rates)); @@ -1505,10 +1508,7 @@ enum link_training_result dp_perform_link_training( * Non-LT AUX transactions inside training mode. 
*/ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING) - if (link->dc->config.use_old_fixed_vs_sequence) - status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, <_settings); - else - status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); + status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); else if (encoding == DP_8b_10b_ENCODING) status = dp_perform_8b_10b_link_training(link, link_res, <_settings); else if (encoding == DP_128b_132b_ENCODING) @@ -1587,21 +1587,7 @@ bool perform_link_training_with_retries( msleep(delay_dp_power_up_in_ms); } - if (panel_mode == DP_PANEL_MODE_EDP) { - struct cp_psp *cp_psp = &stream->ctx->cp_psp; - - if (cp_psp && cp_psp->funcs.enable_assr) { - /* ASSR is bound to fail with unsigned PSP - * verstage used during devlopment phase. - * Report and continue with eDP panel mode to - * perform eDP link training with right settings - */ - bool result; - result = cp_psp->funcs.enable_assr(cp_psp->handle, link); - if (!result && link->panel_mode != DP_PANEL_MODE_EDP) - panel_mode = DP_PANEL_MODE_DEFAULT; - } - } + edp_set_panel_assr(link, pipe_ctx, &panel_mode, true); dp_set_panel_mode(link, panel_mode); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h index 7d027bac8255..851bd17317a0 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h @@ -111,7 +111,7 @@ void dp_decide_lane_settings( const struct link_training_settings *lt_settings, const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], - union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); + union dpcd_training_lane *dpcd_lane_settings); enum dc_dp_training_pattern decide_cr_training_pattern( const struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index 4f4e899e5c46..edb21d21952a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -291,7 +291,7 @@ static enum link_training_result dpia_training_cr_non_transparent( { enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ uint32_t retry_count = 0; uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */ @@ -617,9 +617,9 @@ static enum link_training_result dpia_training_eq_non_transparent( enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. 
*/ uint32_t retries_eq = 0; - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; enum dc_dp_training_pattern tr_pattern; - uint32_t wait_time_microsec; + uint32_t wait_time_microsec = 0; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; @@ -811,7 +811,7 @@ static enum link_training_result dpia_training_eq_transparent( /* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4 * has to share encoders unlike DP and USBC */ - if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) { + if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->skip_fallback_on_link_loss && retries_eq)) { result = LINK_TRAINING_SUCCESS; break; } @@ -1037,7 +1037,7 @@ enum link_training_result dpia_perform_link_training( */ if (result == LINK_TRAINING_SUCCESS) { fsleep(5000); - if (!link->is_automated) + if (!link->skip_fallback_on_link_loss) result = dp_check_link_loss_status(link, <_settings); } else if (result == LINK_TRAINING_ABORT) dpia_training_abort(link, <_settings, repeater_id); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index fd8f6f198146..b5cf75975fff 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -115,7 +115,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq lt_settings->cr_pattern_time = 16000; /* Fixed VS/PE specific: Toggle link rate */ - apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate); + apply_toggle_rate_wa = ((link->vendor_specific_lttpr_link_rate_wa == target_rate) || (link->vendor_specific_lttpr_link_rate_wa == 0)); target_rate = get_dpcd_link_rate(<_settings->link_settings); toggle_rate = (target_rate == 0x6) ? 
0xA : 0x6; @@ -186,351 +186,6 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq return status; } - -enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings) -{ - const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; - const uint8_t offset = dp_parse_lttpr_repeater_count( - link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; - const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; - uint32_t pre_disable_intercept_delay_ms = 0; - uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; - uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; - const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; - const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; - const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; - const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; - const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; - enum link_training_result status = LINK_TRAINING_SUCCESS; - uint8_t lane = 0; - union down_spread_ctrl downspread = {0}; - union lane_count_set lane_count_set = {0}; - uint8_t toggle_rate; - uint8_t rate; - - /* Only 8b/10b is supported */ - ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == - DP_8b_10b_ENCODING); - - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { - status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); - return status; - } - - if (offset != 0xFF) { - if (offset == 2) { - pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; - - /* Certain display and cable configuration require extra delay */ - } else if (offset > 2) { - pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; - } - } - - /* Vendor specific: Reset lane settings */ - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); - - /* Vendor specific: Enable intercept */ - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); - - - /* 1. set link rate, lane count and spread. */ - - downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); - - lane_count_set.bits.LANE_COUNT_SET = - lt_settings->link_settings.lane_count; - - lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - - - if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { - lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = - link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; - } - - core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, - &downspread.raw, sizeof(downspread)); - - core_link_write_dpcd(link, DP_LANE_COUNT_SET, - &lane_count_set.raw, 1); - - rate = get_dpcd_link_rate(<_settings->link_settings); - - /* Vendor specific: Toggle link rate */ - toggle_rate = (rate == 0x6) ? 
0xA : 0x6; - - if (link->vendor_specific_lttpr_link_rate_wa == rate) { - core_link_write_dpcd( - link, - DP_LINK_BW_SET, - &toggle_rate, - 1); - } - - link->vendor_specific_lttpr_link_rate_wa = rate; - - core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); - - DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", - __func__, - DP_LINK_BW_SET, - lt_settings->link_settings.link_rate, - DP_LANE_COUNT_SET, - lt_settings->link_settings.lane_count, - lt_settings->enhanced_framing, - DP_DOWNSPREAD_CTRL, - lt_settings->link_settings.link_spread); - - if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); - } - - /* 2. Perform link training */ - - /* Perform Clock Recovery Sequence */ - if (status == LINK_TRAINING_SUCCESS) { - const uint8_t max_vendor_dpcd_retries = 10; - uint32_t retries_cr; - uint32_t retry_count; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; - union lane_align_status_updated dpcd_lane_status_updated; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - uint8_t i = 0; - - retries_cr = 0; - retry_count = 0; - - memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); - memset(&dpcd_lane_status_updated, '\0', - sizeof(dpcd_lane_status_updated)); - - while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { - - - /* 1. call HWSS to set lane settings */ - dp_set_hw_lane_settings( - link, - link_res, - lt_settings, - 0); - - /* 2. update DPCD of the receiver */ - if (!retry_count) { - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration. 
- */ - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - lt_settings->pattern_for_cr, - 0); - /* Vendor specific: Disable intercept */ - for (i = 0; i < max_vendor_dpcd_retries; i++) { - if (pre_disable_intercept_delay_ms != 0) - msleep(pre_disable_intercept_delay_ms); - if (link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_intercept_dis[0], - sizeof(vendor_lttpr_write_data_intercept_dis))) - break; - - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_intercept_en[0], - sizeof(vendor_lttpr_write_data_intercept_en)); - } - } else { - vendor_lttpr_write_data_vs[3] = 0; - vendor_lttpr_write_data_pe[3] = 0; - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Vendor specific: Update VS and PE to DPRX requested value */ - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); - - dpcd_set_lane_settings( - link, - lt_settings, - 0); - } - - /* 3. wait receiver to lock-on*/ - wait_time_microsec = lt_settings->cr_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested drive - * settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - 0); - - /* 5. check CR done*/ - if (dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_SUCCESS; - break; - } - - /* 6. max VS reached*/ - if (dp_is_max_vs_reached(lt_settings)) - break; - - /* 7. same lane settings */ - /* Note: settings are the same for all lanes, - * so comparing first lane is sufficient - */ - if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == - dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) - retries_cr++; - else - retries_cr = 0; - - /* 8. update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - retry_count++; - } - - if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { - ASSERT(0); - DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", - __func__, - LINK_TRAINING_MAX_CR_RETRY); - - } - - status = dp_get_cr_failure(lane_count, dpcd_lane_status); - } - - /* Perform Channel EQ Sequence */ - if (status == LINK_TRAINING_SUCCESS) { - enum dc_dp_training_pattern tr_pattern; - uint32_t retries_ch_eq; - uint32_t wait_time_microsec; - enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_align_status_updated dpcd_lane_status_updated = {0}; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; - - /* Note: also check that TPS4 is a supported feature*/ - tr_pattern = lt_settings->pattern_for_eq; - - dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); - - status = LINK_TRAINING_EQ_FAIL_EQ; - - for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; - retries_ch_eq++) { - - dp_set_hw_lane_settings(link, link_res, lt_settings, 0); - - vendor_lttpr_write_data_vs[3] = 0; - vendor_lttpr_write_data_pe[3] = 0; - - for (lane = 0; lane < lane_count; lane++) { - vendor_lttpr_write_data_vs[3] |= - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); - vendor_lttpr_write_data_pe[3] |= - lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); - } - - /* Vendor specific: Update VS and PE to DPRX requested value */ - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); - link_configure_fixed_vs_pe_retimer(link->ddc, - &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); - - /* 2. update DPCD*/ - if (!retries_ch_eq) - /* EPR #361076 - write as a 5-byte burst, - * but only for the 1-st iteration - */ - - dpcd_set_lt_pattern_and_lane_settings( - link, - lt_settings, - tr_pattern, 0); - else - dpcd_set_lane_settings(link, lt_settings, 0); - - /* 3. wait for receiver to lock-on*/ - wait_time_microsec = lt_settings->eq_pattern_time; - - dp_wait_for_training_aux_rd_interval( - link, - wait_time_microsec); - - /* 4. Read lane status and requested - * drive settings as set by the sink - */ - dp_get_lane_status_and_lane_adjust( - link, - lt_settings, - dpcd_lane_status, - &dpcd_lane_status_updated, - dpcd_lane_adjust, - 0); - - /* 5. check CR done*/ - if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { - status = LINK_TRAINING_EQ_FAIL_CR; - break; - } - - /* 6. check CHEQ done*/ - if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && - dp_is_symbol_locked(lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) { - status = LINK_TRAINING_SUCCESS; - break; - } - - /* 7. 
update VS/PE/PC2 in lt_settings*/ - dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, - lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - } - } - - return status; -} - enum link_training_result dp_perform_fixed_vs_pe_training_sequence( struct dc_link *link, const struct link_resource *link_res, @@ -552,6 +207,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; @@ -614,18 +270,20 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( rate = get_dpcd_link_rate(<_settings->link_settings); - /* Vendor specific: Toggle link rate */ - toggle_rate = (rate == 0x6) ? 0xA : 0x6; + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) { + /* Vendor specific: Toggle link rate */ + toggle_rate = (rate == 0x6) ? 0xA : 0x6; - if (link->vendor_specific_lttpr_link_rate_wa == rate) { - core_link_write_dpcd( - link, - DP_LINK_BW_SET, - &toggle_rate, - 1); - } + if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &toggle_rate, + 1); + } - link->vendor_specific_lttpr_link_rate_wa = rate; + link->vendor_specific_lttpr_link_rate_wa = rate; + } core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); @@ -639,6 +297,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_dpmf[0], + sizeof(vendor_lttpr_write_data_dpmf)); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h index c0d6ea329504..e61970e27661 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h @@ -28,11 +28,6 @@ #define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ #include "link_dp_training.h" -enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( - struct dc_link *link, - const struct link_resource *link_res, - struct link_training_settings *lt_settings); - enum link_training_result dp_perform_fixed_vs_pe_training_sequence( struct dc_link *link, const struct link_resource *link_res, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c index 5c9a30211c10..a72c898b64fa 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c @@ -130,7 +130,7 @@ static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint3 * XXX: Do not allow any two address ranges in this array to overlap */ static const struct dpcd_address_range mandatory_dpcd_blocks[] = { - { 
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }}; + { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_128B132B_RATES }}; /* * extend addresses to read all mandatory blocks together @@ -164,7 +164,7 @@ static void dpcd_extend_address_range( if (new_addr_range.start != in_address || new_addr_range.end != end_address) { *out_address = new_addr_range.start; *out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end); - *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL); + *out_data = kcalloc(*out_size, sizeof(**out_data), GFP_KERNEL); } } @@ -205,7 +205,7 @@ enum dc_status core_link_read_dpcd( uint32_t extended_size; /* size of the remaining partitioned address space */ uint32_t size_left_to_read; - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; /* size of the next partition to be read from */ uint32_t partition_size; uint32_t data_index = 0; @@ -234,7 +234,7 @@ enum dc_status core_link_write_dpcd( { uint32_t partition_size; uint32_t data_index = 0; - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; while (size) { partition_size = dpcd_get_next_partition_size(address, size); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index e32a7974a4bc..ad9aca790dd7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -38,6 +38,7 @@ #include "dc/dc_dmub_srv.h" #include "dce/dmub_replay.h" #include "abm.h" +#include "resource.h" #define DC_LOGGER \ link->ctx->logger #define DC_LOGGER_INIT(logger) @@ -170,7 +171,6 @@ bool edp_set_backlight_level_nits(struct dc_link *link, *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; - link->backlight_settings.backlight_millinits = backlight_millinits; if (!link->dpcd_caps.panel_luminance_control) { if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, @@ -288,9 +288,9 @@ bool set_default_brightness_aux(struct dc_link *link) if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { if (!read_default_bl_aux(link, &default_backlight)) default_backlight = 150000; - // if < 1 nits or > 5000, it might be wrong readback + // if > 5000 nits, it might be a wrong readback. 0 nits is a valid default value for an OLED panel.
if (default_backlight < 1000 || default_backlight > 5000000) - default_backlight = 150000; // + default_backlight = 150000; return edp_set_backlight_level_nits(link, true, default_backlight, 0); @@ -298,15 +298,6 @@ bool set_default_brightness_aux(struct dc_link *link) return false; } -bool set_cached_brightness_aux(struct dc_link *link) -{ - if (link->backlight_settings.backlight_millinits) - return edp_set_backlight_level_nits(link, true, - link->backlight_settings.backlight_millinits, 0); - else - return set_default_brightness_aux(link); - return false; -} bool edp_is_ilr_optimization_enabled(struct dc_link *link) { if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate) @@ -330,8 +321,8 @@ bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing) { struct dc_link_settings link_setting; - uint8_t link_bw_set; - uint8_t link_rate_set; + uint8_t link_bw_set = 0; + uint8_t link_rate_set = 0; uint32_t req_bw; union lane_count_set lane_count_set = {0}; @@ -539,6 +530,9 @@ bool edp_set_backlight_level(const struct dc_link *link, if (dc_is_embedded_signal(link->connector_signal)) { struct pipe_ctx *pipe_ctx = get_pipe_from_link(link); + if (link->panel_cntl) + link->panel_cntl->stored_backlight_registers.USER_LEVEL = backlight_pwm_u16_16; + if (pipe_ctx) { /* Disable brightness ramping when the display is blanked * as it can hang the DMCU @@ -899,7 +893,8 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, /* Set power optimization flag */ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) { - if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) { + if (replay != NULL && link->replay_settings.replay_feature_enabled && + replay->funcs->replay_set_power_opt) { replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst); link->replay_settings.replay_power_opt_active = *power_opts; } @@ -937,8 +932,8 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state) bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream) { /* To-do: Setup Replay */ - struct dc *dc = link->ctx->dc; - struct dmub_replay *replay = dc->res_pool->replay; + struct dc *dc; + struct dmub_replay *replay; int i; unsigned int panel_inst; struct replay_context replay_context = { 0 }; @@ -954,6 +949,10 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream if (!link) return false; + dc = link->ctx->dc; + + replay = dc->res_pool->replay; + if (!replay) return false; @@ -982,8 +981,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream replay_context.line_time_in_ns = lineTimeInNs; - if (replay) - link->replay_settings.replay_feature_enabled = + link->replay_settings.replay_feature_enabled = replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst); if (link->replay_settings.replay_feature_enabled) { @@ -1007,7 +1005,37 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream return true; } -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) +/* + * This is a general interface for Replay to set a 32-bit variable to DMUB + * replay_FW_Message_type: indicates which instruction or variable is passed to DMUB + * cmd_data: value of the config.
+ */ +bool edp_send_replay_cmd(struct dc_link *link, + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!replay) + return false; + + DC_LOGGER_INIT(link->ctx->logger); + + if (dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + cmd_data->panel_inst = panel_inst; + else { + DC_LOG_DC("%s(): get edp panel inst fail ", __func__); + return false; + } + + replay->funcs->replay_send_cmd(replay, msg, cmd_data); + + return true; +} + +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1028,7 +1056,7 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) } bool edp_replay_residency(const struct dc_link *link, - unsigned int *residency, const bool is_start, const bool is_alpm) + unsigned int *residency, const bool is_start, const enum pr_residency_mode mode) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1037,14 +1065,44 @@ bool edp_replay_residency(const struct dc_link *link, if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; + if (!residency) + return false; + if (replay != NULL && link->replay_settings.replay_feature_enabled) - replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm); + replay->funcs->replay_residency(replay, panel_inst, residency, is_start, mode); else *residency = 0; return true; } +bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, + const unsigned int *power_opts, uint32_t coasting_vtotal) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + /* Only when both power opts and coasting vtotal are changed can this function return true */ + if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts && + coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { + if (link->replay_settings.replay_feature_enabled && + replay->funcs->replay_set_power_opt_and_coasting_vtotal) { + replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay, + *power_opts, panel_inst, coasting_vtotal); + link->replay_settings.replay_power_opt_active = *power_opts; + link->replay_settings.coasting_vtotal = coasting_vtotal; + } else + return false; + } else + return false; + + return true; +} + static struct abm *get_abm_from_stream_res(const struct dc_link *link) { int i; @@ -1091,3 +1149,66 @@ int edp_get_target_backlight_pwm(const struct dc_link *link) return (int) abm->funcs->get_target_backlight(abm); } + +static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, + struct link_resource *link_res, bool enable) +{ + union dmub_rb_cmd cmd; + bool use_hpo_dp_link_enc = false; + uint8_t link_enc_index = 0; + uint8_t phy_type = 0; + uint8_t phy_id = 0; + + if (!pDC->config.use_assr_psp_message) + return; + + memset(&cmd, 0, sizeof(cmd)); + + link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + if (link_res->hpo_dp_link_enc) { + link_enc_index = link_res->hpo_dp_link_enc->inst; + use_hpo_dp_link_enc = true; + } + + if (enable) + phy_type = ((dp_get_panel_mode(link) == DP_PANEL_MODE_EDP) ?
1 : 0); + + phy_id = resource_transmitter_to_phy_idx(pDC, link->link_enc->transmitter); + + cmd.assr_enable.header.type = DMUB_CMD__PSP; + cmd.assr_enable.header.sub_type = DMUB_CMD__PSP_ASSR_ENABLE; + cmd.assr_enable.assr_data.enable = enable; + cmd.assr_enable.assr_data.phy_port_type = phy_type; + cmd.assr_enable.assr_data.phy_port_id = phy_id; + cmd.assr_enable.assr_data.link_enc_index = link_enc_index; + cmd.assr_enable.assr_data.hpo_mode = use_hpo_dp_link_enc; + + dc_wake_and_execute_dmub_cmd(pDC->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx, + enum dp_panel_mode *panel_mode, bool enable) +{ + struct link_resource *link_res = &pipe_ctx->link_res; + struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; + + if (*panel_mode != DP_PANEL_MODE_EDP) + return; + + if (link->dc->config.use_assr_psp_message) { + edp_set_assr_enable(link->dc, link, link_res, enable); + } else if (cp_psp && cp_psp->funcs.enable_assr && enable) { + /* ASSR is bound to fail with unsigned PSP + * verstage used during development phase. + * Report and continue with eDP panel mode to + * perform eDP link training with the right settings + */ + bool result; + + result = cp_psp->funcs.enable_assr(cp_psp->handle, link); + + if (!result && link->panel_mode != DP_PANEL_MODE_EDP) + *panel_mode = DP_PANEL_MODE_DEFAULT; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index ebf7deb63d13..cb6d95cc36e4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -30,7 +30,6 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool set_default_brightness_aux(struct dc_link *link); -bool set_cached_brightness_aux(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); int edp_get_backlight_level(const struct dc_link *link); bool edp_get_backlight_level_nits(struct dc_link *link, @@ -57,10 +56,15 @@ bool edp_set_replay_allow_active(struct dc_link *dc_link, const bool *enable, bool wait, bool force_static, const unsigned int *power_opts); bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream); -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal); +bool edp_send_replay_cmd(struct dc_link *link, + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data); +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal); bool edp_replay_residency(const struct dc_link *link, - unsigned int *residency, const bool is_start, const bool is_alpm); + unsigned int *residency, const bool is_start, const enum pr_residency_mode mode); bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); +bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, + const unsigned int *power_opts, uint32_t coasting_vtotal); bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); @@ -72,4 +76,6 @@ bool edp_receiver_ready_T9(struct dc_link *link); bool edp_receiver_ready_T7(struct dc_link *link); bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable); void edp_set_panel_power(struct dc_link *link, bool powerOn); +void edp_set_panel_assr(struct dc_link
*link, struct pipe_ctx *pipe_ctx, + enum dp_panel_mode *panel_mode, bool enable); #endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c index e3d729ab5b9f..caa617883f62 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c @@ -35,7 +35,7 @@ bool link_get_hpd_state(struct dc_link *link) { - uint32_t state; + uint32_t state = 0; dal_gpio_lock_pin(link->hpd_gpio); dal_gpio_get_value(link->hpd_gpio, &state); diff --git a/drivers/gpu/drm/amd/display/dc/optc/Makefile b/drivers/gpu/drm/amd/display/dc/optc/Makefile new file mode 100644 index 000000000000..bb213335fb9f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/Makefile @@ -0,0 +1,108 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'optc' sub-component of DAL. 
+# + + +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +OPTC_DCN10 = dcn10_optc.o + +AMD_DAL_OPTC_DCN10 = $(addprefix $(AMDDALPATH)/dc/optc/dcn10/,$(OPTC_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN10) + +############################################################################### + +OPTC_DCN20 = dcn20_optc.o + +AMD_DAL_OPTC_DCN20 = $(addprefix $(AMDDALPATH)/dc/optc/dcn20/,$(OPTC_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN20) + +############################################################################### + +OPTC_DCN201 = dcn201_optc.o + +AMD_DAL_OPTC_DCN201 = $(addprefix $(AMDDALPATH)/dc/optc/dcn201/,$(OPTC_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN201) + +############################################################################### + +############################################################################### + +############################################################################### + +OPTC_DCN30 = dcn30_optc.o + +AMD_DAL_OPTC_DCN30 = $(addprefix $(AMDDALPATH)/dc/optc/dcn30/,$(OPTC_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN30) + +############################################################################### + +OPTC_DCN301 = dcn301_optc.o + +AMD_DAL_OPTC_DCN301 = $(addprefix $(AMDDALPATH)/dc/optc/dcn301/,$(OPTC_DCN301)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN301) + +############################################################################### + +OPTC_DCN31 = dcn31_optc.o + +AMD_DAL_OPTC_DCN31 = $(addprefix $(AMDDALPATH)/dc/optc/dcn31/,$(OPTC_DCN31)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN31) + +############################################################################### + +OPTC_DCN314 = dcn314_optc.o + +AMD_DAL_OPTC_DCN314 = $(addprefix $(AMDDALPATH)/dc/optc/dcn314/,$(OPTC_DCN314)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN314) + +############################################################################### + +OPTC_DCN32 = dcn32_optc.o + +AMD_DAL_OPTC_DCN32 = $(addprefix $(AMDDALPATH)/dc/optc/dcn32/,$(OPTC_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN32) + +############################################################################### + +OPTC_DCN35 = dcn35_optc.o + +AMD_DAL_OPTC_DCN35 = $(addprefix $(AMDDALPATH)/dc/optc/dcn35/,$(OPTC_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN35) + +############################################################################### + +############################################################################### +endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c index 0e8f4f36c87c..5574bc628053 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c @@ -945,10 +945,19 @@ void optc1_set_drr( OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK_EN, 0, OTG_SET_V_TOTAL_MIN_MASK, 0); - } - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); + // Setup manual flow control for EOF via TRIG_A + optc->funcs->setup_manual_trigger(optc); + + } else { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } } void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) @@ -1383,6 +1392,9 @@ 
void optc1_read_otg_state(struct optc *optc1, REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line); + + s->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK); + s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); } bool optc1_get_otg_active_size(struct timing_generator *optc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index aaf6c981fd9e..2f3bd7648ba7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -26,7 +26,7 @@ #ifndef __DC_TIMING_GENERATOR_DCN10_H__ #define __DC_TIMING_GENERATOR_DCN10_H__ -#include "timing_generator.h" +#include "optc.h" #define DCN10TG_FROM_TG(tg)\ container_of(tg, struct optc, base) @@ -129,6 +129,8 @@ struct dcn_optc_registers { uint32_t OTG_V_TOTAL_MID; uint32_t OTG_V_TOTAL_MIN; uint32_t OTG_V_TOTAL_CONTROL; + uint32_t OTG_V_COUNT_STOP_CONTROL; + uint32_t OTG_V_COUNT_STOP_CONTROL2; uint32_t OTG_TRIGA_CNTL; uint32_t OTG_TRIGA_MANUAL_TRIG; uint32_t OTG_MANUAL_FLOW_CONTROL; @@ -515,12 +517,15 @@ struct dcn_optc_registers { type MANUAL_FLOW_CONTROL;\ type MANUAL_FLOW_CONTROL_SEL; +#define V_TOTAL_REGS(type) + #define TG_REG_FIELD_LIST(type) \ TG_REG_FIELD_LIST_DCN1_0(type)\ type OTG_V_SYNC_MODE;\ type OTG_DRR_TRIGGER_WINDOW_START_X;\ type OTG_DRR_TRIGGER_WINDOW_END_X;\ type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\ + V_TOTAL_REGS(type)\ type OTG_OUT_MUX;\ type OTG_M_CONST_DTO_PHASE;\ type OTG_M_CONST_DTO_MODULO;\ @@ -557,7 +562,8 @@ struct dcn_optc_registers { type OTG_CRC_DATA_STREAM_SPLIT_MODE;\ type OTG_CRC_DATA_FORMAT;\ type OTG_V_TOTAL_LAST_USED_BY_DRR;\ - type OTG_DRR_TIMING_DBUF_UPDATE_PENDING; + type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\ + type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING; #define TG_REG_FIELD_LIST_DCN3_2(type) \ type OTG_H_TIMING_DIV_MODE_MANUAL; @@ -580,7 +586,9 @@ struct dcn_optc_registers { type OTG_CRC1_WINDOWB_X_END_READBACK;\ type OTG_CRC1_WINDOWB_Y_START_READBACK;\ type OTG_CRC1_WINDOWB_Y_END_READBACK;\ - type OPTC_FGCG_REP_DIS; + type OPTC_FGCG_REP_DIS;\ + type OTG_V_COUNT_STOP;\ + type OTG_V_COUNT_STOP_TIMER; struct dcn_optc_shift { TG_REG_FIELD_LIST(uint8_t) @@ -594,190 +602,6 @@ struct dcn_optc_mask { TG_REG_FIELD_LIST_DCN3_5(uint32_t) }; -struct optc { - struct timing_generator base; - - const struct dcn_optc_registers *tg_regs; - const struct dcn_optc_shift *tg_shift; - const struct dcn_optc_mask *tg_mask; - - int opp_count; - - uint32_t max_h_total; - uint32_t max_v_total; - - uint32_t min_h_blank; - - uint32_t min_h_sync_width; - uint32_t min_v_sync_width; - uint32_t min_v_blank; - uint32_t min_v_blank_interlace; - - int vstartup_start; - int vupdate_offset; - int vupdate_width; - int vready_offset; - struct dc_crtc_timing orginal_patched_timing; - enum signal_type signal; -}; - void dcn10_timing_generator_init(struct optc *optc); -struct dcn_otg_state { - uint32_t v_blank_start; - uint32_t v_blank_end; - uint32_t v_sync_a_pol; - uint32_t v_total; - uint32_t v_total_max; - uint32_t v_total_min; - uint32_t v_total_min_sel; - uint32_t v_total_max_sel; - uint32_t v_sync_a_start; - uint32_t v_sync_a_end; - uint32_t h_blank_start; - uint32_t h_blank_end; - uint32_t h_sync_a_start; - uint32_t h_sync_a_end; - uint32_t h_sync_a_pol; - uint32_t h_total; - uint32_t underflow_occurred_status; - uint32_t otg_enabled; - uint32_t blank_enabled; - uint32_t vertical_interrupt1_en; - uint32_t vertical_interrupt1_line; - uint32_t 
vertical_interrupt2_en; - uint32_t vertical_interrupt2_line; -}; - -void optc1_read_otg_state(struct optc *optc1, - struct dcn_otg_state *s); - -bool optc1_get_hw_timing(struct timing_generator *tg, - struct dc_crtc_timing *hw_crtc_timing); - -bool optc1_validate_timing( - struct timing_generator *optc, - const struct dc_crtc_timing *timing); - -void optc1_program_timing( - struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing, - int vready_offset, - int vstartup_start, - int vupdate_offset, - int vupdate_width, - const enum signal_type signal, - bool use_vbios); - -void optc1_setup_vertical_interrupt0( - struct timing_generator *optc, - uint32_t start_line, - uint32_t end_line); -void optc1_setup_vertical_interrupt1( - struct timing_generator *optc, - uint32_t start_line); -void optc1_setup_vertical_interrupt2( - struct timing_generator *optc, - uint32_t start_line); - -void optc1_program_global_sync( - struct timing_generator *optc, - int vready_offset, - int vstartup_start, - int vupdate_offset, - int vupdate_width); - -bool optc1_disable_crtc(struct timing_generator *optc); - -bool optc1_is_counter_moving(struct timing_generator *optc); - -void optc1_get_position(struct timing_generator *optc, - struct crtc_position *position); - -uint32_t optc1_get_vblank_counter(struct timing_generator *optc); - -void optc1_get_crtc_scanoutpos( - struct timing_generator *optc, - uint32_t *v_blank_start, - uint32_t *v_blank_end, - uint32_t *h_position, - uint32_t *v_position); - -void optc1_set_early_control( - struct timing_generator *optc, - uint32_t early_cntl); - -void optc1_wait_for_state(struct timing_generator *optc, - enum crtc_state state); - -void optc1_set_blank(struct timing_generator *optc, - bool enable_blanking); - -bool optc1_is_blanked(struct timing_generator *optc); - -void optc1_program_blank_color( - struct timing_generator *optc, - const struct tg_color *black_color); - -bool optc1_did_triggered_reset_occur( - struct timing_generator *optc); - -void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst); - -void optc1_disable_reset_trigger(struct timing_generator *optc); - -void optc1_lock(struct timing_generator *optc); - -void optc1_unlock(struct timing_generator *optc); - -void optc1_enable_optc_clock(struct timing_generator *optc, bool enable); - -void optc1_set_drr( - struct timing_generator *optc, - const struct drr_params *params); - -void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); - -void optc1_set_static_screen_control( - struct timing_generator *optc, - uint32_t event_triggers, - uint32_t num_frames); - -void optc1_program_stereo(struct timing_generator *optc, - const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags); - -bool optc1_is_stereo_left_eye(struct timing_generator *optc); - -void optc1_clear_optc_underflow(struct timing_generator *optc); - -void optc1_tg_init(struct timing_generator *optc); - -bool optc1_is_tg_enabled(struct timing_generator *optc); - -bool optc1_is_optc_underflow_occurred(struct timing_generator *optc); - -void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable); - -void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable); - -bool optc1_get_otg_active_size(struct timing_generator *optc, - uint32_t *otg_active_width, - uint32_t *otg_active_height); - -void optc1_enable_crtc_reset( - struct timing_generator *optc, - int source_tg_inst, - struct crtc_trigger_info *crtc_tp); - -bool 
optc1_configure_crc(struct timing_generator *optc, - const struct crc_params *params); - -bool optc1_get_crc(struct timing_generator *optc, - uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb); - -bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); - -void optc1_set_vtg_params(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2); - #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c index 58bdbd859bf9..d6f095b4555d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c @@ -462,16 +462,6 @@ void optc2_setup_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); - /* Set the min/max selectors unconditionally so that - * DMCUB fw may change OTG timings when necessary - * TODO: Remove the w/a after fixing the issue in DMCUB firmware - */ - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_V_TOTAL_MIN_SEL, 1, - OTG_V_TOTAL_MAX_SEL, 1, - OTG_FORCE_LOCK_ON_EVENT, 0, - OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - REG_SET_8(OTG_TRIGA_CNTL, 0, OTG_TRIGA_SOURCE_SELECT, 21, OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h index f7968b9ca16e..c2e03ced392e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h @@ -26,7 +26,7 @@ #ifndef __DC_OPTC_DCN20_H__ #define __DC_OPTC_DCN20_H__ -#include "../dcn10/dcn10_optc.h" +#include "dcn10/dcn10_optc.h" #define TG_COMMON_REG_LIST_DCN2_0(inst) \ TG_COMMON_REG_LIST_DCN(inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c index 70fcbec03fb6..70fcbec03fb6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h index e9545b73513a..e9545b73513a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c index b97bdb868a0e..b97bdb868a0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h index d3a056c12b0d..d3a056c12b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c index b3cfcb887905..b3cfcb887905 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h index b49585682a15..b49585682a15 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c 
b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c index 63a677c8ee27..63a677c8ee27 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h index 30b81a448ce2..30b81a448ce2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c index 0086cafb0f7a..0086cafb0f7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h index 99c098e76116..99c098e76116 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index a2c4db2cebdd..52eab8fccb7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi } } +void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + + REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000); +} + void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -166,6 +173,16 @@ static bool optc32_disable_crtc(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); + REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, 0xf, + OPTC_SEG1_SRC_SEL, 0xf, + OPTC_SEG2_SRC_SEL, 0xf, + OPTC_SEG3_SRC_SEL, 0xf, + OPTC_NUM_OF_INPUT_SEGMENT, 0); + + REG_UPDATE(OPTC_MEMORY_CONFIG, + OPTC_MEM_SEL, 0); + /* disable otg request until end of the first line * in the vertical blank region */ @@ -198,6 +215,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); + REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, 0xf, + OPTC_SEG1_SRC_SEL, 0xf, + OPTC_SEG2_SRC_SEL, 0xf, + OPTC_SEG3_SRC_SEL, 0xf, + OPTC_NUM_OF_INPUT_SEGMENT, 0); + REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0); } @@ -243,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc) OTG_V_TOTAL_MAX_SEL, 1, OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); } } @@ -328,6 +349,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = { .set_odm_bypass = optc32_set_odm_bypass, .set_odm_combine = optc32_set_odm_combine, .get_odm_combine_segments = optc32_get_odm_combine_segments, + .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear, .set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode, .get_optc_source = optc2_get_optc_source, .set_out_mux = optc3_set_out_mux, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h index 8ce3b178cab0..0c2c14695561 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h @@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments); void optc32_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); +void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg); #endif /* __DC_OPTC_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index a4a39f1638cf..d393be30dff8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c @@ -32,6 +32,7 @@ #include "reg_helper.h" #include "dc.h" #include "dcn_calc_math.h" +#include "dc_dmub_srv.h" #define REG(reg)\ optc1->tg_regs->reg @@ -138,6 +139,16 @@ static bool optc35_disable_crtc(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); + REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, 0xf, + OPTC_SEG1_SRC_SEL, 0xf, + OPTC_SEG2_SRC_SEL, 0xf, + OPTC_SEG3_SRC_SEL, 0xf, + OPTC_NUM_OF_INPUT_SEGMENT, 0); + + REG_UPDATE(OPTC_MEMORY_CONFIG, + OPTC_MEM_SEL, 0); + /* disable otg request until end of the first line * in the vertical blank region */ @@ -203,6 +214,167 @@ static bool optc35_configure_crc(struct timing_generator *optc, return true; } +static void optc35_setup_manual_trigger(struct timing_generator *optc) +{ + if (!optc || !optc->ctx) + return; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + struct dc *dc = optc->ctx->dc; + + if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) + dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst); + else { + /* + * MIN_MASK_EN is gone and MASK is now always enabled. + * + * To get it to work with manual trigger we need to make sure + * we program the correct bit.
+ */ + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ + + // Setup manual flow control for EOF via TRIG_A + if (optc->funcs && optc->funcs->setup_manual_trigger) + optc->funcs->setup_manual_trigger(optc); + } +} + +void optc35_set_drr( + struct timing_generator *optc, + const struct drr_params *params) +{ + if (!optc || !params) + return; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t max_otg_v_total = optc1->max_v_total - 1; + + if (params != NULL && + params->vertical_total_max > 0 && + params->vertical_total_min > 0) { + + if (params->vertical_total_mid != 0) { + + REG_SET(OTG_V_TOTAL_MID, 0, + OTG_V_TOTAL_MID, params->vertical_total_mid - 1); + + REG_UPDATE_2(OTG_V_TOTAL_CONTROL, + OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, + OTG_VTOTAL_MID_FRAME_NUM, + (uint8_t)params->vertical_total_mid_frame_num); + + } + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, + params->vertical_total_min - 1, params->vertical_total_max - 1); + optc35_setup_manual_trigger(optc); + } else { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0); +} + +static void optc35_set_long_vtotal( + struct timing_generator *optc, + const struct long_vtotal_params *params) +{ + if (!optc || !params) + return; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t vcount_stop_timer = 0, vcount_stop = 0; + uint32_t max_otg_v_total = optc1->max_v_total - 1; + + if (params->vertical_total_min <= max_otg_v_total && params->vertical_total_max <= max_otg_v_total) + return; + + if (params->vertical_total_max == 0 || params->vertical_total_min == 0) { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } else if (params->vertical_total_max == params->vertical_total_min) { + vcount_stop = params->vertical_blank_start; + vcount_stop_timer = params->vertical_total_max - max_otg_v_total; + + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, max_otg_v_total, max_otg_v_total); + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer); + } else { + // Variable rate, keep DRR trigger mask + if (params->vertical_total_min > max_otg_v_total) { + // cannot be supported + // If MAX_OTG_V_COUNT < DRR trigger < v_total_min < v_total_max, + // DRR trigger will drop the vtotal counting directly to a new frame. + // But it should trigger between v_total_min and v_total_max. 
+ ASSERT(0); + + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, 0, 0); + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0); + } else { + // For total_min <= MAX_OTG_V_COUNT and total_max > MAX_OTG_V_COUNT + vcount_stop = params->vertical_total_min; + vcount_stop_timer = params->vertical_total_max - max_otg_v_total; + + // Example: + // params->vertical_total_min 1000 + // params->vertical_total_max 2000 + // MAX_OTG_V_COUNT_STOP = 1500 + // + // If DRR event not happened, + // time 0,1,2,3,4,...1000,1001,........,1500,1501,1502, ...1999 + // vcount 0,1,2,3,4....1000...................,1001,1002,1003,...1399 + // vcount2 0,1,2,3,4,..499, + // else (DRR event happened, ex : at line 1004) + // time 0,1,2,3,4,...1000,1001.....1004, 0 + // vcount 0,1,2,3,4....1000,.............. 0 (new frame) + // vcount2 0,1,2, 3, - + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, + params->vertical_total_min - 1, max_otg_v_total); + optc35_setup_manual_trigger(optc); + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer); + } + } +} + static struct timing_generator_funcs dcn35_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -235,7 +407,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = { .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc31_set_drr, + .set_drr = optc35_set_drr, .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, .set_vtotal_min_max = optc1_set_vtotal_min_max, .set_static_screen_control = optc1_set_static_screen_control, @@ -265,6 +437,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = { .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, .init_odm = optc3_init_odm, + .set_long_vtotal = optc35_set_long_vtotal, }; void dcn35_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h index 1f422e4c468f..d077e2392379 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h @@ -65,10 +65,14 @@ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_END_READBACK, mask_sh),\ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_START_READBACK, mask_sh),\ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\ - SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh) + SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\ + SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\ + SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh) void dcn35_timing_generator_init(struct optc *optc1); void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable); +void optc35_set_drr(struct timing_generator *optc, const struct drr_params *params); + #endif /* __DC_OPTC_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile new file mode 100644 index 000000000000..db9048974d74 --- /dev/null +++ 
b/drivers/gpu/drm/amd/display/dc/resource/Makefile @@ -0,0 +1,201 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'resource' sub-component of DAL. +# + + +############################################################################### +# DCE +############################################################################### + +RESOURCE_DCE100 = dce100_resource.o + +AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE100) + +############################################################################### + +RESOURCE_DCE110 = dce110_resource.o + +AMD_DAL_RESOURCE_DCE110 = $(addprefix $(AMDDALPATH)/dc/resource/dce110/,$(RESOURCE_DCE110)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE110) + +############################################################################### + +RESOURCE_DCE112 = dce112_resource.o + +AMD_DAL_RESOURCE_DCE112 = $(addprefix $(AMDDALPATH)/dc/resource/dce112/,$(RESOURCE_DCE112)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE112) + +############################################################################### + +RESOURCE_DCE120 = dce120_resource.o + +AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOURCE_DCE120)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120) + +############################################################################### + +RESOURCE_DCE80 = dce80_resource.o + +AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80) + +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +RESOURCE_DCN10 = dcn10_resource.o + +AMD_DAL_RESOURCE_DCN10 = $(addprefix $(AMDDALPATH)/dc/resource/dcn10/,$(RESOURCE_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN10) + +############################################################################### + +RESOURCE_DCN20 = dcn20_resource.o + +AMD_DAL_RESOURCE_DCN20 = $(addprefix $(AMDDALPATH)/dc/resource/dcn20/,$(RESOURCE_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN20) + +############################################################################### + +RESOURCE_DCN201 = dcn201_resource.o + +AMD_DAL_RESOURCE_DCN201 = $(addprefix $(AMDDALPATH)/dc/resource/dcn201/,$(RESOURCE_DCN201)) + 
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN201) + +############################################################################### + +RESOURCE_DCN21 = dcn21_resource.o + +AMD_DAL_RESOURCE_DCN21 = $(addprefix $(AMDDALPATH)/dc/resource/dcn21/,$(RESOURCE_DCN21)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21) + +############################################################################### + +RESOURCE_DCN30 = dcn30_resource.o + +AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN30) + +############################################################################### + +RESOURCE_DCN301 = dcn301_resource.o + +AMD_DAL_RESOURCE_DCN301 = $(addprefix $(AMDDALPATH)/dc/resource/dcn301/,$(RESOURCE_DCN301)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN301) + +############################################################################### + +RESOURCE_DCN302 = dcn302_resource.o + +AMD_DAL_RESOURCE_DCN302 = $(addprefix $(AMDDALPATH)/dc/resource/dcn302/,$(RESOURCE_DCN302)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN302) + +############################################################################### + +RESOURCE_DCN303 = dcn303_resource.o + +AMD_DAL_RESOURCE_DCN303 = $(addprefix $(AMDDALPATH)/dc/resource/dcn303/,$(RESOURCE_DCN303)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN303) + +############################################################################### + +RESOURCE_DCN31 = dcn31_resource.o + +AMD_DAL_RESOURCE_DCN31 = $(addprefix $(AMDDALPATH)/dc/resource/dcn31/,$(RESOURCE_DCN31)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN31) + +############################################################################### + +RESOURCE_DCN314 = dcn314_resource.o + +AMD_DAL_RESOURCE_DCN314 = $(addprefix $(AMDDALPATH)/dc/resource/dcn314/,$(RESOURCE_DCN314)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN314) + +############################################################################### + +RESOURCE_DCN315 = dcn315_resource.o + +AMD_DAL_RESOURCE_DCN315 = $(addprefix $(AMDDALPATH)/dc/resource/dcn315/,$(RESOURCE_DCN315)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN315) + +############################################################################### + +RESOURCE_DCN316 = dcn316_resource.o + +AMD_DAL_RESOURCE_DCN316 = $(addprefix $(AMDDALPATH)/dc/resource/dcn316/,$(RESOURCE_DCN316)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN316) + +############################################################################### + +RESOURCE_DCN32 = dcn32_resource.o + +AMD_DAL_RESOURCE_DCN32 = $(addprefix $(AMDDALPATH)/dc/resource/dcn32/,$(RESOURCE_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN32) + +############################################################################### + +RESOURCE_DCN321 = dcn321_resource.o + +AMD_DAL_RESOURCE_DCN321 = $(addprefix $(AMDDALPATH)/dc/resource/dcn321/,$(RESOURCE_DCN321)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN321) + +############################################################################### + +RESOURCE_DCN35 = dcn35_resource.o + +AMD_DAL_RESOURCE_DCN35 = $(addprefix $(AMDDALPATH)/dc/resource/dcn35/,$(RESOURCE_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN35) + +############################################################################### + +RESOURCE_DCN351 = dcn351_resource.o + +AMD_DAL_RESOURCE_DCN351 = $(addprefix $(AMDDALPATH)/dc/resource/dcn351/,$(RESOURCE_DCN351)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351) + 
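Every stanza in this Makefile repeats one accumulation pattern: list the objects, prefix them with the new resource/dcnXX path, and append them to AMD_DISPLAY_FILES. The per-ASIC resource files collected here all implement the same pool interface, and a later hunk in this patch (dcn20_resource.c) dispatches pixel-clock setup through an optional hook on that interface. A minimal standalone sketch of the dispatch, assuming simplified stand-in types (only the hook name build_pipe_pix_clk_params comes from the patch):

#include <stdio.h>

struct pipe_ctx;	/* opaque for the sketch */

/* Modeled loosely on struct resource_funcs: a pool may install its own
 * hook; common code falls back to the DCN2.0 implementation otherwise. */
struct resource_funcs_sketch {
	void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
};

static void dcn20_fallback(struct pipe_ctx *pipe_ctx)
{
	(void)pipe_ctx;
	printf("using dcn20 default pixel clock params\n");
}

static void build_pipe_hw_param_sketch(const struct resource_funcs_sketch *funcs,
				       struct pipe_ctx *pipe_ctx)
{
	if (funcs->build_pipe_pix_clk_params)
		funcs->build_pipe_pix_clk_params(pipe_ctx);	/* ASIC override */
	else
		dcn20_fallback(pipe_ctx);	/* shared default */
}

int main(void)
{
	struct resource_funcs_sketch no_override = {0};

	build_pipe_hw_param_sketch(&no_override, NULL);
	return 0;
}

The DCN stanzas above sit under CONFIG_DRM_AMD_DC_FP, which the endif below closes.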
+############################################################################### + +endif diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index 53a5f4cb648c..53a5f4cb648c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h index fecab7c560f5..fecab7c560f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index fe518fd27b08..fe518fd27b08 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h index aa4531e0800e..aa4531e0800e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index d1edac46c9a0..88afb2a30eef 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -864,8 +864,6 @@ static struct clock_source *find_matching_pll( default: return NULL; } - - return NULL; } static enum dc_status build_mapped_resource( diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h index 1f57ebc6f9b4..1f57ebc6f9b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 962de79be169..621825a51f46 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -36,7 +36,7 @@ #include "dce110/dce110_resource.h" #include "virtual/virtual_stream_encoder.h" -#include "dce120_timing_generator.h" +#include "dce120/dce120_timing_generator.h" #include "irq/dce120/irq_service_dce120.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" @@ -1060,7 +1060,7 @@ static bool dce120_resource_construct( struct irq_service_init_data irq_init_data; static const struct resource_create_funcs *res_funcs; bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); - uint32_t pipe_fuses; + uint32_t pipe_fuses = 0; ctx->dc_bios->regs = &bios_regs; diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h index 3d1f3cf012f4..3d1f3cf012f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt new file mode 100644 index 000000000000..19dd73bc9ab0 --- /dev/null +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dce80_resource.c + dce80_resource.h + )
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 35a2cce0c2b8..56ee45e12b46 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -56,7 +56,6 @@ #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_i2c.h" -/* TODO remove this include */ #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_7_1_d.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h index eff31ab83a39..eff31ab83a39 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index b94c5c97eee7..563c5eec83ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -26,29 +26,32 @@ #include "dm_services.h" #include "dc.h" -#include "dcn10_init.h" +#include "dcn10/dcn10_init.h" #include "resource.h" #include "include/irq_service_interface.h" -#include "dcn10_resource.h" -#include "dcn10_ipp.h" -#include "dcn10_mpc.h" +#include "dcn10/dcn10_resource.h" +#include "dcn10/dcn10_ipp.h" +#include "dcn10/dcn10_mpc.h" + +#include "dcn10/dcn10_dwb.h" + #include "irq/dcn10/irq_service_dcn10.h" -#include "dcn10_dpp.h" -#include "dcn10_optc.h" +#include "dcn10/dcn10_dpp.h" +#include "dcn10/dcn10_optc.h" #include "dcn10/dcn10_hwseq.h" #include "dce110/dce110_hwseq.h" -#include "dcn10_opp.h" -#include "dcn10_link_encoder.h" -#include "dcn10_stream_encoder.h" +#include "dcn10/dcn10_opp.h" +#include "dcn10/dcn10_link_encoder.h" +#include "dcn10/dcn10_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" -#include "dcn10_hubp.h" -#include "dcn10_hubbub.h" +#include "dcn10/dcn10_hubp.h" +#include "dcn10/dcn10_hubbub.h" #include "dce/dce_panel_cntl.h" #include "soc15_hw_ip.h" @@ -510,7 +513,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = true + .p010 = false }, .max_upscale_factor = { @@ -566,6 +569,7 @@ static const struct dc_debug_options debug_defaults_diags = { .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; static void dcn10_dpp_destroy(struct dpp **dpp) @@ -1247,7 +1251,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( /* Store first available for MST second display * in daisy chain use case */ - j = i; + + if (pool->stream_enc[i]->id != ENGINE_ID_VIRTUAL) + j = i; + if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; @@ -1625,6 +1632,7 @@ static bool dcn10_resource_construct( /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; + pool->base.mpcc_count = j; /* within dml lib, it is hard code to 4. 
If ASIC pipe is fused, * the value may be changed diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h index bf8e33cd8147..bf8e33cd8147 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 0a422fbb14bc..0a939437e19f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -24,12 +24,10 @@ * */ -#include <linux/slab.h> - #include "dm_services.h" #include "dc.h" -#include "dcn20_init.h" +#include "dcn20/dcn20_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -39,31 +37,34 @@ #include "dcn10/dcn10_hubp.h" #include "dcn10/dcn10_ipp.h" -#include "dcn20_hubbub.h" -#include "dcn20_mpc.h" -#include "dcn20_hubp.h" +#include "dcn20/dcn20_hubbub.h" +#include "dcn20/dcn20_mpc.h" +#include "dcn20/dcn20_hubp.h" #include "irq/dcn20/irq_service_dcn20.h" -#include "dcn20_dpp.h" -#include "dcn20_optc.h" +#include "dcn20/dcn20_dpp.h" +#include "dcn20/dcn20_optc.h" #include "dcn20/dcn20_hwseq.h" #include "dce110/dce110_hwseq.h" #include "dcn10/dcn10_resource.h" -#include "dcn20_opp.h" +#include "dcn20/dcn20_opp.h" -#include "dcn20_dsc.h" +#include "dcn20/dcn20_dsc.h" -#include "dcn20_link_encoder.h" -#include "dcn20_stream_encoder.h" +#include "dcn20/dcn20_link_encoder.h" +#include "dcn20/dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" #include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dml/display_mode_vba.h" -#include "dcn20_dccg.h" -#include "dcn20_vmid.h" +#include "dcn20/dcn20_dccg.h" +#include "dcn20/dcn20_vmid.h" #include "dce/dce_panel_cntl.h" +#include "dcn20/dcn20_dwb.h" +#include "dcn20/dcn20_mmhubbub.h" + #include "navi10_ip_offset.h" #include "dcn/dcn_2_0_0_offset.h" @@ -73,9 +74,6 @@ #include "nbio/nbio_2_3_offset.h" -#include "dcn20/dcn20_dwb.h" -#include "dcn20/dcn20_mmhubbub.h" - #include "mmhub/mmhub_2_0_0_offset.h" #include "mmhub/mmhub_2_0_0_sh_mask.h" @@ -85,11 +83,10 @@ #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "vm_helper.h" -#include "link_enc_cfg.h" - -#include "amdgpu_socbb.h" +#include "link_enc_cfg.h" #include "link.h" + #define DC_LOGGER_INIT(logger) #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL @@ -1273,15 +1270,24 @@ static void build_clamping_params(struct dc_stream_state *stream) stream->clamping.pixel_encoding = stream->timing.pixel_encoding; } -static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) +void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) { - get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); - pipe_ctx->clock_source->funcs->get_pix_clk_dividers( - pipe_ctx->clock_source, - &pipe_ctx->stream_res.pix_clk_params, - &pipe_ctx->pll_settings); + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + &pipe_ctx->pll_settings); +} + +static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) +{ + struct resource_pool *pool = pipe_ctx->stream->ctx->dc->res_pool; + + if (pool->funcs->build_pipe_pix_clk_params) { + pool->funcs->build_pipe_pix_clk_params(pipe_ctx); + } else { + dcn20_build_pipe_pix_clk_params(pipe_ctx); + } pipe_ctx->stream->clamping.pixel_encoding = 
pipe_ctx->stream->timing.pixel_encoding; @@ -2447,6 +2453,7 @@ static bool dcn20_resource_construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h index 37ecaccc5d12..4cee3fa11a7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h @@ -165,6 +165,7 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state); +void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx); #endif /* __DC_RESOURCE_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index bca22d867696..070a4efb308b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -26,7 +26,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn201_init.h" +#include "dcn201/dcn201_init.h" #include "dml/dcn20/dcn20_fpu.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -36,16 +36,16 @@ #include "dcn10/dcn10_hubp.h" #include "dcn10/dcn10_ipp.h" -#include "dcn201_mpc.h" -#include "dcn201_hubp.h" +#include "dcn201/dcn201_mpc.h" +#include "dcn201/dcn201_hubp.h" #include "irq/dcn201/irq_service_dcn201.h" #include "dcn201/dcn201_dpp.h" #include "dcn201/dcn201_hubbub.h" -#include "dcn201_dccg.h" -#include "dcn201_optc.h" +#include "dcn201/dcn201_dccg.h" +#include "dcn201/dcn201_optc.h" #include "dcn201/dcn201_hwseq.h" #include "dce110/dce110_hwseq.h" -#include "dcn201_opp.h" +#include "dcn201/dcn201_opp.h" #include "dcn201/dcn201_link_encoder.h" #include "dcn20/dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" @@ -55,7 +55,6 @@ #include "dce110/dce110_resource.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" -#include "dcn201_hubbub.h" #include "dcn10/dcn10_resource.h" #include "cyan_skillfish_ip_offset.h" @@ -182,6 +181,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = { .socclk_mhz = 1254.0, .dram_speed_mts = 14000.0, }, + /* state4 is not an actual state, just defines unsupported for dml*/ { .state = 4, .dscclk_mhz = 400.0, @@ -566,6 +566,8 @@ static const struct resource_caps res_cap_dnc201 = { .num_audio = 2, .num_stream_encoder = 2, .num_pll = 2, + .num_dwb = 0, + .num_dsc = 0, .num_ddc = 2, }; @@ -612,7 +614,7 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_tri_buf = false, + .enable_tri_buf = true, .enable_legacy_fast_update = true, .using_dml2 = false, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h index e0467d17d4ae..e0467d17d4ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 42277b280586..8663cbc3d1cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -29,7 +29,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn21_init.h" +#include "dcn21/dcn21_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -44,7 +44,7 @@ #include "dcn20/dcn20_hubbub.h" #include "dcn20/dcn20_mpc.h" #include "dcn20/dcn20_hubp.h" -#include "dcn21_hubp.h" +#include "dcn21/dcn21_hubp.h" #include "irq/dcn21/irq_service_dcn21.h" #include "dcn20/dcn20_dpp.h" #include "dcn20/dcn20_optc.h" @@ -61,7 +61,7 @@ #include "dml/display_mode_vba.h" #include "dcn20/dcn20_dccg.h" #include "dcn21/dcn21_dccg.h" -#include "dcn21_hubbub.h" +#include "dcn21/dcn21_hubbub.h" #include "dcn10/dcn10_resource.h" #include "dce/dce_panel_cntl.h" @@ -581,32 +581,6 @@ static const struct resource_caps res_cap_rn = { .num_dsc = 3, }; -#ifdef DIAGS_BUILD -static const struct resource_caps res_cap_rn_FPGA_4pipe = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 7, - .num_stream_encoder = 4, - .num_pll = 4, - .num_dwb = 1, - .num_ddc = 4, - .num_dsc = 0, -}; - -static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = { - .num_timing_generator = 2, - .num_opp = 2, - .num_video_plane = 2, - .num_audio = 7, - .num_stream_encoder = 2, - .num_pll = 4, - .num_dwb = 1, - .num_ddc = 4, - .num_dsc = 2, -}; -#endif - static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .per_pixel_alpha = true, @@ -713,9 +687,8 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) pool->base.hubps[i] = NULL; } - if (pool->base.irqs != NULL) { + if (pool->base.irqs != NULL) dal_irq_service_destroy(&pool->base.irqs); - } } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { @@ -1416,16 +1389,11 @@ static bool dcn21_resource_construct( struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; uint32_t pipe_fuses = read_pipe_fuses(ctx); - uint32_t num_pipes; + uint32_t num_pipes = 0; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_rn; -#ifdef DIAGS_BUILD - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - //pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc; - pool->base.res_cap = &res_cap_rn_FPGA_4pipe; -#endif pool->base.funcs = &dcn21_res_pool_funcs; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h index f7ecc002c2f7..f7ecc002c2f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index 7b259cb5f418..f35cc307830b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -27,7 +27,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn30_init.h" +#include "dcn30/dcn30_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -1639,7 +1639,7 @@ noinline bool dcn30_internal_validate_bw( int split[MAX_PIPES] = { 0 }; bool merge[MAX_PIPES] = { false }; bool newly_split[MAX_PIPES] = { false }; - int pipe_cnt, i, pipe_idx, 
vlevel; + int pipe_cnt, i, pipe_idx, vlevel = 0; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; ASSERT(pipes); @@ -1682,6 +1682,7 @@ noinline bool dcn30_internal_validate_bw( * We don't actually support prefetch mode 2, so require that we * at least support prefetch mode 1. */ + context->bw_ctx.dml.validate_max_state = fast_validate; context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = dm_allow_self_refresh; @@ -1691,6 +1692,7 @@ noinline bool dcn30_internal_validate_bw( memset(merge, 0, sizeof(merge)); vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); } + context->bw_ctx.dml.validate_max_state = false; } dml_log_mode_support_params(&context->bw_ctx.dml); @@ -2048,6 +2050,9 @@ bool dcn30_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); + if (!pipes) + goto validate_fail; + DC_FP_START(); out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); DC_FP_END(); @@ -2167,6 +2172,17 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params optimal_uclk_for_dcfclk_sta_targets[i] = bw_params->clk_table.entries[j].memclk_mhz * 16; break; + } else { + /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]): + * If it just so happens that the memory bandwidth is low enough such that + * all the optimal DCFCLK for each UCLK is lower than the smallest DCFCLK STA + * target, we need to populate the optimal UCLK for each DCFCLK STA target to + * be the max UCLK. + */ + if (j == num_uclk_states - 1) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + } } } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h index 8e6b8b7368fd..8e6b8b7368fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index f3b75f283aa2..7538b548c572 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -27,7 +27,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn301_init.h" +#include "dcn301/dcn301_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -61,7 +61,7 @@ #include "dcn10/dcn10_resource.h" #include "dcn30/dcn30_dio_stream_encoder.h" #include "dcn301/dcn301_dio_link_encoder.h" -#include "dcn301_panel_cntl.h" +#include "dcn301/dcn301_panel_cntl.h" #include "vangogh_ip_offset.h" @@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id vpg = dcn301_vpg_create(ctx, vpg_inst); afmt = dcn301_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) { + if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) { kfree(enc1); kfree(vpg); kfree(afmt); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h index ae8672680cdd..ae8672680cdd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 63ac984a04f7..5791b5cc2875 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -23,9 +23,9 @@ * */ -#include "dcn302_init.h" +#include "dcn302/dcn302_init.h" #include "dcn302_resource.h" -#include "dcn302_dccg.h" +#include "dcn302/dcn302_dccg.h" #include "irq/dcn302/irq_service_dcn302.h" #include "dcn30/dcn30_dio_link_encoder.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h index 9f24e73b92b3..9f24e73b92b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 49cb7fde416a..8bc1bcaeaa47 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -23,9 +23,9 @@ * Authors: AMD */ -#include "dcn303_init.h" +#include "dcn303/dcn303_init.h" #include "dcn303_resource.h" -#include "dcn303_dccg.h" +#include "dcn303/dcn303_dccg.h" #include "irq/dcn303/irq_service_dcn303.h" #include "dcn30/dcn30_dio_link_encoder.h" @@ -1143,7 +1143,7 @@ static bool dcn303_resource_construct( int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; - struct ddc_service_init_data ddc_init_data; + struct ddc_service_init_data ddc_init_data = {0}; ctx->dc_bios->regs = &bios_regs; diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h index 37cf1525820b..37cf1525820b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 79416cfb22f0..d4c3e2754f51 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -70,12 +70,11 @@ #include "dml/dcn31/dcn31_fpu.h" #include "dcn31/dcn31_dccg.h" #include "dcn10/dcn10_resource.h" -#include "dcn31_panel_cntl.h" +#include "dcn31/dcn31_panel_cntl.h" #include "dcn30/dcn30_dwb.h" #include "dcn30/dcn30_mmhubbub.h" -// TODO: change include headers /amd/include/asic_reg after upstream #include "yellow_carp_offset.h" #include "dcn/dcn_3_1_2_offset.h" #include "dcn/dcn_3_1_2_sh_mask.h" @@ -892,7 +891,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ - .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, + .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE, .using_dml2 = false, }; @@ -1311,6 +1310,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], @@ -1645,7 +1646,7 @@ int dcn31_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; DC_FP_START(); @@ -1767,11 +1768,14 @@ bool 
dcn31_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); + if (!pipes) + goto validate_fail; + DC_FP_START(); out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); DC_FP_END(); - // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg + // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) fast_validate = false; @@ -1941,8 +1945,6 @@ static bool dcn31_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; - dc->config.use_old_fixed_vs_sequence = true; - /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index 901436591ed4..901436591ed4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 677361d74a4e..ff50f43e4c00 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -871,7 +871,7 @@ static const struct dc_plane_cap plane_cap = { static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = false, .enable_z9_disable_interface = true, - .minimum_z8_residency_time = 2000, + .minimum_z8_residency_time = 2100, .psr_skip_crtc_disable = true, .replay_skip_crtc_disabled = true, .disable_dmcu = true, @@ -925,27 +925,10 @@ static const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = true, + .enable_legacy_fast_update = true, .using_dml2 = false, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1384,6 +1367,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], @@ -1744,6 +1729,9 @@ bool dcn314_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); + if (!pipes) + goto validate_fail; + if (filter_modes_for_single_channel_workaround(dc, context)) goto validate_fail; @@ -1938,8 +1926,6 @@ static bool dcn314_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; /* Disable pipe power gating */ dc->debug.disable_dpp_power_gate = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h index 49ffe71018df..49ffe71018df 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index cb8024eee8e4..4ce0f4bf1d9b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1309,6 +1309,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], @@ -1631,8 +1633,10 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) int i; struct resource_context *res_ctx = &context->res_ctx; - /*Don't apply for single stream*/ - if (context->stream_count < 2) + /* Only apply for dual stream scenarios with edp*/ + if (context->stream_count != 2) + return false; + if (context->streams[0]->signal != SIGNAL_TYPE_EDP && context->streams[1]->signal != SIGNAL_TYPE_EDP) return false; for (i = 0; i < dc->res_pool->pipe_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h index 22849eaa6f24..22849eaa6f24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index b9753d4606f8..5fd52c5fcee4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -125,7 +125,6 @@ #include "link_enc_cfg.h" #define DCN3_16_MAX_DET_SIZE 384 -#define DCN3_16_MIN_COMPBUF_SIZE_KB 128 #define DCN3_16_CRB_SEGMENT_SIZE_KB 64 enum dcn31_clk_src_array_id { @@ -1306,6 +1305,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], @@ -1614,7 +1615,7 @@ static int dcn316_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; DC_FP_START(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h index aba6d634131b..aba6d634131b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 89b072447dba..abd76345d1e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -27,7 +27,7 @@ #include "dm_services.h" #include "dc.h" -#include "dcn32_init.h" +#include "dcn32/dcn32_init.h" #include "resource.h" #include "include/irq_service_interface.h" @@ -41,7 +41,7 @@ #include "dcn31/dcn31_hubbub.h" #include "dcn32/dcn32_hubbub.h" #include "dcn32/dcn32_mpc.h" -#include "dcn32_hubp.h" 
+#include "dcn32/dcn32_hubp.h" #include "irq/dcn32/irq_service_dcn32.h" #include "dcn32/dcn32_dpp.h" #include "dcn32/dcn32_optc.h" @@ -89,6 +89,8 @@ #include "dcn20/dcn20_vmid.h" #include "dml/dcn32/dcn32_fpu.h" +#include "dc_state_priv.h" + #include "dml2/dml2_wrapper.h" #define DC_LOGGER_INIT(logger) @@ -1302,6 +1304,8 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ #undef REG_STRUCT #define REG_STRUCT hpo_dp_link_enc_regs @@ -1644,7 +1648,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc, if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) phantom_plane = prev_phantom_plane; else - phantom_plane = dc_create_plane_state(dc); + phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state); memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, @@ -1665,9 +1669,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc, phantom_plane->clip_rect.y = 0; phantom_plane->clip_rect.height = phantom_stream->src.height; - phantom_plane->is_phantom = true; - - dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context); + dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, context); curr_pipe = curr_pipe->bottom_pipe; prev_phantom_plane = phantom_plane; @@ -1683,13 +1685,7 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc, struct dc_stream_state *phantom_stream = NULL; struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; - phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink); - phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; - phantom_stream->dpms_off = true; - phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; - phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; - ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; - ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; + phantom_stream = dc_state_create_phantom_stream(dc, context, ref_pipe->stream); /* stream has limited viewport and small timing */ memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); @@ -1699,81 +1695,10 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc, dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx); DC_FP_END(); - dc_add_stream_to_ctx(dc, context, phantom_stream); + dc_state_add_phantom_stream(dc, context, phantom_stream, ref_pipe->stream); return phantom_stream; } -void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context) -{ - int i; - struct dc_plane_state *phantom_plane = NULL; - struct dc_stream_state *phantom_stream = NULL; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (resource_is_pipe_type(pipe, OTG_MASTER) && - resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - phantom_plane = pipe->plane_state; - phantom_stream = pipe->stream; - - dc_plane_state_retain(phantom_plane); - dc_stream_retain(phantom_stream); - } - } -} - -// return true if removed piped from ctx, false otherwise -bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update) -{ - int i; - 
bool removed_pipe = false; - struct dc_plane_state *phantom_plane = NULL; - struct dc_stream_state *phantom_stream = NULL; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - // build scaling params for phantom pipes - if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - phantom_plane = pipe->plane_state; - phantom_stream = pipe->stream; - - dc_rem_all_planes_for_stream(dc, pipe->stream, context); - dc_remove_stream_from_ctx(dc, context, pipe->stream); - - /* Ref count is incremented on allocation and also when added to the context. - * Therefore we must call release for the the phantom plane and stream once - * they are removed from the ctx to finally decrement the refcount to 0 to free. - */ - dc_plane_state_release(phantom_plane); - dc_stream_release(phantom_stream); - - removed_pipe = true; - } - - /* For non-full updates, a shallow copy of the current state - * is created. In this case we don't want to erase the current - * state (there can be 2 HIRQL threads, one in flip, and one in - * checkMPO) that can cause a race condition. - * - * This is just a workaround, needs a proper fix. - */ - if (!fast_update) { - // Clear all phantom stream info - if (pipe->stream) { - pipe->stream->mall_stream_config.type = SUBVP_NONE; - pipe->stream->mall_stream_config.paired_stream = NULL; - } - - if (pipe->plane_state) { - pipe->plane_state->is_phantom = false; - } - } - } - return removed_pipe; -} - /* TODO: Input to this function should indicate which pipe indexes (or streams) * require a phantom pipe / stream */ @@ -1798,7 +1723,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, // We determine which phantom pipes were added by comparing with // the phantom stream. if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { pipe->stream->use_dynamic_meta = false; pipe->plane_state->flip_immediate = false; if (!resource_build_scaling_params(pipe)) { @@ -1817,7 +1742,6 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val int vlevel = 0; int pipe_cnt = 0; display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); - struct mall_temp_config mall_temp_config; /* To handle Freesync properly, setting FreeSync DML parameters * to its default state for the first stage of validation @@ -1827,29 +1751,15 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val DC_LOGGER_INIT(dc->ctx->logger); - /* For fast validation, there are situations where a shallow copy of - * of the dc->current_state is created for the validation. In this case - * we want to save and restore the mall config because we always - * teardown subvp at the beginning of validation (and don't attempt - * to add it back if it's fast validation). If we don't restore the - * subvp config in cases of fast validation + shallow copy of the - * dc->current_state, the dc->current_state will have a partially - * removed subvp state when we did not intend to remove it. 
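The comment being removed here, and the mall_temp_config save/restore deleted just below, existed because SubVP classification used to live on the stream object itself, so a shallow-copied validation state could clobber dc->current_state. In this patch the classification moves into the state and is read through dc_state_get_pipe_subvp_type(context, pipe), as the dcn32 hunks above show. A standalone sketch of that accessor pattern, assuming invented container types (only the SUBVP_* values and the state-owned lookup idea come from the patch):

#include <stdio.h>

enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct sketch_pipe {
	int stream_id;
};

/* The state, not the shared stream object, owns the SubVP classification,
 * so two states derived from the same streams cannot corrupt each other. */
struct sketch_state {
	enum mall_stream_type subvp_type_by_stream[4];
};

/* Accessor modeled on dc_state_get_pipe_subvp_type(). */
static enum mall_stream_type get_pipe_subvp_type(const struct sketch_state *state,
						 const struct sketch_pipe *pipe)
{
	return state->subvp_type_by_stream[pipe->stream_id];
}

int main(void)
{
	struct sketch_state state = {
		.subvp_type_by_stream = { SUBVP_MAIN, SUBVP_PHANTOM },
	};
	struct sketch_pipe pipe = { .stream_id = 1 };

	printf("%d\n", get_pipe_subvp_type(&state, &pipe) == SUBVP_PHANTOM);
	return 0;
}

With ownership in the state, tearing phantom info out of a validation copy no longer touches dc->current_state, which is why the workaround comment and the save/restore below can go.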
- */ - if (fast_validate) { - memset(&mall_temp_config, 0, sizeof(mall_temp_config)); - dcn32_save_mall_state(dc, context, &mall_temp_config); - } - BW_VAL_TRACE_COUNT(); + if (!pipes) + goto validate_fail; + DC_FP_START(); out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); DC_FP_END(); - if (fast_validate) - dcn32_restore_mall_state(dc, context, &mall_temp_config); - if (pipe_cnt == 0) goto validate_out; @@ -1866,6 +1776,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); dcn32_override_min_req_memclk(dc, context); + dcn32_override_min_req_dcfclk(dc, context); BW_VAL_TRACE_END_WATERMARKS(); @@ -1893,7 +1804,9 @@ bool dcn32_validate_bandwidth(struct dc *dc, bool out = false; if (dc->debug.using_dml2) - out = dml2_validate(dc, context, fast_validate); + out = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate); else out = dml1_validate(dc, context, fast_validate); return out; @@ -1909,9 +1822,48 @@ int dcn32_populate_dml_pipes_from_context( struct pipe_ctx *pipe = NULL; bool subvp_in_use = false; struct dc_crtc_timing *timing; + int subvp_main_pipe_index = -1; + enum mall_stream_type mall_type; + bool single_display_subvp = false; + struct dc_stream_state *stream = NULL; + int num_subvp_main = 0; + int num_subvp_phantom = 0; + int num_subvp_none = 0; + int odm_slice_count; dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + /* For single display subvp, look for subvp main so if we have phantom + * pipe, we can set odm policy to match main pipe + */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + mall_type = dc_state_get_stream_subvp_type(context, stream); + if (mall_type == SUBVP_MAIN) + num_subvp_main++; + else if (mall_type == SUBVP_PHANTOM) + num_subvp_phantom++; + else + num_subvp_none++; + } + if (num_subvp_main == 1 && num_subvp_phantom == 1 && num_subvp_none == 0) + single_display_subvp = true; + + if (single_display_subvp) { + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &res_ctx->pipe_ctx[i]; + if (!res_ctx->pipe_ctx[i].stream) + continue; + + mall_type = dc_state_get_pipe_subvp_type(context, pipe); + if (mall_type == SUBVP_MAIN) { + if (resource_is_pipe_type(pipe, OTG_MASTER)) + subvp_main_pipe_index = i; + } + pipe_cnt++; + } + } + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) @@ -1924,7 +1876,36 @@ int dcn32_populate_dml_pipes_from_context( dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt); DC_FP_END(); pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + if (dc->config.enable_windowed_mpo_odm && + dc->debug.enable_single_display_2to1_odm_policy) { + /* For single display subvp, if pipe is phantom pipe, + * then copy odm policy from subvp main pipe + */ + mall_type = dc_state_get_pipe_subvp_type(context, pipe); + if (single_display_subvp && (mall_type == SUBVP_PHANTOM)) { + if (subvp_main_pipe_index < 0) { + odm_slice_count = -1; + ASSERT(0); + } else { + odm_slice_count = resource_get_odm_slice_count(&res_ctx->pipe_ctx[subvp_main_pipe_index]); + } + } else { + odm_slice_count = resource_get_odm_slice_count(pipe); + } + switch (odm_slice_count) { + case 2: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = 
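/* For reference, the switch below maps the measured ODM slice count to
 * DML's combine policy: 2 slices -> dm_odm_combine_policy_2to1,
 * 4 slices -> dm_odm_combine_policy_4to1, and anything else (including
 * the -1 assert path when no SubVP main pipe was found) falls back to
 * dm_odm_combine_policy_dal.  In a single-display SubVP config the
 * phantom pipe deliberately inherits the slice count of the main
 * OTG-master pipe so both pipes end up with the same policy.
 */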
dm_odm_combine_policy_2to1; + break; + case 4: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; + break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } + } else { + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } + pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; @@ -1933,7 +1914,7 @@ int dcn32_populate_dml_pipes_from_context( * This is just a workaround -- needs a proper fix. */ if (!fast_validate) { - switch (pipe->stream->mall_stream_config.type) { + switch (dc_state_get_pipe_subvp_type(context, pipe)) { case SUBVP_MAIN: pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; subvp_in_use = true; @@ -1992,6 +1973,22 @@ int dcn32_populate_dml_pipes_from_context( return pipe_cnt; } +unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes) +{ + uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways; + + /* add 2 lines for worst case alignment */ + cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; + + total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; + lines_per_way = total_cache_lines / dc->caps.cache_num_ways; + num_ways = cache_lines_used / lines_per_way; + if (cache_lines_used % lines_per_way > 0) + num_ways++; + + return num_ways; +} + static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, .get_subvp_en = dcn32_subvp_in_use, @@ -2009,8 +2006,20 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + struct dml2_configuration_options dml2_opt = dc->dml2_options; + DC_FP_START(); + dcn32_update_bw_bounding_box_fpu(dc, bw_params); + + dml2_opt.use_clock_dc_limits = false; + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) + dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2); + + dml2_opt.use_clock_dc_limits = true; + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) + dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + DC_FP_END(); } @@ -2037,10 +2046,8 @@ static struct resource_funcs dcn32_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, - .remove_phantom_pipes = dcn32_remove_phantom_pipes, - .retain_phantom_pipes = dcn32_retain_phantom_pipes, - .save_mall_state = dcn32_save_mall_state, - .restore_mall_state = dcn32_restore_mall_state, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, + .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -2129,7 +2136,8 @@ static bool dcn32_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; - dc->caps.mall_size_total = 0; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * 
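/* Worked example for the two MALL formulas above, with an illustrative
 * vram_info.num_chans of 8 (the real value is read from VBIOS):
 *
 *   mall_size_total = 4 * 8 * 1048576 = 33554432 bytes (32 MB)
 *
 * Handing an allocation of that size to
 * dcn32_calculate_mall_ways_from_bytes() with cache_line_size = 64
 * rounds it up to whole cache ways:
 *
 *   cache_lines_used = 33554432 / 64 + 2 = 524290
 *   lines_per_way    = (max_cab_allocation_bytes / 64) / cache_num_ways
 *   num_ways         = cache_lines_used / lines_per_way, +1 on remainder
 *
 * i.e. a ceiling division with two extra cache lines of slack for
 * worst-case alignment.
 */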
dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; @@ -2202,6 +2210,7 @@ static bool dcn32_resource_construct( dc->config.use_pipe_ctx_sync_logic = true; dc->config.dc_mode_clk_limit_support = true; + dc->config.enable_windowed_mpo_odm = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -2442,27 +2451,10 @@ static bool dcn32_resource_construct( dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; - - dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; - dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx; - dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state; - dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context; - dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx; - dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink; - dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release; - dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release; dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; + dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes; dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; @@ -2560,7 +2552,7 @@ struct resource_pool *dcn32_create_resource_pool( * full update which delays the flip for 1 frame. If we use the original pipe * we don't have to toggle its power. So we can flip faster. 
*/ -static int find_optimal_free_pipe_as_secondary_dpp_pipe( +int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, const struct resource_pool *pool, @@ -2743,7 +2735,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( return dcn32_acquire_idle_pipe_for_head_pipe_in_layer( new_ctx, pool, opp_head_pipe->stream, opp_head_pipe); - free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe( + free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( &cur_ctx->res_ctx, &new_ctx->res_ctx, pool, opp_head_pipe); if (free_pipe_idx >= 0) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index b931008114c9..fee67fbab8e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -39,8 +39,10 @@ #define DCN3_2_MBLK_HEIGHT_8BPE 64 #define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq #define SUBVP_HIGH_REFRESH_LIST_LEN 4 +#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2 #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 #define DCN3_2_VMIN_DISPCLK_HZ 717000000 +#define MIN_SUBVP_DCFCLK_KHZ 400000 #define TO_DCN32_RES_POOL(pool)\ container_of(pool, struct dcn32_resource_pool, base) @@ -57,6 +59,15 @@ struct subvp_high_refresh_list { } res[SUBVP_HIGH_REFRESH_LIST_LEN]; }; +struct subvp_active_margin_list { + int min_refresh; + int max_refresh; + struct { + int width; + int height; + } res[SUBVP_ACTIVE_MARGIN_LIST_LEN]; +}; + struct dcn32_resource_pool { struct resource_pool base; }; @@ -81,12 +92,6 @@ bool dcn32_release_post_bldn_3dlut( struct dc_3dlut **lut, struct dc_transfer_func **shaper); -bool dcn32_remove_phantom_pipes(struct dc *dc, - struct dc_state *context, bool fast_update); - -void dcn32_retain_phantom_pipes(struct dc *dc, - struct dc_state *context); - void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -108,10 +113,6 @@ void dcn32_calculate_wm_and_dlg( int pipe_cnt, int vlevel); -uint32_t dcn32_helper_mall_bytes_to_ways( - struct dc *dc, - uint32_t total_size_in_mall_bytes); - uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -136,6 +137,12 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); bool dcn32_is_center_timing(struct pipe_ctx *pipe); bool dcn32_is_psr_capable(struct pipe_ctx *pipe); +int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *new_opp_head); + struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( const struct dc_state *cur_ctx, struct dc_state *new_ctx, @@ -159,15 +166,7 @@ void dcn32_determine_det_override(struct dc *dc, void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); -void dcn32_save_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config); - -void dcn32_restore_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config); - -struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context); +struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state 
*context); bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); @@ -183,6 +182,12 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context); bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel); +void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); + +void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); + +unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index f7de3eca1225..e4b360d89b3b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -63,7 +63,7 @@ #include "dcn31/dcn31_apg.h" #include "dcn31/dcn31_dio_link_encoder.h" #include "dcn32/dcn32_dio_link_encoder.h" -#include "dcn321_dio_link_encoder.h" +#include "dcn321/dcn321_dio_link_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" @@ -92,6 +92,8 @@ #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" +#include "dc_state_priv.h" + #define DC_LOGGER_INIT(logger) enum dcn321_clk_src_array_id { @@ -1286,6 +1288,8 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ #undef REG_STRUCT #define REG_STRUCT hpo_dp_link_enc_regs @@ -1577,8 +1581,20 @@ static struct dc_cap_funcs cap_funcs = { static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + struct dml2_configuration_options dml2_opt = dc->dml2_options; + DC_FP_START(); + dcn321_update_bw_bounding_box_fpu(dc, bw_params); + + dml2_opt.use_clock_dc_limits = false; + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) + dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2); + + dml2_opt.use_clock_dc_limits = true; + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) + dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + DC_FP_END(); } @@ -1605,10 +1621,8 @@ static struct resource_funcs dcn321_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, - .remove_phantom_pipes = dcn32_remove_phantom_pipes, - .retain_phantom_pipes = dcn32_retain_phantom_pipes, - .save_mall_state = dcn32_save_mall_state, - .restore_mall_state = dcn32_restore_mall_state, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, + .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -1696,7 +1710,9 @@ static bool dcn321_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; - dc->caps.mall_size_total = 0; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; + dc->caps.cursor_cache_size = dc->caps.max_cursor_size * 
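/* cursor_cache_size is the worst-case cursor footprint, width * height
 * * 8 bytes.  Assuming an illustrative max_cursor_size of 256 (the
 * actual value is set elsewhere in this constructor), that works out
 * to 256 * 256 * 8 = 524288 bytes, i.e. 512 KB of cursor cache.
 */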
dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; @@ -1761,6 +1777,7 @@ static bool dcn321_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->config.dc_mode_clk_limit_support = true; + dc->config.enable_windowed_mpo_odm = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -1996,27 +2013,10 @@ static bool dcn321_resource_construct( dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; - - dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; - dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx; - dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state; - dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context; - dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx; - dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink; - dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release; - dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release; dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; + dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes; dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h index 82cbf009f2d3..82cbf009f2d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 3c7c810bab1f..2df8a742516c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -78,7 +78,7 @@ #include "dcn10/dcn10_resource.h" #include "dcn31/dcn31_panel_cntl.h" #include "dcn35/dcn35_hwseq.h" -#include "dcn35_dio_link_encoder.h" +#include "dcn35/dcn35_dio_link_encoder.h" #include "dml/dcn31/dcn31_fpu.h" /*todo*/ #include 
"dml/dcn35/dcn35_fpu.h" #include "dcn35/dcn35_dwb.h" @@ -96,12 +96,15 @@ #include "reg_helper.h" #include "dce/dmub_abm.h" #include "dce/dmub_psr.h" +#include "dce/dmub_replay.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "dml/dcn31/display_mode_vba_31.h" /*temp*/ #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" +#include "dc_state_priv.h" + #include "link_enc_cfg.h" #define DC_LOGGER_INIT(logger) @@ -610,7 +613,35 @@ static struct dce_hwseq_registers hwseq_reg; HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\ HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\ HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh) + HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK3_GATE_DISABLE, mask_sh) static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN35_MASK_SH_LIST(__SHIFT) @@ -670,7 +701,7 @@ static const struct dc_plane_cap plane_cap = { // 6:1 downscaling ratio: 1000/6 = 166.666 .max_downscale_factor = { - .argb8888 = 167, + .argb8888 = 250, .nv12 = 167, .fp16 = 167 }, @@ -689,7 +720,9 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dcc = DCC_ENABLE, .disable_dpp_power_gate = true, .disable_hubp_power_gate = true, - .disable_clock_gate = true, + .disable_optc_power_gate = true, /*should the same as above two*/ + .disable_hpo_power_gate = false, /*dmubfw force domain25 on*/ + .disable_clock_gate = false, .disable_dsc_power_gate = true, .vsr_support = true, 
.performance_trace = false, @@ -719,43 +752,46 @@ static const struct dc_debug_options debug_defaults_drv = { .bits = { .dpp = true, .dsc = true,/*dscclk and dsc pg*/ - .hdmistream = false, - .hdmichar = false, - .dpstream = false, - .symclk32_se = false, - .symclk32_le = false, - .symclk_fe = false, - .physymclk = false, - .dpiasymclk = false, + .hdmistream = true, + .hdmichar = true, + .dpstream = true, + .symclk32_se = true, + .symclk32_le = true, + .symclk_fe = true, + .physymclk = true, + .dpiasymclk = true, } }, .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ + .minimum_z8_residency_time = 1, /* Always allow when other conditions are met */ .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, .enable_legacy_fast_update = true, - .enable_single_display_2to1_odm_policy = false, - .disable_idle_power_optimizations = true, + .enable_single_display_2to1_odm_policy = true, + .disable_idle_power_optimizations = false, .dmcub_emulation = false, .disable_boot_optimizations = false, .disable_unbounded_requesting = false, .disable_mem_low_power = false, - .enable_hpo_pg_support = false, //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions .enable_double_buffered_dsc_pg_support = true, .enable_dp_dig_pixel_rate_div_policy = 1, .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, - .ips2_eval_delay_us = 200, - .ips2_entry_delay_us = 400 + .ips2_eval_delay_us = 2000, + .ips2_entry_delay_us = 800, + .disable_dmub_reallow_idle = false, + .static_screen_wait_frames = 2, }; static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, .disallow_psrsu = false, + .disallow_replay = false, }, .ilr = { .optimize_edp_link_rate = true, @@ -1332,6 +1368,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ #undef REG_STRUCT #define REG_STRUCT hpo_dp_link_enc_regs @@ -1514,6 +1552,9 @@ static void dcn35_resource_destruct(struct dcn35_resource_pool *pool) if (pool->base.psr != NULL) dmub_psr_destroy(&pool->base.psr); + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + if (pool->base.pg_cntl != NULL) dcn_pg_cntl_destroy(&pool->base.pg_cntl); @@ -1695,7 +1736,16 @@ static bool dcn35_validate_bandwidth(struct dc *dc, { bool out = false; - out = dml2_validate(dc, context, fast_validate); + out = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? 
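/* From this series on, each dc_state carries two DML2 instances:
 * bw_ctx.dml2 is built against AC clock limits and
 * bw_ctx.dml2_dc_power_source against DC (battery) limits, matching
 * the use_clock_dc_limits reinit paths in the bounding-box hunks
 * above; validation selects between them on context->power_source,
 * as done here.
 */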
context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate); + + if (fast_validate) + return out; + + DC_FP_START(); + dcn35_decide_zstate_support(dc, context); + DC_FP_END(); return out; } @@ -1842,7 +1892,7 @@ static bool dcn35_resource_construct( /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; - dc->config.use_default_clock_table = false; + /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -1861,7 +1911,8 @@ static bool dcn35_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - + /*HW default is to have all the FGCG enabled, SW no need to program them*/ + dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); @@ -1991,6 +2042,14 @@ static bool dcn35_resource_construct( goto create_fail; } + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + /* ABM */ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { pool->base.multiple_abms[i] = dmub_abm_create(ctx, @@ -2078,19 +2137,14 @@ static bool dcn35_resource_construct( dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; + dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; + dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h index 99aea102e3f7..f97bb4cb3761 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h @@ -166,6 +166,7 @@ struct resource_pool *dcn35_create_resource_pool( SR(MMHUBBUB_MEM_PWR_CNTL), \ SR(DCCG_GATE_DISABLE_CNTL), \ SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCCG_GATE_DISABLE_CNTL4), \ SR(DCCG_GATE_DISABLE_CNTL5), \ SR(DCFCLK_CNTL),\ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ @@ -239,6 +240,8 @@ struct resource_pool *dcn35_create_resource_pool( SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst),\ SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst),\ SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst),\ + SRI_ARR(OTG_V_COUNT_STOP_CONTROL, OTG, inst),\ + 
SRI_ARR(OTG_V_COUNT_STOP_CONTROL2, OTG, inst),\ SRI_ARR(OTG_TRIGA_CNTL, OTG, inst),\ SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c new file mode 100644 index 000000000000..ddf9560ab772 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -0,0 +1,2163 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. */ + + +#include "dm_services.h" +#include "dc.h" + +#include "dcn31/dcn31_init.h" +#include "dcn351/dcn351_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn351_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" +#include "dcn31/dcn31_resource.h" +#include "dcn32/dcn32_resource.h" +#include "dcn35/dcn35_resource.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn35/dcn35_hubbub.h" +#include "dcn32/dcn32_mpc.h" +#include "dcn35/dcn35_hubp.h" +#include "irq/dcn351/irq_service_dcn351.h" +#include "dcn35/dcn35_dpp.h" +#include "dcn35/dcn35_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn35/dcn35_opp.h" +#include "dcn35/dcn35_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" + +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn35/dcn35_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn32/dcn32_hpo_dp_link_encoder.h" +#include "link.h" +#include "dcn31/dcn31_apg.h" +#include "dcn32/dcn32_dio_link_encoder.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dcn35/dcn35_dccg.h" +#include "dcn35/dcn35_pg_cntl.h" +#include "dcn10/dcn10_resource.h" +#include "dcn31/dcn31_panel_cntl.h" +#include "dcn35/dcn35_hwseq.h" +#include "dcn35/dcn35_dio_link_encoder.h" +#include "dml/dcn31/dcn31_fpu.h" /*todo*/ +#include "dml/dcn35/dcn35_fpu.h" +#include "dml/dcn351/dcn351_fpu.h" +#include "dcn35/dcn35_dwb.h" +#include "dcn35/dcn35_mmhubbub.h" + +#include "dcn/dcn_3_5_1_offset.h" +#include "dcn/dcn_3_5_1_sh_mask.h" +#include "nbio/nbio_7_11_0_offset.h" +#include "mmhub/mmhub_3_3_0_offset.h" +#include "mmhub/mmhub_3_3_0_sh_mask.h" + +#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0 +#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dmub_replay.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" +#include "dml/dcn31/display_mode_vba_31.h" /*temp*/ +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" + +#include "dml2/dml2_wrapper.h" + +#include "link_enc_cfg.h" +#define DC_LOGGER_INIT(logger) + +enum dcn351_clk_src_array_id { + DCN351_CLK_SRC_PLL0, + DCN351_CLK_SRC_PLL1, + DCN351_CLK_SRC_PLL2, + DCN351_CLK_SRC_PLL3, + DCN351_CLK_SRC_PLL4, + DCN351_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expend register list macro defined in HW object header file + */ + +/* DCN */ +/* TODO awful hack. 
fixup dcn20_dwb.h */ +#undef BASE_INNER +#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SR_ARR(reg_name, id) \ + REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +#define SR_ARR_INIT(reg_name, id, value) \ + REG_STRUCT[id].reg_name = value + +#define SRI(reg_name, block, id)\ + REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_ARR(reg_name, block, id)\ + REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SR_ARR_I2C(reg_name, id) \ + REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +#define SRI_ARR_I2C(reg_name, block, id)\ + REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_ARR_ALPHABET(reg_name, block, index, id)\ + REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRI2_ARR(reg_name, block, id)\ + REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_ARR_2(reg_name, block, id, inst)\ + REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define DCCG_SRII(reg_name, block, id)\ + REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX2_ ## reg_name + +#define NBIO_SR_ARR(reg_name, id)\ + REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX2_ ## reg_name + +#define bios_regs_init() \ + ( \ + NBIO_SR(BIOS_SCRATCH_3),\ + NBIO_SR(BIOS_SCRATCH_6)\ + ) + +static struct bios_registers bios_regs; + +#define clk_src_regs_init(index, pllid)\ + CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) + +static struct dce110_clk_src_regs clk_src_regs[5]; + +static const struct dce110_clk_src_shift cs_shift = { + 
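/* The shift/mask pattern used for every block in this file: a single
 * *_MASK_SH_LIST macro is instantiated twice, once with __SHIFT to
 * produce per-field bit offsets and once with _MASK to produce the
 * matching bit masks, which the reg_helper.h accessors combine to
 * read and write individual register fields without hand-written
 * shifting.
 */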
CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK) +}; + +#define abm_regs_init(id)\ + ABM_DCN32_REG_LIST_RI(id) + +static struct dce_abm_registers abm_regs[4]; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN35(_MASK) +}; + +#define audio_regs_init(id)\ + AUD_COMMON_REG_LIST_RI(id) + +static struct dce_audio_registers audio_regs[7]; + + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs_init(id)\ + VPG_DCN31_REG_LIST_RI(id) + +static struct dcn31_vpg_registers vpg_regs[10]; + +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs_init(id)\ + AFMT_DCN31_REG_LIST_RI(id) + +static struct dcn31_afmt_registers afmt_regs[6]; + +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs_init(id)\ + APG_DCN31_REG_LIST_RI(id) + +static struct dcn31_apg_registers apg_regs[4]; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs_init(id)\ + SE_DCN35_REG_LIST_RI(id) + +static struct dcn10_stream_enc_registers stream_enc_regs[5]; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN35(_MASK) +}; + +#define aux_regs_init(id)\ + DCN2_AUX_REG_LIST_RI(id) + +static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; + +#define hpd_regs_init(id)\ + HPD_REG_LIST_RI(id) + +static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; + + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +#define link_regs_init(id, phyid)\ + ( \ + LE_DCN35_REG_LIST_RI(id), \ + UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ + ) + +static struct dcn10_link_enc_registers link_enc_regs[5]; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN35(__SHIFT), \ + //DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN35(_MASK), \ + //DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_stream_encoder_reg_init(id)\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) + +static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + +#define 
hpo_dp_link_encoder_reg_init(id)\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) + /*DCN3_1_RDPCSTX_REG_LIST(0),*/ + /*DCN3_1_RDPCSTX_REG_LIST(1),*/ + /*DCN3_1_RDPCSTX_REG_LIST(2),*/ + /*DCN3_1_RDPCSTX_REG_LIST(3),*/ + +static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs_init(id)\ + DPP_REG_LIST_DCN35_RI(id) + +static struct dcn3_dpp_registers dpp_regs[4]; + +static const struct dcn35_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN35(__SHIFT) +}; + +static const struct dcn35_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN35(_MASK) +}; + +#define opp_regs_init(id)\ + OPP_REG_LIST_DCN35_RI(id) + +static struct dcn35_opp_registers opp_regs[4]; + +static const struct dcn35_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn35_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN35(_MASK) +}; + +#define aux_engine_regs_init(id)\ + ( \ + AUX_COMMON_REG_LIST0_RI(id), \ + SR_ARR_INIT(AUXN_IMPCAL, id, 0), \ + SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ + SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK) \ + ) + +static struct dce110_aux_registers aux_engine_regs[5]; + +#define dwbc_regs_dcn3_init(id)\ + DWBC_COMMON_REG_LIST_DCN30_RI(id) + +static struct dcn30_dwbc_registers dwbc35_regs[1]; + +static const struct dcn35_dwbc_shift dwbc35_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn35_dwbc_mask dwbc35_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN35(_MASK) +}; + +#define mcif_wb_regs_dcn3_init(id)\ + MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(id) + +static struct dcn35_mmhubbub_registers mcif_wb35_regs[1]; + +static const struct dcn35_mmhubbub_shift mcif_wb35_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) +}; + +static const struct dcn35_mmhubbub_mask mcif_wb35_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(_MASK) +}; + +#define dsc_regsDCN35_init(id)\ + DSC_REG_LIST_DCN20_RI(id) + +static struct dcn20_dsc_registers dsc_regs[4]; + +static const struct dcn35_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN35(__SHIFT) +}; + +static const struct dcn35_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN35(_MASK) +}; + +static struct dcn30_mpc_registers mpc_regs; + +#define dcn_mpc_regs_init() \ + MPC_REG_LIST_DCN3_2_RI(0),\ + MPC_REG_LIST_DCN3_2_RI(1),\ + MPC_REG_LIST_DCN3_2_RI(2),\ + MPC_REG_LIST_DCN3_2_RI(3),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ + MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + +#define optc_regs_init(id)\ + OPTC_COMMON_REG_LIST_DCN3_5_RI(id) + +static struct dcn_optc_registers optc_regs[4]; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK) +}; + +#define hubp_regs_init(id)\ + HUBP_REG_LIST_DCN30_RI(id) + +static struct dcn_hubp2_registers hubp_regs[4]; + + +static const struct dcn35_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn35_hubp2_mask 
hubp_mask = { + HUBP_MASK_SH_LIST_DCN35(_MASK) +}; + +static struct dcn_hubbub_registers hubbub_reg; + +#define hubbub_reg_init()\ + HUBBUB_REG_LIST_DCN35(0) + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN35(_MASK) +}; + +static struct dccg_registers dccg_regs; + +#define dccg_regs_init()\ + DCCG_REG_LIST_DCN35() + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN35(_MASK) +}; + +static struct pg_cntl_registers pg_cntl_regs; + +#define pg_cntl_dcn35_regs_init() \ + PG_CNTL_REG_LIST_DCN35() + +static const struct pg_cntl_shift pg_cntl_shift = { + PG_CNTL_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct pg_cntl_mask pg_cntl_mask = { + PG_CNTL_MASK_SH_LIST_DCN35(_MASK) +}; + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + +static struct dce_hwseq_registers hwseq_reg; + +#define hwseq_reg_init()\ + HWSEQ_DCN35_REG_LIST() + +#define HWSEQ_DCN35_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + 
HWS_SF(, DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN23_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DISPCLK_R_DMU_GATE_DIS, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DISPCLK_G_RBBMIF_GATE_DIS, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DPREFCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DISPCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DPPCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DTBCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DCFCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DPIACLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, 
DPIASYMCLK3_GATE_DISABLE, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN35_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN35_MASK_SH_LIST(_MASK) +}; + +#define vmid_regs_init(id)\ + DCN20_VMID_REG_LIST_RI(id) + +static struct dcn_vmid_registers vmid_regs[16]; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn351 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 5, + .num_stream_encoder = 5, + .num_dig_link_enc = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, + .num_pll = 4,/*1 c10 edp, 3xc20 combo PHY*/ + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 2, + .num_dsc = 4, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + // 6:1 downscaling ratio: 1000/6 = 166.666 + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = false, + .pipe_split_policy = MPC_SPLIT_AVOID, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .disable_dpp_power_gate = true, + .disable_hubp_power_gate = true, + .disable_optc_power_gate = true, /*should the same as above two*/ + .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/ + .disable_clock_gate = false, + .disable_dsc_power_gate = true, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 4096,/*upto true 4k*/ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable, + .dmub_command_table = true, + .pstate_enabled = true, + .use_max_lb = true, + .enable_mem_low_power = { + .bits = { + .vga = false, + .i2c = true, + .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled + .dscl = true, + .cm = true, + .mpc = true, + .optc = true, + .vpg = true, + .afmt = true, + } + }, + .root_clock_optimization = { + .bits = { + .dpp = true, + .dsc = true,/*dscclk and dsc pg*/ + .hdmistream = true, + .hdmichar = true, + .dpstream = true, + .symclk32_se = true, + .symclk32_le = true, + .symclk_fe = true, + .physymclk = true, + .dpiasymclk = true, + } + }, + .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, + .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ + .minimum_z8_residency_time = 2100, + .using_dml2 = true, + .support_eDP1_5 = true, + .enable_hpo_pg_support = false, + .enable_legacy_fast_update = true, + .enable_single_display_2to1_odm_policy = true, + .disable_idle_power_optimizations = false, + .dmcub_emulation = false, + .disable_boot_optimizations = false, + .disable_unbounded_requesting = false, + .disable_mem_low_power = false, + //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions + .enable_double_buffered_dsc_pg_support = true, + 
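/* These DCN 3.5.1 debug defaults track the DCN 3.5 table earlier in
 * this diff; the notable deltas are minimum_z8_residency_time (2100
 * here vs 1), disable_hpo_power_gate (true here vs false) and, further
 * down, disable_dmub_reallow_idle (true here vs false), a more
 * conservative power-gating and idle starting point for the new ASIC.
 */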
.enable_dp_dig_pixel_rate_div_policy = 1, + .disable_z10 = false, + .ignore_pg = true, + .psp_disabled_wa = true, + .ips2_eval_delay_us = 2000, + .ips2_entry_delay_us = 800, + .disable_dmub_reallow_idle = true, + .static_screen_wait_frames = 2, +}; + +static const struct dc_panel_config panel_config_defaults = { + .psr = { + .disable_psr = false, + .disallow_psrsu = false, + .disallow_replay = false, + }, + .ilr = { + .optimize_edp_link_rate = true, + }, +}; + +static void dcn35_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn35_dpp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + bool success = (dpp != NULL); + + if (!success) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT dpp_regs + dpp_regs_init(0), + dpp_regs_init(1), + dpp_regs_init(2), + dpp_regs_init(3); + + success = dpp35_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, + &tf_mask); + if (success) { + dpp35_set_fgcg( + dpp, + ctx->dc->debug.enable_fine_grain_clock_gating.bits.dpp); + return &dpp->base; + } + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +static struct output_pixel_processor *dcn35_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT opp_regs + opp_regs_init(0), + opp_regs_init(1), + opp_regs_init(2), + opp_regs_init(3); + + dcn35_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + + dcn35_opp_set_fgcg(opp, ctx->dc->debug.enable_fine_grain_clock_gating.bits.opp); + + return &opp->base; +} + +static struct dce_aux *dcn31_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT aux_engine_regs + aux_engine_regs_init(0), + aux_engine_regs_init(1), + aux_engine_regs_init(2), + aux_engine_regs_init(3), + aux_engine_regs_init(4); + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} + +#define i2c_inst_regs_init(id)\ + I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) + +static struct dce_i2c_registers i2c_hw_regs[5]; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN35(_MASK) +}; + +/* ========================================================== */ + +/* + * DPIA index | Preferred Encoder | Host Router + * 0 | C | 0 + * 1 | First Available | 0 + * 2 | D | 1 + * 3 | First Available | 1 + */ +/* ========================================================== */ +static const enum engine_id dpia_to_preferred_enc_id_table[] = { + ENGINE_ID_DIGC, + ENGINE_ID_DIGC, + ENGINE_ID_DIGD, + ENGINE_ID_DIGD +}; + +static enum engine_id dcn351_get_preferred_eng_id_dpia(unsigned int dpia_index) +{ + return dpia_to_preferred_enc_id_table[dpia_index]; +} + +static struct dce_i2c_hw *dcn31_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT 
i2c_hw_regs + i2c_inst_regs_init(1), + i2c_inst_regs_init(2), + i2c_inst_regs_init(3), + i2c_inst_regs_init(4), + i2c_inst_regs_init(5); + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn35_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); + + if (!mpc30) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT mpc_regs + dcn_mpc_regs_init(); + + dcn32_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub3) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubbub_reg + hubbub_reg_init(); + +#undef REG_STRUCT +#define REG_STRUCT vmid_regs + vmid_regs_init(0), + vmid_regs_init(1), + vmid_regs_init(2), + vmid_regs_init(3), + vmid_regs_init(4), + vmid_regs_init(5), + vmid_regs_init(6), + vmid_regs_init(7), + vmid_regs_init(8), + vmid_regs_init(9), + vmid_regs_init(10), + vmid_regs_init(11), + vmid_regs_init(12), + vmid_regs_init(13), + vmid_regs_init(14), + vmid_regs_init(15); + + hubbub35_construct(hubbub3, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask, + 384,/*ctx->dc->dml.ip.det_buffer_size_kbytes,*/ + 8, /*ctx->dc->dml.ip.pixel_chunk_size_kbytes,*/ + 1792 /*ctx->dc->dml.ip.config_return_buffer_size_in_kbytes*/); + + + for (i = 0; i < res_cap_dcn351.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +static struct timing_generator *dcn35_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT optc_regs + optc_regs_init(0), + optc_regs_init(1), + optc_regs_init(2), + optc_regs_init(3); + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn35_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn35_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT link_enc_aux_regs + aux_regs_init(0), + aux_regs_init(1), + aux_regs_init(2), + aux_regs_init(3), + aux_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_hpd_regs + hpd_regs_init(0), + hpd_regs_init(1), + hpd_regs_init(2), + hpd_regs_init(3), + hpd_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_regs + link_regs_init(0, A), + link_regs_init(1, B), + link_regs_init(2, C), + link_regs_init(3, D), + 
link_regs_init(4, E); + + dcn35_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +/* Create a minimal link encoder object not associated with a particular + * physical connector. + * resource_funcs.link_enc_create_minimal + */ +static struct link_encoder *dcn31_link_enc_create_minimal( + struct dc_context *ctx, enum engine_id eng_id) +{ + struct dcn20_link_encoder *enc20; + + if ((eng_id - ENGINE_ID_DIGA) >= ctx->dc->res_pool->res_cap->num_dig_link_enc) + return NULL; + + enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + if (!enc20) + return NULL; + + dcn31_link_encoder_construct_minimal( + enc20, + ctx, + &link_enc_feature, + &link_enc_regs[eng_id - ENGINE_ID_DIGA], + eng_id); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn31_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn31_panel_cntl_construct(panel_cntl, init_data); + + return &panel_cntl->base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); +} + +static struct audio *dcn31_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + +#undef REG_STRUCT +#define REG_STRUCT audio_regs + audio_regs_init(0), + audio_regs_init(1), + audio_regs_init(2), + audio_regs_init(3), + audio_regs_init(4), + audio_regs_init(5), + audio_regs_init(6); + + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn31_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); + + if (!vpg31) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT vpg_regs + vpg_regs_init(0), + vpg_regs_init(1), + vpg_regs_init(2), + vpg_regs_init(3), + vpg_regs_init(4), + vpg_regs_init(5), + vpg_regs_init(6), + vpg_regs_init(7), + vpg_regs_init(8), + vpg_regs_init(9); + + vpg31_construct(vpg31, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg31->base; +} + +static struct afmt *dcn31_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); + + if (!afmt31) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT afmt_regs + afmt_regs_init(0), + afmt_regs_init(1), + afmt_regs_init(2), + afmt_regs_init(3), + afmt_regs_init(4), + afmt_regs_init(5); + + afmt31_construct(afmt31, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT apg_regs + apg_regs_init(0), + apg_regs_init(1), + apg_regs_init(2), + apg_regs_init(3); + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + +static struct stream_encoder *dcn35_stream_encoder_create( + enum engine_id eng_id, + struct 
dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + afmt = dcn31_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT stream_enc_regs + stream_enc_regs_init(0), + stream_enc_regs_init(1), + stream_enc_regs_init(2), + stream_enc_regs_init(3), + stream_enc_regs_init(4); + + dcn35_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT hpo_dp_stream_enc_regs + hpo_dp_stream_encoder_reg_init(0), + hpo_dp_stream_encoder_reg_init(1), + hpo_dp_stream_encoder_reg_init(2), + hpo_dp_stream_encoder_reg_init(3); + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ + +#undef REG_STRUCT +#define REG_STRUCT hpo_dp_link_enc_regs + hpo_dp_link_encoder_reg_init(0), + hpo_dp_link_encoder_reg_init(1); + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + +static struct dce_hwseq *dcn351_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + +#undef REG_STRUCT +#define REG_STRUCT hwseq_reg + hwseq_reg_init(); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + 
.read_dce_straps = read_dce_straps, + .create_audio = dcn31_create_audio, + .create_stream_encoder = dcn35_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn351_hwseq_create, +}; + +static void dcn351_resource_destruct(struct dcn351_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn35_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + 
for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + + if (pool->base.pg_cntl != NULL) + dcn_pg_cntl_destroy(&pool->base.pg_cntl); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn35_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubp_regs + hubp_regs_init(0), + hubp_regs_init(1), + hubp_regs_init(2), + hubp_regs_init(3); + + if (hubp35_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static void dcn35_dwbc_init(struct dcn30_dwbc *dwbc30, struct dc_context *ctx) +{ + dcn35_dwbc_set_fgcg( + dwbc30, ctx->dc->debug.enable_fine_grain_clock_gating.bits.dwb); +} + +static bool dcn35_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + +#undef REG_STRUCT +#define REG_STRUCT dwbc35_regs + dwbc_regs_dcn3_init(0); + + dcn35_dwbc_construct(dwbc30, ctx, + &dwbc35_regs[i], + &dwbc35_shift, + &dwbc35_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + + dcn35_dwbc_init(dwbc30, ctx); + } + return true; +} + +static void dcn35_mmhubbub_init(struct dcn30_mmhubbub *mcif_wb30, + struct dc_context *ctx) +{ + dcn35_mmhubbub_set_fgcg( + mcif_wb30, + ctx->dc->debug.enable_fine_grain_clock_gating.bits.mmhubbub); +} + +static bool dcn35_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + +#undef REG_STRUCT +#define REG_STRUCT mcif_wb35_regs + mcif_wb_regs_dcn3_init(0); + + dcn35_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb35_regs[i], + &mcif_wb35_shift, + &mcif_wb35_mask, + i); + + dcn35_mmhubbub_init(mcif_wb30, ctx); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn35_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct 
dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT dsc_regs + dsc_regsDCN35_init(0), + dsc_regsDCN35_init(1), + dsc_regsDCN35_init(2), + dsc_regsDCN35_init(3); + + dsc35_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + dsc35_set_fgcg(dsc, + ctx->dc->debug.enable_fine_grain_clock_gating.bits.dsc); + return &dsc->base; +} + +static void dcn351_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn351_resource_pool *dcn351_pool = TO_DCN351_RES_POOL(*pool); + + dcn351_resource_destruct(dcn351_pool); + kfree(dcn351_pool); + *pool = NULL; +} + +static struct clock_source *dcn35_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + BREAK_TO_DEBUGGER(); + return NULL; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ + *panel_config = panel_config_defaults; +} + + +static bool dcn351_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + out = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate); + + if (fast_validate) + return out; + + DC_FP_START(); + dcn35_decide_zstate_support(dc, context); + DC_FP_END(); + + return out; +} + +static struct resource_funcs dcn351_res_pool_funcs = { + .destroy = dcn351_destroy_resource_pool, + .link_enc_create = dcn35_link_encoder_create, + .link_enc_create_minimal = dcn31_link_enc_create_minimal, + .link_encs_assign = link_enc_cfg_link_encs_assign, + .link_enc_unassign = link_enc_cfg_link_enc_unassign, + .panel_cntl_create = dcn31_panel_cntl_create, + .validate_bandwidth = dcn351_validate_bandwidth, + .calculate_wm_and_dlg = NULL, + .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, + .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .get_panel_config_defaults = dcn35_get_panel_config_defaults, + .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia, +}; + +static bool dcn351_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn351_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data 
init_data; + +#undef REG_STRUCT +#define REG_STRUCT bios_regs + bios_regs_init(); + +#undef REG_STRUCT +#define REG_STRUCT clk_src_regs + clk_src_regs_init(0, A), + clk_src_regs_init(1, B), + clk_src_regs_init(2, C), + clk_src_regs_init(3, D), + clk_src_regs_init(4, E); + +#undef REG_STRUCT +#define REG_STRUCT abm_regs + abm_regs_init(0), + abm_regs_init(1), + abm_regs_init(2), + abm_regs_init(3); + +#undef REG_STRUCT +#define REG_STRUCT dccg_regs + dccg_regs_init(); +
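/*
 * Editor's sketch: the #undef/#define REG_STRUCT idiom above (and throughout
 * this file) works because each *_regs_init(id) macro expands, via the
 * *_REG_LIST_RI helpers, into runtime assignments of register offsets into
 * whatever array REG_STRUCT currently names; re-pointing REG_STRUCT before
 * each group lets one set of list macros fill many register tables. A
 * minimal, self-contained analogue, assuming a dcn32-style SR_ARR_INIT
 * helper (the foo_* names are hypothetical):
 */
#include <stdint.h>

struct foo_registers {
	uint32_t CTRL; /* register offset, filled in at runtime */
};

static struct foo_registers foo_regs[2];

/* store a computed offset into the array REG_STRUCT currently names */
#define SR_ARR_INIT(field, id, value) REG_STRUCT[id].field = (value)
#define FOO_REG_LIST_RI(id) SR_ARR_INIT(CTRL, id, 0x1000 + (id) * 0x4)
#define foo_regs_init(id) FOO_REG_LIST_RI(id)

static void foo_regs_init_all(void)
{
#undef REG_STRUCT
#define REG_STRUCT foo_regs
	foo_regs_init(0),
	foo_regs_init(1); /* comma-chained, exactly like the groups above */
}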
 + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dcn351; + + pool->base.funcs = &dcn351_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 100; + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + if (dc->config.forceHBR2CP2520) + dc->caps.force_dp_tps4_for_cp2520 = false; + dc->caps.dp_hpo = true; + dc->caps.dp_hdmi21_pcon_support = true; + + dc->caps.edp_dsc_support = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.is_apu = true; + dc->caps.seamless_odm = true; + + dc->caps.zstate_support = true; + dc->caps.ips_support = true; + dc->caps.max_v_total = (1 << 15) - 1; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 + // no OGAM ROM on DCN301 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; // 2 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order + * to provide some margin. + * It's expected for future ASICs to have an equal or higher value, in order to + * have deterministic power improvement from generation to generation + * (i.e., we should not expect a new ASIC generation with a lower vmin rate) + */ + dc->caps.max_disp_clock_khz_at_vmin = 650000; + + /* Use pipe context based otg sync logic */ + dc->config.use_pipe_ctx_sync_logic = true; + + /* Use psp mailbox to enable assr */ + dc->config.use_assr_psp_message = true; + + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + /* HW default is to have all FGCG enabled; SW does not need to program them */ + dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF; + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock sources for pixel clock */ + pool->base.clock_sources[DCN351_CLK_SRC_PLL0] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN351_CLK_SRC_PLL1] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN351_CLK_SRC_PLL2] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN351_CLK_SRC_PLL3] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN351_CLK_SRC_PLL4] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN351_CLK_SRC_TOTAL; + + /* TODO: do not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + /* temporary until dml2 fully works without dml1 */ + dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31); + + /* TODO: DCCG */ + pool->base.dccg = dccg35_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + +#undef REG_STRUCT +#define REG_STRUCT pg_cntl_regs + pg_cntl_dcn35_regs_init(); + + pool->base.pg_cntl = pg_cntl35_create(ctx, &pg_cntl_regs, &pg_cntl_shift, &pg_cntl_mask); + if (pool->base.pg_cntl == NULL) { + dm_error("DC: failed to create power gate control!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* TODO: IRQ */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn351_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn35_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = 
dcn35_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn35_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn35_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn35_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->base.timing_generator_count = i; + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = dmub_abm_create(ctx, + &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->base.mpc = dcn35_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn35_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn35_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn35_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* DCN3.5 has 6 DPIA */ + pool->base.usb4_dpia_count = 4; + if (dc->debug.dpia_debug.bits.disable_dpia) + pool->base.usb4_dpia_count = 0; + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn351_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + + dc->dcn_ip->max_num_dpp = 
pool->base.pipe_count; + + dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; + dc->dml2_options.use_native_pstate_optimization = true; + dc->dml2_options.use_native_soc_bb_construction = true; + dc->dml2_options.minimize_dispclk_using_odm = false; + if (dc->config.EnableMinDispClkODM) + dc->dml2_options.minimize_dispclk_using_odm = true; + dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; + + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); + dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; + + dc->dml2_options.max_segments_per_hubp = 24; + dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + + if (dc->config.sdpif_request_limit_words_per_umc == 0) + dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ + + return true; + +create_fail: + + dcn351_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn351_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn351_resource_pool *pool = + kzalloc(sizeof(struct dcn351_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn351_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h new file mode 100644 index 000000000000..f3e045777a3d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. */ + +#ifndef _DCN351_RESOURCE_H_ +#define _DCN351_RESOURCE_H_ + +#include "core_types.h" + +extern struct _vcs_dpi_ip_params_st dcn3_51_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc; + +#define TO_DCN351_RES_POOL(pool)\ + container_of(pool, struct dcn351_resource_pool, base) + +struct dcn351_resource_pool { + struct resource_pool base; +}; + +struct resource_pool *dcn351_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* _DCN351_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 9665ada0f894..2fde1f043d50 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -71,6 +71,8 @@ extern "C" { #endif +#define DMUB_PC_SNAPSHOT_COUNT 10 + /* Forward declarations */ struct dmub_srv; struct dmub_srv_common_regs; @@ -78,6 +80,12 @@ struct dmub_srv_dcn31_regs; struct dmcub_trace_buf_entry; +/* enum dmub_window_memory_type - memory location type specification for windows */ +enum dmub_window_memory_type { + DMUB_WINDOW_MEMORY_TYPE_FB = 0, + DMUB_WINDOW_MEMORY_TYPE_GART +}; + /* enum dmub_status - return code for dmcub functions */ enum dmub_status { DMUB_STATUS_OK = 0, @@ -86,6 +94,7 @@ enum dmub_status { DMUB_STATUS_TIMEOUT, DMUB_STATUS_INVALID, DMUB_STATUS_HW_FAILURE, + DMUB_STATUS_POWER_STATE_D3 }; /* enum dmub_asic - dmub asic identifier */ @@ -105,6 +114,7 @@ enum dmub_asic { DMUB_ASIC_DCN32, DMUB_ASIC_DCN321, DMUB_ASIC_DCN35, + DMUB_ASIC_DCN351, DMUB_ASIC_MAX, }; @@ -118,6 +128,7 @@ enum dmub_window_id { DMUB_WINDOW_5_TRACEBUFF, DMUB_WINDOW_6_FW_STATE, DMUB_WINDOW_7_SCRATCH_MEM, + DMUB_WINDOW_SHARED_STATE, DMUB_WINDOW_TOTAL, }; @@ -150,6 +161,13 @@ enum dmub_memory_access_type { DMUB_MEMORY_ACCESS_DMA 
}; +/* enum dmub_srv_power_state_type - to track DC power state in dmub_srv */ +enum dmub_srv_power_state_type { + DMUB_POWER_STATE_UNDEFINED = 0, + DMUB_POWER_STATE_D0 = 1, + DMUB_POWER_STATE_D3 = 8 +}; + /** * struct dmub_region - dmub hw memory region * @base: base address for region, must be 256 byte aligned * @top: top address for region @@ -195,6 +213,7 @@ struct dmub_srv_region_params { uint32_t vbios_size; const uint8_t *fw_inst_const; const uint8_t *fw_bss_data; + const enum dmub_window_memory_type *window_memory_type; }; /** @@ -214,20 +233,26 @@ struct dmub_srv_region_params { */ struct dmub_srv_region_info { uint32_t fb_size; + uint32_t gart_size; uint8_t num_regions; struct dmub_region regions[DMUB_WINDOW_TOTAL]; }; /** - * struct dmub_srv_fb_params - parameters used for driver fb setup + * struct dmub_srv_memory_params - parameters used for driver fb setup * @region_info: region info calculated by dmub service - * @cpu_addr: base cpu address for the framebuffer - * @gpu_addr: base gpu virtual address for the framebuffer + * @cpu_fb_addr: base cpu address for the framebuffer + * @cpu_gart_addr: base cpu address for the gart + * @gpu_fb_addr: base gpu virtual address for the framebuffer + * @gpu_gart_addr: base gpu virtual address for the gart */ -struct dmub_srv_fb_params { +struct dmub_srv_memory_params { const struct dmub_srv_region_info *region_info; - void *cpu_addr; - uint64_t gpu_addr; + void *cpu_fb_addr; + void *cpu_gart_addr; + uint64_t gpu_fb_addr; + uint64_t gpu_gart_addr; + const enum dmub_window_memory_type *window_memory_type; }; /** @@ -272,18 +297,30 @@ struct dmub_srv_hw_params { bool dpia_hpd_int_enable_supported; bool disable_clock_gate; bool disallow_dispclk_dppclk_ds; + bool ips_sequential_ono; enum dmub_memory_access_type mem_access_type; enum dmub_ips_disable_type disable_ips; }; /** + * struct dmub_srv_debug - Debug info for dmub_srv + * @timeout_occured: Indicates a timeout occurred on any message from driver to dmub + * @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored + * @timestamp: timestamp recorded when the first timeout occurred + */ +struct dmub_srv_debug { + bool timeout_occured; + union dmub_rb_cmd timeout_cmd; + unsigned long long timestamp; +}; + +/** + * struct dmub_diagnostic_data - Diagnostic data retrieved from DMCUB for + * debugging purposes, including logging, crash analysis, etc. 
*/ struct dmub_diagnostic_data { uint32_t dmcub_version; uint32_t scratch[17]; - uint32_t pc; + uint32_t pc[DMUB_PC_SNAPSHOT_COUNT]; uint32_t undefined_address_fault_addr; uint32_t inst_fetch_fault_addr; uint32_t data_write_fault_addr; @@ -294,6 +331,7 @@ struct dmub_diagnostic_data { uint32_t inbox0_wptr; uint32_t inbox0_size; uint32_t gpint_datain0; + struct dmub_srv_debug timeout_info; uint8_t is_dmcub_enabled : 1; uint8_t is_dmcub_soft_reset : 1; uint8_t is_dmcub_secure_reset : 1; @@ -347,7 +385,8 @@ struct dmub_srv_hw_funcs { const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6); + const struct dmub_window *cw6, + const struct dmub_window *region6); void (*setup_mailbox)(struct dmub_srv *dmub, const struct dmub_region *inbox1); @@ -429,7 +468,6 @@ struct dmub_srv_create_params { struct dmub_srv_base_funcs funcs; struct dmub_srv_hw_funcs *hw_funcs; void *user_ctx; - struct dc_context *dc_ctx; enum dmub_asic asic; uint32_t fw_version; bool is_virtual; @@ -441,6 +479,7 @@ struct dmub_srv_create_params { * @user_ctx: user provided context for the dmub_srv * @fw_version: the current firmware version, if any * @is_virtual: false if hardware support only + * @shared_state: dmub shared state between firmware and driver * @fw_state: dmub firmware state pointer */ struct dmub_srv { @@ -449,6 +488,7 @@ struct dmub_srv { uint32_t fw_version; bool is_virtual; struct dmub_fb scratch_mem_fb; + volatile struct dmub_shared_state_feature_block *shared_state; volatile const struct dmub_fw_state *fw_state; /* private: internal use only */ @@ -479,6 +519,9 @@ struct dmub_srv { /* Feature capabilities reported by fw */ struct dmub_feature_caps feature_caps; struct dmub_visual_confirm_color visual_confirm_color; + + enum dmub_srv_power_state_type power_state; + struct dmub_srv_debug debug; }; /** @@ -563,8 +606,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, * DMUB_STATUS_OK - success * DMUB_STATUS_INVALID - unspecified error */ -enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, - const struct dmub_srv_fb_params *params, +enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub, + const struct dmub_srv_memory_params *params, struct dmub_srv_fb_info *out); /** @@ -883,6 +926,18 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub); */ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index); +/** + * dmub_srv_set_power_state() - Track DC power state in dmub_srv + * @dmub: The dmub service + * @power_state: DC power state setting + * + * Store DC power state in dmub_srv. 
If dmub_srv is in D3, then don't send messages to DMUB + * + * Return: + * void + */ +void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state); + #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index bc907ae2052d..e85fd3ac52c7 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -26,15 +26,6 @@ #ifndef DMUB_CMD_H #define DMUB_CMD_H -#if defined(_TEST_HARNESS) || defined(FPGA_USB4) -#include "dmub_fw_types.h" -#include "include_legacy/atomfirmware.h" - -#if defined(_TEST_HARNESS) -#include <string.h> -#endif -#else - #include <asm/byteorder.h> #include <linux/types.h> #include <linux/string.h> @@ -42,8 +33,6 @@ #include "atomfirmware.h" -#endif // defined(_TEST_HARNESS) || defined(FPGA_USB4) - //<DMUB_TYPES>================================================================== /* Basic type definitions. */ @@ -108,6 +97,9 @@ /* Maximum number of planes on any ASIC. */ #define DMUB_MAX_PLANES 6 +/* Maximum number of phantom planes on any ASIC */ +#define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2) + /* Trace buffer offset for entry */ #define TRACE_BUFFER_ENTRY_OFFSET 16 @@ -185,8 +177,7 @@ union abm_flags { unsigned int disable_abm_requested : 1; /** - * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled - * immediately. + * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled immediately. */ unsigned int disable_abm_immediately : 1; @@ -206,6 +197,11 @@ union abm_flags { * of user backlight level. */ unsigned int abm_gradual_bl_change : 1; + + /** + * @abm_new_frame: Indicates if a new frame update is needed for ABM to ramp up into steady state + */ + unsigned int abm_new_frame : 1; } bitfields; unsigned int u32All; @@ -404,15 +400,16 @@ union replay_debug_flags { /** * 0x400 (bit 10) - * @force_disable_ips1: Force disable IPS1 state + * @enable_ips_visual_confirm: Enable IPS visual confirm when entering IPS + * If we enter IPS2, the Visual confirm bar will change to yellow */ - uint32_t force_disable_ips1 : 1; + uint32_t enable_ips_visual_confirm : 1; /** * 0x800 (bit 11) - * @force_disable_ips2: Force disable IPS2 state + * @enable_ips_residency_profiling: Enable IPS residency profiling */ - uint32_t force_disable_ips2 : 1; + uint32_t enable_ips_residency_profiling : 1; uint32_t reserved : 20; } bitfields; @@ -472,7 +469,7 @@ struct dmub_feature_caps { * Max PSR version supported by FW. 
*/ uint8_t psr; - uint8_t fw_assisted_mclk_switch; + uint8_t fw_assisted_mclk_switch_ver; uint8_t reserved[4]; uint8_t subvp_psr_support; uint8_t gecc_enable; @@ -519,6 +516,8 @@ struct dmub_visual_confirm_color { * @trace_buffer_size: size of the tracebuffer region * @fw_version: the firmware version information * @dal_fw: 1 if the firmware is DAL + * @shared_state_size: size of the shared state region in bytes + * @shared_state_features: number of shared state features */ struct dmub_fw_meta_info { uint32_t magic_value; /**< magic value identifying DMUB firmware meta info */ @@ -527,6 +526,9 @@ struct dmub_fw_meta_info { uint32_t fw_version; /**< the firmware version information */ uint8_t dal_fw; /**< 1 if the firmware is DAL */ uint8_t reserved[3]; /**< padding bits */ + uint32_t shared_state_size; /**< size of the shared state region in bytes */ + uint16_t shared_state_features; /**< number of shared state features */ + uint16_t reserved2; /**< padding bytes */ }; /** @@ -583,6 +585,7 @@ union dmub_fw_boot_status { uint32_t fams_enabled : 1; /**< 1 if VBIOS data is deferred programmed */ uint32_t detection_required: 1; /**< if detection need to be triggered by driver */ uint32_t hw_power_init_done: 1; /**< 1 if hw power init is completed */ + uint32_t ono_regions_enabled: 1; /**< 1 if ONO regions are enabled */ } bits; /**< status bits */ uint32_t all; /**< 32-bit access to status bits */ }; @@ -599,6 +602,7 @@ enum dmub_fw_boot_status_bit { DMUB_FW_BOOT_STATUS_BIT_FAMS_ENABLED = (1 << 5), /**< 1 if FAMS is enabled*/ DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection need to be triggered by driver*/ DMUB_FW_BOOT_STATUS_BIT_HW_POWER_INIT_DONE = (1 << 7), /**< 1 if hw power init is completed */ + DMUB_FW_BOOT_STATUS_BIT_ONO_REGIONS_ENABLED = (1 << 8), /**< 1 if ONO regions are enabled */ }; /* Register bit definition for SCRATCH5 */ @@ -617,9 +621,13 @@ enum dmub_lvtma_status_bit { }; enum dmub_ips_disable_type { - DMUB_IPS_DISABLE_IPS1 = 1, - DMUB_IPS_DISABLE_IPS2 = 2, - DMUB_IPS_DISABLE_IPS2_Z10 = 3, + DMUB_IPS_ENABLE = 0, + DMUB_IPS_DISABLE_ALL = 1, + DMUB_IPS_DISABLE_IPS1 = 2, + DMUB_IPS_DISABLE_IPS2 = 3, + DMUB_IPS_DISABLE_IPS2_Z10 = 4, + DMUB_IPS_DISABLE_DYNAMIC = 5, + DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6, }; #define DMUB_IPS1_ALLOW_MASK 0x00000001 @@ -649,12 +657,13 @@ union dmub_fw_boot_options { uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/ uint32_t usb4_cm_version: 1; /**< 1 CM support */ uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */ - uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */ + uint32_t reserved0: 1; uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/ uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/ - uint32_t ips_disable: 2; /* options to disable ips support*/ - uint32_t reserved : 10; /**< reserved */ + uint32_t ips_disable: 3; /* options to disable ips support*/ + uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */ + uint32_t reserved : 9; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -666,6 +675,123 @@ enum dmub_fw_boot_options_bit { }; //============================================================================== +//< DMUB_SHARED_STATE>========================================================== 
+//============================================================================== + +/** + * Shared firmware state between driver and firmware for lockless communication + * in situations where the inbox/outbox may be unavailable. + * + * Each structure *must* be at most 256-bytes in size. The layout allocation is + * described below: + * + * [Header (256 Bytes)][Feature 1 (256 Bytes)][Feature 2 (256 Bytes)]... + */ + +/** + * enum dmub_shared_state_feature_id - List of shared state features. + */ +enum dmub_shared_state_feature_id { + DMUB_SHARED_SHARE_FEATURE__INVALID = 0, + DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1, + DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2, + DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */ +}; + +/** + * union dmub_shared_state_ips_fw_signals - Firmware signals for IPS. + */ +union dmub_shared_state_ips_fw_signals { + struct { + uint32_t ips1_commit : 1; /**< 1 if in IPS1 */ + uint32_t ips2_commit : 1; /**< 1 if in IPS2 */ + uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */ + uint32_t reserved_bits : 29; /**< Reserved */ + } bits; + uint32_t all; +}; + +/** + * union dmub_shared_state_ips_driver_signals - Driver signals for IPS. + */ +union dmub_shared_state_ips_driver_signals { + struct { + uint32_t allow_pg : 1; /**< 1 if PG is allowed */ + uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */ + uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */ + uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */ + uint32_t reserved_bits : 28; /**< Reserved bits */ + } bits; + uint32_t all; +}; + +/** + * IPS FW Version + */ +#define DMUB_SHARED_STATE__IPS_FW_VERSION 1 + +/** + * struct dmub_shared_state_ips_fw - Firmware state for IPS. + */ +struct dmub_shared_state_ips_fw { + union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */ + uint32_t rcg_entry_count; /**< Entry counter for RCG */ + uint32_t rcg_exit_count; /**< Exit counter for RCG */ + uint32_t ips1_entry_count; /**< Entry counter for IPS1 */ + uint32_t ips1_exit_count; /**< Exit counter for IPS1 */ + uint32_t ips2_entry_count; /**< Entry counter for IPS2 */ + uint32_t ips2_exit_count; /**< Exit counter for IPS2 */ + uint32_t reserved[55]; /**< Reserved, to be updated when adding new fields. */ +}; /* 248-bytes, fixed */ + +/** + * IPS Driver Version + */ +#define DMUB_SHARED_STATE__IPS_DRIVER_VERSION 1 + +/** + * struct dmub_shared_state_ips_driver - Driver state for IPS. + */ +struct dmub_shared_state_ips_driver { + union dmub_shared_state_ips_driver_signals signals; /**< 4 bytes, IPS signal bits */ + uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */ +}; /* 248-bytes, fixed */ + +/** + * struct dmub_shared_state_feature_common - Generic payload. + */ +struct dmub_shared_state_feature_common { + uint32_t padding[62]; +}; /* 248-bytes, fixed */ + +/** + * struct dmub_shared_state_feature_header - Feature description. + */ +struct dmub_shared_state_feature_header { + uint16_t id; /**< Feature ID */ + uint16_t version; /**< Feature version */ + uint32_t reserved; /**< Reserved bytes. */ +}; /* 8 bytes, fixed */ + +/** + * struct dmub_shared_state_feature_block - Feature block. + */ +struct dmub_shared_state_feature_block { + struct dmub_shared_state_feature_header header; /**< Shared state header. */ + union dmub_shared_feature_state_union { + struct dmub_shared_state_feature_common common; /**< Generic data */ + struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */ + struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */ + } data; /**< Shared state data. */ +}; /* 256-bytes, fixed */
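/*
 * Editor's sketch: given the fixed [header][256-byte feature block] layout
 * above, a feature block can be located by indexing dmub->shared_state (the
 * volatile struct dmub_shared_state_feature_block pointer in struct dmub_srv)
 * by its feature id, then validating the self-describing header. The helper
 * name is hypothetical:
 */
static volatile struct dmub_shared_state_ips_fw *
dmub_get_ips_fw_block(struct dmub_srv *dmub)
{
	volatile struct dmub_shared_state_feature_block *blk;

	if (!dmub->shared_state)
		return NULL;

	blk = &dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW];

	/* reject stale or mismatched firmware layouts */
	if (blk->header.id != DMUB_SHARED_SHARE_FEATURE__IPS_FW ||
	    blk->header.version != DMUB_SHARED_STATE__IPS_FW_VERSION)
		return NULL;

	return &blk->data.ips_fw;
}
/* Signals such as ips1_commit can then be polled locklessly by the driver. */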
 + +/** + * Shared state size in bytes. + */ +#define DMUB_FW_HEADER_SHARED_STATE_SIZE \ + ((DMUB_SHARED_STATE_FEATURE__LAST + 1) * sizeof(struct dmub_shared_state_feature_block)) + +//============================================================================== //</DMUB_STATUS>================================================================ //============================================================================== //< DMUB_VBIOS>================================================================= @@ -703,6 +829,10 @@ enum dmub_cmd_vbios_type { */ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26, /** + * Control PHY FSM + */ + DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM = 29, + /** * Controls domain power gating */ DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28, @@ -813,18 +943,61 @@ enum dmub_gpint_command { * RETURN: Lower 32-bit mask. */ DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK = 101, + /** - * DESC: Updates the trace buffer lower 32-bit mask. + * DESC: Updates the trace buffer mask bit0~bit15. * ARGS: The new mask * RETURN: Lower 32-bit mask. */ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0 = 102, + /** - * DESC: Updates the trace buffer mask bi0~bit15. + * DESC: Updates the trace buffer mask bit16~bit31. * ARGS: The new mask * RETURN: Lower 32-bit mask. */ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1 = 103, + + /** + * DESC: Updates the trace buffer mask bit32~bit47. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2 = 114, + + /** + * DESC: Updates the trace buffer mask bit48~bit63. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3 = 115, + + /** + * DESC: Read the trace buffer mask bit0~bit15. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0 = 116, + + /** + * DESC: Read the trace buffer mask bit16~bit31. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD1 = 117, + + /** + * DESC: Read the trace buffer mask bit32~bit47. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD2 = 118, + + /** + * DESC: Read the trace buffer mask bit48~bit63. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD3 = 119, + + /** + * DESC: Enable measurements for various task durations + * ARGS: 0 - Disable measurement + * 1 - Enable measurement + */ + DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123, };
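/*
 * Editor's sketch: the SET/GET_TRACE_BUFFER_MASK_WORD0..3 commands above
 * move a 64-bit trace mask 16 bits at a time. Splitting a mask into the
 * four parameter words would look like this (the helper name is
 * hypothetical; the actual GPINT transport is driver-side):
 */
static void trace_mask_to_words(uint64_t mask, uint16_t words[4])
{
	int i;

	/* words[0..3] pair with ..._TRACE_BUFFER_MASK_WORD0..WORD3 */
	for (i = 0; i < 4; i++)
		words[i] = (uint16_t)(mask >> (16 * i));
}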
 /** @@ -1034,6 +1207,11 @@ enum dmub_cmd_type { */ DMUB_CMD__DPIA_HPD_INT_ENABLE = 86, + + /** + * Command type used for all PSP commands. + */ + DMUB_CMD__PSP = 88, + DMUB_CMD__VBIOS = 128, }; @@ -1223,11 +1401,11 @@ struct dmub_cmd_PLAT_54186_wa { uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; /**< reg value */ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; /**< reg value */ struct { - uint8_t hubp_inst : 4; /**< HUBP instance */ - uint8_t tmz_surface : 1; /**< TMZ enable or disable */ - uint8_t immediate :1; /**< Immediate flip */ - uint8_t vmid : 4; /**< VMID */ - uint8_t grph_stereo : 1; /**< 1 if stereo */ + uint32_t hubp_inst : 4; /**< HUBP instance */ + uint32_t tmz_surface : 1; /**< TMZ enable or disable */ + uint32_t immediate :1; /**< Immediate flip */ + uint32_t vmid : 4; /**< VMID */ + uint32_t grph_stereo : 1; /**< 1 if stereo */ uint32_t reserved : 21; /**< Reserved */ } flip_params; /**< Pageflip parameters */ uint32_t reserved[9]; /**< Reserved bits */ @@ -1298,6 +1476,10 @@ enum dmub_cmd_cab_type { * Fit surfaces in CAB (i.e. CAB enable) */ DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB = 2, + /** + * Do not fit surfaces in CAB (i.e. no CAB) + */ + DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB = 3, }; /** @@ -1432,7 +1614,7 @@ struct dmub_rb_cmd_idle_opt_dcn_restore { */ struct dmub_dcn_notify_idle_cntl_data { uint8_t driver_idle; - uint8_t pad[1]; + uint8_t reserved[59]; }; /** @@ -2098,7 +2280,7 @@ enum psr_version { /** * PSR not supported. */ - PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF, + PSR_VERSION_UNSUPPORTED = 0xFF, // psr_version field is only 8 bits wide }; /** @@ -2153,6 +2335,11 @@ enum phy_link_rate { * UHBR10 - 20.0 Gbps/Lane */ PHY_RATE_2000 = 11, + + /** + * Rate 12 - 6.75 Gbps/Lane + */ + PHY_RATE_675 = 12, }; /** @@ -2171,6 +2358,7 @@ enum dmub_phy_fsm_state { DMUB_PHY_FSM_POWER_DOWN, DMUB_PHY_FSM_PLL_EN, DMUB_PHY_FSM_TX_EN, + DMUB_PHY_FSM_TX_EN_TEST_MODE, DMUB_PHY_FSM_FAST_LP, DMUB_PHY_FSM_P2_PLL_OFF_CPM, DMUB_PHY_FSM_P2_PLL_OFF_PG, @@ -2775,17 +2963,49 @@ struct dmub_rb_cmd_psr_set_power_opt { struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data; }; +/** + * Definition of Replay Residency GPINT command. 
+ * Bit[0] - Residency mode for Revision 0 + * Bit[1] - Enable/Disable state + * Bit[2-3] - Revision number + * Bit[4-7] - Residency mode for Revision 1 + * Bit[8] - Panel instance + * Bit[9-15] - Reserved + */ + +enum pr_residency_mode { + PR_RESIDENCY_MODE_PHY = 0x0, + PR_RESIDENCY_MODE_ALPM, + PR_RESIDENCY_MODE_IPS2, + PR_RESIDENCY_MODE_FRAME_CNT, + PR_RESIDENCY_MODE_ENABLEMENT_PERIOD, +}; + #define REPLAY_RESIDENCY_MODE_SHIFT (0) #define REPLAY_RESIDENCY_ENABLE_SHIFT (1) +#define REPLAY_RESIDENCY_REVISION_SHIFT (2) +#define REPLAY_RESIDENCY_MODE2_SHIFT (4) #define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) -# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) -# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) + +#define REPLAY_RESIDENCY_MODE2_MASK (0xF << REPLAY_RESIDENCY_MODE2_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE2_IPS (0x1 << REPLAY_RESIDENCY_MODE2_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE2_FRAME_CNT (0x2 << REPLAY_RESIDENCY_MODE2_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE2_EN_PERIOD (0x3 << REPLAY_RESIDENCY_MODE2_SHIFT) #define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) # define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT) # define REPLAY_RESIDENCY_ENABLE (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) +#define REPLAY_RESIDENCY_REVISION_MASK (0x3 << REPLAY_RESIDENCY_REVISION_SHIFT) +# define REPLAY_RESIDENCY_REVISION_0 (0x0 << REPLAY_RESIDENCY_REVISION_SHIFT) +# define REPLAY_RESIDENCY_REVISION_1 (0x1 << REPLAY_RESIDENCY_REVISION_SHIFT) +
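/*
 * Editor's sketch: packing the Replay residency GPINT parameter from the
 * bit layout documented above. No define is provided here for the Bit[8]
 * panel instance, so it is written out literally; the helper name is
 * hypothetical:
 */
static uint16_t replay_residency_param(uint8_t panel_inst, bool enable)
{
	uint16_t param = 0;

	param |= REPLAY_RESIDENCY_REVISION_1;        /* Bit[2-3]: revision   */
	param |= REPLAY_RESIDENCY_FIELD_MODE2_IPS;   /* Bit[4-7]: IPS2 mode  */
	param |= enable ? REPLAY_RESIDENCY_ENABLE :
			  REPLAY_RESIDENCY_DISABLE;  /* Bit[1]: enable       */
	param |= (uint16_t)(panel_inst & 0x1) << 8;  /* Bit[8]: panel inst   */

	return param;
}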
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use.
+ * Currently only 0 or 1 is supported.
+ */
+ uint8_t panel_inst;
+ /**
+ * Set to force the adaptive sync SDP to disabled.
+ */
+ uint8_t force_disabled;
+
+ uint8_t pad[2];
+};
+
+/**
 * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
 */
struct dmub_rb_cmd_replay_set_power_opt {
@@ -3029,6 +3320,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
 * Currently the support is only for 0 or 1
 */
 uint8_t panel_inst;
+ /**
+ * 16-bit value dictated by the driver that carries the high part of the coasting vtotal.
+ */
+ uint16_t coasting_vtotal_high;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};

/**
@@ -3064,6 +3363,110 @@ struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal {
};

/**
+ * Definition of a DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command.
+ */
+struct dmub_rb_cmd_replay_set_timing_sync {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command data.
+ */
+ struct dmub_cmd_replay_set_timing_sync_data replay_set_timing_sync_data;
+};
+
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_rb_cmd_replay_set_pseudo_vtotal {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
+ */
+ struct dmub_cmd_replay_set_pseudo_vtotal data;
+};
+
+/**
+ * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command.
+ */
+struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data.
+ */
+ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data data;
+};
+
+/**
+ * Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
+ */
+struct dmub_cmd_replay_frameupdate_timer_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use.
+ * Currently only 0 or 1 is supported.
+ */
+ uint8_t panel_inst;
+ /**
+ * Enable or disable the Replay frame update timer.
+ */
+ uint8_t enable;
+ /**
+ * Frame update count that forces a REPLAY refresh.
+ */
+ uint16_t frameupdate_count;
+};
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
+ */
+struct dmub_rb_cmd_replay_set_frameupdate_timer {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data.
+ */
+ struct dmub_cmd_replay_frameupdate_timer_data data;
+};
+
+/**
+ * Union of the Replay command data payloads.
+ */
+union dmub_replay_cmd_set {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use.
+ * Currently only 0 or 1 is supported.
+ */
+ uint8_t panel_inst;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command data.
+ */
+ struct dmub_cmd_replay_set_timing_sync_data sync_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data.
+ */
+ struct dmub_cmd_replay_frameupdate_timer_data timer_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
+ */
+ struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data.
+ */
+ struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
+
+};
+
+/**
 * Set of HW components that can be locked.
* * Note: If updating with more HW components, fields @@ -3143,7 +3546,7 @@ enum hw_lock_client { /** * Replay is the client of HW Lock Manager. */ - HW_LOCK_CLIENT_REPLAY = 4, + HW_LOCK_CLIENT_REPLAY = 4, /** * Invalid client. */ @@ -3352,6 +3755,16 @@ struct dmub_cmd_abm_set_pipe_data { * TODO: Remove. */ uint8_t ramping_boundary; + + /** + * PwrSeq HW Instance. + */ + uint8_t pwrseq_inst; + + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[3]; }; /** @@ -3620,7 +4033,6 @@ struct dmub_cmd_abm_pause_data { uint8_t pad[1]; }; - /** * Definition of a DMUB_CMD__ABM_PAUSE command. */ @@ -3727,13 +4139,17 @@ enum dmub_cmd_panel_cntl_type { * Queries backlight info for the embedded panel. */ DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO = 1, + /** + * Sets the PWM Freq as per user's requirement. + */ + DMUB_CMD__PANEL_DEBUG_PWM_FREQ = 2, }; /** * struct dmub_cmd_panel_cntl_data - Panel control data. */ struct dmub_cmd_panel_cntl_data { - uint32_t inst; /**< panel instance */ + uint32_t pwrseq_inst; /**< pwrseq instance */ uint32_t current_backlight; /* in/out */ uint32_t bl_pwm_cntl; /* in/out */ uint32_t bl_pwm_period_cntl; /* in/out */ @@ -3792,7 +4208,7 @@ struct dmub_cmd_lvtma_control_data { uint8_t uc_pwr_action; /**< LVTMA_ACTION */ uint8_t bypass_panel_control_wait; uint8_t reserved_0[2]; /**< For future use */ - uint8_t panel_inst; /**< LVTMA control instance */ + uint8_t pwrseq_inst; /**< LVTMA control instance */ uint8_t reserved_1[3]; /**< For future use */ }; @@ -3828,6 +4244,34 @@ struct dmub_rb_cmd_transmitter_query_dp_alt { struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */ }; +struct phy_test_mode { + uint8_t mode; + uint8_t pat0; + uint8_t pad[2]; +}; + +/** + * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command. + */ +struct dmub_rb_cmd_transmitter_set_phy_fsm_data { + uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */ + uint8_t mode; /**< HDMI/DP/DP2 etc */ + uint8_t lane_num; /**< Number of lanes */ + uint32_t symclk_100Hz; /**< PLL symclock in 100hz */ + struct phy_test_mode test_mode; + enum dmub_phy_fsm_state state; + uint32_t status; + uint8_t pad; +}; + +/** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command. + */ +struct dmub_rb_cmd_transmitter_set_phy_fsm { + struct dmub_cmd_header header; /**< header */ + struct dmub_rb_cmd_transmitter_set_phy_fsm_data data; /**< payload */ +}; + /** * Maximum number of bytes a chunk sent to DMUB for parsing */ @@ -3950,6 +4394,65 @@ struct dmub_rb_cmd_secure_display { }; /** + * Command type of a DMUB_CMD__PSP command + */ +enum dmub_cmd_psp_type { + DMUB_CMD__PSP_ASSR_ENABLE = 0 +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__PSP_ASSR_ENABLE command. + */ +struct dmub_cmd_assr_enable_data { + /** + * ASSR enable or disable. + */ + uint8_t enable; + /** + * PHY port type. + * Indicates eDP / non-eDP port type + */ + uint8_t phy_port_type; + /** + * PHY port ID. + */ + uint8_t phy_port_id; + /** + * Link encoder index. + */ + uint8_t link_enc_index; + /** + * HPO mode. + */ + uint8_t hpo_mode; + + /** + * Reserved field. + */ + uint8_t reserved[7]; +}; + +/** + * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. + */ +struct dmub_rb_cmd_assr_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + /** + * Assr data. + */ + struct dmub_cmd_assr_enable_data assr_data; + + /** + * Reserved field. + */ + uint32_t reserved[3]; +}; + +/** * union dmub_rb_cmd - DMUB inbox command. 
*/ union dmub_rb_cmd { @@ -4046,6 +4549,7 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__MALL command. */ struct dmub_rb_cmd_mall mall; + /** * Definition of a DMUB_CMD__CAB command. */ @@ -4067,6 +4571,7 @@ union dmub_rb_cmd { * Definition of DMUB_CMD__PANEL_CNTL commands. */ struct dmub_rb_cmd_panel_cntl panel_cntl; + /** * Definition of a DMUB_CMD__ABM_SET_PIPE command. */ @@ -4138,6 +4643,10 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt; /** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command. + */ + struct dmub_rb_cmd_transmitter_set_phy_fsm set_phy_fsm; + /** * Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command. */ struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control; @@ -4195,6 +4704,25 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL command. */ struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal replay_set_power_opt_and_coasting_vtotal; + + struct dmub_rb_cmd_replay_set_timing_sync replay_set_timing_sync; + /** + * Definition of a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command. + */ + struct dmub_rb_cmd_replay_set_frameupdate_timer replay_set_frameupdate_timer; + /** + * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. + */ + struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal; + /** + * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ + struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp; + /** + * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. + */ + struct dmub_rb_cmd_assr_enable assr_enable; + }; /** @@ -4470,10 +4998,6 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr); uint8_t i; - /* Don't remove this. - * The contents need to actually be read from the ring buffer - * for this function to be effective. 
- */ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) (void)READ_ONCE(*data++); @@ -4522,5 +5046,4 @@ static inline void dmub_rb_get_return_data(struct dmub_rb *rb, //============================================================================== //</DMUB_RB>==================================================================== //============================================================================== - #endif /* _DMUB_CMD_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile index 08aaf84affaf..50a98448e2e8 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/Makefile +++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile @@ -25,6 +25,7 @@ DMUB += dmub_dcn30.o dmub_dcn301.o dmub_dcn302.o dmub_dcn303.o DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o DMUB += dmub_dcn32.o DMUB += dmub_dcn35.o +DMUB += dmub_dcn351.o AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB)) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index 98dad0d47e72..e500ca9ae09c 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -191,7 +191,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6) + const struct dmub_window *cw6, + const struct dmub_window *region6) { union dmub_addr offset; uint64_t fb_base, fb_offset; @@ -471,4 +472,5 @@ void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; + diag_data->timeout_info = dmub->debug; } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index 1df128e57ed3..de287b101848 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -197,7 +197,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6); + const struct dmub_window *cw6, + const struct dmub_window *region6); void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c index 81dae75e9ff8..a4abe951c838 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c @@ -124,7 +124,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6) + const struct dmub_window *cw6, + const struct dmub_window *region6) { union dmub_addr offset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h index 9a3afffd9b0f..066f35a50094 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h @@ -43,7 +43,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6); + const struct dmub_window *cw6, + const struct dmub_window *region6); 
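/*
 * Editor's sketch (not part of this patch): each per-ASIC setup_windows()
 * implementation gains a trailing region6 parameter. The caller side, as
 * wired up in dmub_srv_hw_init() later in this patch, looks roughly like:
 *
 *	struct dmub_window region6;
 *
 *	region6.offset.quad_part = shared_state_fb->gpu_addr;
 *	region6.region.base = DMUB_CW6_BASE;
 *	region6.region.top = region6.region.base + shared_state_fb->size;
 *
 *	if (dmub->hw_funcs.setup_windows)
 *		dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5,
 *					     &cw6, &region6);
 */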
#endif /* _DMUB_DCN30_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 094e9f864557..662c34e9495c 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -187,7 +187,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6) + const struct dmub_window *cw6, + const struct dmub_window *region6) { union dmub_addr offset; @@ -465,6 +466,7 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; + diag_data->timeout_info = dmub->debug; } bool dmub_dcn31_should_detect(struct dmub_srv *dmub) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 4d520a893c7b..eccdab4986ce 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -199,7 +199,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6); + const struct dmub_window *cw6, + const struct dmub_window *region6); void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index 2daa1e0c8061..e1da270502cc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -32,8 +32,6 @@ #include "dcn/dcn_3_2_0_offset.h" #include "dcn/dcn_3_2_0_sh_mask.h" -#define DCN_BASE__INST0_SEG2 0x000034C0 - #define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] #define CTX dmub #define REGS dmub->regs_dcn32 @@ -218,7 +216,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6) + const struct dmub_window *cw6, + const struct dmub_window *region6) { union dmub_addr offset; @@ -479,6 +478,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->is_cw6_enabled = is_cw6_enabled; diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); + + diag_data->timeout_info = dmub->debug; } void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h index b0cd8d29402f..29c1132951af 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h @@ -206,7 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6); + const struct dmub_window *cw6, + const struct dmub_window *region6); void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 6d1fbea0f6ba..70e63aeb8f89 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -229,7 
+229,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6) + const struct dmub_window *cw6, + const struct dmub_window *region6) { union dmub_addr offset; @@ -275,6 +276,15 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub, REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, DMCUB_REGION3_CW6_ENABLE, 1); + + offset = region6->offset; + + REG_WRITE(DMCUB_REGION6_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION6_OFFSET_HIGH, offset.u.high_part); + REG_SET_2(DMCUB_REGION6_TOP_ADDRESS, 0, + DMCUB_REGION6_TOP_ADDRESS, + region6->region.top - region6->region.base - 1, + DMCUB_REGION6_ENABLE, 1); } void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub, @@ -410,6 +420,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds; boot_options.bits.disable_clk_gate = params->disable_clock_gate; boot_options.bits.ips_disable = params->disable_ips; + boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -506,6 +517,7 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->is_cw6_enabled = is_cw6_enabled; diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); + diag_data->timeout_info = dmub->debug; } void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub) { @@ -545,8 +557,14 @@ uint32_t dmub_dcn35_read_inbox0_ack_register(struct dmub_srv *dmub) bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub) { union dmub_fw_boot_status status; + uint32_t is_enable; + + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable); + if (is_enable == 0) + return false; status.all = REG_READ(DMCUB_SCRATCH0); - return status.bits.hw_power_init_done; + return (status.bits.dal_fw && status.bits.hw_power_init_done && status.bits.mailbox_rdy) || + (!status.bits.dal_fw && status.bits.mailbox_rdy); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h index 129a7031d2ae..686e97c00ccc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h @@ -89,6 +89,9 @@ struct dmub_srv; DMUB_SR(DMCUB_REGION5_OFFSET) \ DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \ DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \ + DMUB_SR(DMCUB_REGION6_OFFSET) \ + DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \ + DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \ DMUB_SR(DMCUB_SCRATCH0) \ DMUB_SR(DMCUB_SCRATCH1) \ DMUB_SR(DMCUB_SCRATCH2) \ @@ -154,6 +157,8 @@ struct dmub_srv; DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \ DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \ + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \ + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ @@ -214,7 +219,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw3, const struct dmub_window *cw4, const struct dmub_window *cw5, - const struct dmub_window *cw6); + const struct dmub_window *cw6, + const struct dmub_window *region6); void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub, const struct dmub_region *inbox1); diff --git 
a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c new file mode 100644 index 000000000000..8f40b9f6706c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. */ + +#include "../dmub_srv.h" +#include "dmub_reg.h" +#include "dmub_dcn351.h" + +#include "dcn/dcn_3_5_1_offset.h" +#include "dcn/dcn_3_5_1_sh_mask.h" + +#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] +#define CTX dmub +#define REGS dmub->regs_dcn35 +#define REG_OFFSET_EXP(reg_name) BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +void dmub_srv_dcn351_regs_init(struct dmub_srv *dmub, struct dc_context *ctx) +{ + struct dmub_srv_dcn35_regs *regs = dmub->regs_dcn35; +#define REG_STRUCT regs + +#define DMUB_SR(reg) REG_STRUCT->offset.reg = REG_OFFSET_EXP(reg); + DMUB_DCN35_REGS() + DMCUB_INTERNAL_REGS() +#undef DMUB_SR + +#define DMUB_SF(reg, field) REG_STRUCT->mask.reg##__##field = FD_MASK(reg, field); + DMUB_DCN35_FIELDS() +#undef DMUB_SF + +#define DMUB_SF(reg, field) REG_STRUCT->shift.reg##__##field = FD_SHIFT(reg, field); + DMUB_DCN35_FIELDS() +#undef DMUB_SF +#undef REG_STRUCT +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h new file mode 100644 index 000000000000..4121fa1b301d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright 2024 Advanced Micro Devices, Inc. */ + +#ifndef _DMUB_DCN351_H_ +#define _DMUB_DCN351_H_ + +#include "dmub_dcn35.h" + +struct dmub_srv; + +void dmub_srv_dcn351_regs_init(struct dmub_srv *dmub, struct dc_context *ctx); + +#endif /* _DMUB_DCN351_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index e43e8d4bfe37..90e878195d95 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -37,6 +37,7 @@ #include "dmub_dcn316.h" #include "dmub_dcn32.h" #include "dmub_dcn35.h" +#include "dmub_dcn351.h" #include "os_types.h" /* * Note: the DMUB service is standalone. No additional headers should be @@ -64,7 +65,7 @@ /* Default scratch mem size. */ -#define DMUB_SCRATCH_MEM_SIZE (256) +#define DMUB_SCRATCH_MEM_SIZE (1024) /* Number of windows in use. 
*/ #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) @@ -78,6 +79,7 @@ #define DMUB_CW6_BASE (0x66000000) #define DMUB_REGION5_BASE (0xA0000000) +#define DMUB_REGION6_BASE (0xC0000000) static struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs; static struct dmub_srv_dcn35_regs dmub_srv_dcn35_regs; @@ -314,6 +316,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) break; case DMUB_ASIC_DCN35: + case DMUB_ASIC_DCN351: dmub->regs_dcn35 = &dmub_srv_dcn35_regs; funcs->configure_dmub_in_system_memory = dmub_dcn35_configure_dmub_in_system_memory; funcs->send_inbox0_cmd = dmub_dcn35_send_inbox0_cmd; @@ -350,6 +353,8 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data; funcs->init_reg_offsets = dmub_srv_dcn35_regs_init; + if (asic == DMUB_ASIC_DCN351) + funcs->init_reg_offsets = dmub_srv_dcn351_regs_init; funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up; funcs->should_detect = dmub_dcn35_should_detect; @@ -417,51 +422,44 @@ void dmub_srv_destroy(struct dmub_srv *dmub) dmub_memset(dmub, 0, sizeof(*dmub)); } +static uint32_t dmub_srv_calc_regions_for_memory_type(const struct dmub_srv_region_params *params, + struct dmub_srv_region_info *out, + const uint32_t *window_sizes, + enum dmub_window_memory_type memory_type) +{ + uint32_t i, top = 0; + + for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) { + if (params->window_memory_type[i] == memory_type) { + struct dmub_region *region = &out->regions[i]; + + region->base = dmub_align(top, 256); + region->top = region->base + dmub_align(window_sizes[i], 64); + top = region->top; + } + } + + return dmub_align(top, 4096); +} + enum dmub_status -dmub_srv_calc_region_info(struct dmub_srv *dmub, - const struct dmub_srv_region_params *params, - struct dmub_srv_region_info *out) + dmub_srv_calc_region_info(struct dmub_srv *dmub, + const struct dmub_srv_region_params *params, + struct dmub_srv_region_info *out) { - struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST]; - struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK]; - struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA]; - struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS]; - struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; - struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; - struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; - struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM]; const struct dmub_fw_meta_info *fw_info; uint32_t fw_state_size = DMUB_FW_STATE_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; - uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE; + uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 }; if (!dmub->sw_init) return DMUB_STATUS_INVALID; memset(out, 0, sizeof(*out)); + memset(window_sizes, 0, sizeof(window_sizes)); out->num_regions = DMUB_NUM_WINDOWS; - inst->base = 0x0; - inst->top = inst->base + params->inst_const_size; - - data->base = dmub_align(inst->top, 256); - data->top = data->base + params->bss_data_size; - - /* - * All cache windows below should be aligned to the size - * of the DMCUB cache line, 64 bytes. 
- */ - - stack->base = dmub_align(data->top, 256); - stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE; - - bios->base = dmub_align(stack->top, 256); - bios->top = bios->base + params->vbios_size; - - mail->base = dmub_align(bios->top, 256); - mail->top = mail->base + DMUB_MAILBOX_SIZE; - fw_info = dmub_get_fw_meta_info(params); if (fw_info) { @@ -479,26 +477,29 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub, dmub->fw_version = fw_info->fw_version; } - trace_buff->base = dmub_align(mail->top, 256); - trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64); + window_sizes[DMUB_WINDOW_0_INST_CONST] = params->inst_const_size; + window_sizes[DMUB_WINDOW_1_STACK] = DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE; + window_sizes[DMUB_WINDOW_2_BSS_DATA] = params->bss_data_size; + window_sizes[DMUB_WINDOW_3_VBIOS] = params->vbios_size; + window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE; + window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; + window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; + window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; + window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE; - fw_state->base = dmub_align(trace_buff->top, 256); - fw_state->top = fw_state->base + dmub_align(fw_state_size, 64); + out->fb_size = + dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); - scratch_mem->base = dmub_align(fw_state->top, 256); - scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64); - - out->fb_size = dmub_align(scratch_mem->top, 4096); + out->gart_size = + dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_GART); return DMUB_STATUS_OK; } -enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, - const struct dmub_srv_fb_params *params, +enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub, + const struct dmub_srv_memory_params *params, struct dmub_srv_fb_info *out) { - uint8_t *cpu_base; - uint64_t gpu_base; uint32_t i; if (!dmub->sw_init) @@ -509,15 +510,18 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, if (params->region_info->num_regions != DMUB_NUM_WINDOWS) return DMUB_STATUS_INVALID; - cpu_base = (uint8_t *)params->cpu_addr; - gpu_base = params->gpu_addr; - for (i = 0; i < DMUB_NUM_WINDOWS; ++i) { const struct dmub_region *reg = ¶ms->region_info->regions[i]; - out->fb[i].cpu_addr = cpu_base + reg->base; - out->fb[i].gpu_addr = gpu_base + reg->base; + if (params->window_memory_type[i] == DMUB_WINDOW_MEMORY_TYPE_GART) { + out->fb[i].cpu_addr = (uint8_t *)params->cpu_gart_addr + reg->base; + out->fb[i].gpu_addr = params->gpu_gart_addr + reg->base; + } else { + out->fb[i].cpu_addr = (uint8_t *)params->cpu_fb_addr + reg->base; + out->fb[i].gpu_addr = params->gpu_fb_addr + reg->base; + } + out->fb[i].size = reg->top - reg->base; } @@ -567,9 +571,10 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; + struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE]; struct dmub_rb_init_params rb_params, outbox0_rb_params; - struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6; + struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6; struct dmub_region inbox1, outbox1, outbox0; if (!dmub->sw_init) @@ -654,10 +659,16 @@ enum dmub_status dmub_srv_hw_init(struct 
dmub_srv *dmub, dmub->fw_state = fw_state_fb->cpu_addr; + region6.offset.quad_part = shared_state_fb->gpu_addr; + region6.region.base = DMUB_CW6_BASE; + region6.region.top = region6.region.base + shared_state_fb->size; + + dmub->shared_state = shared_state_fb->cpu_addr; + dmub->scratch_mem_fb = *scratch_mem_fb; if (dmub->hw_funcs.setup_windows) - dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6); + dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, ®ion6); if (dmub->hw_funcs.setup_outbox0) dmub->hw_funcs.setup_outbox0(dmub, &outbox0); @@ -697,6 +708,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->hw_funcs.reset_release(dmub); dmub->hw_init = true; + dmub->power_state = DMUB_POWER_STATE_D0; return DMUB_STATUS_OK; } @@ -707,9 +719,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub) return DMUB_STATUS_INVALID; if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) { - dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); - dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub); - dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; + uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub); + + if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) { + return DMUB_STATUS_HW_FAILURE; + } else { + dmub->inbox1_rb.rptr = rptr; + dmub->inbox1_rb.wrpt = wptr; + dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; + } } return DMUB_STATUS_OK; @@ -743,6 +762,14 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, if (!dmub->hw_init) return DMUB_STATUS_INVALID; + if (dmub->power_state != DMUB_POWER_STATE_D0) + return DMUB_STATUS_POWER_STATE_D3; + + if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity || + dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) { + return DMUB_STATUS_HW_FAILURE; + } + if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) return DMUB_STATUS_OK; @@ -756,6 +783,9 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) if (!dmub->hw_init) return DMUB_STATUS_INVALID; + if (dmub->power_state != DMUB_POWER_STATE_D0) + return DMUB_STATUS_POWER_STATE_D3; + /** * Read back all the queued commands to ensure that they've * been flushed to framebuffer memory. 
Otherwise DMCUB might @@ -777,8 +807,10 @@ bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub) if (!dmub->hw_funcs.is_hw_powered_up) return true; - return dmub->hw_funcs.is_hw_powered_up(dmub) && - dmub->hw_funcs.is_hw_init(dmub); + if (!dmub->hw_funcs.is_hw_powered_up(dmub)) + return false; + + return true; } enum dmub_status dmub_srv_wait_for_hw_pwr_up(struct dmub_srv *dmub, @@ -1049,6 +1081,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti ack = dmub->hw_funcs.read_inbox0_ack_register(dmub); if (ack) return DMUB_STATUS_OK; + udelay(1); } return DMUB_STATUS_TIMEOUT; } @@ -1071,3 +1104,11 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_ subvp_index); } } + +void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state) +{ + if (!dmub || !dmub->hw_init) + return; + + dmub->power_state = dmub_srv_power_state; +} diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h index 66a54da0641c..e4a26143f14c 100644 --- a/drivers/gpu/drm/amd/display/include/audio_types.h +++ b/drivers/gpu/drm/amd/display/include/audio_types.h @@ -27,11 +27,21 @@ #define __AUDIO_TYPES_H__ #include "signal_types.h" +#include "fixed31_32.h" +#include "dc_dp_types.h" #define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20 #define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18 #define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF +struct audio_dp_link_info { + uint32_t link_bandwidth_kbps; + uint32_t hblank_min_symbol_width; + enum dp_link_encoding encoding; + enum dc_link_rate link_rate; + enum dc_lane_count lane_count; + bool is_mst; +}; struct audio_crtc_info { uint32_t h_total; @@ -42,7 +52,10 @@ struct audio_crtc_info { uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */ uint32_t refresh_rate; enum dc_color_depth color_depth; + enum dc_pixel_encoding pixel_encoding; bool interlaced; + uint32_t dsc_bits_per_pixel; + uint32_t dsc_num_slices; }; struct azalia_clock_info { uint32_t pixel_clock_in_10khz; @@ -64,7 +77,7 @@ enum audio_dto_source { /* PLL information required for AZALIA DTO calculation */ struct audio_pll_info { - uint32_t dp_dto_source_clock_in_khz; + uint32_t audio_dto_source_clock_in_khz; uint32_t feed_back_divider; enum audio_dto_source dto_source; bool ss_enabled; @@ -95,6 +108,8 @@ struct audio_output { enum signal_type signal; /* video timing */ struct audio_crtc_info crtc_info; + /* DP link info */ + struct audio_dp_link_info dp_link_info; /* PLL for audio */ struct audio_pll_info pll_info; }; diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index e317089cf6ee..c9ec46c6b4c6 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -250,11 +250,13 @@ enum { #define GC_11_0_0_A0 0x1 #define GC_11_0_2_A0 0x10 #define GC_11_0_3_A0 0x20 +#define GC_11_0_4_A0 0xC0 #define GC_11_UNKNOWN 0xFF #define ASICREV_IS_GC_11_0_0(eChipRev) (eChipRev < GC_11_0_2_A0) #define ASICREV_IS_GC_11_0_2(eChipRev) (eChipRev >= GC_11_0_2_A0 && eChipRev < GC_11_0_3_A0) #define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN) +#define ASICREV_IS_GC_11_0_4(eChipRev) (eChipRev >= GC_11_0_4_A0 && eChipRev < GC_11_UNKNOWN) /* * ASIC chip ID diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index 1c6f24cb1d2f..447768dec887 100644 --- 
a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -27,7 +27,6 @@ #define __DAL_TYPES_H__ #include "signal_types.h" -#include "dc_types.h" struct dal_logger; struct dc_bios; diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index bc96d0211360..813463ffe15c 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -417,6 +417,8 @@ struct integrated_info { /* V2.1 */ struct edp_info edp1_info; struct edp_info edp2_info; + uint32_t gpuclk_ss_percentage; + uint32_t gpuclk_ss_type; }; /* diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index c6bbd262f1ac..08ee0350b31f 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -226,8 +226,8 @@ enum dp_alt_mode { struct graphics_object_id { uint32_t id:8; - uint32_t enum_id:4; - uint32_t type:4; + enum object_enum_id enum_id; + enum object_type type; uint32_t reserved:16; /* for padding. total size should be u32 */ }; diff --git a/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h index 42229b4effdc..eced9ad91f1d 100644 --- a/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h +++ b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h @@ -69,6 +69,11 @@ enum hdcp_message_id { HDCP_MESSAGE_ID_READ_RXSTATUS, HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + /* PS175 chip */ + + HDCP_MESSAGE_ID_WRITE_PS175_CMD, + HDCP_MESSAGE_ID_READ_PS175_RSP, + HDCP_MESSAGE_ID_MAX }; diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 1b8ab20f1715..1867aac57cf2 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -73,7 +73,6 @@ struct link_training_settings { enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; bool should_set_fec_ready; - /* TODO - factor lane_settings out because it changes during LT */ union dc_dp_ffe_preset *ffe_preset; uint16_t cr_pattern_time; @@ -169,6 +168,15 @@ enum dp_test_pattern { DP_TEST_PATTERN_UNSUPPORTED }; +#define IS_DP_PHY_SQUARE_PATTERN(test_pattern)\ + (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&\ + test_pattern <= DP_TEST_PATTERN_SQUARE_END) + +#define IS_DP_PHY_PATTERN(test_pattern)\ + ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&\ + test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||\ + test_pattern == DP_TEST_PATTERN_VIDEO_MODE) + enum dp_test_pattern_color_space { DP_TEST_PATTERN_COLOR_SPACE_RGB, DP_TEST_PATTERN_COLOR_SPACE_YCBCR601, diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index f39e2785e618..83479951732a 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -64,6 +64,7 @@ #define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__) +#define DC_LOG_IPS(...) 
pr_debug("[IPS]: "__VA_ARGS__) struct dc_log_buffer_ctx { char *buf; diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index 1b14b17a79c7..a10d6b988aab 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -118,6 +118,19 @@ static inline bool dc_is_dvi_signal(enum signal_type signal) } } +static inline bool dc_is_tmds_signal(enum signal_type signal) +{ + switch (signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + return true; + break; + default: + return false; + } +} + static inline bool dc_is_dvi_single_link_signal(enum signal_type signal) { return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK); diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 8b5c27857671..3699e633801d 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1059,7 +1059,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, struct fixed31_32 min_display; struct fixed31_32 max_content; struct fixed31_32 clip = dc_fixpt_one; - struct fixed31_32 output; + struct fixed31_32 output = dc_fixpt_zero; bool use_eetf = false; bool is_clipped = false; struct fixed31_32 sdr_white_level; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ccecddafeb05..d09627c15b9c 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -81,6 +81,7 @@ fail_alloc_context: void mod_freesync_destroy(struct mod_freesync *mod_freesync) { struct core_freesync *core_freesync = NULL; + if (mod_freesync == NULL) return; core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); @@ -157,13 +158,13 @@ static unsigned int calc_v_total_from_duration( if (duration_in_us > vrr->max_duration_in_us) duration_in_us = vrr->max_duration_in_us; - if (dc_is_hdmi_signal(stream->signal)) { + if (dc_is_hdmi_signal(stream->signal)) { // change for HDMI to comply with spec uint32_t h_total_up_scaled; h_total_up_scaled = stream->timing.h_total * 10000; v_total = div_u64((unsigned long long)duration_in_us * stream->timing.pix_clk_100hz + (h_total_up_scaled - 1), - h_total_up_scaled); + h_total_up_scaled); //ceiling for MMax and MMin for MVRR } else { v_total = div64_u64(div64_u64(((unsigned long long)( duration_in_us) * (stream->timing.pix_clk_100hz / 10)), @@ -278,9 +279,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync, } } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) { /* Enter Below the Range */ - if (!in_out_vrr->btr.btr_active) { + if (!in_out_vrr->btr.btr_active) in_out_vrr->btr.btr_active = true; - } } /* BTR set to "not active" so disengage */ @@ -693,10 +693,12 @@ static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf, if (app_tf != TRANSFER_FUNC_UNKNOWN) { infopacket->valid = true; - if (app_tf != TRANSFER_FUNC_PQ2084) { + if (app_tf == TRANSFER_FUNC_PQ2084) + infopacket->sb[9] |= 0x20; // PB9 = [Bit 5 = PQ EOTF Active] + else { infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active] if (app_tf == TRANSFER_FUNC_GAMMA_22) - infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active] + infopacket->sb[9] |= 0x04; // PB9 = [Bit 2 = Gamma 2.2 
EOTF Active] } } } @@ -1055,7 +1057,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->fixed_refresh_in_uhz = 0; refresh_range = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) - -+ div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000); + div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000); in_out_vrr->supported = true; } @@ -1124,6 +1126,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->adjust.v_total_min = stream->timing.v_total; in_out_vrr->adjust.v_total_max = stream->timing.v_total; } + + in_out_vrr->adjust.allow_otg_v_count_halt = (in_config->state == VRR_STATE_ACTIVE_FIXED) ? true : false; } void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 1ddb4f5eac8e..182e7532dda8 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -63,6 +63,7 @@ static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; + if (is_dp_hdcp(hdcp)) { status = (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_R0_PRIME_READY) ? @@ -131,9 +132,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* Avoid device count == 0 to do authentication */ - if (0 == get_device_count(hdcp)) { + if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; - } /* Some MST display may choose to report the internal panel as an HDCP RX. * To update this condition with 1(because the immediate repeater's internal diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 91c22b96ebde..c996365e84b0 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -151,7 +151,7 @@ out: static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) { - enum mod_hdcp_status status; + enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE; uint8_t size; uint16_t max_wait = 20; // units of ms uint16_t num_polls = 5; @@ -208,9 +208,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* Avoid device count == 0 to do authentication */ - if (0 == get_device_count(hdcp)) { + if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; - } /* Some MST display may choose to report the internal panel as an HDCP RX. 
*/ /* To update this condition with 1(because the immediate repeater's internal */ @@ -689,9 +688,8 @@ static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp, if (is_hdmi_dvi_sl_hdcp(hdcp)) { if (!process_rxstatus(hdcp, event_ctx, input, &status)) goto out; - if (event_ctx->rx_id_list_ready) { + if (event_ctx->rx_id_list_ready) goto out; - } } if (is_hdmi_dvi_sl_hdcp(hdcp)) if (!mod_hdcp_execute_and_set(check_stream_ready_available, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index f7b5583ee609..8e9caae7c955 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -156,6 +156,10 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp, uint32_t cur_size = 0; uint32_t data_offset = 0; + if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) { + return MOD_HDCP_STATUS_DDC_FAILURE; + } + if (is_dp_hdcp(hdcp)) { while (buf_len > 0) { cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); @@ -215,6 +219,10 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp, uint32_t cur_size = 0; uint32_t data_offset = 0; + if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) { + return MOD_HDCP_STATUS_DDC_FAILURE; + } + if (is_dp_hdcp(hdcp)) { while (buf_len > 0) { cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index c62df3bcc7cb..1d83c1b9da10 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -86,10 +86,12 @@ #define HDCP_CPIRQ_TRACE(hdcp) \ HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index) #define HDCP_EVENT_TRACE(hdcp, event) \ - if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ - HDCP_TIMEOUT_TRACE(hdcp); \ - else if (event == MOD_HDCP_EVENT_CPIRQ) \ - HDCP_CPIRQ_TRACE(hdcp) + do { \ + if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ + HDCP_TIMEOUT_TRACE(hdcp); \ + else if (event == MOD_HDCP_EVENT_CPIRQ) \ + HDCP_CPIRQ_TRACE(hdcp); \ + } while (0) /* TODO: find some way to tell if logging is off to save time */ #define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index ee67a35c2a8e..7c9805705fd3 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -443,7 +443,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE) - continue; + continue; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) @@ -926,7 +929,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { if (hdcp->displays[i].adjust.disable || 
hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE) - continue; + continue; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h index 5b71bc96b98c..7844ea91650b 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -98,9 +98,9 @@ enum ta_dtm_encoder_type { * This enum defines software value for dio_output_type */ typedef enum { - TA_DTM_DIO_OUTPUT_TYPE__INVALID, - TA_DTM_DIO_OUTPUT_TYPE__DIRECT, - TA_DTM_DIO_OUTPUT_TYPE__DPIA + TA_DTM_DIO_OUTPUT_TYPE__INVALID, + TA_DTM_DIO_OUTPUT_TYPE__DIRECT, + TA_DTM_DIO_OUTPUT_TYPE__DPIA } ta_dtm_dio_output_type; struct ta_dtm_topology_update_input_v3 { @@ -237,11 +237,11 @@ enum ta_hdcp2_hdcp2_msg_id_max_size { #define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127 #define TA_HDCP__HDCP1_V_PRIME_SIZE 20 #define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE \ - TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6 + (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6) // 64 bits boundaries #define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE \ - TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4 + (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4) enum ta_hdcp_status { TA_HDCP_STATUS__SUCCESS = 0x00, diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index afe1f6cce528..cc3dc9b589f6 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -23,34 +23,6 @@ * */ - - - -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - #ifndef MOD_FREESYNC_H_ #define MOD_FREESYNC_H_ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h index 5960dd760e91..8ce6c22e5d04 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h @@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats, unsigned int length); void mod_stats_update_flip(struct mod_stats *mod_stats, - unsigned long timestamp_in_ns); + unsigned long long timestamp_in_ns); void mod_stats_update_vupdate(struct mod_stats *mod_stats, - unsigned long timestamp_in_ns); + unsigned long long timestamp_in_ns); void mod_stats_update_freesync(struct mod_stats *mod_stats, unsigned int v_total_min, diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 84f9b412a4f1..a344e2e49b0e 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -536,8 +536,6 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream, mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet); break; case FREESYNC_TYPE_PCON_IN_WHITELIST: - mod_build_adaptive_sync_infopacket_v1(info_packet); - break; case ADAPTIVE_SYNC_TYPE_EDP: mod_build_adaptive_sync_infopacket_v1(info_packet); break; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index a522a7c02911..2a3698fd2dc2 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -31,7 +31,7 @@ #define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b)) #define bswap16_based_on_endian(big_endian, value) \ - (big_endian) ? cpu_to_be16(value) : cpu_to_le16(value) + ((big_endian) ? 
cpu_to_be16(value) : cpu_to_le16(value)) /* Possible Min Reduction config from least aggressive to most aggressive * 0 1 2 3 4 5 6 7 8 9 10 11 12 @@ -839,6 +839,8 @@ bool is_psr_su_specific_panel(struct dc_link *link) ((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) || (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07))) isPSRSUSupported = false; + else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03) + isPSRSUSupported = false; else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1) isPSRSUSupported = true; } @@ -971,6 +973,39 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, return true; } +void set_replay_coasting_vtotal(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t vtotal) +{ + link->replay_settings.coasting_vtotal_table[type] = vtotal; +} + +void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal) +{ + link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal; +} + +void calculate_replay_link_off_frame_count(struct dc_link *link, + uint16_t vtotal, uint16_t htotal) +{ + uint8_t max_link_off_frame_count = 0; + uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0; + + max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line; + pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line; + + if (htotal != 0 && vtotal != 0) + max_link_off_frame_count = htotal * max_deviation_line / (pixel_deviation_per_line * vtotal); + else + ASSERT(0); + + link->replay_settings.link_off_frame_count_level = + max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_BEST ? PR_LINK_OFF_FRAME_COUNT_BEST : + max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_GOOD ? PR_LINK_OFF_FRAME_COUNT_GOOD : + PR_LINK_OFF_FRAME_COUNT_FAIL; + +} + bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps) { unsigned int data_points_size; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index d9e0d67d67f7..ff7e6f3cd6be 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -54,6 +54,12 @@ bool dmub_init_abm_config(struct resource_pool *res_pool, unsigned int inst); void init_replay_config(struct dc_link *link, struct replay_config *pr_config); +void set_replay_coasting_vtotal(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t vtotal); +void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal); +void calculate_replay_link_off_frame_count(struct dc_link *link, + uint16_t vtotal, uint16_t htotal); bool is_psr_su_specific_panel(struct dc_link *link); void mod_power_calc_psr_configs(struct psr_config *psr_config, |