Diffstat (limited to 'drivers/gpu/drm/amd/display')
66 files changed, 2718 insertions, 2092 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dd688cfed6aa..aa43bb253ea2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -76,6 +76,16 @@ #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); +/** + * DOC: overview + * + * The AMDgpu display manager, **amdgpu_dm** (or even simpler, + * **dm**) sits between DRM and DC. It acts as a liason, converting DRM + * requests into DC requests, and DC responses into DRM responses. + * + * The root control structure is &struct amdgpu_display_manager. + */ + /* basic init/fini API */ static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); @@ -95,7 +105,7 @@ static void amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, - struct amdgpu_plane *aplane, + struct drm_plane *plane, unsigned long possible_crtcs); static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, @@ -379,11 +389,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector) } -/* - * Init display KMS - * - * Returns 0 on success - */ static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; @@ -429,6 +434,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->asic_type < CHIP_RAVEN) init_data.flags.gpu_vm_support = true; + if (amdgpu_dc_feature_mask & DC_FBC_MASK) + init_data.flags.fbc_support = true; + /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -660,6 +668,26 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) drm_modeset_unlock(&dev->mode_config.connection_mutex); } +/** + * dm_hw_init() - Initialize DC device + * @handle: The base driver device containing the amdpgu_dm device. + * + * Initialize the &struct amdgpu_display_manager device. This involves calling + * the initializers of each DM component, then populating the struct with them. + * + * Although the function implies hardware initialization, both hardware and + * software are initialized here. Splitting them out to their relevant init + * hooks is a future TODO item. + * + * Some notable things that are initialized here: + * + * - Display Core, both software and hardware + * - DC modules that we need (freesync and color management) + * - DRM software states + * - Interrupt sources and handlers + * - Vblank support + * - Debug FS entries, if enabled + */ static int dm_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -670,6 +698,14 @@ static int dm_hw_init(void *handle) return 0; } +/** + * dm_hw_fini() - Teardown DC device + * @handle: The base driver device containing the amdpgu_dm device. + * + * Teardown components within &struct amdgpu_display_manager that require + * cleanup. This involves cleaning up the DRM device, DC, and any modules that + * were loaded. Also flush IRQ workqueues and disable them. + */ static int dm_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -895,6 +931,16 @@ static int dm_resume(void *handle) return ret; } +/** + * DOC: DM Lifecycle + * + * DM (and consequently DC) is registered in the amdgpu base driver as a IP + * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to + * the base driver's device list to be initialized and torn down accordingly. 
+ * + * The functions to do so are provided as hooks in &struct amd_ip_funcs. + */ + static const struct amd_ip_funcs amdgpu_dm_funcs = { .name = "dm", .early_init = dm_early_init, @@ -962,6 +1008,12 @@ dm_atomic_state_alloc_free(struct drm_atomic_state *state) kfree(dm_state); } +/** + * DOC: atomic + * + * *WIP* + */ + static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { .fb_create = amdgpu_display_user_framebuffer_create, .output_poll_changed = drm_fb_helper_output_poll_changed, @@ -1524,15 +1576,23 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); + /* backlight_pwm_u16_16 parameter is in unsigned 32 bit, 16 bit integer + * and 16 bit fractional, where 1.0 is max backlight value. + * bd->props.brightness is 8 bit format and needs to be converted by + * scaling via copy lower byte to upper byte of 16 bit value. + */ + uint32_t brightness = bd->props.brightness * 0x101; + /* * PWM interperts 0 as 100% rather than 0% because of HW - * limitation for level 0.So limiting minimum brightness level + * limitation for level 0. So limiting minimum brightness level * to 1. */ if (bd->props.brightness < 1) - return 1; + brightness = 0x101; + if (dc_link_set_backlight_level(dm->backlight_link, - bd->props.brightness, 0, 0)) + brightness, 0, 0)) return 0; else return 1; @@ -1584,18 +1644,18 @@ static int initialize_plane(struct amdgpu_display_manager *dm, struct amdgpu_mode_info *mode_info, int plane_id) { - struct amdgpu_plane *plane; + struct drm_plane *plane; unsigned long possible_crtcs; int ret = 0; - plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL); + plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); mode_info->planes[plane_id] = plane; if (!plane) { DRM_ERROR("KMS: Failed to allocate plane\n"); return -ENOMEM; } - plane->base.type = mode_info->plane_type[plane_id]; + plane->type = mode_info->plane_type[plane_id]; /* * HACK: IGT tests expect that each plane can only have @@ -1686,7 +1746,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } for (i = 0; i < dm->dc->caps.max_streams; i++) - if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) { + if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { DRM_ERROR("KMS: Failed to initialize crtc\n"); goto fail; } @@ -2707,18 +2767,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, drm_connector = &aconnector->base; if (!aconnector->dc_sink) { - /* - * Create dc_sink when necessary to MST - * Don't apply fake_sink to MST - */ - if (aconnector->mst_port) { - dm_dp_mst_dc_sink_create(drm_connector); - return stream; + if (!aconnector->mst_port) { + sink = create_fake_sink(aconnector); + if (!sink) + return stream; } - - sink = create_fake_sink(aconnector); - if (!sink) - return stream; } else { sink = aconnector->dc_sink; } @@ -3307,7 +3360,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane, static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = drm_plane_cleanup, + .destroy = drm_primary_helper_destroy, .reset = dm_drm_plane_reset, .atomic_duplicate_state = dm_drm_plane_duplicate_state, .atomic_destroy_state = dm_drm_plane_destroy_state, @@ -3468,49 +3521,49 @@ static const u32 cursor_formats[] = { }; static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, - struct amdgpu_plane *aplane, + struct drm_plane *plane, unsigned long possible_crtcs) { int res = -EPERM; - 
switch (aplane->base.type) { + switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: res = drm_universal_plane_init( dm->adev->ddev, - &aplane->base, + plane, possible_crtcs, &dm_plane_funcs, rgb_formats, ARRAY_SIZE(rgb_formats), - NULL, aplane->base.type, NULL); + NULL, plane->type, NULL); break; case DRM_PLANE_TYPE_OVERLAY: res = drm_universal_plane_init( dm->adev->ddev, - &aplane->base, + plane, possible_crtcs, &dm_plane_funcs, yuv_formats, ARRAY_SIZE(yuv_formats), - NULL, aplane->base.type, NULL); + NULL, plane->type, NULL); break; case DRM_PLANE_TYPE_CURSOR: res = drm_universal_plane_init( dm->adev->ddev, - &aplane->base, + plane, possible_crtcs, &dm_plane_funcs, cursor_formats, ARRAY_SIZE(cursor_formats), - NULL, aplane->base.type, NULL); + NULL, plane->type, NULL); break; } - drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs); + drm_plane_helper_add(plane, &dm_plane_helper_funcs); /* Create (reset) the plane state */ - if (aplane->base.funcs->reset) - aplane->base.funcs->reset(&aplane->base); + if (plane->funcs->reset) + plane->funcs->reset(plane); return res; @@ -3521,7 +3574,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, uint32_t crtc_index) { struct amdgpu_crtc *acrtc = NULL; - struct amdgpu_plane *cursor_plane; + struct drm_plane *cursor_plane; int res = -ENOMEM; @@ -3529,7 +3582,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, if (!cursor_plane) goto fail; - cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR; + cursor_plane->type = DRM_PLANE_TYPE_CURSOR; res = amdgpu_dm_plane_init(dm, cursor_plane, 0); acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); @@ -3540,7 +3593,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, dm->ddev, &acrtc->base, plane, - &cursor_plane->base, + cursor_plane, &amdgpu_dm_crtc_funcs, NULL); if (res) @@ -3779,12 +3832,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, case DRM_MODE_CONNECTOR_HDMIA: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; aconnector->base.ycbcr_420_allowed = - link->link_enc->features.ycbcr420_supported ? true : false; + link->link_enc->features.hdmi_ycbcr420_supported ? true : false; break; case DRM_MODE_CONNECTOR_DisplayPort: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; aconnector->base.ycbcr_420_allowed = - link->link_enc->features.ycbcr420_supported ? true : false; + link->link_enc->features.dp_ycbcr420_supported ? true : false; break; case DRM_MODE_CONNECTOR_DVID: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; @@ -4542,6 +4595,14 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, /*TODO Handle EINTR, reenable IRQ*/ } +/** + * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. + * @state: The atomic state to commit + * + * This will tell DC to commit the constructed DC state from atomic_check, + * programming the hardware. Any failures here implies a hardware failure, since + * atomic check should have filtered anything non-kosher. 
+ */ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; @@ -5313,6 +5374,12 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru struct dc_stream_update stream_update; enum surface_update_type update_type = UPDATE_TYPE_FAST; + if (!updates || !surface) { + DRM_ERROR("Plane or surface update failed to allocate"); + /* Set type to FULL to avoid crashing in DC*/ + update_type = UPDATE_TYPE_FULL; + goto ret; + } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -5388,6 +5455,31 @@ ret: return update_type; } +/** + * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. + * @dev: The DRM device + * @state: The atomic state to commit + * + * Validate that the given atomic state is programmable by DC into hardware. + * This involves constructing a &struct dc_state reflecting the new hardware + * state we wish to commit, then querying DC to see if it is programmable. It's + * important not to modify the existing DC state. Otherwise, atomic_check + * may unexpectedly commit hardware changes. + * + * When validating the DC state, it's important that the right locks are + * acquired. For full updates case which removes/adds/updates streams on one + * CRTC while flipping on another CRTC, acquiring global lock will guarantee + * that any such full update commit will wait for completion of any outstanding + * flip using DRMs synchronization events. See + * dm_determine_update_type_for_commit() + * + * Note that DM adds the affected connectors for all CRTCs in state, when that + * might not seem necessary. This is because DC stream creation requires the + * DC sink, which is tied to the DRM connector state. Cleaning this up should + * be possible but non-trivial - a possible TODO item. + * + * Return: -Error code if validation failed. + */ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { @@ -5490,15 +5582,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, lock_and_validation_needed = true; } - /* - * For full updates case when - * removing/adding/updating streams on one CRTC while flipping - * on another CRTC, - * acquiring global lock will guarantee that any such full - * update commit - * will wait for completion of any outstanding flip using DRMs - * synchronization events. - */ update_type = dm_determine_update_type_for_commit(dc, state); if (overall_update_type < update_type) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 978b34a5011c..d6960644d714 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -59,49 +59,100 @@ struct common_irq_params { enum dc_irq_source irq_src; }; +/** + * struct irq_list_head - Linked-list for low context IRQ handlers. 
+ * + * @head: The list_head within &struct handler_data + * @work: A work_struct containing the deferred handler work + */ struct irq_list_head { struct list_head head; /* In case this interrupt needs post-processing, 'work' will be queued*/ struct work_struct work; }; +/** + * struct dm_compressor_info - Buffer info used by frame buffer compression + * @cpu_addr: MMIO cpu addr + * @bo_ptr: Pointer to the buffer object + * @gpu_addr: MMIO gpu addr + */ struct dm_comressor_info { void *cpu_addr; struct amdgpu_bo *bo_ptr; uint64_t gpu_addr; }; +/** + * struct amdgpu_display_manager - Central amdgpu display manager device + * + * @dc: Display Core control structure + * @adev: AMDGPU base driver structure + * @ddev: DRM base driver structure + * @display_indexes_num: Max number of display streams supported + * @irq_handler_list_table_lock: Synchronizes access to IRQ tables + * @backlight_dev: Backlight control device + * @cached_state: Caches device atomic state for suspend/resume + * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info + */ struct amdgpu_display_manager { + struct dc *dc; + + /** + * @cgs_device: + * + * The Common Graphics Services device. It provides an interface for + * accessing registers. + */ struct cgs_device *cgs_device; - struct amdgpu_device *adev; /*AMD base driver*/ - struct drm_device *ddev; /*DRM base driver*/ + struct amdgpu_device *adev; + struct drm_device *ddev; u16 display_indexes_num; - /* - * 'irq_source_handler_table' holds a list of handlers - * per (DAL) IRQ source. + /** + * @irq_handler_list_low_tab: + * + * Low priority IRQ handler table. * - * Each IRQ source may need to be handled at different contexts. - * By 'context' we mean, for example: - * - The ISR context, which is the direct interrupt handler. - * - The 'deferred' context - this is the post-processing of the - * interrupt, but at a lower priority. + * It is a n*m table consisting of n IRQ sources, and m handlers per IRQ + * source. Low priority IRQ handlers are deferred to a workqueue to be + * processed. Hence, they can sleep. * * Note that handlers are called in the same order as they were * registered (FIFO). */ struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; + + /** + * @irq_handler_list_high_tab: + * + * High priority IRQ handler table. + * + * It is a n*m table, same as &irq_handler_list_low_tab. However, + * handlers in this table are not deferred and are called immediately. + */ struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER]; + /** + * @pflip_params: + * + * Page flip IRQ parameters, passed to registered handlers when + * triggered. + */ struct common_irq_params pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1]; + /** + * @vblank_params: + * + * Vertical blanking IRQ parameters, passed to registered handlers when + * triggered. 
+ */ struct common_irq_params vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; - /* this spin lock synchronizes access to 'irq_handler_list_table' */ spinlock_t irq_handler_list_table_lock; struct backlight_device *backlight_dev; @@ -110,9 +161,6 @@ struct amdgpu_display_manager { struct mod_freesync *freesync_module; - /** - * Caches device atomic state for suspend/resume - */ struct drm_atomic_state *cached_state; struct dm_comressor_info compressor; @@ -160,8 +208,6 @@ struct amdgpu_dm_connector { struct mutex hpd_lock; bool fake_enable; - - bool mst_connected; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index be19e6861189..216e48cec716 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -164,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) */ stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; ret = mod_color_calculate_regamma_params(stream->out_transfer_func, - gamma, true, adev->asic_type <= CHIP_RAVEN); + gamma, true, adev->asic_type <= CHIP_RAVEN, NULL); dc_gamma_release(&gamma); if (!ret) { stream->out_transfer_func->type = old_type; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 01fc5717b657..f088ac585978 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) return -EINVAL; } + if (!stream_state) { + DRM_ERROR("No stream state for CRTC%d\n", crtc->index); + return -EINVAL; + } + /* When enabling CRC, we should also disable dithering. */ if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) { if (dc_stream_configure_crc(stream_state->ctx->dc, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index a212178f2edc..cd10f77cdeb0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -32,16 +32,55 @@ #include "amdgpu_dm.h" #include "amdgpu_dm_irq.h" +/** + * DOC: overview + * + * DM provides another layer of IRQ management on top of what the base driver + * already provides. This is something that could be cleaned up, and is a + * future TODO item. + * + * The base driver provides IRQ source registration with DRM, handler + * registration into the base driver's IRQ table, and a handler callback + * amdgpu_irq_handler(), with which DRM calls on interrupts. This generic + * handler looks up the IRQ table, and calls the respective + * &amdgpu_irq_src_funcs.process hookups. + * + * What DM provides on top are two IRQ tables specifically for top-half and + * bottom-half IRQ handling, with the bottom-half implementing workqueues: + * + * - &amdgpu_display_manager.irq_handler_list_high_tab + * - &amdgpu_display_manager.irq_handler_list_low_tab + * + * They override the base driver's IRQ table, and the effect can be seen + * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They + * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up + * DM's IRQ tables. However, in order for base driver to recognize this hook, DM + * still needs to register the IRQ with the base driver. 
See + * dce110_register_irq_handlers() and dcn10_register_irq_handlers(). + * + * To expose DC's hardware interrupt toggle to the base driver, DM implements + * &amdgpu_irq_src_funcs.set hooks. Base driver calls it through + * amdgpu_irq_update() to enable or disable the interrupt. + */ + /****************************************************************************** * Private declarations. *****************************************************************************/ +/** + * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers. + * + * @list: Linked list entry referencing the next/previous handler + * @handler: Handler function + * @handler_arg: Argument passed to the handler when triggered + * @dm: DM which this handler belongs to + * @irq_source: DC interrupt source that this handler is registered for + */ struct amdgpu_dm_irq_handler_data { struct list_head list; interrupt_handler handler; void *handler_arg; - /* DM which this handler belongs to */ struct amdgpu_display_manager *dm; /* DAL irq source which registered for this interrupt. */ enum dc_irq_source irq_source; @@ -68,7 +107,7 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, } /** - * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper. + * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper. * * @work: work struct */ @@ -99,8 +138,8 @@ static void dm_irq_work_func(struct work_struct *work) * (The most common use is HPD interrupt) */ } -/** - * Remove a handler and return a pointer to hander list from which the +/* + * Remove a handler and return a pointer to handler list from which the * handler was removed. */ static struct list_head *remove_irq_handler(struct amdgpu_device *adev, @@ -203,6 +242,24 @@ static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, * Note: caller is responsible for input validation. *****************************************************************************/ +/** + * amdgpu_dm_irq_register_interrupt() - Register a handler within DM. + * @adev: The base driver device containing the DM device. + * @int_params: Interrupt parameters containing the source, and handler context + * @ih: Function pointer to the interrupt handler to register + * @handler_args: Arguments passed to the handler when the interrupt occurs + * + * Register an interrupt handler for the given IRQ source, under the given + * context. The context can either be high or low. High context handlers are + * executed directly within ISR context, while low context is executed within a + * workqueue, thereby allowing operations that sleep. + * + * Registered handlers are called in a FIFO manner, i.e. the most recently + * registered handler will be called first. + * + * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ + * source, handler function, and args + */ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, struct dc_interrupt_params *int_params, void (*ih)(void *), @@ -261,6 +318,15 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, return handler_data; } +/** + * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table + * @adev: The base driver device containing the DM device + * @irq_source: IRQ source to remove the given handler from + * @ih: Function pointer to the interrupt handler to unregister + * + * Go through both low and high context IRQ tables, and find the given handler + * for the given irq source. If found, remove it. 
Otherwise, do nothing. + */ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, enum dc_irq_source irq_source, void *ih) @@ -295,6 +361,20 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, } } +/** + * amdgpu_dm_irq_init() - Initialize DM IRQ management + * @adev: The base driver device containing the DM device + * + * Initialize DM's high and low context IRQ tables. + * + * The N by M table contains N IRQ sources, with M + * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The + * list_heads are initialized here. When an interrupt n is triggered, all m + * handlers are called in sequence, FIFO according to registration order. + * + * The low context table requires special steps to initialize, since handlers + * will be deferred to a workqueue. See &struct irq_list_head. + */ int amdgpu_dm_irq_init(struct amdgpu_device *adev) { int src; @@ -317,7 +397,12 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev) return 0; } -/* DM IRQ and timer resource release */ +/** + * amdgpu_dm_irq_fini() - Tear down DM IRQ management + * @adev: The base driver device containing the DM device + * + * Flush all work within the low context IRQ table. + */ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) { int src; @@ -414,7 +499,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) return 0; } -/** +/* * amdgpu_dm_irq_schedule_work - schedule all work items registered for the * "irq_source". */ @@ -439,8 +524,9 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, } -/** amdgpu_dm_irq_immediate_work - * Callback high irq work immediately, don't send to work queue +/* + * amdgpu_dm_irq_immediate_work + * Callback high irq work immediately, don't send to work queue */ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, enum dc_irq_source irq_source) @@ -467,11 +553,14 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); } -/* - * amdgpu_dm_irq_handler +/** + * amdgpu_dm_irq_handler - Generic DM IRQ handler + * @adev: amdgpu base driver device containing the DM device + * @source: Unused + * @entry: Data about the triggered interrupt * - * Generic IRQ handler, calls all registered high irq work immediately, and - * schedules work for low irq + * Calls all registered high irq work immediately, and schedules work for low + * irq. The DM IRQ table is used to find the corresponding handlers. */ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, struct amdgpu_irq_src *source, @@ -613,7 +702,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) adev->hpd_irq.funcs = &dm_hpd_irq_funcs; } -/* +/** * amdgpu_dm_hpd_init - hpd setup callback. 
* * @adev: amdgpu_device pointer diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 03601d717fed..d02c32a1039c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { .atomic_get_property = amdgpu_dm_connector_atomic_get_property }; -void dm_dp_mst_dc_sink_create(struct drm_connector *connector) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct dc_sink *dc_sink; - struct dc_sink_init_data init_params = { - .link = aconnector->dc_link, - .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; - - /* FIXME none of this is safe. we shouldn't touch aconnector here in - * atomic_check - */ - - /* - * TODO: Need to further figure out why ddc.algo is NULL while MST port exists - */ - if (!aconnector->port || !aconnector->port->aux.ddc.algo) - return; - - ASSERT(aconnector->edid); - - dc_sink = dc_link_add_remote_sink( - aconnector->dc_link, - (uint8_t *)aconnector->edid, - (aconnector->edid->extensions + 1) * EDID_LENGTH, - &init_params); - - dc_sink->priv = aconnector; - aconnector->dc_sink = dc_sink; - - if (aconnector->dc_sink) - amdgpu_dm_update_freesync_caps( - connector, aconnector->edid); -} - static int dm_dp_mst_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); @@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder; struct drm_encoder *encoder; - const struct drm_connector_helper_funcs *connector_funcs = - connector->base.helper_private; - struct drm_encoder *enc_master = - connector_funcs->best_encoder(&connector->base); - DRM_DEBUG_KMS("enc master is %p\n", enc_master); amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); if (!amdgpu_encoder) return NULL; @@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - aconnector = to_amdgpu_dm_connector(connector); - if (aconnector->mst_port == master - && !aconnector->port) { - DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); - - aconnector->port = port; - drm_connector_set_path_property(connector, pathprop); - - drm_connector_list_iter_end(&conn_iter); - aconnector->mst_connected = true; - return &aconnector->base; - } - } - drm_connector_list_iter_end(&conn_iter); aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) @@ -421,8 +363,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, */ amdgpu_dm_connector_funcs_reset(connector); - aconnector->mst_connected = true; - DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", aconnector, connector->base.id, aconnector->mst_port); @@ -434,6 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector) { + struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, 
mst_mgr); + struct drm_device *dev = master->base.dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", @@ -447,7 +390,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, aconnector->dc_sink = NULL; } - aconnector->mst_connected = false; + drm_connector_unregister(connector); + if (adev->mode_info.rfbdev) + drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector); + drm_connector_put(connector); } static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) @@ -458,18 +404,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) drm_kms_helper_hotplug_event(dev); } -static void dm_dp_mst_link_status_reset(struct drm_connector *connector) -{ - mutex_lock(&connector->dev->mode_config.mutex); - drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); - mutex_unlock(&connector->dev->mode_config.mutex); -} - static void dm_dp_mst_register_connector(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if (adev->mode_info.rfbdev) drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); @@ -477,9 +415,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector) DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); drm_connector_register(connector); - - if (aconnector->mst_connected) - dm_dp_mst_link_status_reset(connector); } static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 8cf51da26657..2da851b40042 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -31,6 +31,5 @@ struct amdgpu_dm_connector; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector); -void dm_dp_mst_dc_sink_create(struct drm_connector *connector); #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 12001a006b2d..9d2d6986b983 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -485,11 +485,11 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, return; clock.clock_type = amd_pp_dcf_clock; - clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; + clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000; pp_funcs->display_clock_voltage_request(pp_handle, &clock); clock.clock_type = amd_pp_f_clock; - clock.clock_freq_in_khz = req->hard_min_fclk_khz; + clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000; pp_funcs->display_clock_voltage_request(pp_handle, &clock); } @@ -518,13 +518,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, wm_dce_clocks[i].wm_set_id = ranges->reader_wm_sets[i].wm_inst; wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = - ranges->reader_wm_sets[i].max_drain_clk_khz; + ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000; wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = - ranges->reader_wm_sets[i].min_drain_clk_khz; + ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000; wm_dce_clocks[i].wm_max_mem_clk_in_khz = - 
ranges->reader_wm_sets[i].max_fill_clk_khz; + ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000; wm_dce_clocks[i].wm_min_mem_clk_in_khz = - ranges->reader_wm_sets[i].min_fill_clk_khz; + ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000; } for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { @@ -534,13 +534,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, wm_soc_clocks[i].wm_set_id = ranges->writer_wm_sets[i].wm_inst; wm_soc_clocks[i].wm_max_socclk_clk_in_khz = - ranges->writer_wm_sets[i].max_fill_clk_khz; + ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000; wm_soc_clocks[i].wm_min_socclk_clk_in_khz = - ranges->writer_wm_sets[i].min_fill_clk_khz; + ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000; wm_soc_clocks[i].wm_max_mem_clk_in_khz = - ranges->writer_wm_sets[i].max_drain_clk_khz; + ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000; wm_soc_clocks[i].wm_min_mem_clk_in_khz = - ranges->writer_wm_sets[i].min_drain_clk_khz; + ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; } pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 0e1dc1b1a48d..c2ab026aee91 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2030,7 +2030,7 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object, static struct device_id device_type_from_device_id(uint16_t device_id) { - struct device_id result_device_id; + struct device_id result_device_id = {0}; switch (device_id) { case ATOM_DEVICE_LCD1_SUPPORT: diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index ff764da21b6f..751bb614fc0e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1884,6 +1884,8 @@ static const struct dc_vbios_funcs vbios_funcs = { .is_accelerated_mode = bios_parser_is_accelerated_mode, + .is_active_display = bios_is_active_display, + .set_scratch_critical_state = bios_parser_set_scratch_critical_state, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c index d4589470985c..fdda8aa8e303 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c @@ -88,3 +88,96 @@ uint32_t bios_get_vga_enabled_displays( return active_disp; } +bool bios_is_active_display( + struct dc_bios *bios, + enum signal_type signal, + const struct connector_device_tag_info *device_tag) +{ + uint32_t active = 0; + uint32_t connected = 0; + uint32_t bios_scratch_0 = 0; + uint32_t bios_scratch_3 = 0; + + switch (signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + { + if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) { + switch (device_tag->dev_id.enum_id) { + case 1: + { + active = ATOM_S3_DFP1_ACTIVE; + connected = 0x0008; //ATOM_DISPLAY_DFP1_CONNECT + } + break; + + case 2: + { + active = ATOM_S3_DFP2_ACTIVE; + connected = 0x0080; //ATOM_DISPLAY_DFP2_CONNECT + } + break; + + case 3: + { + active = ATOM_S3_DFP3_ACTIVE; + connected = 0x0200; //ATOM_DISPLAY_DFP3_CONNECT + } + break; + + case 4: + { + active = ATOM_S3_DFP4_ACTIVE; + connected = 0x0400; //ATOM_DISPLAY_DFP4_CONNECT + } + break; + + case 5: + { + 
active = ATOM_S3_DFP5_ACTIVE; + connected = 0x0800; //ATOM_DISPLAY_DFP5_CONNECT + } + break; + + case 6: + { + active = ATOM_S3_DFP6_ACTIVE; + connected = 0x0040; //ATOM_DISPLAY_DFP6_CONNECT + } + break; + + default: + break; + } + } + } + break; + + case SIGNAL_TYPE_LVDS: + case SIGNAL_TYPE_EDP: + { + active = ATOM_S3_LCD1_ACTIVE; + connected = 0x0002; //ATOM_DISPLAY_LCD1_CONNECT + } + break; + + default: + break; + } + + + if (bios->regs->BIOS_SCRATCH_0) /*follow up with other asic, todo*/ + bios_scratch_0 = REG_READ(BIOS_SCRATCH_0); + if (bios->regs->BIOS_SCRATCH_3) /*follow up with other asic, todo*/ + bios_scratch_3 = REG_READ(BIOS_SCRATCH_3); + + bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK; + if ((active & bios_scratch_3) && (connected & bios_scratch_0)) + return true; + + return false; +} + diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h index 75a29e68fb27..f33cac2147e3 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h @@ -35,6 +35,10 @@ bool bios_is_accelerated_mode(struct dc_bios *bios); void bios_set_scratch_acc_mode_change(struct dc_bios *bios); void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); +bool bios_is_active_display( + struct dc_bios *bios, + enum signal_type signal, + const struct connector_device_tag_info *device_tag); #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 3208188b7ed4..43e4a2be0fa6 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1423,27 +1423,27 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) ranges.num_reader_wm_sets = WM_SET_COUNT; ranges.num_writer_wm_sets = WM_SET_COUNT; ranges.reader_wm_sets[0].wm_inst = WM_A; - ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz; - ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive; - ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz; - ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive; + ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000; + ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; + ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000; + ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; ranges.writer_wm_sets[0].wm_inst = WM_A; - ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz; - ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive; - ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz; - ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive; + ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000; + ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; + ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000; + ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) { ranges.reader_wm_sets[0].wm_inst = WM_A; - ranges.reader_wm_sets[0].min_drain_clk_khz = 300000; - ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000; - ranges.reader_wm_sets[0].min_fill_clk_khz = 800000; - ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000; + ranges.reader_wm_sets[0].min_drain_clk_mhz = 300; + ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000; + 
ranges.reader_wm_sets[0].min_fill_clk_mhz = 800; + ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000; ranges.writer_wm_sets[0].wm_inst = WM_A; - ranges.writer_wm_sets[0].min_fill_clk_khz = 200000; - ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000; - ranges.writer_wm_sets[0].min_drain_clk_khz = 800000; - ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000; + ranges.writer_wm_sets[0].min_fill_clk_mhz = 200; + ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000; + ranges.writer_wm_sets[0].min_drain_clk_mhz = 800; + ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000; } ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0]; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 7c491c91465f..3279e26c3440 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -391,9 +391,11 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) == stream) { pipes = &dc->current_state->res_ctx.pipe_ctx[i]; - dc->hwss.program_csc_matrix(pipes, - stream->output_color_space, - stream->csc_color_matrix.matrix); + dc->hwss.program_output_csc(dc, + pipes, + stream->output_color_space, + stream->csc_color_matrix.matrix, + pipes->plane_res.hubp->opp_id); ret = true; } } @@ -941,7 +943,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c if (!dcb->funcs->is_accelerated_mode(dcb)) dc->hwss.enable_accelerated_mode(dc, context); - dc->hwss.set_bandwidth(dc, context, false); + dc->hwss.prepare_bandwidth(dc, context); /* re-program planes for existing stream, in case we need to * free up plane resource for later use @@ -957,8 +959,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c } /* Program hardware */ - dc->hwss.ready_shared_resources(dc, context); - for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); @@ -1012,7 +1012,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); /* pplib is notified if disp_num changed */ - dc->hwss.set_bandwidth(dc, context, true); + dc->hwss.optimize_bandwidth(dc, context); dc_release_state(dc->current_state); @@ -1020,8 +1020,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_retain_state(dc->current_state); - dc->hwss.optimize_shared_resources(dc); - return result; } @@ -1063,7 +1061,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) dc->optimized_required = false; - dc->hwss.set_bandwidth(dc, context, true); + dc->hwss.optimize_bandwidth(dc, context); return true; } @@ -1369,35 +1367,6 @@ static struct dc_stream_status *stream_get_status( static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; -static void notify_display_count_to_smu( - struct dc *dc, - struct dc_state *context) -{ - int i, display_count; - struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; - - /* - * if function pointer not set up, this message is - * sent as part of pplib_apply_display_requirements. - * So just return. 
- */ - if (!pp_smu || !pp_smu->set_display_count) - return; - - display_count = 0; - for (i = 0; i < context->stream_count; i++) { - const struct dc_stream_state *stream = context->streams[i]; - - /* only notify active stream */ - if (stream->dpms_off) - continue; - - display_count++; - } - - pp_smu->set_display_count(&pp_smu->pp_smu, display_count); -} - static void commit_planes_do_stream_update(struct dc *dc, struct dc_stream_state *stream, struct dc_stream_update *stream_update, @@ -1422,7 +1391,6 @@ static void commit_planes_do_stream_update(struct dc *dc, stream_update->adjust->v_total_max); if (stream_update->periodic_fn_vsync_delta && - pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, @@ -1448,19 +1416,13 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->dpms_off) { if (*stream_update->dpms_off) { core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE); - dc->hwss.pplib_apply_display_requirements( - dc, dc->current_state); - notify_display_count_to_smu(dc, dc->current_state); + dc->hwss.optimize_bandwidth(dc, dc->current_state); } else { - dc->hwss.pplib_apply_display_requirements( - dc, dc->current_state); - notify_display_count_to_smu(dc, dc->current_state); + dc->hwss.prepare_bandwidth(dc, dc->current_state); core_link_enable_stream(dc->current_state, pipe_ctx); } } - - if (stream_update->abm_level && pipe_ctx->stream_res.abm) { if (pipe_ctx->stream_res.tg->funcs->is_blanked) { // if otg funcs defined check if blanked before programming @@ -1487,7 +1449,7 @@ static void commit_planes_for_stream(struct dc *dc, struct pipe_ctx *top_pipe_to_program = NULL; if (update_type == UPDATE_TYPE_FULL) { - dc->hwss.set_bandwidth(dc, context, false); + dc->hwss.prepare_bandwidth(dc, context); context_clock_trace(dc, context); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index e1ebdf7b5eaf..73d049506618 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -311,7 +311,7 @@ void context_timing_trace( { int i; struct dc *core_dc = dc; - int h_pos[MAX_PIPES], v_pos[MAX_PIPES]; + int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; struct crtc_position position; unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; DC_LOGGER_INIT(dc->ctx->logger); @@ -322,8 +322,7 @@ void context_timing_trace( /* get_position() returns CRTC vertical/horizontal counter * hence not applicable for underlay pipe */ - if (pipe_ctx->stream == NULL - || pipe_ctx->pipe_idx == underlay_idx) + if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) continue; pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position); @@ -333,7 +332,7 @@ void context_timing_trace( for (i = 0; i < core_dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; - if (pipe_ctx->stream == NULL) + if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) continue; TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n", diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fb04a4ad141f..7ee9c033acbd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1357,28 +1357,13 @@ static enum dc_status enable_link_dp( struct dc_link *link = 
stream->sink->link; struct dc_link_settings link_settings = {0}; enum dp_panel_mode panel_mode; - enum dc_link_rate max_link_rate = LINK_RATE_HIGH2; /* get link settings for video mode timing */ decide_link_settings(stream, &link_settings); - /* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS - * logic for HBR3 still needs Nominal (0.8V) on VDDC rail - */ - if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE) - max_link_rate = LINK_RATE_HIGH3; - - if (link_settings.link_rate == max_link_rate) { - struct dc_clocks clocks = state->bw.dcn.clk; - - /* dce/dcn compat, do not update dispclk */ - clocks.dispclk_khz = 0; - /* 27mhz = 27000000hz= 27000khz */ - clocks.phyclk_khz = link_settings.link_rate * 27000; - - state->dis_clk->funcs->update_clocks( - state->dis_clk, &clocks, false); - } + pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = + link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; + state->dccg->funcs->update_clocks(state->dccg, state, false); dp_enable_link_phy( link, @@ -1722,7 +1707,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ - offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ @@ -1734,7 +1719,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ - offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ @@ -2156,14 +2141,16 @@ int dc_link_get_backlight_level(const struct dc_link *link) { struct abm *abm = link->ctx->dc->res_pool->abm; - if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL) + if (abm == NULL || abm->funcs->get_current_backlight == NULL) return DC_ERROR_UNEXPECTED; - return (int) abm->funcs->get_current_backlight_8_bit(abm); + return (int) abm->funcs->get_current_backlight(abm); } -bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, - uint32_t frame_ramp, const struct dc_stream_state *stream) +bool dc_link_set_backlight_level(const struct dc_link *link, + uint32_t backlight_pwm_u16_16, + uint32_t frame_ramp, + const struct dc_stream_state *stream) { struct dc *core_dc = link->ctx->dc; struct abm *abm = core_dc->res_pool->abm; @@ -2175,19 +2162,17 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, if ((dmcu == NULL) || (abm == NULL) || - (abm->funcs->set_backlight_level == NULL)) + (abm->funcs->set_backlight_level_pwm == NULL)) return false; - if (stream) { - if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL) - frame_ramp = 0; - - ((struct dc_stream_state *)stream)->bl_pwm_level = level; - } + if (stream) + ((struct dc_stream_state *)stream)->bl_pwm_level = + backlight_pwm_u16_16; use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); - DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level); + DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", + backlight_pwm_u16_16, backlight_pwm_u16_16); if (dc_is_embedded_signal(link->connector_signal)) { if (stream != NULL) { @@ -2204,9 +2189,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, 1; } } - 
abm->funcs->set_backlight_level( + abm->funcs->set_backlight_level_pwm( abm, - level, + backlight_pwm_u16_16, frame_ramp, controller_id, use_smooth_brightness); @@ -2220,7 +2205,7 @@ bool dc_link_set_abm_disable(const struct dc_link *link) struct dc *core_dc = link->ctx->dc; struct abm *abm = core_dc->res_pool->abm; - if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL)) + if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL)) return false; abm->funcs->set_abm_immediate_disable(abm); @@ -2609,6 +2594,10 @@ void core_link_enable_stream( core_dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->sink->link->cur_link_settings); + dc_link_set_backlight_level(pipe_ctx->stream->sink->link, + pipe_ctx->stream->bl_pwm_level, + 0, + pipe_ctx->stream); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index b6fe29b9fb65..fc65b0055167 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -499,8 +499,13 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; + /* - * Need to calculate the scan direction for viewport to properly determine offset + * We need take horizontal mirror into account. On an unrotated surface this means + * that the viewport offset is actually the offset from the other side of source + * image so we have to subtract the right edge of the viewport from the right edge of + * the source window. Similar to mirror we need to take into account how offset is + * affected for 270/180 rotations */ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { flip_vert_scan_dir = true; @@ -510,6 +515,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) flip_horz_scan_dir = true; + if (pipe_ctx->plane_state->horizontal_mirror) + flip_horz_scan_dir = !flip_horz_scan_dir; + if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { pri_split = false; @@ -540,45 +548,27 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ; /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio + * note: surf_src.ofs should be added after rotation/mirror offset direction + * adjustment since it is already in viewport space * num_pixels = clip.num_pix * scl_ratio */ - data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) * + data->viewport.x = (clip.x - plane_state->dst_rect.x) * surf_src.width / plane_state->dst_rect.width; data->viewport.width = clip.width * surf_src.width / plane_state->dst_rect.width; - data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) * + data->viewport.y = (clip.y - plane_state->dst_rect.y) * surf_src.height / plane_state->dst_rect.height; data->viewport.height = clip.height * surf_src.height / plane_state->dst_rect.height; - /* To transfer the x, y to correct coordinate on mirror image (camera). - * deg 0 : transfer x, - * deg 90 : don't need to transfer, - * deg180 : transfer y, - * deg270 : transfer x and y. - * To transfer the x, y to correct coordinate on non-mirror image (video). - * deg 0 : don't need to transfer, - * deg 90 : transfer y, - * deg180 : transfer x and y, - * deg270 : transfer x. 
- */ - if (pipe_ctx->plane_state->horizontal_mirror) { - if (flip_horz_scan_dir && !flip_vert_scan_dir) { - data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; - data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; - } else if (flip_horz_scan_dir && flip_vert_scan_dir) - data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; - else { - if (!flip_horz_scan_dir && !flip_vert_scan_dir) - data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; - } - } else { - if (flip_horz_scan_dir) - data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; - if (flip_vert_scan_dir) - data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; - } + if (flip_vert_scan_dir) + data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; + if (flip_horz_scan_dir) + data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; + + data->viewport.x += surf_src.x; + data->viewport.y += surf_src.y; /* Round down, compensate in init */ data->viewport_c.x = data->viewport.x / vpc_div; @@ -773,22 +763,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *r else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) flip_horz_scan_dir = true; + if (pipe_ctx->plane_state->horizontal_mirror) + flip_horz_scan_dir = !flip_horz_scan_dir; + if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { rect_swap_helper(&src); rect_swap_helper(&data->viewport_c); rect_swap_helper(&data->viewport); - - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 && - pipe_ctx->plane_state->horizontal_mirror) { - flip_vert_scan_dir = true; - } - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 && - pipe_ctx->plane_state->horizontal_mirror) { - flip_vert_scan_dir = false; - } - } else if (pipe_ctx->plane_state->horizontal_mirror) - flip_horz_scan_dir = !flip_horz_scan_dir; + } /* * Init calculated according to formula: @@ -1115,9 +1098,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( pipe_ctx->plane_state->format); - if (pipe_ctx->stream->timing.flags.INTERLACE) - pipe_ctx->stream->dst.height *= 2; - calculate_scaling_ratios(pipe_ctx); calculate_viewport(pipe_ctx); @@ -1138,9 +1118,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; - if (pipe_ctx->stream->timing.flags.INTERLACE) - pipe_ctx->plane_res.scl_data.v_active *= 2; - /* Taps calculations */ if (pipe_ctx->plane_res.xfm != NULL) @@ -1185,9 +1162,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) plane_state->dst_rect.x, plane_state->dst_rect.y); - if (pipe_ctx->stream->timing.flags.INTERLACE) - pipe_ctx->stream->dst.height /= 2; - return res; } @@ -2071,7 +2045,7 @@ void dc_resource_state_construct( const struct dc *dc, struct dc_state *dst_ctx) { - dst_ctx->dis_clk = dc->res_pool->dccg; + dst_ctx->dccg = dc->res_pool->clk_mgr; } enum dc_status dc_validate_global_state( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 2ac848a106ba..e113439aaa86 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -106,6 +106,7 @@ static void construct(struct dc_stream_state *stream, stream->out_transfer_func = dc_create_transfer_func(); stream->out_transfer_func->type = TF_TYPE_BYPASS; + stream->out_transfer_func->ctx = stream->ctx; } static void destruct(struct dc_stream_state *stream) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 8fb3aefd195c..c60c9b4c3075 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -44,6 +44,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state plane_state->in_transfer_func = dc_create_transfer_func(); plane_state->in_transfer_func->type = TF_TYPE_BYPASS; + plane_state->in_transfer_func->ctx = ctx; } static void destruct(struct dc_plane_state *plane_state) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 199527171100..d16a20c84792 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.68" +#define DC_VER "3.2.04" #define MAX_SURFACES 3 #define MAX_STREAMS 6 @@ -169,6 +169,7 @@ struct link_training_settings; struct dc_config { bool gpu_vm_support; bool disable_disp_pll_sharing; + bool fbc_support; }; enum visual_confirm { @@ -249,8 +250,6 @@ struct dc_debug_options { bool disable_dmcu; bool disable_psr; bool force_abm_enable; - bool disable_hbup_pg; - bool disable_dpp_pg; bool disable_stereo_support; bool vsr_support; bool performance_trace; @@ -304,11 +303,6 @@ struct dc { struct hw_sequencer_funcs hwss; struct dce_hwseq *hwseq; - /* temp store of dm_pp_display_configuration - * to compare to see if display config changed - */ - struct dm_pp_display_configuration prev_display_config; - bool optimized_required; /* FBC compressor */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 8130b95ccc53..a8b3cedf9431 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -86,6 +86,10 @@ struct dc_vbios_funcs { bool (*is_accelerated_mode)( struct dc_bios *bios); + bool (*is_active_display)( + struct dc_bios *bios, + enum signal_type signal, + const struct connector_device_tag_info *device_tag); void (*set_scratch_critical_state)( struct dc_bios *bios, bool state); @@ -141,6 +145,7 @@ struct dc_vbios_funcs { }; struct bios_registers { + uint32_t BIOS_SCRATCH_0; uint32_t BIOS_SCRATCH_3; uint32_t BIOS_SCRATCH_6; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 3bfdccceb524..8738f27a8708 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -138,9 +138,14 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ return dc->links[link_index]; } -/* Set backlight level of an embedded panel (eDP, LVDS). */ -bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level, - uint32_t frame_ramp, const struct dc_stream_state *stream); +/* Set backlight level of an embedded panel (eDP, LVDS). + * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer + * and 16 bit fractional, where 1.0 is max backlight value. 
+ */ +bool dc_link_set_backlight_level(const struct dc_link *dc_link, + uint32_t backlight_pwm_u16_16, + uint32_t frame_ramp, + const struct dc_stream_state *stream); int dc_link_get_backlight_level(const struct dc_link *dc_link); diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile index 8f7f0e8b341f..6d7b64a743ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile @@ -28,7 +28,7 @@ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ -dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ +dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 29294db1a96b..2a342eae80fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -54,7 +54,7 @@ #define MCP_DISABLE_ABM_IMMEDIATELY 255 -static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce) +static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce) { uint64_t current_backlight; uint32_t round_result; @@ -103,45 +103,21 @@ static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce) return (uint32_t)(current_backlight); } -static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) +static void driver_set_backlight_level(struct dce_abm *abm_dce, + uint32_t backlight_pwm_u16_16) { - uint32_t backlight_24bit; - uint32_t backlight_17bit; uint32_t backlight_16bit; uint32_t masked_pwm_period; - uint8_t rounding_bit; uint8_t bit_count; uint64_t active_duty_cycle; uint32_t pwm_period_bitcnt; /* - * 1. Convert 8-bit value to 17 bit U1.16 format - * (1 integer, 16 fractional bits) - */ - - /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value, - * effectively multiplying value by 256/255 - * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF - */ - backlight_24bit = level * 0x10101; - - /* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8 - * used for rounding, take most significant bit of fraction for - * rounding, e.g. for 0xEFEFEF, rounding bit is 1 - */ - rounding_bit = (backlight_24bit >> 7) & 1; - - /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit - * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1 - */ - backlight_17bit = (backlight_24bit >> 8) + rounding_bit; - - /* - * 2. Find 16 bit backlight active duty cycle, where 0 <= backlight + * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight * active duty cycle <= backlight period */ - /* 2.1 Apply bitmask for backlight period value based on value of BITCNT + /* 1.1 Apply bitmask for backlight period value based on value of BITCNT */ REG_GET_2(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt, @@ -155,13 +131,13 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */ masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1); - /* 2.2 Calculate integer active duty cycle required upper 16 bits + /* 1.2 Calculate integer active duty cycle required upper 16 bits * contain integer component, lower 16 bits contain fractional component * of active duty cycle e.g. 
0x21BDC0 = 0xEFF0 * 0x24 */ - active_duty_cycle = backlight_17bit * masked_pwm_period; + active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period; - /* 2.3 Calculate 16 bit active duty cycle from integer and fractional + /* 1.3 Calculate 16 bit active duty cycle from integer and fractional * components shift by bitCount then mask 16 bits and add rounding bit * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0 */ @@ -170,23 +146,23 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1; /* - * 3. Program register with updated value + * 2. Program register with updated value */ - /* 3.1 Lock group 2 backlight registers */ + /* 2.1 Lock group 2 backlight registers */ REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1, BL_PWM_GRP1_REG_LOCK, 1); - // 3.2 Write new active duty cycle + // 2.2 Write new active duty cycle REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit); - /* 3.3 Unlock group 2 backlight registers */ + /* 2.3 Unlock group 2 backlight registers */ REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); - /* 5.4.4 Wait for pending bit to be cleared */ + /* 3 Wait for pending bit to be cleared */ REG_WAIT(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, 0, 1, 10000); @@ -194,16 +170,21 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) static void dmcu_set_backlight_level( struct dce_abm *abm_dce, - uint32_t level, + uint32_t backlight_pwm_u16_16, uint32_t frame_ramp, uint32_t controller_id) { - unsigned int backlight_16_bit = (level * 0x10101) >> 8; - unsigned int backlight_17_bit = backlight_16_bit + - (((backlight_16_bit & 0x80) >> 7) & 1); + unsigned int backlight_8_bit = 0; uint32_t rampingBoundary = 0xFFFF; uint32_t s2; + if (backlight_pwm_u16_16 & 0x10000) + // Check for max backlight condition + backlight_8_bit = 0xFF; + else + // Take MSB of fractional part since backlight is not max + backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF; + /* set ramping boundary */ REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary); @@ -220,7 +201,7 @@ static void dmcu_set_backlight_level( 0, 1, 80000); /* setDMCUParam_BL */ - REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit); + REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16); /* write ramp */ if (controller_id == 0) @@ -237,9 +218,9 @@ static void dmcu_set_backlight_level( s2 = REG_READ(BIOS_SCRATCH_2); s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; - level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >> + backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >> ATOM_S2_CURRENT_BL_LEVEL_SHIFT); - s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); + s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); REG_WRITE(BIOS_SCRATCH_2, s2); } @@ -247,7 +228,7 @@ static void dmcu_set_backlight_level( static void dce_abm_init(struct abm *abm) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); - unsigned int backlight = get_current_backlight_16_bit(abm_dce); + unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce); REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103); REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101); @@ -284,12 +265,26 @@ static void dce_abm_init(struct abm *abm) ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); } -static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm) +static unsigned int dce_abm_get_current_backlight(struct abm *abm) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); unsigned int backlight = 
REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); - return (backlight >> 8); + /* return backlight in hardware format which is unsigned 17 bits, with + * 1 bit integer and 16 bit fractional + */ + return backlight; +} + +static unsigned int dce_abm_get_target_backlight(struct abm *abm) +{ + struct dce_abm *abm_dce = TO_DCE_ABM(abm); + unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); + + /* return backlight in hardware format which is unsigned 17 bits, with + * 1 bit integer and 16 bit fractional + */ + return backlight; } static bool dce_abm_set_level(struct abm *abm, uint32_t level) @@ -396,9 +391,9 @@ static bool dce_abm_init_backlight(struct abm *abm) return true; } -static bool dce_abm_set_backlight_level( +static bool dce_abm_set_backlight_level_pwm( struct abm *abm, - unsigned int backlight_level, + unsigned int backlight_pwm_u16_16, unsigned int frame_ramp, unsigned int controller_id, bool use_smooth_brightness) @@ -406,16 +401,16 @@ static bool dce_abm_set_backlight_level( struct dce_abm *abm_dce = TO_DCE_ABM(abm); DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", - backlight_level, backlight_level); + backlight_pwm_u16_16, backlight_pwm_u16_16); /* If DMCU is in reset state, DMCU is uninitialized */ if (use_smooth_brightness) dmcu_set_backlight_level(abm_dce, - backlight_level, + backlight_pwm_u16_16, frame_ramp, controller_id); else - driver_set_backlight_level(abm_dce, backlight_level); + driver_set_backlight_level(abm_dce, backlight_pwm_u16_16); return true; } @@ -424,8 +419,9 @@ static const struct abm_funcs dce_funcs = { .abm_init = dce_abm_init, .set_abm_level = dce_abm_set_level, .init_backlight = dce_abm_init_backlight, - .set_backlight_level = dce_abm_set_backlight_level, - .get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit, + .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm, + .get_current_backlight = dce_abm_get_current_backlight, + .get_target_backlight = dce_abm_get_target_backlight, .set_abm_immediate_disable = dce_abm_immediate_disable }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c new file mode 100644 index 000000000000..9a28a04417d1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -0,0 +1,879 @@ +/* + * Copyright 2012-16 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce_clk_mgr.h" + +#include "reg_helper.h" +#include "dmcu.h" +#include "core_types.h" +#include "dal_asic_id.h" + +#define TO_DCE_CLK_MGR(clocks)\ + container_of(clocks, struct dce_clk_mgr, base) + +#define REG(reg) \ + (clk_mgr_dce->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name + +#define CTX \ + clk_mgr_dce->base.ctx +#define DC_LOGGER \ + clk_mgr->ctx->logger + +/* Max clock values for each state indexed by "enum clocks_state": */ +static const struct state_dependent_clocks dce80_max_clks_by_state[] = { +/* ClocksStateInvalid - should not be used */ +{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, +/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */ +{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, +/* ClocksStateLow */ +{ .display_clk_khz = 352000, .pixel_clk_khz = 330000}, +/* ClocksStateNominal */ +{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, +/* ClocksStatePerformance */ +{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; + +static const struct state_dependent_clocks dce110_max_clks_by_state[] = { +/*ClocksStateInvalid - should not be used*/ +{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, +/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ +{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, +/*ClocksStateLow*/ +{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, +/*ClocksStateNominal*/ +{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 }, +/*ClocksStatePerformance*/ +{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } }; + +static const struct state_dependent_clocks dce112_max_clks_by_state[] = { +/*ClocksStateInvalid - should not be used*/ +{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, +/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ +{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 }, +/*ClocksStateLow*/ +{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 }, +/*ClocksStateNominal*/ +{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 }, +/*ClocksStatePerformance*/ +{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } }; + +static const struct state_dependent_clocks dce120_max_clks_by_state[] = { +/*ClocksStateInvalid - should not be used*/ +{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, +/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ +{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, +/*ClocksStateLow*/ +{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 }, +/*ClocksStateNominal*/ +{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 }, +/*ClocksStatePerformance*/ +{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; + +static int dentist_get_divider_from_did(int did) +{ + if (did < DENTIST_BASE_DID_1) + did = DENTIST_BASE_DID_1; + if (did > DENTIST_MAX_DID) + did = DENTIST_MAX_DID; + + if (did < DENTIST_BASE_DID_2) { + return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP + * (did - DENTIST_BASE_DID_1); + } else if (did < DENTIST_BASE_DID_3) { + return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP + * (did - DENTIST_BASE_DID_2); + } else if (did < DENTIST_BASE_DID_4) { + return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP + * (did - DENTIST_BASE_DID_3); + } else { + return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP + * (did - DENTIST_BASE_DID_4); + } +} + +/* SW will adjust DP REF Clock average value for all purposes + * (DP DTO / DP Audio DTO 
and DP GTC) + if clock is spread for all cases: + -if SS enabled on DP Ref clock and HW de-spreading enabled with SW + calculations for DS_INCR/DS_MODULO (this is planned to be default case) + -if SS enabled on DP Ref clock and HW de-spreading enabled with HW + calculations (not planned to be used, but average clock should still + be valid) + -if SS enabled on DP Ref clock and HW de-spreading disabled + (should not be case with CIK) then SW should program all rates + generated according to average value (case as with previous ASICs) + */ +static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz) +{ + if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) { + struct fixed31_32 ss_percentage = dc_fixpt_div_int( + dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage, + clk_mgr_dce->dprefclk_ss_divider), 200); + struct fixed31_32 adj_dp_ref_clk_khz; + + ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage); + adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz); + dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz); + } + return dp_ref_clk_khz; +} + +static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + int dprefclk_wdivider; + int dprefclk_src_sel; + int dp_ref_clk_khz = 600000; + int target_div; + + /* ASSERT DP Reference Clock source is from DFS*/ + REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); + ASSERT(dprefclk_src_sel == 0); + + /* Read the mmDENTIST_DISPCLK_CNTL to get the currently + * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); + + /* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider*/ + target_div = dentist_get_divider_from_did(dprefclk_wdivider); + + /* Calculate the current DFS clock, in kHz.*/ + dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr_dce->dentist_vco_freq_khz) / target_div; + + return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz); +} + +int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + + return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz); +} + +/* unit: in_khz before mode set, get pixel clock from context. ASIC register + * may not be programmed yet + */ +static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context) +{ + uint32_t max_pix_clk = 0; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream == NULL) + continue; + + /* do not check under lay */ + if (pipe_ctx->top_pipe) + continue; + + if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) + max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; + + /* raise clock state for HBR3/2 if required. 
Confirmed with HW DCE/DPCS + * logic for HBR3 still needs Nominal (0.8V) on VDDC rail + */ + if (dc_is_dp_signal(pipe_ctx->stream->signal) && + pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk) + max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk; + } + + return max_pix_clk; +} + +static enum dm_pp_clocks_state dce_get_required_clocks_state( + struct clk_mgr *clk_mgr, + struct dc_state *context) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + int i; + enum dm_pp_clocks_state low_req_clk; + int max_pix_clk = get_max_pixel_clock_for_all_paths(context); + + /* Iterate from highest supported to lowest valid state, and update + * lowest RequiredState with the lowest state that satisfies + * all required clocks + */ + for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--) + if (context->bw.dce.dispclk_khz > + clk_mgr_dce->max_clks_by_state[i].display_clk_khz + || max_pix_clk > + clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz) + break; + + low_req_clk = i + 1; + if (low_req_clk > clk_mgr_dce->max_clks_state) { + /* set max clock state for high phyclock, invalid on exceeding display clock */ + if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz + < context->bw.dce.dispclk_khz) + low_req_clk = DM_PP_CLOCKS_STATE_INVALID; + else + low_req_clk = clk_mgr_dce->max_clks_state; + } + + return low_req_clk; +} + +static int dce_set_clock( + struct clk_mgr *clk_mgr, + int requested_clk_khz) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + struct bp_pixel_clock_parameters pxl_clk_params = { 0 }; + struct dc_bios *bp = clk_mgr->ctx->dc_bios; + int actual_clock = requested_clk_khz; + struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu; + + /* Make sure requested clock isn't lower than minimum threshold*/ + if (requested_clk_khz > 0) + requested_clk_khz = max(requested_clk_khz, + clk_mgr_dce->dentist_vco_freq_khz / 64); + + /* Prepare to program display clock*/ + pxl_clk_params.target_pixel_clock = requested_clk_khz; + pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; + + if (clk_mgr_dce->dfs_bypass_active) + pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; + + bp->funcs->program_display_engine_pll(bp, &pxl_clk_params); + + if (clk_mgr_dce->dfs_bypass_active) { + /* Cache the fixed display clock*/ + clk_mgr_dce->dfs_bypass_disp_clk = + pxl_clk_params.dfs_bypass_display_clock; + actual_clock = pxl_clk_params.dfs_bypass_display_clock; + } + + /* from power down, we need mark the clock state as ClocksStateNominal + * from HWReset, so when resume we will call pplib voltage regulator.*/ + if (requested_clk_khz == 0) + clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; + + dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7); + + return actual_clock; +} + +int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + struct bp_set_dce_clock_parameters dce_clk_params; + struct dc_bios *bp = clk_mgr->ctx->dc_bios; + struct dc *core_dc = clk_mgr->ctx->dc; + struct dmcu *dmcu = core_dc->res_pool->dmcu; + int actual_clock = requested_clk_khz; + /* Prepare to program display clock*/ + memset(&dce_clk_params, 0, sizeof(dce_clk_params)); + + /* Make sure requested clock isn't lower than minimum threshold*/ + if (requested_clk_khz > 0) + requested_clk_khz = max(requested_clk_khz, + clk_mgr_dce->dentist_vco_freq_khz / 62); + + dce_clk_params.target_clock_frequency = requested_clk_khz; + 
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; + dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK; + + bp->funcs->set_dce_clock(bp, &dce_clk_params); + actual_clock = dce_clk_params.target_clock_frequency; + + /* from power down, we need mark the clock state as ClocksStateNominal + * from HWReset, so when resume we will call pplib voltage regulator.*/ + if (requested_clk_khz == 0) + clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; + + /*Program DP ref Clock*/ + /*VBIOS will determine DPREFCLK frequency, so we don't set it*/ + dce_clk_params.target_clock_frequency = 0; + dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK; + if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev)) + dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = + (dce_clk_params.pll_id == + CLOCK_SOURCE_COMBO_DISPLAY_PLL0); + else + dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false; + + bp->funcs->set_dce_clock(bp, &dce_clk_params); + + if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_clock / 1000 / 7); + } + + clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; + return actual_clock; +} + +static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce) +{ + struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug; + struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; + struct integrated_info info = { { { 0 } } }; + struct dc_firmware_info fw_info = { { 0 } }; + int i; + + if (bp->integrated_info) + info = *bp->integrated_info; + + clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq; + if (clk_mgr_dce->dentist_vco_freq_khz == 0) { + bp->funcs->get_firmware_info(bp, &fw_info); + clk_mgr_dce->dentist_vco_freq_khz = + fw_info.smu_gpu_pll_output_freq; + if (clk_mgr_dce->dentist_vco_freq_khz == 0) + clk_mgr_dce->dentist_vco_freq_khz = 3600000; + } + + /*update the maximum display clock for each power state*/ + for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { + enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID; + + switch (i) { + case 0: + clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW; + break; + + case 1: + clk_state = DM_PP_CLOCKS_STATE_LOW; + break; + + case 2: + clk_state = DM_PP_CLOCKS_STATE_NOMINAL; + break; + + case 3: + clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE; + break; + + default: + clk_state = DM_PP_CLOCKS_STATE_INVALID; + break; + } + + /*Do not allow bad VBIOS/SBIOS to override with invalid values, + * check for > 100MHz*/ + if (info.disp_clk_voltage[i].max_supported_clk >= 100000) + clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz = + info.disp_clk_voltage[i].max_supported_clk; + } + + if (!debug->disable_dfs_bypass && bp->integrated_info) + if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) + clk_mgr_dce->dfs_bypass_enabled = true; +} + +void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce) +{ + struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; + int ss_info_num = bp->funcs->get_ss_entry_number( + bp, AS_SIGNAL_TYPE_GPU_PLL); + + if (ss_info_num) { + struct spread_spectrum_info info = { { 0 } }; + enum bp_result result = bp->funcs->get_spread_spectrum_info( + bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info); + + /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS + * even if SS not enabled and in that case + * SSInfo.spreadSpectrumPercentage !=0 would be sign + * that SS is enabled + */ + if (result == BP_RESULT_OK && + info.spread_spectrum_percentage != 0) { + clk_mgr_dce->ss_on_dprefclk = true; 
+ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider; + + if (info.type.CENTER_MODE == 0) { + /* TODO: Currently for DP Reference clock we + * need only SS percentage for + * downspread */ + clk_mgr_dce->dprefclk_ss_percentage = + info.spread_spectrum_percentage; + } + + return; + } + + result = bp->funcs->get_spread_spectrum_info( + bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info); + + /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS + * even if SS not enabled and in that case + * SSInfo.spreadSpectrumPercentage !=0 would be sign + * that SS is enabled + */ + if (result == BP_RESULT_OK && + info.spread_spectrum_percentage != 0) { + clk_mgr_dce->ss_on_dprefclk = true; + clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider; + + if (info.type.CENTER_MODE == 0) { + /* Currently for DP Reference clock we + * need only SS percentage for + * downspread */ + clk_mgr_dce->dprefclk_ss_percentage = + info.spread_spectrum_percentage; + } + } + } +} + +void dce110_fill_display_configs( + const struct dc_state *context, + struct dm_pp_display_configuration *pp_display_cfg) +{ + int j; + int num_cfgs = 0; + + for (j = 0; j < context->stream_count; j++) { + int k; + + const struct dc_stream_state *stream = context->streams[j]; + struct dm_pp_single_disp_config *cfg = + &pp_display_cfg->disp_configs[num_cfgs]; + const struct pipe_ctx *pipe_ctx = NULL; + + for (k = 0; k < MAX_PIPES; k++) + if (stream == context->res_ctx.pipe_ctx[k].stream) { + pipe_ctx = &context->res_ctx.pipe_ctx[k]; + break; + } + + ASSERT(pipe_ctx != NULL); + + /* only notify active stream */ + if (stream->dpms_off) + continue; + + num_cfgs++; + cfg->signal = pipe_ctx->stream->signal; + cfg->pipe_idx = pipe_ctx->stream_res.tg->inst; + cfg->src_height = stream->src.height; + cfg->src_width = stream->src.width; + cfg->ddi_channel_mapping = + stream->sink->link->ddi_channel_mapping.raw; + cfg->transmitter = + stream->sink->link->link_enc->transmitter; + cfg->link_settings.lane_count = + stream->sink->link->cur_link_settings.lane_count; + cfg->link_settings.link_rate = + stream->sink->link->cur_link_settings.link_rate; + cfg->link_settings.link_spread = + stream->sink->link->cur_link_settings.link_spread; + cfg->sym_clock = stream->phy_pix_clk; + /* Round v_refresh*/ + cfg->v_refresh = stream->timing.pix_clk_khz * 1000; + cfg->v_refresh /= stream->timing.h_total; + cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) + / stream->timing.v_total; + } + + pp_display_cfg->display_count = num_cfgs; +} + +static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) +{ + uint8_t j; + uint32_t min_vertical_blank_time = -1; + + for (j = 0; j < context->stream_count; j++) { + struct dc_stream_state *stream = context->streams[j]; + uint32_t vertical_blank_in_pixels = 0; + uint32_t vertical_blank_time = 0; + + vertical_blank_in_pixels = stream->timing.h_total * + (stream->timing.v_total + - stream->timing.v_addressable); + + vertical_blank_time = vertical_blank_in_pixels + * 1000 / stream->timing.pix_clk_khz; + + if (min_vertical_blank_time > vertical_blank_time) + min_vertical_blank_time = vertical_blank_time; + } + + return min_vertical_blank_time; +} + +static int determine_sclk_from_bounding_box( + const struct dc *dc, + int required_sclk) +{ + int i; + + /* + * Some asics do not give us sclk levels, so we just report the actual + * required sclk + */ + if (dc->sclk_lvls.num_levels == 0) + return required_sclk; + + for (i = 0; i < dc->sclk_lvls.num_levels; i++) { + if 
(dc->sclk_lvls.clocks_in_khz[i] >= required_sclk) + return dc->sclk_lvls.clocks_in_khz[i]; + } + /* + * even maximum level could not satisfy requirement, this + * is unexpected at this stage, should have been caught at + * validation time + */ + ASSERT(0); + return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1]; +} + +static void dce_pplib_apply_display_requirements( + struct dc *dc, + struct dc_state *context) +{ + struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; + + pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); + + dce110_fill_display_configs(context, pp_display_cfg); + + if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) + dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); +} + +static void dce11_pplib_apply_display_requirements( + struct dc *dc, + struct dc_state *context) +{ + struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; + + pp_display_cfg->all_displays_in_sync = + context->bw.dce.all_displays_in_sync; + pp_display_cfg->nb_pstate_switch_disable = + context->bw.dce.nbp_state_change_enable == false; + pp_display_cfg->cpu_cc6_disable = + context->bw.dce.cpuc_state_change_enable == false; + pp_display_cfg->cpu_pstate_disable = + context->bw.dce.cpup_state_change_enable == false; + pp_display_cfg->cpu_pstate_separation_time = + context->bw.dce.blackout_recovery_time_us; + + pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz + / MEMORY_TYPE_MULTIPLIER_CZ; + + pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( + dc, + context->bw.dce.sclk_khz); + + pp_display_cfg->min_engine_clock_deep_sleep_khz + = context->bw.dce.sclk_deep_sleep_khz; + + pp_display_cfg->avail_mclk_switch_time_us = + dce110_get_min_vblank_time_us(context); + /* TODO: dce11.2*/ + pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; + + pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz; + + dce110_fill_display_configs(context, pp_display_cfg); + + /* TODO: is this still applicable?*/ + if (pp_display_cfg->display_count == 1) { + const struct dc_crtc_timing *timing = + &context->streams[0]->timing; + + pp_display_cfg->crtc_index = + pp_display_cfg->disp_configs[0].pipe_idx; + pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz; + } + + if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) + dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); +} + +static void dce_update_clocks(struct clk_mgr *clk_mgr, + struct dc_state *context, + bool safe_to_lower) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + struct dm_pp_power_level_change_request level_change_req; + int unpatched_disp_clk = context->bw.dce.dispclk_khz; + + /*TODO: W/A for dal3 linux, investigate why this works */ + if (!clk_mgr_dce->dfs_bypass_active) + context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; + + level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); + /* get max clock state from PPLIB */ + if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) + || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { + if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) + clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; + } + + if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { + 
context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); + clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + } + dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); + + context->bw.dce.dispclk_khz = unpatched_disp_clk; +} + +static void dce11_update_clocks(struct clk_mgr *clk_mgr, + struct dc_state *context, + bool safe_to_lower) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + struct dm_pp_power_level_change_request level_change_req; + + level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); + /* get max clock state from PPLIB */ + if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) + || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { + if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) + clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; + } + + if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { + context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); + clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + } + dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); +} + +static void dce112_update_clocks(struct clk_mgr *clk_mgr, + struct dc_state *context, + bool safe_to_lower) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + struct dm_pp_power_level_change_request level_change_req; + + level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); + /* get max clock state from PPLIB */ + if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) + || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { + if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) + clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; + } + + if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { + context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); + clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + } + dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); +} + +static void dce12_update_clocks(struct clk_mgr *clk_mgr, + struct dc_state *context, + bool safe_to_lower) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; + int max_pix_clk = get_max_pixel_clock_for_all_paths(context); + int unpatched_disp_clk = context->bw.dce.dispclk_khz; + + /*TODO: W/A for dal3 linux, investigate why this works */ + if (!clk_mgr_dce->dfs_bypass_active) + context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; + + if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { + clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; + clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz; + context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); + clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + + dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); + } + + if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) { + clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK; + clock_voltage_req.clocks_in_khz = max_pix_clk; + clk_mgr->clks.phyclk_khz = max_pix_clk; + + dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); + } + 
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); + + context->bw.dce.dispclk_khz = unpatched_disp_clk; +} + +static const struct clk_mgr_funcs dce120_funcs = { + .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .update_clocks = dce12_update_clocks +}; + +static const struct clk_mgr_funcs dce112_funcs = { + .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, + .update_clocks = dce112_update_clocks +}; + +static const struct clk_mgr_funcs dce110_funcs = { + .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, + .update_clocks = dce11_update_clocks, +}; + +static const struct clk_mgr_funcs dce_funcs = { + .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, + .update_clocks = dce_update_clocks +}; + +static void dce_clk_mgr_construct( + struct dce_clk_mgr *clk_mgr_dce, + struct dc_context *ctx, + const struct clk_mgr_registers *regs, + const struct clk_mgr_shift *clk_shift, + const struct clk_mgr_mask *clk_mask) +{ + struct clk_mgr *base = &clk_mgr_dce->base; + struct dm_pp_static_clock_info static_clk_info = {0}; + + base->ctx = ctx; + base->funcs = &dce_funcs; + + clk_mgr_dce->regs = regs; + clk_mgr_dce->clk_mgr_shift = clk_shift; + clk_mgr_dce->clk_mgr_mask = clk_mask; + + clk_mgr_dce->dfs_bypass_disp_clk = 0; + + clk_mgr_dce->dprefclk_ss_percentage = 0; + clk_mgr_dce->dprefclk_ss_divider = 1000; + clk_mgr_dce->ss_on_dprefclk = false; + + + if (dm_pp_get_static_clocks(ctx, &static_clk_info)) + clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state; + else + clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; + clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID; + + dce_clock_read_integrated_info(clk_mgr_dce); + dce_clock_read_ss_info(clk_mgr_dce); +} + +struct clk_mgr *dce_clk_mgr_create( + struct dc_context *ctx, + const struct clk_mgr_registers *regs, + const struct clk_mgr_shift *clk_shift, + const struct clk_mgr_mask *clk_mask) +{ + struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); + + if (clk_mgr_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + memcpy(clk_mgr_dce->max_clks_by_state, + dce80_max_clks_by_state, + sizeof(dce80_max_clks_by_state)); + + dce_clk_mgr_construct( + clk_mgr_dce, ctx, regs, clk_shift, clk_mask); + + return &clk_mgr_dce->base; +} + +struct clk_mgr *dce110_clk_mgr_create( + struct dc_context *ctx, + const struct clk_mgr_registers *regs, + const struct clk_mgr_shift *clk_shift, + const struct clk_mgr_mask *clk_mask) +{ + struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); + + if (clk_mgr_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + memcpy(clk_mgr_dce->max_clks_by_state, + dce110_max_clks_by_state, + sizeof(dce110_max_clks_by_state)); + + dce_clk_mgr_construct( + clk_mgr_dce, ctx, regs, clk_shift, clk_mask); + + clk_mgr_dce->base.funcs = &dce110_funcs; + + return &clk_mgr_dce->base; +} + +struct clk_mgr *dce112_clk_mgr_create( + struct dc_context *ctx, + const struct clk_mgr_registers *regs, + const struct clk_mgr_shift *clk_shift, + const struct clk_mgr_mask *clk_mask) +{ + struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); + + if (clk_mgr_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + memcpy(clk_mgr_dce->max_clks_by_state, + dce112_max_clks_by_state, + sizeof(dce112_max_clks_by_state)); + + dce_clk_mgr_construct( + clk_mgr_dce, ctx, regs, clk_shift, clk_mask); + + clk_mgr_dce->base.funcs = &dce112_funcs; + + return &clk_mgr_dce->base; +} + +struct clk_mgr *dce120_clk_mgr_create(struct dc_context 
*ctx) +{ + struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); + + if (clk_mgr_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + memcpy(clk_mgr_dce->max_clks_by_state, + dce120_max_clks_by_state, + sizeof(dce120_max_clks_by_state)); + + dce_clk_mgr_construct( + clk_mgr_dce, ctx, NULL, NULL, NULL); + + clk_mgr_dce->dprefclk_khz = 600000; + clk_mgr_dce->base.funcs = &dce120_funcs; + + return &clk_mgr_dce->base; +} + +void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr); + + kfree(clk_mgr_dce); + *clk_mgr = NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h index 34fdb386c884..046077797416 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h @@ -24,10 +24,13 @@ */ -#ifndef _DCE_CLOCKS_H_ -#define _DCE_CLOCKS_H_ +#ifndef _DCE_CLK_MGR_H_ +#define _DCE_CLK_MGR_H_ -#include "display_clock.h" +#include "clk_mgr.h" +#include "dccg.h" + +#define MEMORY_TYPE_MULTIPLIER_CZ 4 #define CLK_COMMON_REG_LIST_DCE_BASE() \ .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \ @@ -53,24 +56,31 @@ type DENTIST_DISPCLK_WDIVIDER; \ type DENTIST_DISPCLK_CHG_DONE; -struct dccg_shift { +struct clk_mgr_shift { CLK_REG_FIELD_LIST(uint8_t) }; -struct dccg_mask { +struct clk_mgr_mask { CLK_REG_FIELD_LIST(uint32_t) }; -struct dccg_registers { +struct clk_mgr_registers { uint32_t DPREFCLK_CNTL; uint32_t DENTIST_DISPCLK_CNTL; }; -struct dce_dccg { - struct dccg base; - const struct dccg_registers *regs; - const struct dccg_shift *clk_shift; - const struct dccg_mask *clk_mask; +struct state_dependent_clocks { + int display_clk_khz; + int pixel_clk_khz; +}; + +struct dce_clk_mgr { + struct clk_mgr base; + const struct clk_mgr_registers *regs; + const struct clk_mgr_shift *clk_mgr_shift; + const struct clk_mgr_mask *clk_mgr_mask; + + struct dccg *dccg; struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES]; @@ -91,33 +101,68 @@ struct dce_dccg { /* DPREFCLK SS percentage Divider (100 or 1000) */ int dprefclk_ss_divider; int dprefclk_khz; + + enum dm_pp_clocks_state max_clks_state; + enum dm_pp_clocks_state cur_min_clks_state; }; +/* Starting DID for each range */ +enum dentist_base_divider_id { + DENTIST_BASE_DID_1 = 0x08, + DENTIST_BASE_DID_2 = 0x40, + DENTIST_BASE_DID_3 = 0x60, + DENTIST_BASE_DID_4 = 0x7e, + DENTIST_MAX_DID = 0x7f +}; -struct dccg *dce_dccg_create( - struct dc_context *ctx, - const struct dccg_registers *regs, - const struct dccg_shift *clk_shift, - const struct dccg_mask *clk_mask); +/* Starting point and step size for each divider range.*/ +enum dentist_divider_range { + DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */ + DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */ + DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */ + DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */ + DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */ + DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */ + DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */ + DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */ + DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4 +}; + +static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk) +{ + return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk); +} + +void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce); + +int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg); -struct dccg *dce110_dccg_create( +void dce110_fill_display_configs( + const struct dc_state *context, + 
struct dm_pp_display_configuration *pp_display_cfg); + +int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz); + +struct clk_mgr *dce_clk_mgr_create( struct dc_context *ctx, - const struct dccg_registers *regs, - const struct dccg_shift *clk_shift, - const struct dccg_mask *clk_mask); + const struct clk_mgr_registers *regs, + const struct clk_mgr_shift *clk_shift, + const struct clk_mgr_mask *clk_mask); -struct dccg *dce112_dccg_create( +struct clk_mgr *dce110_clk_mgr_create( struct dc_context *ctx, - const struct dccg_registers *regs, - const struct dccg_shift *clk_shift, - const struct dccg_mask *clk_mask); + const struct clk_mgr_registers *regs, + const struct clk_mgr_shift *clk_shift, + const struct clk_mgr_mask *clk_mask); -struct dccg *dce120_dccg_create(struct dc_context *ctx); +struct clk_mgr *dce112_clk_mgr_create( + struct dc_context *ctx, + const struct clk_mgr_registers *regs, + const struct clk_mgr_shift *clk_shift, + const struct clk_mgr_mask *clk_mask); -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 -struct dccg *dcn1_dccg_create(struct dc_context *ctx); -#endif +struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx); -void dce_dccg_destroy(struct dccg **dccg); +void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr); -#endif /* _DCE_CLOCKS_H_ */ +#endif /* _DCE_CLK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c deleted file mode 100644 index d89a097ba936..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ /dev/null @@ -1,947 +0,0 @@ -/* - * Copyright 2012-16 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dce_clocks.h" -#include "dm_services.h" -#include "reg_helper.h" -#include "fixed31_32.h" -#include "bios_parser_interface.h" -#include "dc.h" -#include "dmcu.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) -#include "dcn_calcs.h" -#endif -#include "core_types.h" -#include "dc_types.h" -#include "dal_asic_id.h" - -#define TO_DCE_CLOCKS(clocks)\ - container_of(clocks, struct dce_dccg, base) - -#define REG(reg) \ - (clk_dce->regs->reg) - -#undef FN -#define FN(reg_name, field_name) \ - clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name - -#define CTX \ - clk_dce->base.ctx -#define DC_LOGGER \ - clk->ctx->logger - -/* Max clock values for each state indexed by "enum clocks_state": */ -static const struct state_dependent_clocks dce80_max_clks_by_state[] = { -/* ClocksStateInvalid - should not be used */ -{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, -/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */ -{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, -/* ClocksStateLow */ -{ .display_clk_khz = 352000, .pixel_clk_khz = 330000}, -/* ClocksStateNominal */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, -/* ClocksStatePerformance */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; - -static const struct state_dependent_clocks dce110_max_clks_by_state[] = { -/*ClocksStateInvalid - should not be used*/ -{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, -/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ -{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, -/*ClocksStateLow*/ -{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, -/*ClocksStateNominal*/ -{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 }, -/*ClocksStatePerformance*/ -{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } }; - -static const struct state_dependent_clocks dce112_max_clks_by_state[] = { -/*ClocksStateInvalid - should not be used*/ -{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, -/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ -{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 }, -/*ClocksStateLow*/ -{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 }, -/*ClocksStateNominal*/ -{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 }, -/*ClocksStatePerformance*/ -{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } }; - -static const struct state_dependent_clocks dce120_max_clks_by_state[] = { -/*ClocksStateInvalid - should not be used*/ -{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, -/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ -{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, -/*ClocksStateLow*/ -{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 }, -/*ClocksStateNominal*/ -{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 }, -/*ClocksStatePerformance*/ -{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; - -/* Starting DID for each range */ -enum dentist_base_divider_id { - DENTIST_BASE_DID_1 = 0x08, - DENTIST_BASE_DID_2 = 0x40, - DENTIST_BASE_DID_3 = 0x60, - DENTIST_BASE_DID_4 = 0x7e, - DENTIST_MAX_DID = 0x7f -}; - -/* Starting point and step size for each divider range.*/ -enum dentist_divider_range { - DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */ - DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */ - DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */ - DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */ - DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */ - DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */ - DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 
*/ - DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */ - DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4 -}; - -static int dentist_get_divider_from_did(int did) -{ - if (did < DENTIST_BASE_DID_1) - did = DENTIST_BASE_DID_1; - if (did > DENTIST_MAX_DID) - did = DENTIST_MAX_DID; - - if (did < DENTIST_BASE_DID_2) { - return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP - * (did - DENTIST_BASE_DID_1); - } else if (did < DENTIST_BASE_DID_3) { - return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP - * (did - DENTIST_BASE_DID_2); - } else if (did < DENTIST_BASE_DID_4) { - return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP - * (did - DENTIST_BASE_DID_3); - } else { - return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP - * (did - DENTIST_BASE_DID_4); - } -} - -/* SW will adjust DP REF Clock average value for all purposes - * (DP DTO / DP Audio DTO and DP GTC) - if clock is spread for all cases: - -if SS enabled on DP Ref clock and HW de-spreading enabled with SW - calculations for DS_INCR/DS_MODULO (this is planned to be default case) - -if SS enabled on DP Ref clock and HW de-spreading enabled with HW - calculations (not planned to be used, but average clock should still - be valid) - -if SS enabled on DP Ref clock and HW de-spreading disabled - (should not be case with CIK) then SW should program all rates - generated according to average value (case as with previous ASICs) - */ -static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz) -{ - if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) { - struct fixed31_32 ss_percentage = dc_fixpt_div_int( - dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage, - clk_dce->dprefclk_ss_divider), 200); - struct fixed31_32 adj_dp_ref_clk_khz; - - ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage); - adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz); - dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz); - } - return dp_ref_clk_khz; -} - -static int dce_get_dp_ref_freq_khz(struct dccg *clk) -{ - struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); - int dprefclk_wdivider; - int dprefclk_src_sel; - int dp_ref_clk_khz = 600000; - int target_div; - - /* ASSERT DP Reference Clock source is from DFS*/ - REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); - ASSERT(dprefclk_src_sel == 0); - - /* Read the mmDENTIST_DISPCLK_CNTL to get the currently - * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ - REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); - - /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ - target_div = dentist_get_divider_from_did(dprefclk_wdivider); - - /* Calculate the current DFS clock, in kHz.*/ - dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_dce->dentist_vco_freq_khz) / target_div; - - return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz); -} - -static int dce12_get_dp_ref_freq_khz(struct dccg *clk) -{ - struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); - - return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz); -} - -static enum dm_pp_clocks_state dce_get_required_clocks_state( - struct dccg *clk, - struct dc_clocks *req_clocks) -{ - struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); - int i; - enum dm_pp_clocks_state low_req_clk; - - /* Iterate from highest supported to lowest valid state, and update - * lowest RequiredState with the lowest state that satisfies - * all required clocks - */ - for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--) 
- if (req_clocks->dispclk_khz > - clk_dce->max_clks_by_state[i].display_clk_khz - || req_clocks->phyclk_khz > - clk_dce->max_clks_by_state[i].pixel_clk_khz) - break; - - low_req_clk = i + 1; - if (low_req_clk > clk->max_clks_state) { - /* set max clock state for high phyclock, invalid on exceeding display clock */ - if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz - < req_clocks->dispclk_khz) - low_req_clk = DM_PP_CLOCKS_STATE_INVALID; - else - low_req_clk = clk->max_clks_state; - } - - return low_req_clk; -} - -static int dce_set_clock( - struct dccg *clk, - int requested_clk_khz) -{ - struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); - struct bp_pixel_clock_parameters pxl_clk_params = { 0 }; - struct dc_bios *bp = clk->ctx->dc_bios; - int actual_clock = requested_clk_khz; - - /* Make sure requested clock isn't lower than minimum threshold*/ - if (requested_clk_khz > 0) - requested_clk_khz = max(requested_clk_khz, - clk_dce->dentist_vco_freq_khz / 64); - - /* Prepare to program display clock*/ - pxl_clk_params.target_pixel_clock = requested_clk_khz; - pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; - - if (clk_dce->dfs_bypass_active) - pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; - - bp->funcs->program_display_engine_pll(bp, &pxl_clk_params); - - if (clk_dce->dfs_bypass_active) { - /* Cache the fixed display clock*/ - clk_dce->dfs_bypass_disp_clk = - pxl_clk_params.dfs_bypass_display_clock; - actual_clock = pxl_clk_params.dfs_bypass_display_clock; - } - - /* from power down, we need mark the clock state as ClocksStateNominal - * from HWReset, so when resume we will call pplib voltage regulator.*/ - if (requested_clk_khz == 0) - clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - return actual_clock; -} - -static int dce_psr_set_clock( - struct dccg *clk, - int requested_clk_khz) -{ - struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); - struct dc_context *ctx = clk_dce->base.ctx; - struct dc *core_dc = ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; - int actual_clk_khz = requested_clk_khz; - - actual_clk_khz = dce_set_clock(clk, requested_clk_khz); - - dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7); - return actual_clk_khz; -} - -static int dce112_set_clock( - struct dccg *clk, - int requested_clk_khz) -{ - struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); - struct bp_set_dce_clock_parameters dce_clk_params; - struct dc_bios *bp = clk->ctx->dc_bios; - struct dc *core_dc = clk->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; - int actual_clock = requested_clk_khz; - /* Prepare to program display clock*/ - memset(&dce_clk_params, 0, sizeof(dce_clk_params)); - - /* Make sure requested clock isn't lower than minimum threshold*/ - if (requested_clk_khz > 0) - requested_clk_khz = max(requested_clk_khz, - clk_dce->dentist_vco_freq_khz / 62); - - dce_clk_params.target_clock_frequency = requested_clk_khz; - dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; - dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK; - - bp->funcs->set_dce_clock(bp, &dce_clk_params); - actual_clock = dce_clk_params.target_clock_frequency; - - /* from power down, we need mark the clock state as ClocksStateNominal - * from HWReset, so when resume we will call pplib voltage regulator.*/ - if (requested_clk_khz == 0) - clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - - /*Program DP ref Clock*/ - /*VBIOS will determine DPREFCLK frequency, so we don't set it*/ - dce_clk_params.target_clock_frequency = 0; - dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK; - if 
(!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev)) - dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = - (dce_clk_params.pll_id == - CLOCK_SOURCE_COMBO_DISPLAY_PLL0); - else - dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false; - - bp->funcs->set_dce_clock(bp, &dce_clk_params); - - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { - if (clk_dce->dfs_bypass_disp_clk != actual_clock) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_clock / 1000 / 7); - } - - clk_dce->dfs_bypass_disp_clk = actual_clock; - return actual_clock; -} - -static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce) -{ - struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug; - struct dc_bios *bp = clk_dce->base.ctx->dc_bios; - struct integrated_info info = { { { 0 } } }; - struct dc_firmware_info fw_info = { { 0 } }; - int i; - - if (bp->integrated_info) - info = *bp->integrated_info; - - clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq; - if (clk_dce->dentist_vco_freq_khz == 0) { - bp->funcs->get_firmware_info(bp, &fw_info); - clk_dce->dentist_vco_freq_khz = - fw_info.smu_gpu_pll_output_freq; - if (clk_dce->dentist_vco_freq_khz == 0) - clk_dce->dentist_vco_freq_khz = 3600000; - } - - /*update the maximum display clock for each power state*/ - for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { - enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID; - - switch (i) { - case 0: - clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW; - break; - - case 1: - clk_state = DM_PP_CLOCKS_STATE_LOW; - break; - - case 2: - clk_state = DM_PP_CLOCKS_STATE_NOMINAL; - break; - - case 3: - clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE; - break; - - default: - clk_state = DM_PP_CLOCKS_STATE_INVALID; - break; - } - - /*Do not allow bad VBIOS/SBIOS to override with invalid values, - * check for > 100MHz*/ - if (info.disp_clk_voltage[i].max_supported_clk >= 100000) - clk_dce->max_clks_by_state[clk_state].display_clk_khz = - info.disp_clk_voltage[i].max_supported_clk; - } - - if (!debug->disable_dfs_bypass && bp->integrated_info) - if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) - clk_dce->dfs_bypass_enabled = true; -} - -static void dce_clock_read_ss_info(struct dce_dccg *clk_dce) -{ - struct dc_bios *bp = clk_dce->base.ctx->dc_bios; - int ss_info_num = bp->funcs->get_ss_entry_number( - bp, AS_SIGNAL_TYPE_GPU_PLL); - - if (ss_info_num) { - struct spread_spectrum_info info = { { 0 } }; - enum bp_result result = bp->funcs->get_spread_spectrum_info( - bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info); - - /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS - * even if SS not enabled and in that case - * SSInfo.spreadSpectrumPercentage !=0 would be sign - * that SS is enabled - */ - if (result == BP_RESULT_OK && - info.spread_spectrum_percentage != 0) { - clk_dce->ss_on_dprefclk = true; - clk_dce->dprefclk_ss_divider = info.spread_percentage_divider; - - if (info.type.CENTER_MODE == 0) { - /* TODO: Currently for DP Reference clock we - * need only SS percentage for - * downspread */ - clk_dce->dprefclk_ss_percentage = - info.spread_spectrum_percentage; - } - - return; - } - - result = bp->funcs->get_spread_spectrum_info( - bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info); - - /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS - * even if SS not enabled and in that case - * SSInfo.spreadSpectrumPercentage !=0 would be sign - * that SS is enabled - */ - if (result == BP_RESULT_OK && - info.spread_spectrum_percentage != 0) { - clk_dce->ss_on_dprefclk = true; - clk_dce->dprefclk_ss_divider = 
info.spread_percentage_divider; - - if (info.type.CENTER_MODE == 0) { - /* Currently for DP Reference clock we - * need only SS percentage for - * downspread */ - clk_dce->dprefclk_ss_percentage = - info.spread_spectrum_percentage; - } - } - } -} - -static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk) -{ - return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk); -} - -static void dce12_update_clocks(struct dccg *dccg, - struct dc_clocks *new_clocks, - bool safe_to_lower) -{ - struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; - - /* TODO: Investigate why this is needed to fix display corruption. */ - new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; - - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { - clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; - clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz; - new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); - dccg->clks.dispclk_khz = new_clocks->dispclk_khz; - - dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); - } - - if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) { - clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK; - clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz; - dccg->clks.phyclk_khz = new_clocks->phyclk_khz; - - dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); - } -} - -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 -static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks) -{ - bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; - bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz; - int disp_clk_threshold = new_clocks->max_supported_dppclk_khz; - bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz; - - /* increase clock, looking for div is 0 for current, request div is 1*/ - if (dispclk_increase) { - /* already divided by 2, no need to reach target clk with 2 steps*/ - if (cur_dpp_div) - return new_clocks->dispclk_khz; - - /* request disp clk is lower than maximum supported dpp clk, - * no need to reach target clk with two steps. - */ - if (new_clocks->dispclk_khz <= disp_clk_threshold) - return new_clocks->dispclk_khz; - - /* target dpp clk not request divided by 2, still within threshold */ - if (!request_dpp_div) - return new_clocks->dispclk_khz; - - } else { - /* decrease clock, looking for current dppclk divided by 2, - * request dppclk not divided by 2. 
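A quick illustration of the should_set_clock() policy used by the update_clocks() paths here: increases are applied immediately, decreases only once the caller passes safe_to_lower. The clock values below are made up.

#include <stdbool.h>
#include <stdio.h>

/* same predicate as the driver helper above */
static bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	return (safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk;
}

int main(void)
{
	int cur_khz = 600000;

	printf("%d\n", should_set_clock(false, 650000, cur_khz)); /* 1: raise right away */
	printf("%d\n", should_set_clock(false, 400000, cur_khz)); /* 0: hold the drop */
	printf("%d\n", should_set_clock(true,  400000, cur_khz)); /* 1: safe_to_lower */
	return 0;
}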
- */ - - /* current dpp clk not divided by 2, no need to ramp*/ - if (!cur_dpp_div) - return new_clocks->dispclk_khz; - - /* current disp clk is lower than current maximum dpp clk, - * no need to ramp - */ - if (dccg->clks.dispclk_khz <= disp_clk_threshold) - return new_clocks->dispclk_khz; - - /* request dpp clk need to be divided by 2 */ - if (request_dpp_div) - return new_clocks->dispclk_khz; - } - - return disp_clk_threshold; -} - -static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks) -{ - struct dc *dc = dccg->ctx->dc; - int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks); - bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; - int i; - - /* set disp clk to dpp clk threshold */ - dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold); - - /* update request dpp clk division option */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; - - if (!pipe_ctx->plane_state) - continue; - - pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control( - pipe_ctx->plane_res.dpp, - request_dpp_div, - true); - } - - /* If target clk not same as dppclk threshold, set to target clock */ - if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); - - dccg->clks.dispclk_khz = new_clocks->dispclk_khz; - dccg->clks.dppclk_khz = new_clocks->dppclk_khz; - dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz; -} - -static void dcn1_update_clocks(struct dccg *dccg, - struct dc_clocks *new_clocks, - bool safe_to_lower) -{ - struct dc *dc = dccg->ctx->dc; - struct pp_smu_display_requirement_rv *smu_req_cur = - &dc->res_pool->pp_smu_req; - struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; - struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; - struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; - bool send_request_to_increase = false; - bool send_request_to_lower = false; - - if (new_clocks->phyclk_khz) - smu_req.display_count = 1; - else - smu_req.display_count = 0; - - if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz - || new_clocks->phyclk_khz > dccg->clks.phyclk_khz - || new_clocks->fclk_khz > dccg->clks.fclk_khz - || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz) - send_request_to_increase = true; - - if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) { - dccg->clks.phyclk_khz = new_clocks->phyclk_khz; - - send_request_to_lower = true; - } - - if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) { - dccg->clks.fclk_khz = new_clocks->fclk_khz; - clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK; - clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz; - smu_req.hard_min_fclk_khz = new_clocks->fclk_khz; - - dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); - send_request_to_lower = true; - } - - if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) { - dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz; - smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz; - - send_request_to_lower = true; - } - - if (should_set_clock(safe_to_lower, - new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) { - dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; - smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz; - - send_request_to_lower = true; - } - - /* make sure dcf clk is before dpp clk to - * make sure we 
have enough voltage to run dpp clk - */ - if (send_request_to_increase) { - /*use dcfclk to request voltage*/ - clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; - clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); - dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); - if (pp_smu->set_display_requirement) - pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); - } - - /* dcn1 dppclk is tied to dispclk */ - /* program dispclk on = as a w/a for sleep resume clock ramping issues */ - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz) - || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) { - dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks); - dccg->clks.dispclk_khz = new_clocks->dispclk_khz; - - send_request_to_lower = true; - } - - if (!send_request_to_increase && send_request_to_lower) { - /*use dcfclk to request voltage*/ - clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; - clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); - dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); - if (pp_smu->set_display_requirement) - pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); - } - - - *smu_req_cur = smu_req; -} -#endif - -static void dce_update_clocks(struct dccg *dccg, - struct dc_clocks *new_clocks, - bool safe_to_lower) -{ - struct dm_pp_power_level_change_request level_change_req; - struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg); - - /* TODO: Investigate why this is needed to fix display corruption. */ - if (!clk_dce->dfs_bypass_active) - new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; - - level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks); - /* get max clock state from PPLIB */ - if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower) - || level_change_req.power_level > dccg->cur_min_clks_state) { - if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req)) - dccg->cur_min_clks_state = level_change_req.power_level; - } - - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { - new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); - dccg->clks.dispclk_khz = new_clocks->dispclk_khz; - } -} - -static bool dce_update_dfs_bypass( - struct dccg *dccg, - struct dc *dc, - struct dc_state *context, - int requested_clock_khz) -{ - struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg); - struct resource_context *res_ctx = &context->res_ctx; - enum signal_type signal_type = SIGNAL_TYPE_NONE; - bool was_active = clk_dce->dfs_bypass_active; - int i; - - /* Disable DFS bypass by default. */ - clk_dce->dfs_bypass_active = false; - - /* Check that DFS bypass is available. */ - if (!clk_dce->dfs_bypass_enabled) - goto update; - - /* Check if the requested display clock is below the threshold. */ - if (requested_clock_khz >= 400000) - goto update; - - /* DFS-bypass should only be enabled on single stream setups */ - if (context->stream_count != 1) - goto update; - - /* Check that the stream's signal type is an embedded panel */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (res_ctx->pipe_ctx[i].stream) { - struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; - - signal_type = pipe_ctx->stream->sink->link->connector_signal; - break; - } - } - - if (signal_type == SIGNAL_TYPE_EDP || - signal_type == SIGNAL_TYPE_LVDS) - clk_dce->dfs_bypass_active = true; - -update: - /* Update the clock state. 
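Condensed, the checks performed by dce_update_dfs_bypass() above amount to a single predicate; a sketch using the same thresholds that appear in the code (the enum is a stand-in for the driver's signal_type):

#include <stdbool.h>

enum panel_signal_sketch { SIG_NONE, SIG_EDP, SIG_LVDS, SIG_OTHER };

/* DFS bypass: feature available, requested DISPCLK below 400 MHz, a single
 * stream, and that stream driving an embedded panel (eDP or LVDS). */
static bool want_dfs_bypass(bool bypass_enabled, int requested_clock_khz,
			    int stream_count, enum panel_signal_sketch signal)
{
	return bypass_enabled &&
	       requested_clock_khz < 400000 &&
	       stream_count == 1 &&
	       (signal == SIG_EDP || signal == SIG_LVDS);
}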
We don't need to respect safe_to_lower - * because DFS bypass should always be greater than the current - * display clock frequency. - */ - if (was_active != clk_dce->dfs_bypass_active) { - dccg->clks.dispclk_khz = - dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz); - return true; - } - - return false; -} - -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 -static const struct display_clock_funcs dcn1_funcs = { - .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, - .set_dispclk = dce112_set_clock, - .update_clocks = dcn1_update_clocks -}; -#endif - -static const struct display_clock_funcs dce120_funcs = { - .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, - .set_dispclk = dce112_set_clock, - .update_clocks = dce12_update_clocks -}; - -static const struct display_clock_funcs dce112_funcs = { - .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, - .set_dispclk = dce112_set_clock, - .update_clocks = dce_update_clocks -}; - -static const struct display_clock_funcs dce110_funcs = { - .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, - .set_dispclk = dce_psr_set_clock, - .update_clocks = dce_update_clocks, - .update_dfs_bypass = dce_update_dfs_bypass -}; - -static const struct display_clock_funcs dce_funcs = { - .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, - .set_dispclk = dce_set_clock, - .update_clocks = dce_update_clocks -}; - -static void dce_dccg_construct( - struct dce_dccg *clk_dce, - struct dc_context *ctx, - const struct dccg_registers *regs, - const struct dccg_shift *clk_shift, - const struct dccg_mask *clk_mask) -{ - struct dccg *base = &clk_dce->base; - - base->ctx = ctx; - base->funcs = &dce_funcs; - - clk_dce->regs = regs; - clk_dce->clk_shift = clk_shift; - clk_dce->clk_mask = clk_mask; - - clk_dce->dfs_bypass_disp_clk = 0; - - clk_dce->dprefclk_ss_percentage = 0; - clk_dce->dprefclk_ss_divider = 1000; - clk_dce->ss_on_dprefclk = false; - - base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID; - - dce_clock_read_integrated_info(clk_dce); - dce_clock_read_ss_info(clk_dce); -} - -struct dccg *dce_dccg_create( - struct dc_context *ctx, - const struct dccg_registers *regs, - const struct dccg_shift *clk_shift, - const struct dccg_mask *clk_mask) -{ - struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); - - if (clk_dce == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - memcpy(clk_dce->max_clks_by_state, - dce80_max_clks_by_state, - sizeof(dce80_max_clks_by_state)); - - dce_dccg_construct( - clk_dce, ctx, regs, clk_shift, clk_mask); - - return &clk_dce->base; -} - -struct dccg *dce110_dccg_create( - struct dc_context *ctx, - const struct dccg_registers *regs, - const struct dccg_shift *clk_shift, - const struct dccg_mask *clk_mask) -{ - struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); - - if (clk_dce == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - memcpy(clk_dce->max_clks_by_state, - dce110_max_clks_by_state, - sizeof(dce110_max_clks_by_state)); - - dce_dccg_construct( - clk_dce, ctx, regs, clk_shift, clk_mask); - - clk_dce->base.funcs = &dce110_funcs; - - return &clk_dce->base; -} - -struct dccg *dce112_dccg_create( - struct dc_context *ctx, - const struct dccg_registers *regs, - const struct dccg_shift *clk_shift, - const struct dccg_mask *clk_mask) -{ - struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); - - if (clk_dce == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - memcpy(clk_dce->max_clks_by_state, - dce112_max_clks_by_state, - 
sizeof(dce112_max_clks_by_state)); - - dce_dccg_construct( - clk_dce, ctx, regs, clk_shift, clk_mask); - - clk_dce->base.funcs = &dce112_funcs; - - return &clk_dce->base; -} - -struct dccg *dce120_dccg_create(struct dc_context *ctx) -{ - struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); - - if (clk_dce == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - memcpy(clk_dce->max_clks_by_state, - dce120_max_clks_by_state, - sizeof(dce120_max_clks_by_state)); - - dce_dccg_construct( - clk_dce, ctx, NULL, NULL, NULL); - - clk_dce->dprefclk_khz = 600000; - clk_dce->base.funcs = &dce120_funcs; - - return &clk_dce->base; -} - -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 -struct dccg *dcn1_dccg_create(struct dc_context *ctx) -{ - struct dc_debug_options *debug = &ctx->dc->debug; - struct dc_bios *bp = ctx->dc_bios; - struct dc_firmware_info fw_info = { { 0 } }; - struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); - - if (clk_dce == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - clk_dce->base.ctx = ctx; - clk_dce->base.funcs = &dcn1_funcs; - - clk_dce->dfs_bypass_disp_clk = 0; - - clk_dce->dprefclk_ss_percentage = 0; - clk_dce->dprefclk_ss_divider = 1000; - clk_dce->ss_on_dprefclk = false; - - clk_dce->dprefclk_khz = 600000; - if (bp->integrated_info) - clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; - if (clk_dce->dentist_vco_freq_khz == 0) { - bp->funcs->get_firmware_info(bp, &fw_info); - clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq; - if (clk_dce->dentist_vco_freq_khz == 0) - clk_dce->dentist_vco_freq_khz = 3600000; - } - - if (!debug->disable_dfs_bypass && bp->integrated_info) - if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) - clk_dce->dfs_bypass_enabled = true; - - dce_clock_read_ss_info(clk_dce); - - return &clk_dce->base; -} -#endif - -void dce_dccg_destroy(struct dccg **dccg) -{ - struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg); - - kfree(clk_dce); - *dccg = NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 64dc75378541..c83a7f05f14c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -233,6 +233,16 @@ struct dce_hwseq_registers { uint32_t DOMAIN5_PG_CONFIG; uint32_t DOMAIN6_PG_CONFIG; uint32_t DOMAIN7_PG_CONFIG; + uint32_t DOMAIN8_PG_CONFIG; + uint32_t DOMAIN9_PG_CONFIG; + uint32_t DOMAIN10_PG_CONFIG; + uint32_t DOMAIN11_PG_CONFIG; + uint32_t DOMAIN16_PG_CONFIG; + uint32_t DOMAIN17_PG_CONFIG; + uint32_t DOMAIN18_PG_CONFIG; + uint32_t DOMAIN19_PG_CONFIG; + uint32_t DOMAIN20_PG_CONFIG; + uint32_t DOMAIN21_PG_CONFIG; uint32_t DOMAIN0_PG_STATUS; uint32_t DOMAIN1_PG_STATUS; uint32_t DOMAIN2_PG_STATUS; @@ -241,6 +251,16 @@ struct dce_hwseq_registers { uint32_t DOMAIN5_PG_STATUS; uint32_t DOMAIN6_PG_STATUS; uint32_t DOMAIN7_PG_STATUS; + uint32_t DOMAIN8_PG_STATUS; + uint32_t DOMAIN9_PG_STATUS; + uint32_t DOMAIN10_PG_STATUS; + uint32_t DOMAIN11_PG_STATUS; + uint32_t DOMAIN16_PG_STATUS; + uint32_t DOMAIN17_PG_STATUS; + uint32_t DOMAIN18_PG_STATUS; + uint32_t DOMAIN19_PG_STATUS; + uint32_t DOMAIN20_PG_STATUS; + uint32_t DOMAIN21_PG_STATUS; uint32_t DIO_MEM_PWR_CTRL; uint32_t DCCG_GATE_DISABLE_CNTL; uint32_t DCCG_GATE_DISABLE_CNTL2; @@ -262,6 +282,8 @@ struct dce_hwseq_registers { uint32_t D2VGA_CONTROL; uint32_t D3VGA_CONTROL; uint32_t D4VGA_CONTROL; + uint32_t D5VGA_CONTROL; + uint32_t D6VGA_CONTROL; uint32_t VGA_TEST_CONTROL; /* MMHUB registers. read only. 
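Both the removed dce_clock_read_integrated_info() and the dcn1 constructor above resolve the DENTIST VCO frequency through the same fallback chain; sketched standalone, with the inputs being whatever the BIOS tables report (0 meaning "not provided"):

/* Prefer integrated info, then firmware info, then a 3.6 GHz default. */
static int resolve_dentist_vco_freq_khz(int integrated_info_khz, int fw_info_khz)
{
	if (integrated_info_khz)
		return integrated_info_khz;
	if (fw_info_khz)
		return fw_info_khz;
	return 3600000;
}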
temporary hack */ uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; @@ -489,6 +511,26 @@ struct dce_hwseq_registers { type DOMAIN6_POWER_GATE; \ type DOMAIN7_POWER_FORCEON; \ type DOMAIN7_POWER_GATE; \ + type DOMAIN8_POWER_FORCEON; \ + type DOMAIN8_POWER_GATE; \ + type DOMAIN9_POWER_FORCEON; \ + type DOMAIN9_POWER_GATE; \ + type DOMAIN10_POWER_FORCEON; \ + type DOMAIN10_POWER_GATE; \ + type DOMAIN11_POWER_FORCEON; \ + type DOMAIN11_POWER_GATE; \ + type DOMAIN16_POWER_FORCEON; \ + type DOMAIN16_POWER_GATE; \ + type DOMAIN17_POWER_FORCEON; \ + type DOMAIN17_POWER_GATE; \ + type DOMAIN18_POWER_FORCEON; \ + type DOMAIN18_POWER_GATE; \ + type DOMAIN19_POWER_FORCEON; \ + type DOMAIN19_POWER_GATE; \ + type DOMAIN20_POWER_FORCEON; \ + type DOMAIN20_POWER_GATE; \ + type DOMAIN21_POWER_FORCEON; \ + type DOMAIN21_POWER_GATE; \ type DOMAIN0_PGFSM_PWR_STATUS; \ type DOMAIN1_PGFSM_PWR_STATUS; \ type DOMAIN2_PGFSM_PWR_STATUS; \ @@ -497,6 +539,16 @@ struct dce_hwseq_registers { type DOMAIN5_PGFSM_PWR_STATUS; \ type DOMAIN6_PGFSM_PWR_STATUS; \ type DOMAIN7_PGFSM_PWR_STATUS; \ + type DOMAIN8_PGFSM_PWR_STATUS; \ + type DOMAIN9_PGFSM_PWR_STATUS; \ + type DOMAIN10_PGFSM_PWR_STATUS; \ + type DOMAIN11_PGFSM_PWR_STATUS; \ + type DOMAIN16_PGFSM_PWR_STATUS; \ + type DOMAIN17_PGFSM_PWR_STATUS; \ + type DOMAIN18_PGFSM_PWR_STATUS; \ + type DOMAIN19_PGFSM_PWR_STATUS; \ + type DOMAIN20_PGFSM_PWR_STATUS; \ + type DOMAIN21_PGFSM_PWR_STATUS; \ type DCFCLK_GATE_DIS; \ type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ type VGA_TEST_ENABLE; \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 366bc8c2c643..3e18ea84b1f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -645,7 +645,7 @@ static bool dce110_link_encoder_validate_hdmi_output( return false; /* DCE11 HW does not support 420 */ - if (!enc110->base.features.ycbcr420_supported && + if (!enc110->base.features.hdmi_ycbcr420_supported && crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c index 74c05e878807..bc50a8e25f4f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c @@ -105,74 +105,18 @@ bool dce100_enable_display_power_gating( return false; } -static void dce100_pplib_apply_display_requirements( - struct dc *dc, - struct dc_state *context) -{ - struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - - pp_display_cfg->avail_mclk_switch_time_us = - dce110_get_min_vblank_time_us(context); - /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz - / MEMORY_TYPE_MULTIPLIER;*/ - - dce110_fill_display_configs(context, pp_display_cfg); - - if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( - struct dm_pp_display_configuration)) != 0) - dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); - - dc->prev_display_config = *pp_display_cfg; -} - -/* unit: in_khz before mode set, get pixel clock from context. 
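On the encoder-capability change above: ycbcr420_supported is split into separate HDMI and DP flags, so HDMI timing validation now keys off hdmi_ycbcr420_supported only. A small sketch of that check, with stand-in types rather than the driver structs:

#include <stdbool.h>

struct enc_420_caps_sketch {
	bool hdmi_ycbcr420_supported;
	bool dp_ycbcr420_supported;
};

/* Reject an HDMI timing that needs 4:2:0 when the encoder cannot do it. */
static bool hdmi_timing_ok(struct enc_420_caps_sketch caps, bool timing_is_ycbcr420)
{
	return !timing_is_ycbcr420 || caps.hdmi_ycbcr420_supported;
}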
ASIC register - * may not be programmed yet - */ -static uint32_t get_max_pixel_clock_for_all_paths( - struct dc *dc, - struct dc_state *context) -{ - uint32_t max_pix_clk = 0; - int i; - - for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->stream == NULL) - continue; - - /* do not check under lay */ - if (pipe_ctx->top_pipe) - continue; - - if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) - max_pix_clk = - pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; - } - return max_pix_clk; -} - -void dce100_set_bandwidth( +void dce100_prepare_bandwidth( struct dc *dc, - struct dc_state *context, - bool decrease_allowed) + struct dc_state *context) { - struct dc_clocks req_clks; - - req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; - req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context); - dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); - dc->res_pool->dccg->funcs->update_clocks( - dc->res_pool->dccg, - &req_clks, - decrease_allowed); - - dce100_pplib_apply_display_requirements(dc, context); + dc->res_pool->clk_mgr->funcs->update_clocks( + dc->res_pool->clk_mgr, + context, + false); } - /**************************************************************************/ void dce100_hw_sequencer_construct(struct dc *dc) @@ -180,8 +124,7 @@ void dce100_hw_sequencer_construct(struct dc *dc) dce110_hw_sequencer_construct(dc); dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; - dc->hwss.set_bandwidth = dce100_set_bandwidth; - dc->hwss.pplib_apply_display_requirements = - dce100_pplib_apply_display_requirements; + dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; + dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index c6ec0ed6ec3d..acd418515346 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h @@ -33,10 +33,9 @@ struct dc_state; void dce100_hw_sequencer_construct(struct dc *dc); -void dce100_set_bandwidth( +void dce100_prepare_bandwidth( struct dc *dc, - struct dc_state *context, - bool decrease_allowed); + struct dc_state *context); bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, struct dc_bios *dcb, diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 14754a87156c..6ae51a5dfc04 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -36,11 +36,11 @@ #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" +#include "dce/dce_clk_mgr.h" #include "dce/dce_mem_input.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce/dce_opp.h" -#include "dce/dce_clocks.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" @@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = { .reg_name = mm ## block ## id ## _ ## reg_name -static const struct dccg_registers disp_clk_regs = { +static const struct clk_mgr_registers disp_clk_regs = { CLK_COMMON_REG_LIST_DCE_BASE() }; -static const struct dccg_shift disp_clk_shift = { +static const struct clk_mgr_shift disp_clk_shift = { CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; -static const struct 
dccg_mask disp_clk_mask = { +static const struct clk_mgr_mask disp_clk_mask = { CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; @@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool) dce_aud_destroy(&pool->base.audios[i]); } - if (pool->base.dccg != NULL) - dce_dccg_destroy(&pool->base.dccg); + if (pool->base.clk_mgr != NULL) + dce_clk_mgr_destroy(&pool->base.clk_mgr); if (pool->base.abm != NULL) dce_abm_destroy(&pool->base.abm); @@ -767,7 +767,7 @@ bool dce100_validate_bandwidth( if (at_least_one_pipe) { /* TODO implement when needed but for now hardcode max value*/ context->bw.dce.dispclk_khz = 681000; - context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; + context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { context->bw.dce.dispclk_khz = 0; context->bw.dce.yclk_khz = 0; @@ -860,7 +860,6 @@ static bool construct( struct dc_context *ctx = dc->ctx; struct dc_firmware_info info; struct dc_bios *bp; - struct dm_pp_static_clock_info static_clk_info = {0}; ctx->dc_bios->regs = &bios_regs; @@ -908,11 +907,11 @@ static bool construct( } } - pool->base.dccg = dce_dccg_create(ctx, + pool->base.clk_mgr = dce_clk_mgr_create(ctx, &disp_clk_regs, &disp_clk_shift, &disp_clk_mask); - if (pool->base.dccg == NULL) { + if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create display clock!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; @@ -938,12 +937,6 @@ static bool construct( goto res_create_fail; } - /* get static clock information for PPLIB or firmware, save - * max_clock_state - */ - if (dm_pp_get_static_clocks(ctx, &static_clk_info)) - pool->base.dccg->max_clks_state = - static_clk_info.max_clocks_state; { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index b75ede5f84f7..9724a17e352b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -548,14 +548,14 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, regamma_params->hw_points_num = hw_points; - i = 1; - for (k = 0; k < 16 && i < 16; k++) { + k = 0; + for (i = 1; i < 16; i++) { if (seg_distr[k] != -1) { regamma_params->arr_curve_points[k].segments_num = seg_distr[k]; regamma_params->arr_curve_points[i].offset = regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]); } - i++; + k++; } if (seg_distr[k] != -1) @@ -1085,7 +1085,6 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { link->dc->hwss.edp_backlight_control(link, true); - stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL; } } void dce110_blank_stream(struct pipe_ctx *pipe_ctx) @@ -1192,8 +1191,8 @@ static void build_audio_output( if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { audio_output->pll_info.dp_dto_source_clock_in_khz = - state->dis_clk->funcs->get_dp_ref_clk_frequency( - state->dis_clk); + state->dccg->funcs->get_dp_ref_clk_frequency( + state->dccg); } audio_output->pll_info.feed_back_divider = @@ -1547,6 +1546,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) int i; struct dc_link *edp_link_to_turnoff = NULL; struct dc_link *edp_link = get_link_for_edp(dc); + struct dc_bios *bios = dc->ctx->dc_bios; bool can_edp_fast_boot_optimize = false; bool 
apply_edp_fast_boot_optimization = false; @@ -1573,6 +1573,20 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { context->streams[i]->apply_edp_fast_boot_optimization = true; apply_edp_fast_boot_optimization = true; + + /* When after S4 and S5, vbios may post edp and previous dpms_off + * doesn't make sense. + * Update dpms_off state to align hw and sw state via check + * vBios scratch register. + */ + if (bios->funcs->is_active_display) { + const struct connector_device_tag_info *device_tag = &(edp_link->device_tag); + + if (bios->funcs->is_active_display(bios, + context->streams[i]->signal, + device_tag)) + context->streams[i]->dpms_off = false; + } } } } @@ -1736,41 +1750,18 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx, if (events->force_trigger) value |= 0x1; - value |= 0x84; + if (num_pipes) { + struct dc *dc = pipe_ctx[0]->stream->ctx->dc; + + if (dc->fbc_compressor) + value |= 0x84; + } for (i = 0; i < num_pipes; i++) pipe_ctx[i]->stream_res.tg->funcs-> set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); } -/* unit: in_khz before mode set, get pixel clock from context. ASIC register - * may not be programmed yet - */ -static uint32_t get_max_pixel_clock_for_all_paths( - struct dc *dc, - struct dc_state *context) -{ - uint32_t max_pix_clk = 0; - int i; - - for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->stream == NULL) - continue; - - /* do not check under lay */ - if (pipe_ctx->top_pipe) - continue; - - if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) - max_pix_clk = - pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; - } - - return max_pix_clk; -} - /* * Check if FBC can be enabled */ @@ -2380,191 +2371,33 @@ static void init_hw(struct dc *dc) } -void dce110_fill_display_configs( - const struct dc_state *context, - struct dm_pp_display_configuration *pp_display_cfg) -{ - int j; - int num_cfgs = 0; - - for (j = 0; j < context->stream_count; j++) { - int k; - - const struct dc_stream_state *stream = context->streams[j]; - struct dm_pp_single_disp_config *cfg = - &pp_display_cfg->disp_configs[num_cfgs]; - const struct pipe_ctx *pipe_ctx = NULL; - - for (k = 0; k < MAX_PIPES; k++) - if (stream == context->res_ctx.pipe_ctx[k].stream) { - pipe_ctx = &context->res_ctx.pipe_ctx[k]; - break; - } - - ASSERT(pipe_ctx != NULL); - - /* only notify active stream */ - if (stream->dpms_off) - continue; - - num_cfgs++; - cfg->signal = pipe_ctx->stream->signal; - cfg->pipe_idx = pipe_ctx->stream_res.tg->inst; - cfg->src_height = stream->src.height; - cfg->src_width = stream->src.width; - cfg->ddi_channel_mapping = - stream->sink->link->ddi_channel_mapping.raw; - cfg->transmitter = - stream->sink->link->link_enc->transmitter; - cfg->link_settings.lane_count = - stream->sink->link->cur_link_settings.lane_count; - cfg->link_settings.link_rate = - stream->sink->link->cur_link_settings.link_rate; - cfg->link_settings.link_spread = - stream->sink->link->cur_link_settings.link_spread; - cfg->sym_clock = stream->phy_pix_clk; - /* Round v_refresh*/ - cfg->v_refresh = stream->timing.pix_clk_khz * 1000; - cfg->v_refresh /= stream->timing.h_total; - cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) - / stream->timing.v_total; - } - - pp_display_cfg->display_count = num_cfgs; -} - -uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) -{ - uint8_t j; - uint32_t 
min_vertical_blank_time = -1; - - for (j = 0; j < context->stream_count; j++) { - struct dc_stream_state *stream = context->streams[j]; - uint32_t vertical_blank_in_pixels = 0; - uint32_t vertical_blank_time = 0; - - vertical_blank_in_pixels = stream->timing.h_total * - (stream->timing.v_total - - stream->timing.v_addressable); - - vertical_blank_time = vertical_blank_in_pixels - * 1000 / stream->timing.pix_clk_khz; - - if (min_vertical_blank_time > vertical_blank_time) - min_vertical_blank_time = vertical_blank_time; - } - - return min_vertical_blank_time; -} - -static int determine_sclk_from_bounding_box( - const struct dc *dc, - int required_sclk) -{ - int i; - /* - * Some asics do not give us sclk levels, so we just report the actual - * required sclk - */ - if (dc->sclk_lvls.num_levels == 0) - return required_sclk; - - for (i = 0; i < dc->sclk_lvls.num_levels; i++) { - if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk) - return dc->sclk_lvls.clocks_in_khz[i]; - } - /* - * even maximum level could not satisfy requirement, this - * is unexpected at this stage, should have been caught at - * validation time - */ - ASSERT(0); - return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1]; -} - -static void pplib_apply_display_requirements( - struct dc *dc, - struct dc_state *context) +void dce110_prepare_bandwidth( + struct dc *dc, + struct dc_state *context) { - struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; + struct clk_mgr *dccg = dc->res_pool->clk_mgr; - pp_display_cfg->all_displays_in_sync = - context->bw.dce.all_displays_in_sync; - pp_display_cfg->nb_pstate_switch_disable = - context->bw.dce.nbp_state_change_enable == false; - pp_display_cfg->cpu_cc6_disable = - context->bw.dce.cpuc_state_change_enable == false; - pp_display_cfg->cpu_pstate_disable = - context->bw.dce.cpup_state_change_enable == false; - pp_display_cfg->cpu_pstate_separation_time = - context->bw.dce.blackout_recovery_time_us; + dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); - pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz - / MEMORY_TYPE_MULTIPLIER; - - pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( - dc, - context->bw.dce.sclk_khz); - - pp_display_cfg->min_engine_clock_deep_sleep_khz - = context->bw.dce.sclk_deep_sleep_khz; - - pp_display_cfg->avail_mclk_switch_time_us = - dce110_get_min_vblank_time_us(context); - /* TODO: dce11.2*/ - pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; - - pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz; - - dce110_fill_display_configs(context, pp_display_cfg); - - /* TODO: is this still applicable?*/ - if (pp_display_cfg->display_count == 1) { - const struct dc_crtc_timing *timing = - &context->streams[0]->timing; - - pp_display_cfg->crtc_index = - pp_display_cfg->disp_configs[0].pipe_idx; - pp_display_cfg->line_time_in_us = timing->h_total * 1000 - / timing->pix_clk_khz; - } - - if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( - struct dm_pp_display_configuration)) != 0) - dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); - - dc->prev_display_config = *pp_display_cfg; + dccg->funcs->update_clocks( + dccg, + context, + false); } -static void dce110_set_bandwidth( +void dce110_optimize_bandwidth( struct dc *dc, - struct dc_state *context, - bool decrease_allowed) + struct dc_state *context) { - struct dc_clocks req_clks; - struct dccg *dccg = dc->res_pool->dccg; - - req_clks.dispclk_khz = context->bw.dce.dispclk_khz; - 
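The removed determine_sclk_from_bounding_box() above boils down to a lowest-fit lookup; a standalone sketch of the same selection, with the clock table passed in explicitly:

/*
 * Return the lowest reported SCLK level that satisfies the request, the
 * request itself when no levels were reported, or the highest level as a
 * fallback (the original asserts in that last case, since validation
 * should have caught it).
 */
static int pick_sclk_khz(const int *levels_khz, int num_levels, int required_khz)
{
	int i;

	if (num_levels == 0)
		return required_khz;

	for (i = 0; i < num_levels; i++)
		if (levels_khz[i] >= required_khz)
			return levels_khz[i];

	return levels_khz[num_levels - 1];
}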
req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context); - - if (decrease_allowed) - dce110_set_displaymarks(dc, context); - else - dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); + struct clk_mgr *dccg = dc->res_pool->clk_mgr; - if (dccg->funcs->update_dfs_bypass) - dccg->funcs->update_dfs_bypass( - dccg, - dc, - context, - req_clks.dispclk_khz); + dce110_set_displaymarks(dc, context); dccg->funcs->update_clocks( dccg, - &req_clks, - decrease_allowed); - pplib_apply_display_requirements(dc, context); + context, + true); } static void dce110_program_front_end_for_pipe( @@ -2769,28 +2602,6 @@ static void dce110_wait_for_mpcc_disconnect( /* do nothing*/ } -static void program_csc_matrix(struct pipe_ctx *pipe_ctx, - enum dc_color_space colorspace, - uint16_t *matrix) -{ - int i; - struct out_csc_color_matrix tbl_entry; - - if (pipe_ctx->stream->csc_color_matrix.enable_adjustment - == true) { - enum dc_color_space color_space = - pipe_ctx->stream->output_color_space; - - //uint16_t matrix[12]; - for (i = 0; i < 12; i++) - tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i]; - - tbl_entry.color_space = color_space; - //tbl_entry.regval = matrix; - pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry); - } -} - void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; @@ -2839,13 +2650,8 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.xfm, attributes); } -static void ready_shared_resources(struct dc *dc, struct dc_state *context) {} - -static void optimize_shared_resources(struct dc *dc) {} - static const struct hw_sequencer_funcs dce110_funcs = { .program_gamut_remap = program_gamut_remap, - .program_csc_matrix = program_csc_matrix, .init_hw = init_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dce110_apply_ctx_for_surface, @@ -2868,7 +2674,8 @@ static const struct hw_sequencer_funcs dce110_funcs = { .enable_display_power_gating = dce110_enable_display_power_gating, .disable_plane = dce110_power_down_fe, .pipe_control_lock = dce_pipe_control_lock, - .set_bandwidth = dce110_set_bandwidth, + .prepare_bandwidth = dce110_prepare_bandwidth, + .optimize_bandwidth = dce110_optimize_bandwidth, .set_drr = set_drr, .get_position = get_position, .set_static_screen_control = set_static_screen_control, @@ -2877,9 +2684,6 @@ static const struct hw_sequencer_funcs dce110_funcs = { .setup_stereo = NULL, .set_avmute = dce110_set_avmute, .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, - .ready_shared_resources = ready_shared_resources, - .optimize_shared_resources = optimize_shared_resources, - .pplib_apply_display_requirements = pplib_apply_display_requirements, .edp_backlight_control = hwss_edp_backlight_control, .edp_power_control = hwss_edp_power_control, .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index d6db3dbd9015..cd3e36d52a52 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -40,7 +40,6 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc_state *context); - void dce110_enable_stream(struct pipe_ctx *pipe_ctx); void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option); @@ -64,11 +63,13 @@ void dce110_set_safe_displaymarks( struct 
resource_context *res_ctx, const struct resource_pool *pool); -void dce110_fill_display_configs( - const struct dc_state *context, - struct dm_pp_display_configuration *pp_display_cfg); +void dce110_prepare_bandwidth( + struct dc *dc, + struct dc_state *context); -uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context); +void dce110_optimize_bandwidth( + struct dc *dc, + struct dc_state *context); void dp_receiver_power_ctrl(struct dc_link *link, bool on); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index e3624ca24574..e33d11785b1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -31,6 +31,7 @@ #include "resource.h" #include "dce110/dce110_resource.h" +#include "dce/dce_clk_mgr.h" #include "include/irq_service_interface.h" #include "dce/dce_audio.h" #include "dce110/dce110_timing_generator.h" @@ -45,7 +46,6 @@ #include "dce110/dce110_transform_v.h" #include "dce/dce_opp.h" #include "dce110/dce110_opp_v.h" -#include "dce/dce_clocks.h" #include "dce/dce_clock_source.h" #include "dce/dce_hwseq.h" #include "dce110/dce110_hw_sequencer.h" @@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = { #define SRI(reg_name, block, id)\ .reg_name = mm ## block ## id ## _ ## reg_name -static const struct dccg_registers disp_clk_regs = { +static const struct clk_mgr_registers disp_clk_regs = { CLK_COMMON_REG_LIST_DCE_BASE() }; -static const struct dccg_shift disp_clk_shift = { +static const struct clk_mgr_shift disp_clk_shift = { CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; -static const struct dccg_mask disp_clk_mask = { +static const struct clk_mgr_mask disp_clk_mask = { CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; @@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool) if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); - if (pool->base.dccg != NULL) - dce_dccg_destroy(&pool->base.dccg); + if (pool->base.clk_mgr != NULL) + dce_clk_mgr_destroy(&pool->base.clk_mgr); if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); @@ -1173,12 +1173,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000); + clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER, + clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER, + clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); } @@ -1201,7 +1201,6 @@ static bool construct( struct dc_context *ctx = dc->ctx; struct dc_firmware_info info; struct dc_bios *bp; - struct dm_pp_static_clock_info static_clk_info = {0}; ctx->dc_bios->regs = &bios_regs; @@ -1257,11 +1256,11 @@ static bool construct( } } - pool->base.dccg = dce110_dccg_create(ctx, + pool->base.clk_mgr = dce110_clk_mgr_create(ctx, &disp_clk_regs, &disp_clk_shift, &disp_clk_mask); - if (pool->base.dccg == NULL) { + if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create display clock!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; @@ -1287,13 +1286,6 @@ static bool construct( goto res_create_fail; } - /* get static clock information for PPLIB or firmware, 
save - * max_clock_state - */ - if (dm_pp_get_static_clocks(ctx, &static_clk_info)) - pool->base.dccg->max_clks_state = - static_clk_info.max_clocks_state; - { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; @@ -1362,7 +1354,8 @@ static bool construct( pool->base.sw_i2cs[i] = NULL; } - dc->fbc_compressor = dce110_compressor_create(ctx); + if (dc->config.fbc_support) + dc->fbc_compressor = dce110_compressor_create(ctx); if (!underlay_create(ctx, &pool->base)) goto res_create_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 3ce79c208ddf..969d4e72dc94 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -35,6 +35,7 @@ #include "irq/dce110/irq_service_dce110.h" +#include "dce/dce_clk_mgr.h" #include "dce/dce_mem_input.h" #include "dce/dce_transform.h" #include "dce/dce_link_encoder.h" @@ -42,7 +43,6 @@ #include "dce/dce_audio.h" #include "dce/dce_opp.h" #include "dce/dce_ipp.h" -#include "dce/dce_clocks.h" #include "dce/dce_clock_source.h" #include "dce/dce_hwseq.h" @@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = { .reg_name = mm ## block ## id ## _ ## reg_name -static const struct dccg_registers disp_clk_regs = { +static const struct clk_mgr_registers disp_clk_regs = { CLK_COMMON_REG_LIST_DCE_BASE() }; -static const struct dccg_shift disp_clk_shift = { +static const struct clk_mgr_shift disp_clk_shift = { CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; -static const struct dccg_mask disp_clk_mask = { +static const struct clk_mgr_mask disp_clk_mask = { CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; @@ -551,7 +551,8 @@ static struct transform *dce112_transform_create( static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, - .ycbcr420_supported = true, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, @@ -749,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool) if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); - if (pool->base.dccg != NULL) - dce_dccg_destroy(&pool->base.dccg); + if (pool->base.clk_mgr != NULL) + dce_clk_mgr_destroy(&pool->base.clk_mgr); if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); @@ -1015,12 +1016,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000); + clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER, + clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER, + clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); return; @@ -1056,12 +1057,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) * YCLK = UMACLK*m_memoryTypeMultiplier */ dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000); + mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - 
mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, + mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select @@ -1131,7 +1132,6 @@ static bool construct( { unsigned int i; struct dc_context *ctx = dc->ctx; - struct dm_pp_static_clock_info static_clk_info = {0}; ctx->dc_bios->regs = &bios_regs; @@ -1199,11 +1199,11 @@ static bool construct( } } - pool->base.dccg = dce112_dccg_create(ctx, + pool->base.clk_mgr = dce112_clk_mgr_create(ctx, &disp_clk_regs, &disp_clk_shift, &disp_clk_mask); - if (pool->base.dccg == NULL) { + if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create display clock!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; @@ -1229,13 +1229,6 @@ static bool construct( goto res_create_fail; } - /* get static clock information for PPLIB or firmware, save - * max_clock_state - */ - if (dm_pp_get_static_clocks(ctx, &static_clk_info)) - pool->base.dccg->max_clks_state = - static_clk_info.max_clocks_state; - { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 79ab5f9f9115..f12696674eb0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -31,6 +31,7 @@ #include "resource.h" #include "include/irq_service_interface.h" #include "dce120_resource.h" + #include "dce112/dce112_resource.h" #include "dce110/dce110_resource.h" @@ -39,7 +40,6 @@ #include "irq/dce120/irq_service_dce120.h" #include "dce/dce_opp.h" #include "dce/dce_clock_source.h" -#include "dce/dce_clocks.h" #include "dce/dce_ipp.h" #include "dce/dce_mem_input.h" @@ -47,6 +47,7 @@ #include "dce120/dce120_hw_sequencer.h" #include "dce/dce_transform.h" +#include "dce/dce_clk_mgr.h" #include "dce/dce_audio.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" @@ -573,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool) if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); - if (pool->base.dccg != NULL) - dce_dccg_destroy(&pool->base.dccg); + if (pool->base.clk_mgr != NULL) + dce_clk_mgr_destroy(&pool->base.clk_mgr); } static void read_dce_straps( @@ -606,7 +607,8 @@ static struct audio *create_audio( static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, - .ycbcr420_supported = true, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, @@ -834,12 +836,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) * YCLK = UMACLK*m_memoryTypeMultiplier */ dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000); + mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - 
mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, + mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select @@ -973,8 +975,8 @@ static bool construct( } } - pool->base.dccg = dce120_dccg_create(ctx); - if (pool->base.dccg == NULL) { + pool->base.clk_mgr = dce120_clk_mgr_create(ctx); + if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create display clock!\n"); BREAK_TO_DEBUGGER(); goto dccg_create_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index 6c6a1a16af19..a60a90e68d91 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c @@ -76,6 +76,7 @@ void dce80_hw_sequencer_construct(struct dc *dc) dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.pipe_control_lock = dce_pipe_control_lock; - dc->hwss.set_bandwidth = dce100_set_bandwidth; + dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; + dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index d68f951f9869..6d40b3d54ac1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -37,6 +37,7 @@ #include "dce110/dce110_timing_generator.h" #include "dce110/dce110_resource.h" #include "dce80/dce80_timing_generator.h" +#include "dce/dce_clk_mgr.h" #include "dce/dce_mem_input.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" @@ -44,7 +45,6 @@ #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce/dce_opp.h" -#include "dce/dce_clocks.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" @@ -155,15 +155,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = { .reg_name = mm ## block ## id ## _ ## reg_name -static const struct dccg_registers disp_clk_regs = { +static const struct clk_mgr_registers disp_clk_regs = { CLK_COMMON_REG_LIST_DCE_BASE() }; -static const struct dccg_shift disp_clk_shift = { +static const struct clk_mgr_shift disp_clk_shift = { CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) }; -static const struct dccg_mask disp_clk_mask = { +static const struct clk_mgr_mask disp_clk_mask = { CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) }; @@ -779,8 +779,8 @@ static void destruct(struct dce110_resource_pool *pool) } } - if (pool->base.dccg != NULL) - dce_dccg_destroy(&pool->base.dccg); + if (pool->base.clk_mgr != NULL) + dce_clk_mgr_destroy(&pool->base.clk_mgr); if (pool->base.irqs != NULL) { dal_irq_service_destroy(&pool->base.irqs); @@ -793,7 +793,7 @@ bool dce80_validate_bandwidth( { /* TODO implement when needed but for now hardcode max value*/ context->bw.dce.dispclk_khz = 681000; - context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; + context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; return true; } @@ -855,7 +855,6 @@ static bool dce80_construct( struct dc_context *ctx = dc->ctx; struct dc_firmware_info info; struct dc_bios *bp; - struct dm_pp_static_clock_info static_clk_info = {0}; ctx->dc_bios->regs = &bios_regs; @@ -918,11 +917,11 @@ static bool dce80_construct( } } - pool->base.dccg = dce_dccg_create(ctx, + pool->base.clk_mgr = dce_clk_mgr_create(ctx, &disp_clk_regs, 
&disp_clk_shift, &disp_clk_mask); - if (pool->base.dccg == NULL) { + if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create display clock!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; @@ -948,10 +947,6 @@ static bool dce80_construct( goto res_create_fail; } - if (dm_pp_get_static_clocks(ctx, &static_clk_info)) - pool->base.dccg->max_clks_state = - static_clk_info.max_clocks_state; - { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; @@ -1065,7 +1060,6 @@ static bool dce81_construct( struct dc_context *ctx = dc->ctx; struct dc_firmware_info info; struct dc_bios *bp; - struct dm_pp_static_clock_info static_clk_info = {0}; ctx->dc_bios->regs = &bios_regs; @@ -1128,11 +1122,11 @@ static bool dce81_construct( } } - pool->base.dccg = dce_dccg_create(ctx, + pool->base.clk_mgr = dce_clk_mgr_create(ctx, &disp_clk_regs, &disp_clk_shift, &disp_clk_mask); - if (pool->base.dccg == NULL) { + if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create display clock!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; @@ -1158,10 +1152,6 @@ static bool dce81_construct( goto res_create_fail; } - if (dm_pp_get_static_clocks(ctx, &static_clk_info)) - pool->base.dccg->max_clks_state = - static_clk_info.max_clocks_state; - { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; @@ -1275,7 +1265,6 @@ static bool dce83_construct( struct dc_context *ctx = dc->ctx; struct dc_firmware_info info; struct dc_bios *bp; - struct dm_pp_static_clock_info static_clk_info = {0}; ctx->dc_bios->regs = &bios_regs; @@ -1334,11 +1323,11 @@ static bool dce83_construct( } } - pool->base.dccg = dce_dccg_create(ctx, + pool->base.clk_mgr = dce_clk_mgr_create(ctx, &disp_clk_regs, &disp_clk_shift, &disp_clk_mask); - if (pool->base.dccg == NULL) { + if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create display clock!\n"); BREAK_TO_DEBUGGER(); goto res_create_fail; @@ -1364,10 +1353,6 @@ static bool dce83_construct( goto res_create_fail; } - if (dm_pp_get_static_clocks(ctx, &static_clk_info)) - pool->base.dccg->max_clks_state = - static_clk_info.max_clocks_state; - { struct irq_service_init_data init_data; init_data.ctx = dc->ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 032f872be89c..55f293c8a3c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -24,7 +24,7 @@ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ - dcn10_hubp.o dcn10_mpc.o \ + dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c new file mode 100644 index 000000000000..20f531d27e2b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c @@ -0,0 +1,379 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
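The clk_mgr constructors above, and the REG/FN macros that follow in the new dcn10_clk_mgr.c, rely on the register/shift/mask convention used throughout DC. A generic sketch of how such a field description is applied (hypothetical field, not a real DCE register):

#include <stdint.h>

struct field_sketch {
	uint32_t shift;
	uint32_t mask;
};

/* Extract a bitfield, as REG_GET() does with a __SHIFT/_MASK pair. */
static uint32_t field_get(uint32_t reg_val, struct field_sketch f)
{
	return (reg_val & f.mask) >> f.shift;
}

/* Write a bitfield back the same way. */
static uint32_t field_set(uint32_t reg_val, struct field_sketch f, uint32_t val)
{
	return (reg_val & ~f.mask) | ((val << f.shift) & f.mask);
}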
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn10_clk_mgr.h" + +#include "reg_helper.h" +#include "core_types.h" + +#define TO_DCE_CLK_MGR(clocks)\ + container_of(clocks, struct dce_clk_mgr, base) + +#define REG(reg) \ + (clk_mgr_dce->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name + +#define CTX \ + clk_mgr_dce->base.ctx +#define DC_LOGGER \ + clk_mgr->ctx->logger + +void dcn1_pplib_apply_display_requirements( + struct dc *dc, + struct dc_state *context) +{ + struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; + + pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz; + pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz; + pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz; + pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz; + pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz; + pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz; + dce110_fill_display_configs(context, pp_display_cfg); + + dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); +} + +static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks) +{ + bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; + bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz; + int disp_clk_threshold = new_clocks->max_supported_dppclk_khz; + bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz; + + /* increase clock, looking for div is 0 for current, request div is 1*/ + if (dispclk_increase) { + /* already divided by 2, no need to reach target clk with 2 steps*/ + if (cur_dpp_div) + return new_clocks->dispclk_khz; + + /* request disp clk is lower than maximum supported dpp clk, + * no need to reach target clk with two steps. + */ + if (new_clocks->dispclk_khz <= disp_clk_threshold) + return new_clocks->dispclk_khz; + + /* target dpp clk not request divided by 2, still within threshold */ + if (!request_dpp_div) + return new_clocks->dispclk_khz; + + } else { + /* decrease clock, looking for current dppclk divided by 2, + * request dppclk not divided by 2. 
+ */ + + /* current dpp clk not divided by 2, no need to ramp*/ + if (!cur_dpp_div) + return new_clocks->dispclk_khz; + + /* current disp clk is lower than current maximum dpp clk, + * no need to ramp + */ + if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold) + return new_clocks->dispclk_khz; + + /* request dpp clk need to be divided by 2 */ + if (request_dpp_div) + return new_clocks->dispclk_khz; + } + + return disp_clk_threshold; +} + +static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks) +{ + struct dc *dc = clk_mgr->ctx->dc; + int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks); + bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; + int i; + + /* set disp clk to dpp clk threshold */ + dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold); + + /* update request dpp clk division option */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx->plane_state) + continue; + + pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control( + pipe_ctx->plane_res.dpp, + request_dpp_div, + true); + } + + /* If target clk not same as dppclk threshold, set to target clock */ + if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) + dce112_set_clock(clk_mgr, new_clocks->dispclk_khz); + + clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; + clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz; + clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz; +} + +static int get_active_display_cnt( + struct dc *dc, + struct dc_state *context) +{ + int i, display_count; + + display_count = 0; + for (i = 0; i < context->stream_count; i++) { + const struct dc_stream_state *stream = context->streams[i]; + + /* + * Only notify active stream or virtual stream. + * Need to notify virtual stream to work around + * headless case. HPD does not fire when system is in + * S0i2. + */ + if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL) + display_count++; + } + + return display_count; +} + +static void notify_deep_sleep_dcfclk_to_smu( + struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz) +{ + int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz + /* + * if function pointer not set up, this message is + * sent as part of pplib_apply_display_requirements. + * So just return. + */ + if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk) + return; + + min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up + pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz); +} + +static void notify_hard_min_dcfclk_to_smu( + struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz) +{ + int min_dcf_clk_mhz; //minimum required DCF clock in mhz + + /* + * if function pointer not set up, this message is + * sent as part of pplib_apply_display_requirements. + * So just return. + */ + if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq) + return; + + min_dcf_clk_mhz = min_dcf_clk_khz / 1000; + + pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz); +} + +static void notify_hard_min_fclk_to_smu( + struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz) +{ + int min_f_clk_mhz; //minimum required F clock in mhz + + /* + * if function pointer not set up, this message is + * sent as part of pplib_apply_display_requirements. + * So just return. 
+ */ + if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq) + return; + + min_f_clk_mhz = min_f_clk_khz / 1000; + + pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz); +} + +static void dcn1_update_clocks(struct clk_mgr *clk_mgr, + struct dc_state *context, + bool safe_to_lower) +{ + struct dc *dc = clk_mgr->ctx->dc; + struct dc_clocks *new_clocks = &context->bw.dcn.clk; + struct pp_smu_display_requirement_rv *smu_req_cur = + &dc->res_pool->pp_smu_req; + struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; + struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; + struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; + bool send_request_to_increase = false; + bool send_request_to_lower = false; + int display_count; + + bool enter_display_off = false; + + display_count = get_active_display_cnt(dc, context); + + if (display_count == 0) + enter_display_off = true; + + if (enter_display_off == safe_to_lower) { + /* + * Notify SMU active displays + * if function pointer not set up, this message is + * sent as part of pplib_apply_display_requirements. + */ + if (pp_smu->set_display_count) + pp_smu->set_display_count(&pp_smu->pp_smu, display_count); + else + smu_req.display_count = display_count; + + } + + if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz + || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz + || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz + || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz) + send_request_to_increase = true; + + if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) { + clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz; + + send_request_to_lower = true; + } + + // F Clock + if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { + clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; + clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK; + clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz; + smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; + + notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz); + + send_request_to_lower = true; + } + + //DCF Clock + if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) { + clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz; + smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000; + + send_request_to_lower = true; + } + + if (should_set_clock(safe_to_lower, + new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) { + clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; + smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000; + + send_request_to_lower = true; + } + + /* make sure dcf clk is before dpp clk to + * make sure we have enough voltage to run dpp clk + */ + if (send_request_to_increase) { + /*use dcfclk to request voltage*/ + clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; + clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); + + notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); + + if (pp_smu->set_display_requirement) + pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); + + notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz); + dcn1_pplib_apply_display_requirements(dc, context); + } + + /* dcn1 dppclk is tied to dispclk */ + /* program dispclk on = as a w/a for sleep resume clock ramping issues */ + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz) + || new_clocks->dispclk_khz == 
clk_mgr->clks.dispclk_khz) { + dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks); + clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; + + send_request_to_lower = true; + } + + if (!send_request_to_increase && send_request_to_lower) { + /*use dcfclk to request voltage*/ + clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; + clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); + + notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); + + if (pp_smu->set_display_requirement) + pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); + + notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz); + dcn1_pplib_apply_display_requirements(dc, context); + } + + + *smu_req_cur = smu_req; +} + +static const struct clk_mgr_funcs dcn1_funcs = { + .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .update_clocks = dcn1_update_clocks +}; + +struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx) +{ + struct dc_debug_options *debug = &ctx->dc->debug; + struct dc_bios *bp = ctx->dc_bios; + struct dc_firmware_info fw_info = { { 0 } }; + struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); + + if (clk_mgr_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + clk_mgr_dce->base.ctx = ctx; + clk_mgr_dce->base.funcs = &dcn1_funcs; + + clk_mgr_dce->dfs_bypass_disp_clk = 0; + + clk_mgr_dce->dprefclk_ss_percentage = 0; + clk_mgr_dce->dprefclk_ss_divider = 1000; + clk_mgr_dce->ss_on_dprefclk = false; + + clk_mgr_dce->dprefclk_khz = 600000; + if (bp->integrated_info) + clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; + if (clk_mgr_dce->dentist_vco_freq_khz == 0) { + bp->funcs->get_firmware_info(bp, &fw_info); + clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq; + if (clk_mgr_dce->dentist_vco_freq_khz == 0) + clk_mgr_dce->dentist_vco_freq_khz = 3600000; + } + + if (!debug->disable_dfs_bypass && bp->integrated_info) + if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) + clk_mgr_dce->dfs_bypass_enabled = true; + + dce_clock_read_ss_info(clk_mgr_dce); + + return &clk_mgr_dce->base; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h new file mode 100644 index 000000000000..9dbaf6578006 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h @@ -0,0 +1,37 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN10_CLK_MGR_H__ +#define __DCN10_CLK_MGR_H__ + +#include "../dce/dce_clk_mgr.h" + +void dcn1_pplib_apply_display_requirements( + struct dc *dc, + struct dc_state *context); + +struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx); + +#endif //__DCN10_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 5d95a997fd9f..3eea44092a04 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -71,39 +71,39 @@ void cm_helper_program_xfer_func( unsigned int i = 0; REG_SET_2(reg->start_cntl_b, 0, - exp_region_start, params->arr_points[0].custom_float_x, + exp_region_start, params->corner_points[0].blue.custom_float_x, exp_resion_start_segment, 0); REG_SET_2(reg->start_cntl_g, 0, - exp_region_start, params->arr_points[0].custom_float_x, + exp_region_start, params->corner_points[0].green.custom_float_x, exp_resion_start_segment, 0); REG_SET_2(reg->start_cntl_r, 0, - exp_region_start, params->arr_points[0].custom_float_x, + exp_region_start, params->corner_points[0].red.custom_float_x, exp_resion_start_segment, 0); REG_SET(reg->start_slope_cntl_b, 0, - field_region_linear_slope, params->arr_points[0].custom_float_slope); + field_region_linear_slope, params->corner_points[0].blue.custom_float_slope); REG_SET(reg->start_slope_cntl_g, 0, - field_region_linear_slope, params->arr_points[0].custom_float_slope); + field_region_linear_slope, params->corner_points[0].green.custom_float_slope); REG_SET(reg->start_slope_cntl_r, 0, - field_region_linear_slope, params->arr_points[0].custom_float_slope); + field_region_linear_slope, params->corner_points[0].red.custom_float_slope); REG_SET(reg->start_end_cntl1_b, 0, - field_region_end, params->arr_points[1].custom_float_x); + field_region_end, params->corner_points[1].blue.custom_float_x); REG_SET_2(reg->start_end_cntl2_b, 0, - field_region_end_slope, params->arr_points[1].custom_float_slope, - field_region_end_base, params->arr_points[1].custom_float_y); + field_region_end_slope, params->corner_points[1].blue.custom_float_slope, + field_region_end_base, params->corner_points[1].blue.custom_float_y); REG_SET(reg->start_end_cntl1_g, 0, - field_region_end, params->arr_points[1].custom_float_x); + field_region_end, params->corner_points[1].green.custom_float_x); REG_SET_2(reg->start_end_cntl2_g, 0, - field_region_end_slope, params->arr_points[1].custom_float_slope, - field_region_end_base, params->arr_points[1].custom_float_y); + field_region_end_slope, params->corner_points[1].green.custom_float_slope, + field_region_end_base, params->corner_points[1].green.custom_float_y); REG_SET(reg->start_end_cntl1_r, 0, - field_region_end, params->arr_points[1].custom_float_x); + field_region_end, params->corner_points[1].red.custom_float_x); REG_SET_2(reg->start_end_cntl2_r, 0, - field_region_end_slope, params->arr_points[1].custom_float_slope, - field_region_end_base, params->arr_points[1].custom_float_y); + field_region_end_slope, params->corner_points[1].red.custom_float_slope, + field_region_end_base, params->corner_points[1].red.custom_float_y); for (reg_region_cur = reg->region_start; 
reg_region_cur <= reg->region_end; @@ -127,7 +127,7 @@ void cm_helper_program_xfer_func( bool cm_helper_convert_to_custom_float( struct pwl_result_data *rgb_resulted, - struct curve_points *arr_points, + struct curve_points3 *corner_points, uint32_t hw_points_num, bool fixpoint) { @@ -141,20 +141,53 @@ bool cm_helper_convert_to_custom_float( fmt.mantissa_bits = 12; fmt.sign = false; - if (!convert_to_custom_float_format(arr_points[0].x, &fmt, - &arr_points[0].custom_float_x)) { + /* corner_points[0] - beginning base, slope offset for R,G,B + * corner_points[1] - end base, slope offset for R,G,B + */ + if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt, + &corner_points[0].red.custom_float_x)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt, + &corner_points[0].green.custom_float_x)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt, + &corner_points[0].blue.custom_float_x)) { BREAK_TO_DEBUGGER(); return false; } - if (!convert_to_custom_float_format(arr_points[0].offset, &fmt, - &arr_points[0].custom_float_offset)) { + if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt, + &corner_points[0].red.custom_float_offset)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt, + &corner_points[0].green.custom_float_offset)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt, + &corner_points[0].blue.custom_float_offset)) { BREAK_TO_DEBUGGER(); return false; } - if (!convert_to_custom_float_format(arr_points[0].slope, &fmt, - &arr_points[0].custom_float_slope)) { + if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt, + &corner_points[0].red.custom_float_slope)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt, + &corner_points[0].green.custom_float_slope)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt, + &corner_points[0].blue.custom_float_slope)) { BREAK_TO_DEBUGGER(); return false; } @@ -162,22 +195,59 @@ bool cm_helper_convert_to_custom_float( fmt.mantissa_bits = 10; fmt.sign = false; - if (!convert_to_custom_float_format(arr_points[1].x, &fmt, - &arr_points[1].custom_float_x)) { + if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt, + &corner_points[1].red.custom_float_x)) { BREAK_TO_DEBUGGER(); return false; } - - if (fixpoint == true) - arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y); - else if (!convert_to_custom_float_format(arr_points[1].y, &fmt, - &arr_points[1].custom_float_y)) { + if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt, + &corner_points[1].green.custom_float_x)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt, + &corner_points[1].blue.custom_float_x)) { BREAK_TO_DEBUGGER(); return false; } - if (!convert_to_custom_float_format(arr_points[1].slope, &fmt, - &arr_points[1].custom_float_slope)) { + if (fixpoint == true) { + corner_points[1].red.custom_float_y = + dc_fixpt_clamp_u0d14(corner_points[1].red.y); + corner_points[1].green.custom_float_y = + dc_fixpt_clamp_u0d14(corner_points[1].green.y); + corner_points[1].blue.custom_float_y = + dc_fixpt_clamp_u0d14(corner_points[1].blue.y); + } else { + if 
(!convert_to_custom_float_format(corner_points[1].red.y, + &fmt, &corner_points[1].red.custom_float_y)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[1].green.y, + &fmt, &corner_points[1].green.custom_float_y)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[1].blue.y, + &fmt, &corner_points[1].blue.custom_float_y)) { + BREAK_TO_DEBUGGER(); + return false; + } + } + + if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt, + &corner_points[1].red.custom_float_slope)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt, + &corner_points[1].green.custom_float_slope)) { + BREAK_TO_DEBUGGER(); + return false; + } + if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt, + &corner_points[1].blue.custom_float_slope)) { BREAK_TO_DEBUGGER(); return false; } @@ -242,15 +312,10 @@ bool cm_helper_translate_curve_to_hw_format( const struct dc_transfer_func *output_tf, struct pwl_params *lut_params, bool fixpoint) { - struct curve_points *arr_points; + struct curve_points3 *corner_points; struct pwl_result_data *rgb_resulted; struct pwl_result_data *rgb; struct pwl_result_data *rgb_plus_1; - struct fixed31_32 y_r; - struct fixed31_32 y_g; - struct fixed31_32 y_b; - struct fixed31_32 y1_min; - struct fixed31_32 y3_max; int32_t region_start, region_end; int32_t i; @@ -261,14 +326,14 @@ bool cm_helper_translate_curve_to_hw_format( PERF_TRACE(); - arr_points = lut_params->arr_points; + corner_points = lut_params->corner_points; rgb_resulted = lut_params->rgb_resulted; hw_points = 0; memset(lut_params, 0, sizeof(struct pwl_params)); memset(seg_distr, 0, sizeof(seg_distr)); - if (output_tf->tf == TRANSFER_FUNCTION_PQ) { + if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) { /* 32 segments * segments are from 2^-25 to 2^7 */ @@ -327,31 +392,37 @@ bool cm_helper_translate_curve_to_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; - arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2), + // All 3 color channels have same x + corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_start)); - arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2), - dc_fixpt_from_int(region_end)); + corner_points[0].green.x = corner_points[0].red.x; + corner_points[0].blue.x = corner_points[0].red.x; - y_r = rgb_resulted[0].red; - y_g = rgb_resulted[0].green; - y_b = rgb_resulted[0].blue; + corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), + dc_fixpt_from_int(region_end)); + corner_points[1].green.x = corner_points[1].red.x; + corner_points[1].blue.x = corner_points[1].red.x; - y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b)); + corner_points[0].red.y = rgb_resulted[0].red; + corner_points[0].green.y = rgb_resulted[0].green; + corner_points[0].blue.y = rgb_resulted[0].blue; - arr_points[0].y = y1_min; - arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x); - y_r = rgb_resulted[hw_points - 1].red; - y_g = rgb_resulted[hw_points - 1].green; - y_b = rgb_resulted[hw_points - 1].blue; + corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y, + corner_points[0].red.x); + corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y, + corner_points[0].green.x); + corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y, + 
corner_points[0].blue.x); /* see comment above, m_arrPoints[1].y should be the Y value for the * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) */ - y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b)); - - arr_points[1].y = y3_max; - - arr_points[1].slope = dc_fixpt_zero; + corner_points[1].red.y = rgb_resulted[hw_points - 1].red; + corner_points[1].green.y = rgb_resulted[hw_points - 1].green; + corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue; + corner_points[1].red.slope = dc_fixpt_zero; + corner_points[1].green.slope = dc_fixpt_zero; + corner_points[1].blue.slope = dc_fixpt_zero; if (output_tf->tf == TRANSFER_FUNCTION_PQ) { /* for PQ, we want to have a straight line from last HW X point, @@ -360,9 +431,15 @@ bool cm_helper_translate_curve_to_hw_format( const struct fixed31_32 end_value = dc_fixpt_from_int(125); - arr_points[1].slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, arr_points[1].y), - dc_fixpt_sub(end_value, arr_points[1].x)); + corner_points[1].red.slope = dc_fixpt_div( + dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y), + dc_fixpt_sub(end_value, corner_points[1].red.x)); + corner_points[1].green.slope = dc_fixpt_div( + dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y), + dc_fixpt_sub(end_value, corner_points[1].green.x)); + corner_points[1].blue.slope = dc_fixpt_div( + dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y), + dc_fixpt_sub(end_value, corner_points[1].blue.x)); } lut_params->hw_points_num = hw_points; @@ -411,7 +488,7 @@ bool cm_helper_translate_curve_to_hw_format( ++i; } cm_helper_convert_to_custom_float(rgb_resulted, - lut_params->arr_points, + lut_params->corner_points, hw_points, fixpoint); return true; @@ -424,15 +501,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format( const struct dc_transfer_func *output_tf, struct pwl_params *lut_params) { - struct curve_points *arr_points; + struct curve_points3 *corner_points; struct pwl_result_data *rgb_resulted; struct pwl_result_data *rgb; struct pwl_result_data *rgb_plus_1; - struct fixed31_32 y_r; - struct fixed31_32 y_g; - struct fixed31_32 y_b; - struct fixed31_32 y1_min; - struct fixed31_32 y3_max; int32_t region_start, region_end; int32_t i; @@ -443,7 +515,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format( PERF_TRACE(); - arr_points = lut_params->arr_points; + corner_points = lut_params->corner_points; rgb_resulted = lut_params->rgb_resulted; hw_points = 0; @@ -489,31 +561,28 @@ bool cm_helper_translate_curve_to_degamma_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; - arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2), + corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_start)); - arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2), + corner_points[0].green.x = corner_points[0].red.x; + corner_points[0].blue.x = corner_points[0].red.x; + corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_end)); + corner_points[1].green.x = corner_points[1].red.x; + corner_points[1].blue.x = corner_points[1].red.x; - y_r = rgb_resulted[0].red; - y_g = rgb_resulted[0].green; - y_b = rgb_resulted[0].blue; - - y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b)); - - arr_points[0].y = y1_min; - arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x); - y_r = rgb_resulted[hw_points - 1].red; - y_g = rgb_resulted[hw_points - 1].green; - y_b = rgb_resulted[hw_points - 1].blue; + 
corner_points[0].red.y = rgb_resulted[0].red; + corner_points[0].green.y = rgb_resulted[0].green; + corner_points[0].blue.y = rgb_resulted[0].blue; /* see comment above, m_arrPoints[1].y should be the Y value for the * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) */ - y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b)); - - arr_points[1].y = y3_max; - - arr_points[1].slope = dc_fixpt_zero; + corner_points[1].red.y = rgb_resulted[hw_points - 1].red; + corner_points[1].green.y = rgb_resulted[hw_points - 1].green; + corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue; + corner_points[1].red.slope = dc_fixpt_zero; + corner_points[1].green.slope = dc_fixpt_zero; + corner_points[1].blue.slope = dc_fixpt_zero; if (output_tf->tf == TRANSFER_FUNCTION_PQ) { /* for PQ, we want to have a straight line from last HW X point, @@ -522,9 +591,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format( const struct fixed31_32 end_value = dc_fixpt_from_int(125); - arr_points[1].slope = dc_fixpt_div( - dc_fixpt_sub(dc_fixpt_one, arr_points[1].y), - dc_fixpt_sub(end_value, arr_points[1].x)); + corner_points[1].red.slope = dc_fixpt_div( + dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y), + dc_fixpt_sub(end_value, corner_points[1].red.x)); + corner_points[1].green.slope = dc_fixpt_div( + dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y), + dc_fixpt_sub(end_value, corner_points[1].green.x)); + corner_points[1].blue.slope = dc_fixpt_div( + dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y), + dc_fixpt_sub(end_value, corner_points[1].blue.x)); } lut_params->hw_points_num = hw_points; @@ -564,7 +639,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format( ++i; } cm_helper_convert_to_custom_float(rgb_resulted, - lut_params->arr_points, + lut_params->corner_points, hw_points, false); return true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h index 7a531b02871f..5ae4d69391a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h @@ -98,7 +98,7 @@ void cm_helper_program_xfer_func( bool cm_helper_convert_to_custom_float( struct pwl_result_data *rgb_resulted, - struct curve_points *arr_points, + struct curve_points3 *corner_points, uint32_t hw_points_num, bool fixpoint); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 193184affefb..87495dea45ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -45,6 +45,7 @@ #include "dcn10_hubbub.h" #include "dcn10_cm_common.h" #include "dc_link_dp.h" +#include "dccg.h" #define DC_LOGGER_INIT(logger) @@ -786,7 +787,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx != NULL) { hubp = pipe_ctx->plane_res.hubp; - if (hubp != NULL) { + if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) { if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) { /* one pipe underflow, we will reset all the pipes*/ need_recover = true; @@ -812,7 +813,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) if (pipe_ctx != NULL) { hubp = pipe_ctx->plane_res.hubp; /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/ - if (hubp != NULL) + if (hubp != NULL && hubp->funcs->set_hubp_blank_en) hubp->funcs->set_hubp_blank_en(hubp, true); } } @@ -825,7 +826,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc 
*dc) if (pipe_ctx != NULL) { hubp = pipe_ctx->plane_res.hubp; /*DCHUBP_CNTL:HUBP_DISABLE=1*/ - if (hubp != NULL) + if (hubp != NULL && hubp->funcs->hubp_disable_control) hubp->funcs->hubp_disable_control(hubp, true); } } @@ -835,7 +836,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) if (pipe_ctx != NULL) { hubp = pipe_ctx->plane_res.hubp; /*DCHUBP_CNTL:HUBP_DISABLE=0*/ - if (hubp != NULL) + if (hubp != NULL && hubp->funcs->hubp_disable_control) hubp->funcs->hubp_disable_control(hubp, true); } } @@ -847,7 +848,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) if (pipe_ctx != NULL) { hubp = pipe_ctx->plane_res.hubp; /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/ - if (hubp != NULL) + if (hubp != NULL && hubp->funcs->set_hubp_blank_en) hubp->funcs->set_hubp_blank_en(hubp, true); } } @@ -1126,7 +1127,7 @@ static void dcn10_init_hw(struct dc *dc) enable_power_gating_plane(dc->hwseq, true); - memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks)); + memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks)); } static void reset_hw_ctx_wrap( @@ -1603,7 +1604,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1, } -static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) +void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); struct vm_system_aperture_param apt = { {{ 0 } } }; @@ -1703,33 +1704,22 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust); } - -static void program_csc_matrix(struct pipe_ctx *pipe_ctx, +static void dcn10_program_output_csc(struct dc *dc, + struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace, - uint16_t *matrix) + uint16_t *matrix, + int opp_id) { if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { - if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) - pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix); + if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) + pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix); } else { if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL) pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace); } } -static void dcn10_program_output_csc(struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum dc_color_space colorspace, - uint16_t *matrix, - int opp_id) -{ - if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) - program_csc_matrix(pipe_ctx, - colorspace, - matrix); -} - -static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state->visible) return true; @@ -1738,7 +1728,7 @@ static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) return false; } -static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state->visible) return true; @@ -1747,7 +1737,7 @@ static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) return false; } -static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) { if (pipe_ctx->plane_state->visible) return true; @@ -1943,10 +1933,6 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree 
*mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - - - /* TODO: proper fix once fpga works */ - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { dcn10_get_hdr_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); @@ -2026,8 +2012,6 @@ static void update_scaler(struct pipe_ctx *pipe_ctx) bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; - /* TODO: proper fix once fpga works */ - pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha; pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; /* scaler configuration */ @@ -2035,7 +2019,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); } -static void update_dchubp_dpp( +void update_dchubp_dpp( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) @@ -2052,16 +2036,22 @@ static void update_dchubp_dpp( */ if (plane_state->update_flags.bits.full_update) { bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <= - dc->res_pool->dccg->clks.dispclk_khz / 2; + dc->res_pool->clk_mgr->clks.dispclk_khz / 2; dpp->funcs->dpp_dppclk_control( dpp, should_divided_by_2, true); - dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ? - dc->res_pool->dccg->clks.dispclk_khz / 2 : - dc->res_pool->dccg->clks.dispclk_khz; + if (dc->res_pool->dccg) + dc->res_pool->dccg->funcs->update_dpp_dto( + dc->res_pool->dccg, + dpp->inst, + pipe_ctx->plane_res.bw.calc.dppclk_khz); + else + dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ? + dc->res_pool->clk_mgr->clks.dispclk_khz / 2 : + dc->res_pool->clk_mgr->clks.dispclk_khz; } /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG @@ -2182,7 +2172,7 @@ static void dcn10_blank_pixel_data( } } -static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) +void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) { struct fixed31_32 multiplier = dc_fixpt_from_fraction( pipe_ctx->plane_state->sdr_white_level, 80); @@ -2257,47 +2247,7 @@ static void program_all_pipe_in_tree( } } -static void dcn10_pplib_apply_display_requirements( - struct dc *dc, - struct dc_state *context) -{ - struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - - pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz; - pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz; - pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz; - pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz; - pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz; - pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz; - dce110_fill_display_configs(context, pp_display_cfg); - - if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( - struct dm_pp_display_configuration)) != 0) - dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); - - dc->prev_display_config = *pp_display_cfg; -} - -static void optimize_shared_resources(struct dc *dc) -{ - if (dc->current_state->stream_count == 0) { - /* S0i2 message */ - dcn10_pplib_apply_display_requirements(dc, dc->current_state); - } - - if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) - dcn_bw_notify_pplib_of_wm_ranges(dc); -} - -static void ready_shared_resources(struct dc *dc, struct dc_state *context) -{ - /* S0i2 message */ - if (dc->current_state->stream_count == 0 && - context->stream_count != 0) - dcn10_pplib_apply_display_requirements(dc, 
context); -} - -static struct pipe_ctx *find_top_pipe_for_stream( +struct pipe_ctx *find_top_pipe_for_stream( struct dc *dc, struct dc_state *context, const struct dc_stream_state *stream) @@ -2398,10 +2348,9 @@ static void dcn10_apply_ctx_for_surface( hubbub1_wm_change_req_wa(dc->res_pool->hubbub); } -static void dcn10_set_bandwidth( +static void dcn10_prepare_bandwidth( struct dc *dc, - struct dc_state *context, - bool safe_to_lower) + struct dc_state *context) { if (dc->debug.sanity_checks) dcn10_verify_allow_pstate_change_high(dc); @@ -2410,12 +2359,39 @@ static void dcn10_set_bandwidth( if (context->stream_count == 0) context->bw.dcn.clk.phyclk_khz = 0; - dc->res_pool->dccg->funcs->update_clocks( - dc->res_pool->dccg, - &context->bw.dcn.clk, - safe_to_lower); + dc->res_pool->clk_mgr->funcs->update_clocks( + dc->res_pool->clk_mgr, + context, + false); + } - dcn10_pplib_apply_display_requirements(dc, context); + hubbub1_program_watermarks(dc->res_pool->hubbub, + &context->bw.dcn.watermarks, + dc->res_pool->ref_clock_inKhz / 1000, + true); + + if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) + dcn_bw_notify_pplib_of_wm_ranges(dc); + + if (dc->debug.sanity_checks) + dcn10_verify_allow_pstate_change_high(dc); +} + +static void dcn10_optimize_bandwidth( + struct dc *dc, + struct dc_state *context) +{ + if (dc->debug.sanity_checks) + dcn10_verify_allow_pstate_change_high(dc); + + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + if (context->stream_count == 0) + context->bw.dcn.clk.phyclk_khz = 0; + + dc->res_pool->clk_mgr->funcs->update_clocks( + dc->res_pool->clk_mgr, + context, + true); } hubbub1_program_watermarks(dc->res_pool->hubbub, @@ -2423,6 +2399,9 @@ static void dcn10_set_bandwidth( dc->res_pool->ref_clock_inKhz / 1000, true); + if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) + dcn_bw_notify_pplib_of_wm_ranges(dc); + if (dc->debug.sanity_checks) dcn10_verify_allow_pstate_change_high(dc); } @@ -2694,7 +2673,6 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) static const struct hw_sequencer_funcs dcn10_funcs = { .program_gamut_remap = program_gamut_remap, - .program_csc_matrix = program_csc_matrix, .init_hw = dcn10_init_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, @@ -2721,7 +2699,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .disable_plane = dcn10_disable_plane, .blank_pixel_data = dcn10_blank_pixel_data, .pipe_control_lock = dcn10_pipe_control_lock, - .set_bandwidth = dcn10_set_bandwidth, + .prepare_bandwidth = dcn10_prepare_bandwidth, + .optimize_bandwidth = dcn10_optimize_bandwidth, .reset_hw_ctx_wrap = reset_hw_ctx_wrap, .enable_stream_timing = dcn10_enable_stream_timing, .set_drr = set_drr, @@ -2732,10 +2711,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .log_hw_state = dcn10_log_hw_state, .get_hw_state = dcn10_get_hw_state, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .ready_shared_resources = ready_shared_resources, - .optimize_shared_resources = optimize_shared_resources, - .pplib_apply_display_requirements = - dcn10_pplib_apply_display_requirements, .edp_backlight_control = hwss_edp_backlight_control, .edp_power_control = hwss_edp_power_control, .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 84d461e0ed3e..5e5610c9e600 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -51,4 +51,24 @@ void dcn10_get_hw_state( char *pBuf, unsigned int bufSize, unsigned int mask); +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp); + +void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); + +void update_dchubp_dpp( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); + +struct pipe_ctx *find_top_pipe_for_stream( + struct dc *dc, + struct dc_state *context, + const struct dc_stream_state *stream); + #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index ba6a8686062f..477ab9222216 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -589,7 +589,7 @@ static bool dcn10_link_encoder_validate_hdmi_output( return false; /* DCE11 HW does not support 420 */ - if (!enc10->base.features.ycbcr420_supported && + if (!enc10->base.features.hdmi_ycbcr420_supported && crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) return false; @@ -606,8 +606,10 @@ bool dcn10_link_encoder_validate_dp_output( const struct dcn10_link_encoder *enc10, const struct dc_crtc_timing *crtc_timing) { - if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) - return false; + if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { + if (!enc10->base.features.dp_ycbcr420_supported) + return false; + } return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 54626682bab2..7d1f66797cb3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -87,9 +87,8 @@ static void optc1_disable_stereo(struct timing_generator *optc) REG_SET(OTG_STEREO_CONTROL, 0, OTG_STEREO_EN, 0); - REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0, + REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0, OTG_3D_STRUCTURE_EN, 0, - OTG_3D_STRUCTURE_V_UPDATE_MODE, 0, OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0); } @@ -274,10 +273,12 @@ void optc1_program_timing( * program the reg for interrupt postition. 
*/ vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1; - if (vertical_line_start < 0) { - ASSERT(0); + v_fp2 = 0; + if (vertical_line_start < 0) + v_fp2 = -vertical_line_start; + if (vertical_line_start < 0) vertical_line_start = 0; - } + REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0, OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start); @@ -296,9 +297,6 @@ void optc1_program_timing( if (patched_crtc_timing.flags.INTERLACE == 1) field_num = 1; } - v_fp2 = 0; - if (optc->dlg_otg_param.vstartup_start > asic_blank_end) - v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end; /* Interlace */ if (patched_crtc_timing.flags.INTERLACE == 1) { @@ -1155,9 +1153,8 @@ static void optc1_enable_stereo(struct timing_generator *optc, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1); if (flags->PROGRAM_STEREO) - REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL, + REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED, - OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED, OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index a71453a15ae3..47dbe4bb294a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -28,23 +28,23 @@ #include "resource.h" #include "include/irq_service_interface.h" -#include "dcn10/dcn10_resource.h" +#include "dcn10_resource.h" -#include "dcn10/dcn10_ipp.h" -#include "dcn10/dcn10_mpc.h" +#include "dcn10_ipp.h" +#include "dcn10_mpc.h" #include "irq/dcn10/irq_service_dcn10.h" -#include "dcn10/dcn10_dpp.h" +#include "dcn10_dpp.h" #include "dcn10_optc.h" -#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn10_hw_sequencer.h" #include "dce110/dce110_hw_sequencer.h" -#include "dcn10/dcn10_opp.h" -#include "dcn10/dcn10_link_encoder.h" -#include "dcn10/dcn10_stream_encoder.h" -#include "dce/dce_clocks.h" +#include "dcn10_opp.h" +#include "dcn10_link_encoder.h" +#include "dcn10_stream_encoder.h" +#include "dcn10_clk_mgr.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" -#include "../virtual/virtual_stream_encoder.h" +#include "virtual/virtual_stream_encoder.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" #include "dcn10_hubp.h" @@ -438,6 +438,7 @@ static const struct dcn_optc_mask tg_mask = { static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_0), NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; @@ -719,7 +720,8 @@ static struct timing_generator *dcn10_timing_generator_create( static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 600000, - .ycbcr420_supported = true, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = false, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, @@ -949,8 +951,8 @@ static void destruct(struct dcn10_resource_pool *pool) if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); - if (pool->base.dccg != NULL) - dce_dccg_destroy(&pool->base.dccg); + if (pool->base.clk_mgr != NULL) + dce_clk_mgr_destroy(&pool->base.clk_mgr); kfree(pool->base.pp_smu); } @@ -1276,8 +1278,8 @@ static bool construct( } } - pool->base.dccg = dcn1_dccg_create(ctx); - if (pool->base.dccg == NULL) { + pool->base.clk_mgr = dcn1_clk_mgr_create(ctx); + if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create 
display clock!\n"); BREAK_TO_DEBUGGER(); goto fail; diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index f2ea8452d48f..beb08fd12b1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -55,10 +55,10 @@ struct pp_smu { struct pp_smu_wm_set_range { unsigned int wm_inst; - uint32_t min_fill_clk_khz; - uint32_t max_fill_clk_khz; - uint32_t min_drain_clk_khz; - uint32_t max_drain_clk_khz; + uint32_t min_fill_clk_mhz; + uint32_t max_fill_clk_mhz; + uint32_t min_drain_clk_mhz; + uint32_t max_drain_clk_mhz; }; #define MAX_WATERMARK_SETS 4 @@ -77,15 +77,15 @@ struct pp_smu_display_requirement_rv { */ unsigned int display_count; - /* PPSMC_MSG_SetHardMinFclkByFreq: khz + /* PPSMC_MSG_SetHardMinFclkByFreq: mhz * FCLK will vary with DPM, but never below requested hard min */ - unsigned int hard_min_fclk_khz; + unsigned int hard_min_fclk_mhz; - /* PPSMC_MSG_SetHardMinDcefclkByFreq: khz + /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz * fixed clock at requested freq, either from FCH bypass or DFS */ - unsigned int hard_min_dcefclk_khz; + unsigned int hard_min_dcefclk_mhz; /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz * when DF is in cstate, dcf clock is further divided down @@ -103,13 +103,19 @@ struct pp_smu_funcs_rv { void (*set_display_count)(struct pp_smu *pp, int count); /* which SMU message? are reader and writer WM separate SMU msg? */ + /* + * PPSMC_MSG_SetDriverDramAddrHigh + * PPSMC_MSG_SetDriverDramAddrLow + * PPSMC_MSG_TransferTableDram2Smu + * + * */ void (*set_wm_ranges)(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges); /* PPSMC_MSG_SetHardMinDcfclkByFreq * fixed clock at requested freq, either from FCH bypass or DFS */ - void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz); + void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz); /* PPSMC_MSG_SetMinDeepSleepDcfclk * when DF is in cstate, dcf clock is further divided down @@ -120,12 +126,12 @@ struct pp_smu_funcs_rv { /* PPSMC_MSG_SetHardMinFclkByFreq * FCLK will vary with DPM, but never below requested hard min */ - void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz); + void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz); /* PPSMC_MSG_SetHardMinSocclkByFreq * Needed for DWB support */ - void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz); + void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz); /* PME w/a */ void (*set_pme_wa_enable)(struct pp_smu *pp); diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index 2b83f922ac02..1af8c777b3ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -208,22 +208,20 @@ struct dm_bl_data_point { /* Brightness level as effective value in range 0-255, * corresponding to above percentage */ - uint8_t signalLevel; + uint8_t signal_level; }; /* Total size of the structure should not exceed 256 bytes */ struct dm_acpi_atif_backlight_caps { - - uint16_t size; /* Bytes 0-1 (2 bytes) */ uint16_t flags; /* Byted 2-3 (2 bytes) */ - uint8_t errorCode; /* Byte 4 */ - uint8_t acLevelPercentage; /* Byte 5 */ - uint8_t dcLevelPercentage; /* Byte 6 */ - uint8_t minInputSignal; /* Byte 7 */ - uint8_t maxInputSignal; /* Byte 8 */ - uint8_t numOfDataPoints; /* Byte 9 */ - struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/ + uint8_t error_code; /* Byte 4 */ + uint8_t ac_level_percentage; /* Byte 5 */ + 
uint8_t dc_level_percentage; /* Byte 6 */ + uint8_t min_input_signal; /* Byte 7 */ + uint8_t max_input_signal; /* Byte 8 */ + uint8_t num_data_points; /* Byte 9 */ + struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/ }; enum dm_acpi_display_type { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index cbafce649e33..5dd04520ceca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -113,7 +113,8 @@ struct _vcs_dpi_soc_bounding_box_st { int use_urgent_burst_bw; double max_hscl_ratio; double max_vscl_ratio; - struct _vcs_dpi_voltage_scaling_st clock_limits[7]; + unsigned int num_states; + struct _vcs_dpi_voltage_scaling_st clock_limits[8]; }; struct _vcs_dpi_ip_params_st { diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h index 39ee8eba3c31..d1656c9d50df 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h +++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h @@ -126,7 +126,7 @@ static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2) { struct bw_fixed res; - div64_u64_rem(arg1.value, arg2.value, &res.value); + div64_u64_rem(arg1.value, arg2.value, (uint64_t *)&res.value); return res; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index c1976c175b57..e3ee96afa60e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option); void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); /********** DAL Core*********************/ -#include "display_clock.h" +#include "hw/clk_mgr.h" #include "transform.h" #include "dpp.h" @@ -169,6 +169,7 @@ struct resource_pool { unsigned int audio_count; struct audio_support audio_support; + struct clk_mgr *clk_mgr; struct dccg *dccg; struct irq_service *irqs; @@ -287,7 +288,7 @@ struct dc_state { struct dcn_bw_internal_vars dcn_bw_vars; #endif - struct dccg *dis_clk; + struct clk_mgr *dccg; struct kref refcount; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index e688eb9b975c..ece954a40a8e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -31,8 +31,8 @@ #define __DCN_CALCS_H__ #include "bw_fixed.h" -#include "display_clock.h" #include "../dml/display_mode_lib.h" +#include "hw/clk_mgr.h" struct dc; struct dc_state; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index a83a48494613..abc961c0906e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -47,12 +47,18 @@ struct abm_funcs { bool (*set_abm_level)(struct abm *abm, unsigned int abm_level); bool (*set_abm_immediate_disable)(struct abm *abm); bool (*init_backlight)(struct abm *abm); - bool (*set_backlight_level)(struct abm *abm, - unsigned int backlight_level, + + /* backlight_pwm_u16_16 is unsigned 32 bit, + * 16 bit integer + 16 fractional, where 1.0 is max backlight value. 
+ */ + bool (*set_backlight_level_pwm)(struct abm *abm, + unsigned int backlight_pwm_u16_16, unsigned int frame_ramp, unsigned int controller_id, bool use_smooth_brightness); - unsigned int (*get_current_backlight_8_bit)(struct abm *abm); + + unsigned int (*get_current_backlight)(struct abm *abm); + unsigned int (*get_target_backlight)(struct abm *abm); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 689faa16c0ae..23a4b18e5fee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -23,41 +23,25 @@ * */ -#ifndef __DISPLAY_CLOCK_H__ -#define __DISPLAY_CLOCK_H__ +#ifndef __DAL_CLK_MGR_H__ +#define __DAL_CLK_MGR_H__ #include "dm_services_types.h" #include "dc.h" -/* Structure containing all state-dependent clocks - * (dependent on "enum clocks_state") */ -struct state_dependent_clocks { - int display_clk_khz; - int pixel_clk_khz; -}; - -struct dccg { +struct clk_mgr { struct dc_context *ctx; - const struct display_clock_funcs *funcs; + const struct clk_mgr_funcs *funcs; - enum dm_pp_clocks_state max_clks_state; - enum dm_pp_clocks_state cur_min_clks_state; struct dc_clocks clks; }; -struct display_clock_funcs { - void (*update_clocks)(struct dccg *dccg, - struct dc_clocks *new_clocks, +struct clk_mgr_funcs { + void (*update_clocks)(struct clk_mgr *clk_mgr, + struct dc_state *context, bool safe_to_lower); - int (*set_dispclk)(struct dccg *dccg, - int requested_clock_khz); - - int (*get_dp_ref_clk_frequency)(struct dccg *dccg); - bool (*update_dfs_bypass)(struct dccg *dccg, - struct dc *dc, - struct dc_state *context, - int requested_clock_khz); + int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); }; -#endif /* __DISPLAY_CLOCK_H__ */ +#endif /* __DAL_CLK_MGR_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h new file mode 100644 index 000000000000..95a56d012626 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -0,0 +1,44 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DCCG_H__ +#define __DAL_DCCG_H__ + +#include "dc_types.h" + +struct dccg { + struct dc_context *ctx; + const struct dccg_funcs *funcs; + + int ref_dppclk; +}; + +struct dccg_funcs { + void (*update_dpp_dto)(struct dccg *dccg, + int dpp_inst, + int req_dppclk); +}; + +#endif //__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index cf7433ebf91a..da85537a4488 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -53,6 +53,12 @@ struct curve_points { uint32_t custom_float_slope; }; +struct curve_points3 { + struct curve_points red; + struct curve_points green; + struct curve_points blue; +}; + struct pwl_result_data { struct fixed31_32 red; struct fixed31_32 green; @@ -71,9 +77,17 @@ struct pwl_result_data { uint32_t delta_blue_reg; }; +/* arr_curve_points - regamma regions/segments specification + * arr_points - beginning and end point specified separately (only one on DCE) + * corner_points - beginning and end point for all 3 colors (DCN) + * rgb_resulted - final curve + */ struct pwl_params { struct gamma_curve arr_curve_points[34]; - struct curve_points arr_points[2]; + union { + struct curve_points arr_points[2]; + struct curve_points3 corner_points[2]; + }; struct pwl_result_data rgb_resulted[256 + 3]; uint32_t hw_points_num; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index e28e9770e0a3..c20fdcaac53b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -65,7 +65,8 @@ struct encoder_feature_support { enum dc_color_depth max_hdmi_deep_color; unsigned int max_hdmi_pixel_clock; - bool ycbcr420_supported; + bool hdmi_ycbcr420_supported; + bool dp_ycbcr420_supported; }; union dpcd_psr_configuration { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index da89c2edb07c..06df02ddff6a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -31,7 +31,7 @@ #include "dml/display_mode_structs.h" struct dchub_init_data; -struct cstate_pstate_watermarks_st { +struct cstate_pstate_watermarks_st1 { uint32_t cstate_exit_ns; uint32_t cstate_enter_plus_exit_ns; uint32_t pstate_change_ns; @@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st { struct dcn_watermarks { uint32_t pte_meta_urgent_ns; uint32_t urgent_ns; - struct cstate_pstate_watermarks_st cstate_pstate; + struct cstate_pstate_watermarks_st1 cstate_pstate; }; struct dcn_watermark_set { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 26f29d5da3d8..e9b702ce02dd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -32,8 +32,6 @@ #include "inc/hw/link_encoder.h" #include "core_status.h" -#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF - enum pipe_gating_control { PIPE_GATING_CONTROL_DISABLE = 0, PIPE_GATING_CONTROL_ENABLE, @@ -87,11 +85,6 @@ struct hw_sequencer_funcs { void (*program_gamut_remap)( struct pipe_ctx *pipe_ctx); - void (*program_csc_matrix)( - struct pipe_ctx *pipe_ctx, - enum dc_color_space colorspace, - uint16_t *matrix); - void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx, enum dc_color_space 
colorspace, @@ -177,10 +170,12 @@ struct hw_sequencer_funcs { struct pipe_ctx *pipe_ctx, bool blank); - void (*set_bandwidth)( + void (*prepare_bandwidth)( struct dc *dc, - struct dc_state *context, - bool safe_to_lower); + struct dc_state *context); + void (*optimize_bandwidth)( + struct dc *dc, + struct dc_state *context); void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, int vmin, int vmax); @@ -210,11 +205,6 @@ struct hw_sequencer_funcs { struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx); - void (*ready_shared_resources)(struct dc *dc, struct dc_state *context); - void (*optimize_shared_resources)(struct dc *dc); - void (*pplib_apply_display_requirements)( - struct dc *dc, - struct dc_state *context); void (*edp_power_control)( struct dc_link *link, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 33b99e3ab10d..0086a2f1d21a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -30,9 +30,6 @@ #include "dal_asic_id.h" #include "dm_pp_smu.h" -/* TODO unhardcode, 4 for CZ*/ -#define MEMORY_TYPE_MULTIPLIER 4 - enum dce_version resource_parse_asic_id( struct hw_asic_id asic_id); diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index cdcefd087487..7480f072c375 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -306,6 +306,18 @@ static struct fixed31_32 translate_from_linear_space( a1); } +static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) +{ + struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10); + + return translate_from_linear_space(arg, + dc_fixpt_zero, + dc_fixpt_zero, + dc_fixpt_zero, + dc_fixpt_zero, + gamma); +} + static struct fixed31_32 translate_to_linear_space( struct fixed31_32 arg, struct fixed31_32 a0, @@ -709,6 +721,169 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma, } } +static void hermite_spline_eetf(struct fixed31_32 input_x, + struct fixed31_32 max_display, + struct fixed31_32 min_display, + struct fixed31_32 max_content, + struct fixed31_32 *out_x) +{ + struct fixed31_32 min_lum_pq; + struct fixed31_32 max_lum_pq; + struct fixed31_32 max_content_pq; + struct fixed31_32 ks; + struct fixed31_32 E1; + struct fixed31_32 E2; + struct fixed31_32 E3; + struct fixed31_32 t; + struct fixed31_32 t2; + struct fixed31_32 t3; + struct fixed31_32 two; + struct fixed31_32 three; + struct fixed31_32 temp1; + struct fixed31_32 temp2; + struct fixed31_32 a = dc_fixpt_from_fraction(15, 10); + struct fixed31_32 b = dc_fixpt_from_fraction(5, 10); + struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small + + if (dc_fixpt_eq(max_content, dc_fixpt_zero)) { + *out_x = dc_fixpt_zero; + return; + } + + compute_pq(input_x, &E1); + compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq); + compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq); + compute_pq(dc_fixpt_one, &max_content_pq); // always 1? 
DAL2 code is weird + a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent + ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b + + if (dc_fixpt_lt(E1, ks)) + E2 = E1; + else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) { + if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks))) + // t = (E1 - ks) / (1 - ks) + t = dc_fixpt_div(dc_fixpt_sub(E1, ks), + dc_fixpt_sub(dc_fixpt_one, ks)); + else + t = dc_fixpt_zero; + + two = dc_fixpt_from_int(2); + three = dc_fixpt_from_int(3); + + t2 = dc_fixpt_mul(t, t); + t3 = dc_fixpt_mul(t2, t); + temp1 = dc_fixpt_mul(two, t3); + temp2 = dc_fixpt_mul(three, t2); + + // (2t^3 - 3t^2 + 1) * ks + E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one, + dc_fixpt_sub(temp1, temp2))); + + // (-2t^3 + 3t^2) * max_lum_pq + E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq, + dc_fixpt_sub(temp2, temp1))); + + temp1 = dc_fixpt_mul(two, t2); + temp2 = dc_fixpt_sub(dc_fixpt_one, ks); + + // (t^3 - 2t^2 + t) * (1-ks) + E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2, + dc_fixpt_add(t, dc_fixpt_sub(t3, temp1)))); + } else + E2 = dc_fixpt_one; + + temp1 = dc_fixpt_sub(dc_fixpt_one, E2); + temp2 = dc_fixpt_mul(temp1, temp1); + temp2 = dc_fixpt_mul(temp2, temp2); + // temp2 = (1-E2)^4 + + E3 = dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2)); + compute_de_pq(E3, out_x); + + *out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content)); +} + +static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, + uint32_t hw_points_num, + const struct hw_x_point *coordinate_x, + const struct freesync_hdr_tf_params *fs_params) +{ + uint32_t i; + struct pwl_float_data_ex *rgb = rgb_regamma; + const struct hw_x_point *coord_x = coordinate_x; + struct fixed31_32 scaledX = dc_fixpt_zero; + struct fixed31_32 scaledX1 = dc_fixpt_zero; + struct fixed31_32 max_display = dc_fixpt_from_int(fs_params->max_display); + struct fixed31_32 min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000); + struct fixed31_32 max_content = dc_fixpt_from_int(fs_params->max_content); + struct fixed31_32 min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000); + struct fixed31_32 clip = dc_fixpt_one; + struct fixed31_32 output; + bool use_eetf = false; + bool is_clipped = false; + struct fixed31_32 sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level); + + if (fs_params == NULL || fs_params->max_content == 0 || + fs_params->max_display == 0) + return false; + + if (fs_params->min_display > 1000) // cap at 0.1 at the bottom + min_display = dc_fixpt_from_fraction(1, 10); + if (fs_params->max_display < 100) // cap at 100 at the top + max_display = dc_fixpt_from_int(100); + + if (fs_params->min_content < fs_params->min_display) + use_eetf = true; + else + min_content = min_display; + + if (fs_params->max_content > fs_params->max_display) + use_eetf = true; + else + max_content = max_display; + + rgb += 32; // first 32 points have problems with fixed point, too small + coord_x += 32; + for (i = 32; i <= hw_points_num; i++) { + if (!is_clipped) { + if (use_eetf) { + /*max content is equal 1 */ + scaledX1 = dc_fixpt_div(coord_x->x, + dc_fixpt_div(max_content, sdr_white_level)); + hermite_spline_eetf(scaledX1, max_display, min_display, + max_content, &scaledX); + } else + scaledX = dc_fixpt_div(coord_x->x, + dc_fixpt_div(max_display, sdr_white_level)); + + if (dc_fixpt_lt(scaledX, clip)) { + if (dc_fixpt_lt(scaledX, dc_fixpt_zero)) + output = dc_fixpt_zero; + else + output = calculate_gamma22(scaledX); + + rgb->r = 
output; + rgb->g = output; + rgb->b = output; + } else { + is_clipped = true; + rgb->r = clip; + rgb->g = clip; + rgb->b = clip; + } + } else { + rgb->r = clip; + rgb->g = clip; + rgb->b = clip; + } + + ++coord_x; + ++rgb; + } + + return true; +} + static void build_degamma(struct pwl_float_data_ex *curve, uint32_t hw_points_num, const struct hw_x_point *coordinate_x, bool is_2_4) @@ -1356,7 +1531,8 @@ static bool map_regamma_hw_to_x_user( #define _EXTRA_POINTS 3 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, - const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed) + const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, + const struct freesync_hdr_tf_params *fs_params) { struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; struct dividers dividers; @@ -1374,7 +1550,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, /* we can use hardcoded curve for plain SRGB TF */ if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && output_tf->tf == TRANSFER_FUNCTION_SRGB && - (!mapUserRamp && ramp->type == GAMMA_RGB_256)) + (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256))) return true; output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; @@ -1424,6 +1600,12 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, MAX_HW_POINTS, coordinates_x, output_tf->sdr_ref_white_level); + } else if (tf == TRANSFER_FUNCTION_GAMMA22 && + fs_params != NULL) { + build_freesync_hdr(rgb_regamma, + MAX_HW_POINTS, + coordinates_x, + fs_params); } else { tf_pts->end_exponent = 0; tf_pts->x_point_at_y1_red = 1; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index 63ccb9c91224..a6e164df090a 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -73,12 +73,21 @@ struct regamma_lut { }; }; +struct freesync_hdr_tf_params { + unsigned int sdr_white_level; + unsigned int min_content; // luminance in 1/10000 nits + unsigned int max_content; // luminance in nits + unsigned int min_display; // luminance in 1/10000 nits + unsigned int max_display; // luminance in nits +}; + void setup_x_points_distribution(void); void precompute_pq(void); void precompute_de_pq(void); bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, - const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed); + const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, + const struct freesync_hdr_tf_params *fs_params); bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, const struct dc_gamma *ramp, bool mapUserRamp); diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 4018c7180d00..620a171620ee 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -37,6 +37,8 @@ #define RENDER_TIMES_MAX_COUNT 10 /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ #define BTR_EXIT_MARGIN 2000 +/*Threshold to exit fixed refresh rate*/ +#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4 /* Number of consecutive frames to check before entering/exiting fixed refresh*/ #define FIXED_REFRESH_ENTER_FRAME_COUNT 5 #define FIXED_REFRESH_EXIT_FRAME_COUNT 5 @@ -257,40 +259,14 @@ static void apply_below_the_range(struct core_freesync *core_freesync, if 
(in_out_vrr->btr.btr_active) { in_out_vrr->btr.frame_counter = 0; in_out_vrr->btr.btr_active = false; - - /* Exit Fixed Refresh mode */ - } else if (in_out_vrr->fixed.fixed_active) { - - in_out_vrr->fixed.frame_counter++; - - if (in_out_vrr->fixed.frame_counter > - FIXED_REFRESH_EXIT_FRAME_COUNT) { - in_out_vrr->fixed.frame_counter = 0; - in_out_vrr->fixed.fixed_active = false; - } } } else if (last_render_time_in_us > max_render_time_in_us) { /* Enter Below the Range */ - if (!in_out_vrr->btr.btr_active && - in_out_vrr->btr.btr_enabled) { - in_out_vrr->btr.btr_active = true; - - /* Enter Fixed Refresh mode */ - } else if (!in_out_vrr->fixed.fixed_active && - !in_out_vrr->btr.btr_enabled) { - in_out_vrr->fixed.frame_counter++; - - if (in_out_vrr->fixed.frame_counter > - FIXED_REFRESH_ENTER_FRAME_COUNT) { - in_out_vrr->fixed.frame_counter = 0; - in_out_vrr->fixed.fixed_active = true; - } - } + in_out_vrr->btr.btr_active = true; } /* BTR set to "not active" so disengage */ if (!in_out_vrr->btr.btr_active) { - in_out_vrr->btr.btr_active = false; in_out_vrr->btr.inserted_duration_in_us = 0; in_out_vrr->btr.frames_to_insert = 0; in_out_vrr->btr.frame_counter = 0; @@ -375,7 +351,12 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync, bool update = false; unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; - if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { + //Compute the exit refresh rate and exit frame duration + unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us) + + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ)); + unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz; + + if (last_render_time_in_us < exit_frame_duration_in_us) { /* Exit Fixed Refresh mode */ if (in_out_vrr->fixed.fixed_active) { in_out_vrr->fixed.frame_counter++; |
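The abm_funcs hunk above replaces set_backlight_level()/get_current_backlight_8_bit() with a PWM-based interface: set_backlight_level_pwm() takes the backlight as an unsigned 16.16 fixed-point value, where 0x00010000 (1.0) is full brightness. Below is a minimal sketch of the conversion a caller might do, assuming an 8-bit user brightness in the range 0-255; the helper name and the input range are illustrative assumptions, not part of this patch.

/*
 * Hypothetical helper: scale an 8-bit brightness (0-255) into the
 * unsigned 16.16 fixed-point range expected by set_backlight_level_pwm(),
 * where 0x00010000 represents maximum backlight (1.0).
 */
static uint32_t user_brightness_to_pwm_u16_16(uint8_t user_level)
{
	/* (user_level / 255) * 65536, with rounding */
	return ((uint32_t)user_level * 0x10000u + 127u) / 255u;
}

/* Examples: 0 -> 0x0000, 128 -> 0x8081 (~0.502), 255 -> 0x10000 (1.0). */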
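The hermite_spline_eetf() hunk above implements, in DC's 31.32 fixed-point arithmetic, a tone-mapping roll-off that closely follows the reference EETF of Report ITU-R BT.2390: values below the knee ks pass through unchanged, values between ks and 1.0 are blended toward the display maximum with a Hermite spline, and the display minimum is added back as min * (1 - E2)^4. The floating-point sketch below only illustrates that arithmetic (it omits the E1 > 1.0 clamp and the final de-PQ/normalization the driver performs); the function name and the use of doubles are mine, not the driver's.

#include <math.h>

/*
 * Floating-point illustration of the Hermite-spline roll-off.  All inputs
 * are already PQ-encoded and normalized to the content maximum; ks is the
 * knee start point (roughly 1.5 * max_lum_pq - 0.5 in the driver).
 */
static double eetf_rolloff(double e1, double ks,
			   double max_lum_pq, double min_lum_pq)
{
	double e2;

	if (e1 < ks) {
		e2 = e1;	/* below the knee: pass through */
	} else {
		double t = (1.0 - ks) > 1e-6 ? (e1 - ks) / (1.0 - ks) : 0.0;
		double t2 = t * t, t3 = t2 * t;

		/* Hermite blend between the knee and the display maximum */
		e2 = (2.0 * t3 - 3.0 * t2 + 1.0) * ks
		   + (-2.0 * t3 + 3.0 * t2) * max_lum_pq
		   + (t3 - 2.0 * t2 + t) * (1.0 - ks);
	}

	/* E3 = E2 + min_lum_pq * (1 - E2)^4: lift blacks toward the display minimum */
	return e2 + min_lum_pq * pow(1.0 - e2, 4.0);
}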
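The new struct freesync_hdr_tf_params in color_gamma.h deliberately mixes units: the minimum luminances are given in 1/10000th of a nit, the maximums in whole nits, matching the dc_fixpt_from_fraction(..., 10000) and dc_fixpt_from_int() conversions in build_freesync_hdr(). A hedged example of filling it in; the numbers are hypothetical panel/content values, not anything mandated by the patch.

/*
 * Hypothetical example only; real values come from the sink's HDR static
 * metadata and the content's mastering metadata.
 */
struct freesync_hdr_tf_params params = {
	.sdr_white_level = 80,		/* map SDR white to 80 nits */
	.min_content	 = 0,		/* 0.0000 nits */
	.max_content	 = 1000,	/* 1000 nit mastering peak */
	.min_display	 = 500,		/* 0.05 nits (500 * 1/10000) */
	.max_display	 = 600,		/* 600 nit panel peak */
};

With max_content above max_display (and min_content below min_display), build_freesync_hdr() takes the EETF path sketched earlier; otherwise it simply rescales the curve against max_display.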
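Finally, the apply_fixed_refresh() hunk replaces the old time-based exit test (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) with a margin expressed in Hz: the exit threshold is the frame duration corresponding to the minimum refresh rate plus FIXED_REFRESH_EXIT_MARGIN_IN_HZ. A worked example follows, assuming a hypothetical panel whose minimum refresh rate is 40 Hz; the wrapper function name is mine, written only to make the arithmetic concrete.

/* Worked example of the new exit condition for a 40 Hz minimum refresh rate. */
static unsigned int fixed_refresh_exit_threshold_us(unsigned int max_render_time_in_us)
{
	/* 40 Hz minimum refresh rate -> max_render_time_in_us == 25000 */
	unsigned int exit_refresh_rate_in_milli_hz =
		1000000000 / max_render_time_in_us +	/* 40000 mHz         */
		1000 * FIXED_REFRESH_EXIT_MARGIN_IN_HZ;	/* + 4000 mHz (4 Hz) */

	return 1000000000 / exit_refresh_rate_in_milli_hz;	/* ~22727 us */
}

At 40 Hz this lands close to the old fixed 2000 us margin (exit below 23000 us), but because the margin is now specified in Hz it scales with the panel's minimum refresh rate instead of being a constant number of microseconds.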