Diffstat (limited to 'drivers/gpu/drm/amd/display')
33 files changed, 565 insertions, 257 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 48f2b3710e7c..d53c60b37cc6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -918,6 +918,23 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } + if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) { + adev->dm.dc->debug.force_single_disp_pipe_split = false; + adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; + } + + if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) + adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; + + if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) + adev->dm.dc->debug.disable_stutter = true; + + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) + adev->dm.dc->debug.disable_dsc = true; + + if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) + adev->dm.dc->debug.disable_clock_gate = true; + r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -1521,10 +1538,114 @@ static int dm_hw_fini(void *handle) return 0; } + +static int dm_enable_vblank(struct drm_crtc *crtc); +static void dm_disable_vblank(struct drm_crtc *crtc); + +static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, + struct dc_state *state, bool enable) +{ + enum dc_irq_source irq_source; + struct amdgpu_crtc *acrtc; + int rc = -EBUSY; + int i = 0; + + for (i = 0; i < state->stream_count; i++) { + acrtc = get_crtc_by_otg_inst( + adev, state->stream_status[i].primary_otg_inst); + + if (acrtc && state->stream_status[i].plane_count != 0) { + irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; + rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; + DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n", + acrtc->crtc_id, enable ? "en" : "dis", rc); + if (rc) + DRM_WARN("Failed to %s pflip interrupts\n", + enable ? 
"enable" : "disable"); + + if (enable) { + rc = dm_enable_vblank(&acrtc->base); + if (rc) + DRM_WARN("Failed to enable vblank interrupts\n"); + } else { + dm_disable_vblank(&acrtc->base); + } + + } + } + +} + +enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) +{ + struct dc_state *context = NULL; + enum dc_status res = DC_ERROR_UNEXPECTED; + int i; + struct dc_stream_state *del_streams[MAX_PIPES]; + int del_streams_count = 0; + + memset(del_streams, 0, sizeof(del_streams)); + + context = dc_create_state(dc); + if (context == NULL) + goto context_alloc_fail; + + dc_resource_state_copy_construct_current(dc, context); + + /* First remove from context all streams */ + for (i = 0; i < context->stream_count; i++) { + struct dc_stream_state *stream = context->streams[i]; + + del_streams[del_streams_count++] = stream; + } + + /* Remove all planes for removed streams and then remove the streams */ + for (i = 0; i < del_streams_count; i++) { + if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { + res = DC_FAIL_DETACH_SURFACES; + goto fail; + } + + res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); + if (res != DC_OK) + goto fail; + } + + + res = dc_validate_global_state(dc, context, false); + + if (res != DC_OK) { + DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res); + goto fail; + } + + res = dc_commit_state(dc, context); + +fail: + dc_release_state(context); + +context_alloc_fail: + return res; +} + static int dm_suspend(void *handle) { struct amdgpu_device *adev = handle; struct amdgpu_display_manager *dm = &adev->dm; + int ret = 0; + + if (adev->in_gpu_reset) { + mutex_lock(&dm->dc_lock); + dm->cached_dc_state = dc_copy_state(dm->dc->current_state); + + dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); + + amdgpu_dm_commit_zero_streams(dm->dc); + + amdgpu_dm_irq_suspend(adev); + + return ret; + } WARN_ON(adev->dm.cached_state); adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); @@ -1640,6 +1761,46 @@ static void emulated_link_detect(struct dc_link *link) } +static void dm_gpureset_commit_state(struct dc_state *dc_state, + struct amdgpu_display_manager *dm) +{ + struct { + struct dc_surface_update surface_updates[MAX_SURFACES]; + struct dc_plane_info plane_infos[MAX_SURFACES]; + struct dc_scaling_info scaling_infos[MAX_SURFACES]; + struct dc_flip_addrs flip_addrs[MAX_SURFACES]; + struct dc_stream_update stream_update; + } * bundle; + int k, m; + + bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); + + if (!bundle) { + dm_error("Failed to allocate update bundle\n"); + goto cleanup; + } + + for (k = 0; k < dc_state->stream_count; k++) { + bundle->stream_update.stream = dc_state->streams[k]; + + for (m = 0; m < dc_state->stream_status->plane_count; m++) { + bundle->surface_updates[m].surface = + dc_state->stream_status->plane_states[m]; + bundle->surface_updates[m].surface->force_full_update = + true; + } + dc_commit_updates_for_stream( + dm->dc, bundle->surface_updates, + dc_state->stream_status->plane_count, + dc_state->streams[k], &bundle->stream_update, dc_state); + } + +cleanup: + kfree(bundle); + + return; +} + static int dm_resume(void *handle) { struct amdgpu_device *adev = handle; @@ -1656,8 +1817,44 @@ static int dm_resume(void *handle) struct dm_plane_state *dm_new_plane_state; struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; - int i, r; + struct dc_state *dc_state; + int i, r, j; + if (adev->in_gpu_reset) { + 
dc_state = dm->cached_dc_state; + + r = dm_dmub_hw_init(adev); + if (r) + DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + + dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); + dc_resume(dm->dc); + + amdgpu_dm_irq_resume_early(adev); + + for (i = 0; i < dc_state->stream_count; i++) { + dc_state->streams[i]->mode_changed = true; + for (j = 0; j < dc_state->stream_status->plane_count; j++) { + dc_state->stream_status->plane_states[j]->update_flags.raw + = 0xffffffff; + } + } + + WARN_ON(!dc_commit_state(dm->dc, dc_state)); + + dm_gpureset_commit_state(dm->cached_dc_state, dm); + + dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); + + dc_release_state(dm->cached_dc_state); + dm->cached_dc_state = NULL; + + amdgpu_dm_irq_resume_late(adev); + + mutex_unlock(&dm->dc_lock); + + return 0; + } /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_release_state(dm_state->context); dm_state->context = dc_create_state(dm->dc); @@ -3022,9 +3219,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } - if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) - dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; - /* No userspace support. */ dm->dc->debug.disable_tri_buf = true; @@ -3651,6 +3845,10 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, case DRM_FORMAT_ARGB16161616F: plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; break; + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; + break; default: DRM_ERROR( "Unsupported screen format %s\n", @@ -3822,8 +4020,7 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode, static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector, - const struct drm_connector_state *state, - bool is_y420) + bool is_y420, int requested_bpc) { uint8_t bpc; @@ -3843,10 +4040,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector, bpc = bpc ? bpc : 8; } - if (!state) - state = connector->state; - - if (state) { + if (requested_bpc > 0) { /* * Cap display bpc based on the user requested value. * @@ -3855,7 +4049,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector, * or if this was called outside of atomic check, so it * can't be used directly. */ - bpc = min(bpc, state->max_requested_bpc); + bpc = min_t(u8, bpc, requested_bpc); /* Round down to the nearest even number. 
*/ bpc = bpc - (bpc & 1); @@ -3977,7 +4171,8 @@ static void fill_stream_properties_from_drm_display_mode( const struct drm_display_mode *mode_in, const struct drm_connector *connector, const struct drm_connector_state *connector_state, - const struct dc_stream_state *old_stream) + const struct dc_stream_state *old_stream, + int requested_bpc) { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; @@ -4007,8 +4202,9 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; timing_out->display_color_depth = convert_color_depth_from_display_info( - connector, connector_state, - (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)); + connector, + (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), + requested_bpc); timing_out->scan_type = SCANNING_TYPE_NODATA; timing_out->hdmi_vic = 0; @@ -4214,7 +4410,8 @@ static struct dc_stream_state * create_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, - const struct dc_stream_state *old_stream) + const struct dc_stream_state *old_stream, + int requested_bpc) { struct drm_display_mode *preferred_mode = NULL; struct drm_connector *drm_connector; @@ -4299,10 +4496,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ if (!scale || mode_refresh != preferred_refresh) fill_stream_properties_from_drm_display_mode(stream, - &mode, &aconnector->base, con_state, NULL); + &mode, &aconnector->base, con_state, NULL, requested_bpc); else fill_stream_properties_from_drm_display_mode(stream, - &mode, &aconnector->base, con_state, old_stream); + &mode, &aconnector->base, con_state, old_stream, requested_bpc); stream->timing.flags.DSC = 0; @@ -4821,16 +5018,54 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) create_eml_sink(aconnector); } +static struct dc_stream_state * +create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, + const struct drm_display_mode *drm_mode, + const struct dm_connector_state *dm_state, + const struct dc_stream_state *old_stream) +{ + struct drm_connector *connector = &aconnector->base; + struct amdgpu_device *adev = connector->dev->dev_private; + struct dc_stream_state *stream; + int requested_bpc = connector->state ? 
connector->state->max_requested_bpc : 8; + enum dc_status dc_result = DC_OK; + + do { + stream = create_stream_for_sink(aconnector, drm_mode, + dm_state, old_stream, + requested_bpc); + if (stream == NULL) { + DRM_ERROR("Failed to create stream for sink!\n"); + break; + } + + dc_result = dc_validate_stream(adev->dm.dc, stream); + + if (dc_result != DC_OK) { + DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n", + drm_mode->hdisplay, + drm_mode->vdisplay, + drm_mode->clock, + dc_result); + + dc_stream_release(stream); + stream = NULL; + requested_bpc -= 2; /* lower bpc to retry validation */ + } + + } while (stream == NULL && requested_bpc >= 6); + + return stream; +} + enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { int result = MODE_ERROR; struct dc_sink *dc_sink; - struct amdgpu_device *adev = connector->dev->dev_private; /* TODO: Unhardcode stream count */ struct dc_stream_state *stream; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - enum dc_status dc_result = DC_OK; if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || (mode->flags & DRM_MODE_FLAG_DBLSCAN)) @@ -4851,24 +5086,11 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec goto fail; } - stream = create_stream_for_sink(aconnector, mode, NULL, NULL); - if (stream == NULL) { - DRM_ERROR("Failed to create stream for sink!\n"); - goto fail; - } - - dc_result = dc_validate_stream(adev->dm.dc, stream); - - if (dc_result == DC_OK) + stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL); + if (stream) { + dc_stream_release(stream); result = MODE_OK; - else - DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n", - mode->hdisplay, - mode->vdisplay, - mode->clock, - dc_result); - - dc_stream_release(stream); + } fail: /* TODO: error handling*/ @@ -5191,10 +5413,12 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, return 0; if (!state->duplicated) { + int max_bpc = conn_state->max_requested_bpc; is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && aconnector->force_yuv420_output; - color_depth = convert_color_depth_from_display_info(connector, conn_state, - is_y420); + color_depth = convert_color_depth_from_display_info(connector, + is_y420, + max_bpc); bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; clock = adjusted_mode->clock; dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); @@ -5566,6 +5790,8 @@ static int get_plane_formats(const struct drm_plane *plane, if (plane_cap && plane_cap->pixel_format_support.fp16) { formats[num_formats++] = DRM_FORMAT_XRGB16161616F; formats[num_formats++] = DRM_FORMAT_ARGB16161616F; + formats[num_formats++] = DRM_FORMAT_XBGR16161616F; + formats[num_formats++] = DRM_FORMAT_ABGR16161616F; } break; @@ -7622,10 +7848,10 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto skip_modeset; - new_stream = create_stream_for_sink(aconnector, - &new_crtc_state->mode, - dm_new_conn_state, - dm_old_crtc_state->stream); + new_stream = create_validate_stream_for_sink(aconnector, + &new_crtc_state->mode, + dm_new_conn_state, + dm_old_crtc_state->stream); /* * we can have no stream on ACTION_SET if a display @@ -7910,13 +8136,6 @@ static int dm_update_plane_state(struct dc *dc, return -EINVAL; } - if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width || - new_plane_state->crtc_y <= 
-new_acrtc->max_cursor_height) { - DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n", - new_plane_state->crtc_x, new_plane_state->crtc_y); - return -EINVAL; - } - return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 5cab3e65d992..d61186ff411d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -315,6 +315,7 @@ struct amdgpu_display_manager { #endif struct drm_atomic_state *cached_state; + struct dc_state *cached_dc_state; struct dm_comressor_info compressor; diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 3960a8db94cb..1e5a92b192a1 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -690,6 +690,26 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, struct dc_debug_options *dbg, struct dc_state *context) { + int i; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /** + * Workaround for avoiding pipe-split in cases where we'd split + * planes that are too small, resulting in splits that aren't + * valid for the scaler. + */ + if (pipe->plane_state && + (pipe->plane_state->dst_rect.width <= 16 || + pipe->plane_state->dst_rect.height <= 16 || + pipe->plane_state->src_rect.width <= 16 || + pipe->plane_state->src_rect.height <= 16)) { + hack_disable_optional_pipe_split(v); + return; + } + } + if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) hack_disable_optional_pipe_split(v); @@ -702,7 +722,6 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz); } - unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id) { /* for low power RV2 variants, the highest voltage level we want is 0 */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index c08de6823db4..48ab51533d5d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3245,6 +3245,10 @@ void core_link_enable_stream( dp_set_dsc_enable(pipe_ctx, true); } + + if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + core_link_set_avmute(pipe_ctx, false); + } } void core_link_disable_stream(struct pipe_ctx *pipe_ctx) @@ -3257,6 +3261,10 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) dc_is_virtual_signal(pipe_ctx->stream->signal)) return; + if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + core_link_set_avmute(pipe_ctx, true); + } + #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, true); #endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 1db592372435..91cd884d6f25 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -219,6 +219,30 @@ static enum dpcd_training_patterns return dpcd_tr_pattern; } +static uint8_t dc_dp_initialize_scrambling_data_symbols( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + uint8_t disable_scrabled_data_symbols = 0; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + case DP_TRAINING_PATTERN_SEQUENCE_2: + case DP_TRAINING_PATTERN_SEQUENCE_3: + disable_scrabled_data_symbols = 1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + 
disable_scrabled_data_symbols = 0; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + return disable_scrabled_data_symbols; +} + static inline bool is_repeater(struct dc_link *link, uint32_t offset) { return (!link->is_lttpr_mode_transparent && offset != 0); @@ -251,6 +275,9 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = + dc_dp_initialize_scrambling_data_symbols(link, pattern); + dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index cb5d11f11cad..0c5619364e7d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -532,6 +532,24 @@ static inline void get_vp_scan_direction( *flip_horz_scan_dir = !*flip_horz_scan_dir; } +int get_num_mpc_splits(struct pipe_ctx *pipe) +{ + int mpc_split_count = 0; + struct pipe_ctx *other_pipe = pipe->bottom_pipe; + + while (other_pipe && other_pipe->plane_state == pipe->plane_state) { + mpc_split_count++; + other_pipe = other_pipe->bottom_pipe; + } + other_pipe = pipe->top_pipe; + while (other_pipe && other_pipe->plane_state == pipe->plane_state) { + mpc_split_count++; + other_pipe = other_pipe->top_pipe; + } + + return mpc_split_count; +} + int get_num_odm_splits(struct pipe_ctx *pipe) { int odm_split_count = 0; @@ -556,16 +574,11 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli /*Check for mpc split*/ struct pipe_ctx *split_pipe = pipe_ctx->top_pipe; + *split_count = get_num_mpc_splits(pipe_ctx); while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { (*split_idx)++; - (*split_count)++; split_pipe = split_pipe->top_pipe; } - split_pipe = pipe_ctx->bottom_pipe; - while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) { - (*split_count)++; - split_pipe = split_pipe->bottom_pipe; - } } else { /*Get odm split index*/ struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe; @@ -2666,6 +2679,9 @@ bool pipe_need_reprogram( false == pipe_ctx_old->stream->dpms_off) return true; + if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc) + return true; + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 4a7796de2ff5..51b3fe502670 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -411,7 +411,7 @@ static const struct dc_plane_cap plane_cap = { .pixel_format_support = { .argb8888 = true, .nv12 = false, - .fp16 = false + .fp16 = true }, .max_upscale_factor = { diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 9a9764cbd78d..8f362e8c1787 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -516,7 +516,7 @@ static const struct dc_plane_cap plane_cap = { .pixel_format_support = { .argb8888 = true, .nv12 = false, - .fp16 = false + .fp16 = true }, .max_upscale_factor = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index 
deccab0228d2..75637c291e75 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -93,7 +93,6 @@ void hubbub1_wm_read_state(struct hubbub *hubbub, void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow) { struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - /* * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index f36d1f57b846..77f16921e7f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -737,7 +737,8 @@ void dcn10_bios_golden_init(struct dc *dc) if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) if (allow_self_fresh_force_enable == false && dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub)) - dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true); + dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, + !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); } @@ -1682,12 +1683,81 @@ void dcn10_pipe_control_lock( hws->funcs.verify_allow_pstate_change_high(dc); } +/** + * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE. + * + * Software keepout workaround to prevent cursor update locking from stalling + * out cursor updates indefinitely or from old values from being retained in + * the case where the viewport changes in the same frame as the cursor. + * + * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's + * too close to VUPDATE, then stall out until VUPDATE finishes. + * + * TODO: Optimize cursor programming to be once per frame before VUPDATE + * to avoid the need for this workaround. + */ +static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct crtc_position position; + uint32_t vupdate_start, vupdate_end; + unsigned int lines_to_vupdate, us_to_vupdate, vpos; + unsigned int us_per_line, us_vupdate; + + if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position) + return; + + if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg) + return; + + dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start, + &vupdate_end); + + dc->hwss.get_position(&pipe_ctx, 1, &position); + vpos = position.vertical_count; + + /* Avoid wraparound calculation issues */ + vupdate_start += stream->timing.v_total; + vupdate_end += stream->timing.v_total; + vpos += stream->timing.v_total; + + if (vpos <= vupdate_start) { + /* VPOS is in VACTIVE or back porch. */ + lines_to_vupdate = vupdate_start - vpos; + } else if (vpos > vupdate_end) { + /* VPOS is in the front porch. */ + return; + } else { + /* VPOS is in VUPDATE. */ + lines_to_vupdate = 0; + } + + /* Calculate time until VUPDATE in microseconds. */ + us_per_line = + stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz; + us_to_vupdate = lines_to_vupdate * us_per_line; + + /* 70 us is a conservative estimate of cursor update time*/ + if (us_to_vupdate > 70) + return; + + /* Stall out until the cursor update completes. 
*/ + if (vupdate_end < vupdate_start) + vupdate_end += stream->timing.v_total; + us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line; + udelay(us_to_vupdate + us_vupdate); +} + void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) { /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */ if (!pipe || pipe->top_pipe) return; + /* Prevent cursor lock from stalling out cursor updates. */ + if (lock) + delay_cursor_until_vupdate(dc, pipe); + dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc, pipe->stream_res.opp->inst, lock); } @@ -3301,7 +3371,7 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) return vertical_line_start; } -static void dcn10_calc_vupdate_position( +void dcn10_calc_vupdate_position( struct dc *dc, struct pipe_ctx *pipe_ctx, uint32_t *start_line, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index af51424315d5..42b6e016d71e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -34,6 +34,11 @@ struct dc; void dcn10_hw_sequencer_construct(struct dc *dc); int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); +void dcn10_calc_vupdate_position( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + uint32_t *start_line, + uint32_t *end_line); void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); enum dc_status dcn10_enable_stream_timing( struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index 897a3d25685a..7cb8c3fb2665 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -72,6 +72,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, .set_backlight_level = dce110_set_backlight_level, .set_abm_immediate_disable = dce110_set_abm_immediate_disable, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h index 501532dd523a..c478213ba7ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h @@ -80,6 +80,7 @@ struct dcn20_hubbub { const struct dcn_hubbub_mask *masks; unsigned int debug_test_index_pstate; struct dcn_watermark_set watermarks; + int num_vmid; struct dcn20_vmid vmid[16]; unsigned int detile_buf_size; }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index a8bcd747d7ba..2fbde4241559 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -83,6 +83,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .init_vm_ctx = dcn20_init_vm_ctx, .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, .set_backlight_level = dce110_set_backlight_level, .set_abm_immediate_disable = dce110_set_abm_immediate_disable, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 778e2e8fd2c6..cef1aa938ab5 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1663,22 +1663,32 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state } -static void acquire_dsc(struct resource_context *res_ctx, - const struct resource_pool *pool, +void dcn20_acquire_dsc(const struct dc *dc, + struct resource_context *res_ctx, struct display_stream_compressor **dsc, int pipe_idx) { int i; + const struct resource_pool *pool = dc->res_pool; + struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc; - ASSERT(*dsc == NULL); + ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */ *dsc = NULL; + /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { *dsc = pool->dscs[pipe_idx]; res_ctx->is_dsc_acquired[pipe_idx] = true; return; } + /* Return old DSC to avoid the need for re-programming */ + if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) { + *dsc = dsc_old; + res_ctx->is_dsc_acquired[dsc_old->inst] = true; + return ; + } + /* Find first free DSC */ for (i = 0; i < pool->res_cap->num_dsc; i++) if (!res_ctx->is_dsc_acquired[i]) { @@ -1710,7 +1720,6 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, { enum dc_status result = DC_OK; int i; - const struct resource_pool *pool = dc->res_pool; /* Get a DSC if required and available */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1722,7 +1731,7 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, if (pipe_ctx->stream_res.dsc) continue; - acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i); + dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i); /* The number of DSCs can be less than the number of pipes */ if (!pipe_ctx->stream_res.dsc) { @@ -1850,12 +1859,13 @@ static void swizzle_to_dml_params( } bool dcn20_split_stream_for_odm( + const struct dc *dc, struct resource_context *res_ctx, - const struct resource_pool *pool, struct pipe_ctx *prev_odm_pipe, struct pipe_ctx *next_odm_pipe) { int pipe_idx = next_odm_pipe->pipe_idx; + const struct resource_pool *pool = dc->res_pool; *next_odm_pipe = *prev_odm_pipe; @@ -1913,7 +1923,7 @@ bool dcn20_split_stream_for_odm( } next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; if (next_odm_pipe->stream->timing.flags.DSC == 1) { - acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); + dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); ASSERT(next_odm_pipe->stream_res.dsc); if (next_odm_pipe->stream_res.dsc == NULL) return false; @@ -2576,27 +2586,6 @@ static void dcn20_merge_pipes_for_validate( } } -int dcn20_find_previous_split_count(struct pipe_ctx *pipe) -{ - int previous_split = 1; - struct pipe_ctx *current_pipe = pipe; - - while (current_pipe->bottom_pipe) { - if (current_pipe->plane_state != current_pipe->bottom_pipe->plane_state) - break; - previous_split++; - current_pipe = current_pipe->bottom_pipe; - } - current_pipe = pipe; - while (current_pipe->top_pipe) { - if (current_pipe->plane_state != current_pipe->top_pipe->plane_state) - break; - previous_split++; - current_pipe = current_pipe->top_pipe; - } - return previous_split; -} - int dcn20_validate_apply_pipe_split_flags( struct dc *dc, struct dc_state *context, @@ -2608,6 +2597,8 @@ int dcn20_validate_apply_pipe_split_flags( int plane_count = 0; bool force_split = false; bool 
avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID; + struct vba_vars_st *v = &context->bw_ctx.dml.vba; + int max_mpc_comb = v->maxMpcComb; if (context->stream_count > 1) { if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) @@ -2615,10 +2606,22 @@ int dcn20_validate_apply_pipe_split_flags( } else if (dc->debug.force_single_disp_pipe_split) force_split = true; - /* TODO: fix dc bugs and remove this split threshold thing */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + /** + * Workaround for avoiding pipe-split in cases where we'd split + * planes that are too small, resulting in splits that aren't + * valid for the scaler. + */ + if (pipe->plane_state && + (pipe->plane_state->dst_rect.width <= 16 || + pipe->plane_state->dst_rect.height <= 16 || + pipe->plane_state->src_rect.width <= 16 || + pipe->plane_state->src_rect.height <= 16)) + avoid_split = true; + + /* TODO: fix dc bugs and remove this split threshold thing */ if (pipe->stream && !pipe->prev_odm_pipe && (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) ++plane_count; @@ -2628,15 +2631,13 @@ int dcn20_validate_apply_pipe_split_flags( /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ if (avoid_split) { - int max_mpc_comb = context->bw_ctx.dml.vba.maxMpcComb; - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) - if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1 && - context->bw_ctx.dml.vba.ModeSupport[vlevel][0]) + if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 && + v->ModeSupport[vlevel][0]) break; /* Impossible to not split this pipe */ if (vlevel > context->bw_ctx.dml.soc.num_states) @@ -2645,21 +2646,21 @@ int dcn20_validate_apply_pipe_split_flags( max_mpc_comb = 0; pipe_idx++; } - context->bw_ctx.dml.vba.maxMpcComb = max_mpc_comb; + v->maxMpcComb = max_mpc_comb; } /* Split loop sets which pipe should be split based on dml outputs and dc flags */ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx]; + int pipe_plane = v->pipe_plane[pipe_idx]; + bool split4mpc = context->stream_count == 1 && plane_count == 1 + && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4; if (!context->res_ctx.pipe_ctx[i].stream) continue; - if (force_split - || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1) { - if (context->stream_count == 1 && plane_count == 1 - && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4) + if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] > 1) { + if (split4mpc) split[i] = 4; else split[i] = 2; @@ -2675,66 +2676,72 @@ int dcn20_validate_apply_pipe_split_flags( split[i] = 2; if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { split[i] = 2; - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1; + v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1; } - context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane]; - - if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) { - /*Already split odm pipe tree, don't try to split 
again*/ - split[i] = 0; - split[pipe->prev_odm_pipe->pipe_idx] = 0; - } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state - && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { - /*If 2 way split but can support 4 way split, then split each pipe again*/ - if (context->stream_count == 1 && plane_count == 1 - && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4) { - split[i] = 2; - } else { + v->ODMCombineEnabled[pipe_plane] = + v->ODMCombineEnablePerState[vlevel][pipe_plane]; + + if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { + if (get_num_mpc_splits(pipe) == 1) { + /*If need split for mpc but 2 way split already*/ + if (split[i] == 4) + split[i] = 2; /* 2 -> 4 MPC */ + else if (split[i] == 2) + split[i] = 0; /* 2 -> 2 MPC */ + else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) + merge[i] = true; /* 2 -> 1 MPC */ + } else if (get_num_mpc_splits(pipe) == 3) { + /*If need split for mpc but 4 way split already*/ + if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe) + || !pipe->bottom_pipe)) { + merge[i] = true; /* 4 -> 2 MPC */ + } else if (split[i] == 0 && pipe->top_pipe && + pipe->top_pipe->plane_state == pipe->plane_state) + merge[i] = true; /* 4 -> 1 MPC */ split[i] = 0; - split[pipe->top_pipe->pipe_idx] = 0; - } - } else if (pipe->prev_odm_pipe || (dcn20_find_previous_split_count(pipe) == 2 && pipe->top_pipe)) { - if (split[i] == 0) { - /*Exiting mpc/odm combine*/ - merge[i] = true; - } else { - /*Transition from mpc combine to odm combine or vice versa*/ - ASSERT(0); /*should not actually happen yet*/ - split[i] = 2; - merge[i] = true; + } else if (get_num_odm_splits(pipe)) { + /* ODM -> MPC transition */ + ASSERT(0); /* NOT expected yet */ if (pipe->prev_odm_pipe) { - split[pipe->prev_odm_pipe->pipe_idx] = 2; - merge[pipe->prev_odm_pipe->pipe_idx] = true; - } else { - split[pipe->top_pipe->pipe_idx] = 2; - merge[pipe->top_pipe->pipe_idx] = true; + split[i] = 0; + merge[i] = true; } } - } else if (dcn20_find_previous_split_count(pipe) == 3) { - if (split[i] == 0 && !pipe->top_pipe) { - merge[pipe->bottom_pipe->pipe_idx] = true; - merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true; - } else if (split[i] == 2 && !pipe->top_pipe) { - merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true; - split[i] = 0; - } - } else if (dcn20_find_previous_split_count(pipe) == 4) { - if (split[i] == 0 && !pipe->top_pipe) { - merge[pipe->bottom_pipe->pipe_idx] = true; - merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true; - merge[pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx] = true; - } else if (split[i] == 2 && !pipe->top_pipe) { - merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true; - merge[pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx] = true; + } else { + if (get_num_odm_splits(pipe) == 1) { + /*If need split for odm but 2 way split already*/ + if (split[i] == 4) + split[i] = 2; /* 2 -> 4 ODM */ + else if (split[i] == 2) + split[i] = 0; /* 2 -> 2 ODM */ + else if (pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* exit ODM */ + } + } else if (get_num_odm_splits(pipe) == 3) { + /*If need split for odm but 4 way split already*/ + if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) + || !pipe->next_odm_pipe)) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* 4 -> 2 ODM */ + } else if (split[i] == 0 && pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = 
true; /* exit ODM */ + } split[i] = 0; + } else if (get_num_mpc_splits(pipe)) { + /* MPC -> ODM transition */ + ASSERT(0); /* NOT expected yet */ + if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { + split[i] = 0; + merge[i] = true; + } } } /* Adjust dppclk when split is forced, do not bother with dispclk */ - if (split[i] != 0 - && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; + if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) + v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2; pipe_idx++; } @@ -2792,7 +2799,7 @@ bool dcn20_fast_validate_bw( hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); if (!dcn20_split_stream_for_odm( - &context->res_ctx, dc->res_pool, + dc, &context->res_ctx, pipe, hsplit_pipe)) goto validate_fail; pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; @@ -2821,7 +2828,7 @@ bool dcn20_fast_validate_bw( } if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { if (!dcn20_split_stream_for_odm( - &context->res_ctx, dc->res_pool, + dc, &context->res_ctx, pipe, hsplit_pipe)) goto validate_fail; dcn20_build_mapped_resource(dc, context, pipe->stream); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index d5448c9b0e15..2c1959845c29 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -119,7 +119,6 @@ void dcn20_set_mcif_arb_params( display_e2e_pipe_params_st *pipes, int pipe_cnt); bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); -int dcn20_find_previous_split_count(struct pipe_ctx *pipe); int dcn20_validate_apply_pipe_split_flags( struct dc *dc, struct dc_state *context, @@ -136,10 +135,14 @@ void dcn20_split_stream_for_mpc( struct pipe_ctx *primary_pipe, struct pipe_ctx *secondary_pipe); bool dcn20_split_stream_for_odm( + const struct dc *dc, struct resource_context *res_ctx, - const struct resource_pool *pool, struct pipe_ctx *prev_odm_pipe, struct pipe_ctx *next_odm_pipe); +void dcn20_acquire_dsc(const struct dc *dc, + struct resource_context *res_ctx, + struct display_stream_compressor **dsc, + int pipe_idx); struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, struct resource_context *res_ctx, const struct resource_pool *pool, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index 5e2d14b897af..129f0b62f751 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -49,11 +49,6 @@ #define FN(reg_name, field_name) \ hubbub1->shifts->field_name, hubbub1->masks->field_name -#ifdef NUM_VMID -#undef NUM_VMID -#endif -#define NUM_VMID 16 - static uint32_t convert_and_clamp( uint32_t wm_ns, uint32_t refclk_mhz, @@ -138,7 +133,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub, dcn21_dchvm_init(hubbub); - return NUM_VMID; + return hubbub1->num_vmid; } bool hubbub21_program_urgent_watermarks( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index e97dfaa656e9..a5baef7e7a7d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -86,6 +86,7 @@ static const struct 
hw_sequencer_funcs dcn21_funcs = { .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, .power_down = dce110_power_down, .set_backlight_level = dce110_set_backlight_level, .set_abm_immediate_disable = dce110_set_abm_immediate_disable, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 419cdde624f5..f00a56835084 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -805,7 +805,7 @@ static const struct resource_caps res_cap_rn = { .num_pll = 5, // maybe 3 because the last two used for USB-c .num_dwb = 1, .num_ddc = 5, - .num_vmid = 1, + .num_vmid = 16, .num_dsc = 3, }; @@ -1295,6 +1295,7 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx) vmid->shifts = &vmid_shifts; vmid->masks = &vmid_masks; } + hubbub->num_vmid = res_cap_rn.num_vmid; return &hubbub->base; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 7ee8b8460a9b..e34c3376efc1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -63,10 +63,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) endif CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ - dml_common_defs.o ifdef CONFIG_DRM_AMD_DC_DCN DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h index 8c86b63ddf07..1e557ddcb638 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h @@ -26,7 +26,6 @@ #ifndef __DML20_DISPLAY_RQ_DLG_CALC_H__ #define __DML20_DISPLAY_RQ_DLG_CALC_H__ -#include "../dml_common_defs.h" #include "../display_rq_dlg_helpers.h" struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h index 0378406bf7e7..0d53e871a9d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h @@ -26,7 +26,6 @@ #ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__ #define __DML20V2_DISPLAY_RQ_DLG_CALC_H__ -#include "../dml_common_defs.h" #include "../display_rq_dlg_helpers.h" struct display_mode_lib; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h index 83e95f8cbff2..e8f7785e3fc6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h @@ -26,7 +26,7 @@ #ifndef __DML21_DISPLAY_RQ_DLG_CALC_H__ #define __DML21_DISPLAY_RQ_DLG_CALC_H__ -#include "../dml_common_defs.h" +#include "dm_services.h" #include "../display_rq_dlg_helpers.h" struct display_mode_lib; diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index cf2758ca5b02..c77c3d827e4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -25,8 +25,10 @@ #ifndef __DISPLAY_MODE_LIB_H__ #define __DISPLAY_MODE_LIB_H__ - -#include "dml_common_defs.h" +#include "dm_services.h" +#include "dc_features.h" +#include "display_mode_structs.h" +#include "display_mode_enums.h" #include "display_mode_vba.h" enum dml_project { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 6a7b20927a6b..3f559e725ab1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -27,8 +27,6 @@ #ifndef __DML2_DISPLAY_MODE_VBA_H__ #define __DML2_DISPLAY_MODE_VBA_H__ -#include "dml_common_defs.h" - struct display_mode_lib; void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h index 1f24db830737..2555ef0358c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h @@ -26,7 +26,6 @@ #ifndef __DISPLAY_RQ_DLG_HELPERS_H__ #define __DISPLAY_RQ_DLG_HELPERS_H__ -#include "dml_common_defs.h" #include "display_mode_lib.h" /* Function: Printer functions diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h index 304164986bd8..9c06913ad767 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h @@ -26,8 +26,6 @@ #ifndef __DISPLAY_RQ_DLG_CALC_H__ #define __DISPLAY_RQ_DLG_CALC_H__ -#include "dml_common_defs.h" - struct display_mode_lib; #include "display_rq_dlg_helpers.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c deleted file mode 100644 index 723af0b2dda0..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dml_common_defs.h" -#include "dcn_calc_math.h" - -#include "dml_inline_defs.h" - -double dml_round(double a) -{ - double round_pt = 0.5; - double ceil = dml_ceil(a, 1); - double floor = dml_floor(a, 1); - - if (a - floor >= round_pt) - return ceil; - else - return floor; -} - - diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h deleted file mode 100644 index f78cbae9db88..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_COMMON_DEFS_H__ -#define __DC_COMMON_DEFS_H__ - -#include "dm_services.h" -#include "dc_features.h" -#include "display_mode_structs.h" -#include "display_mode_enums.h" - - -double dml_round(double a); - -#endif /* __DC_COMMON_DEFS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h index ded71ea82413..02e06c9b3230 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h @@ -26,7 +26,6 @@ #ifndef __DML_INLINE_DEFS_H__ #define __DML_INLINE_DEFS_H__ -#include "dml_common_defs.h" #include "dcn_calc_math.h" #include "dml_logger.h" @@ -75,6 +74,18 @@ static inline double dml_floor(double a, double granularity) return (double) dcn_bw_floor2(a, granularity); } +static inline double dml_round(double a) +{ + double round_pt = 0.5; + double ceil = dml_ceil(a, 1); + double floor = dml_floor(a, 1); + + if (a - floor >= round_pt) + return ceil; + else + return floor; +} + static inline int dml_log2(double x) { return dml_round((double)dcn_bw_log(x, 2)); @@ -112,7 +123,7 @@ static inline double dml_log(double x, double base) static inline unsigned int dml_round_to_multiple(unsigned int num, unsigned int multiple, - bool up) + unsigned char up) { unsigned int remainder; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 3b2ea9bdb62c..8e72f077e552 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -96,6 +96,11 @@ struct hw_sequencer_funcs { void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, struct crtc_position *position); int 
(*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx); + void (*calc_vupdate_position)( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + uint32_t *start_line, + uint32_t *end_line); void (*enable_per_frame_crtc_position_reset)(struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]); void (*enable_timing_synchronization)(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 109c589eb97c..a9be495af922 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -177,6 +177,8 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); void get_audio_check(struct audio_info *aud_modes, struct audio_check *aud_chk); +int get_num_mpc_splits(struct pipe_ctx *pipe); + int get_num_odm_splits(struct pipe_ctx *pipe); #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c index 00f132f8ad55..61ee4be35d27 100644 --- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c +++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c @@ -112,9 +112,12 @@ uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb) evict_vmids(core_vmid); vmid = get_next_available_vmid(core_vmid); - add_ptb_to_table(core_vmid, vmid, ptb); + if (vmid != -1) { + add_ptb_to_table(core_vmid, vmid, ptb); - dc_setup_vm_context(core_vmid->dc, &va_config, vmid); + dc_setup_vm_context(core_vmid->dc, &va_config, vmid); + } else + ASSERT(0); } return vmid;
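
The bpc fallback added in create_validate_stream_for_sink() retries stream validation at progressively lower colour depths (e.g. 10 -> 8 -> 6) before giving up. A minimal standalone sketch of that loop follows; sink_accepts() is a hypothetical stand-in for create_stream_for_sink() plus dc_validate_stream(), and the starting value is assumed.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for dc_validate_stream(): pretend the sink
 * only validates at 8 bpc or below. */
static bool sink_accepts(int bpc)
{
	return bpc <= 8;
}

int main(void)
{
	/* connector->state->max_requested_bpc in the patch; assumed 10 here. */
	int requested_bpc = 10;
	int bpc;

	/* Same shape as the do/while in create_validate_stream_for_sink():
	 * drop bpc by 2 per failed attempt, stop at the 6 bpc floor. */
	for (bpc = requested_bpc; bpc >= 6; bpc -= 2) {
		if (sink_accepts(bpc)) {
			printf("validated at %d bpc\n", bpc);
			return 0;
		}
	}
	printf("no bpc between 6 and %d validated\n", requested_bpc);
	return 1;
}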
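
The new get_num_mpc_splits() helper in dc_resource.c counts how many pipes above and below the given pipe still reference the same plane_state, so a 2-way MPC combine reports 1 and a 4-way combine reports 3. A toy, self-contained walk of that idea; struct toy_pipe is hypothetical and only models the fields the helper touches.

#include <stdio.h>

/* Hypothetical miniature of pipe_ctx: just the links and the plane
 * reference that get_num_mpc_splits() inspects. */
struct toy_pipe {
	struct toy_pipe *top_pipe;
	struct toy_pipe *bottom_pipe;
	int plane;			/* stands in for plane_state */
};

/* Same walk as the helper added by the patch: count neighbours in both
 * directions that still share the pipe's plane. */
static int num_mpc_splits(const struct toy_pipe *pipe)
{
	int count = 0;
	const struct toy_pipe *other;

	for (other = pipe->bottom_pipe; other && other->plane == pipe->plane;
	     other = other->bottom_pipe)
		count++;
	for (other = pipe->top_pipe; other && other->plane == pipe->plane;
	     other = other->top_pipe)
		count++;
	return count;
}

int main(void)
{
	/* Three pipes blended onto one plane (3-way MPC combine). */
	struct toy_pipe p0 = {0}, p1 = {0}, p2 = {0};

	p0.plane = p1.plane = p2.plane = 1;
	p0.bottom_pipe = &p1;
	p1.top_pipe = &p0;
	p1.bottom_pipe = &p2;
	p2.top_pipe = &p1;

	/* Prints 2: two other pipes share the middle pipe's plane. */
	printf("splits seen from the middle pipe: %d\n", num_mpc_splits(&p1));
	return 0;
}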
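
delay_cursor_until_vupdate() converts the number of lines left before VUPDATE into microseconds as us_per_line = h_total * 10000 / pix_clk_100hz, and stalls when the result falls within the 70 us cursor-update estimate. A worked example with assumed 1080p60 CEA timing numbers (h_total 2200, pixel clock 148.5 MHz); the values are illustrative, not taken from the patch.

#include <stdio.h>

int main(void)
{
	/* Assumed 1080p60 timing values, not taken from the patch. */
	unsigned int h_total = 2200;
	unsigned int pix_clk_100hz = 1485000;	/* 148.5 MHz in 100 Hz units */
	unsigned int lines_to_vupdate = 3;	/* example distance from VPOS */

	/* Same arithmetic as delay_cursor_until_vupdate(). */
	unsigned int us_per_line = h_total * 10000u / pix_clk_100hz;
	unsigned int us_to_vupdate = lines_to_vupdate * us_per_line;

	/* Prints 14 us per line and 42 us to VUPDATE; 42 < 70, so the
	 * patch would wait out the VUPDATE window before taking the lock. */
	printf("%u us per line, %u us to VUPDATE\n", us_per_line, us_to_vupdate);
	return 0;
}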