diff options
Diffstat (limited to 'drivers/gpu/drm/amd/display')
10 files changed, 130 insertions, 44 deletions
| diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 50c783e19f5a..31bce529f685 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1503,8 +1503,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  		case IP_VERSION(3, 0, 1):  		case IP_VERSION(3, 1, 2):  		case IP_VERSION(3, 1, 3): -		case IP_VERSION(3, 1, 4): -		case IP_VERSION(3, 1, 5):  		case IP_VERSION(3, 1, 6):  			init_data.flags.gpu_vm_support = true;  			break; @@ -1730,10 +1728,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)  		adev->dm.vblank_control_workqueue = NULL;  	} -	for (i = 0; i < adev->dm.display_indexes_num; i++) { -		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); -	} -  	amdgpu_dm_destroy_drm_device(&adev->dm);  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) @@ -4361,6 +4355,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)  		amdgpu_set_panel_orientation(&aconnector->base);  	} +	/* If we didn't find a panel, notify the acpi video detection */ +	if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0) +		acpi_video_report_nolcd(); +  	/* Software is initialized. Now we can register interrupt handlers. 
*/  	switch (adev->asic_type) {  #if defined(CONFIG_DRM_AMD_DC_SI) @@ -4503,6 +4501,17 @@ DEVICE_ATTR_WO(s3_debug);  static int dm_early_init(void *handle)  {  	struct amdgpu_device *adev = (struct amdgpu_device *)handle; +	struct amdgpu_mode_info *mode_info = &adev->mode_info; +	struct atom_context *ctx = mode_info->atom_context; +	int index = GetIndexIntoMasterTable(DATA, Object_Header); +	u16 data_offset; + +	/* if there is no object header, skip DM */ +	if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { +		adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; +		dev_info(adev->dev, "No object header, skipping DM\n"); +		return -ENOENT; +	}  	switch (adev->asic_type) {  #if defined(CONFIG_DRM_AMD_DC_SI) @@ -5307,8 +5316,6 @@ static void fill_stream_properties_from_drm_display_mode(  	timing_out->aspect_ratio = get_aspect_ratio(mode_in); -	stream->output_color_space = get_output_color_space(timing_out); -  	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;  	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;  	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { @@ -5319,6 +5326,8 @@ static void fill_stream_properties_from_drm_display_mode(  			adjust_colour_depth_from_display_info(timing_out, info);  		}  	} + +	stream->output_color_space = get_output_color_space(timing_out);  }  static void fill_audio_info(struct audio_info *audio_info, @@ -5831,7 +5840,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  		 */  		DRM_DEBUG_DRIVER("No preferred mode found\n");  	} else { -		recalculate_timing = is_freesync_video_mode(&mode, aconnector); +		recalculate_timing = amdgpu_freesync_vid_mode && +				 is_freesync_video_mode(&mode, aconnector);  		if (recalculate_timing) {  			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);  			drm_mode_copy(&saved_mode, &mode); @@ -6982,7 +6992,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect  	struct amdgpu_dm_connector 
*amdgpu_dm_connector =  		to_amdgpu_dm_connector(connector); -	if (!edid) +	if (!(amdgpu_freesync_vid_mode && edid))  		return;  	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -8846,7 +8856,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  		 * TODO: Refactor this function to allow this check to work  		 * in all conditions.  		 */ -		if (dm_new_crtc_state->stream && +		if (amdgpu_freesync_vid_mode && +		    dm_new_crtc_state->stream &&  		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))  			goto skip_modeset; @@ -8881,7 +8892,14 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  		if (!dm_old_crtc_state->stream)  			goto skip_modeset; -		if (dm_new_crtc_state->stream && +		/* Unset freesync video if it was active before */ +		if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { +			dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; +			dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; +		} + +		/* Now check if we should set freesync video mode */ +		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&  		    is_timing_unchanged_for_freesync(new_crtc_state,  						     old_crtc_state)) {  			new_crtc_state->mode_changed = false; @@ -8893,7 +8911,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  			set_freesync_fixed_config(dm_new_crtc_state);  			goto skip_modeset; -		} else if (aconnector && +		} else if (amdgpu_freesync_vid_mode && aconnector &&  			   is_freesync_video_mode(&new_crtc_state->mode,  						  aconnector)) {  			struct drm_display_mode *high_mode; @@ -9497,6 +9515,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  	bool lock_and_validation_needed = false;  	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;  #if defined(CONFIG_DRM_AMD_DC_DCN) +	struct drm_dp_mst_topology_mgr *mgr; +	struct drm_dp_mst_topology_state *mst_state;  	struct dsc_mst_fairness_vars 
vars[MAX_PIPES];  #endif @@ -9524,8 +9544,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  			goto fail;  		} -		if (dm_old_con_state->abm_level != -		    dm_new_con_state->abm_level) +		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || +		    dm_old_con_state->scaling != dm_new_con_state->scaling)  			new_crtc_state->connectors_changed = true;  	} @@ -9745,6 +9765,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  		lock_and_validation_needed = true;  	} +#if defined(CONFIG_DRM_AMD_DC_DCN) +	/* set the slot info for each mst_state based on the link encoding format */ +	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { +		struct amdgpu_dm_connector *aconnector; +		struct drm_connector *connector; +		struct drm_connector_list_iter iter; +		u8 link_coding_cap; + +		drm_connector_list_iter_begin(dev, &iter); +		drm_for_each_connector_iter(connector, &iter) { +			if (connector->index == mst_state->mgr->conn_base_id) { +				aconnector = to_amdgpu_dm_connector(connector); +				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); +				drm_dp_mst_update_slots(mst_state, link_coding_cap); + +				break; +			} +		} +		drm_connector_list_iter_end(&iter); +	} +#endif +  	/**  	 * Streams and planes are reset when there are changes that affect  	 * bandwidth. 
Anything that affects bandwidth needs to go through diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 6994c9a1ed85..5cff56bb8f56 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -120,23 +120,50 @@ enum dc_edid_status dm_helpers_parse_edid_caps(  }  static void -fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state, -				   struct amdgpu_dm_connector *aconnector, +fill_dc_mst_payload_table_from_drm(struct dc_link *link, +				   bool enable, +				   struct drm_dp_mst_atomic_payload *target_payload,  struct dc_dp_mst_stream_allocation_table *table)  {  	struct dc_dp_mst_stream_allocation_table new_table = { 0 };  	struct dc_dp_mst_stream_allocation *sa; -	struct drm_dp_mst_atomic_payload *payload; +	struct link_mst_stream_allocation_table copy_of_link_table = +										link->mst_stream_alloc_table; + +	int i; +	int current_hw_table_stream_cnt = copy_of_link_table.stream_count; +	struct link_mst_stream_allocation *dc_alloc; + +	/* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/ +	if (enable) { +		dc_alloc = +		&copy_of_link_table.stream_allocations[current_hw_table_stream_cnt]; +		dc_alloc->vcp_id = target_payload->vcpi; +		dc_alloc->slot_count = target_payload->time_slots; +	} else { +		for (i = 0; i < copy_of_link_table.stream_count; i++) { +			dc_alloc = +			&copy_of_link_table.stream_allocations[i]; + +			if (dc_alloc->vcp_id == target_payload->vcpi) { +				dc_alloc->vcp_id = 0; +				dc_alloc->slot_count = 0; +				break; +			} +		} +		ASSERT(i != copy_of_link_table.stream_count); +	}  	/* Fill payload info*/ -	list_for_each_entry(payload, &mst_state->payloads, next) { -		if (payload->delete) -			continue; - -		sa = &new_table.stream_allocations[new_table.stream_count]; -		sa->slot_count = payload->time_slots; -		sa->vcp_id = payload->vcpi; -	
	new_table.stream_count++; +	for (i = 0; i < MAX_CONTROLLER_NUM; i++) { +		dc_alloc = +			&copy_of_link_table.stream_allocations[i]; +		if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) { +			sa = &new_table.stream_allocations[new_table.stream_count]; +			sa->slot_count = dc_alloc->slot_count; +			sa->vcp_id = dc_alloc->vcp_id; +			new_table.stream_count++; +		}  	}  	/* Overwrite the old table */ @@ -185,7 +212,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(  	 * AUX message. The sequence is slot 1-63 allocated sequence for each  	 * stream. AMD ASIC stream slot allocation should follow the same  	 * sequence. copy DRM MST allocation to dc */ -	fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table); +	fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);  	return true;  } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 1edf7385f8d8..abdbd4352f6f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -468,7 +468,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs  static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)  {  	drm_encoder_cleanup(encoder); -	kfree(encoder);  }  static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { @@ -904,11 +903,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,  	if (IS_ERR(mst_state))  		return PTR_ERR(mst_state); -	mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link); -#if defined(CONFIG_DRM_AMD_DC_DCN) -	drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link)); -#endif -  	/* Set up params */  	for (i = 0; i < dc_state->stream_count; i++) {  		struct dc_dsc_policy dsc_policy = {0};  } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 471078fc3900..652270a0b498 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -90,8 +90,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {  		{ 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,  				0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },  	{ COLOR_SPACE_YCBCR2020_TYPE, -		{ 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2, -				0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} }, +		{ 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2, +				0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },  	{ COLOR_SPACE_YCBCR709_BLACK_TYPE,  		{ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,  				0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} }, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 342e906ae26e..c88f044666fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3995,10 +3995,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)  	struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);  	int i;  	bool mst_mode = (link->type == dc_connection_mst_branch); +	/* adjust for drm changes*/ +	bool update_drm_mst_state = true;  	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);  	const struct dc_link_settings empty_link_settings = {0};  	DC_LOGGER_INIT(link->ctx->logger); +  	/* deallocate_mst_payload is called before disable link. When mode or  	 * disable/enable monitor, new stream is created which is not in link  	 * stream[] yet. 
For this, payload is not allocated yet, so de-alloc @@ -4014,7 +4017,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)  				&empty_link_settings,  				avg_time_slots_per_mtp); -	if (mst_mode) { +	if (mst_mode || update_drm_mst_state) {  		/* when link is in mst mode, reply on mst manager to remove  		 * payload  		 */ @@ -4077,11 +4080,18 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)  			stream->ctx,  			stream); +		if (!update_drm_mst_state) +			dm_helpers_dp_mst_send_payload_allocation( +				stream->ctx, +				stream, +				false); +	} + +	if (update_drm_mst_state)  		dm_helpers_dp_mst_send_payload_allocation(  			stream->ctx,  			stream,  			false); -	}  	return DC_OK;  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index f9ea1e86707f..79850a68f62a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -874,8 +874,9 @@ static const struct dc_plane_cap plane_cap = {  	},  	// 6:1 downscaling ratio: 1000/6 = 166.666 +	// 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250  	.max_downscale_factor = { -			.argb8888 = 167, +			.argb8888 = 250,  			.nv12 = 167,  			.fp16 = 167  	}, @@ -1763,7 +1764,7 @@ static bool dcn314_resource_construct(  	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;  	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;  	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; -	dc->caps.max_downscale_ratio = 600; +	dc->caps.max_downscale_ratio = 400;  	dc->caps.i2c_speed_in_khz = 100;  	dc->caps.i2c_speed_in_khz_hdcp = 100;  	dc->caps.max_cursor_size = 256; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index dc4649458567..a4e9fd5307c6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -94,7 +94,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {  	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,  	.calc_vupdate_position = dcn10_calc_vupdate_position,  	.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations, -	.does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall, +	.does_plane_fit_in_mall = NULL,  	.set_backlight_level = dcn21_set_backlight_level,  	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,  	.hardware_release = dcn30_hardware_release, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 950669f2c10d..cb7c0c878423 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -3183,7 +3183,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  		} else {  			v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];  		} -		v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0; +		v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;  		if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])  				<= (isInterlaceTiming ?  						
dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) : diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index 5af601cff1a0..b53feeaf5cf1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -6257,12 +6257,12 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface  	double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];  	bool NotEnoughDETSwathFillLatencyHiding = false; -	/* calculate sum of single swath size for all pipes in bytes*/ +	/* calculate sum of single swath size for all pipes in bytes */  	for (k = 0; k < NumberOfActiveSurfaces; k++) { -		SwathSizePerSurfaceY[k] += SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k]; +		SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];  		if (SwathHeightC[k] != 0) -			SwathSizePerSurfaceC[k] += SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k]; +			SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];  		else  			SwathSizePerSurfaceC[k] = 0; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 4a122925c3ae..92c18bfb98b3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,  	if (dmub->hw_funcs.reset)  		dmub->hw_funcs.reset(dmub); +	/* reset the cache of the last wptr as well now that hw is reset */ +	dmub->inbox1_last_wptr = 0; +  	cw0.offset.quad_part = inst_fb->gpu_addr;  	cw0.region.base = DMUB_CW0_BASE;  	cw0.region.top = cw0.region.base + inst_fb->size - 1; @@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv 
*dmub)  	if (dmub->hw_funcs.reset)  		dmub->hw_funcs.reset(dmub); +	/* mailboxes have been reset in hw, so reset the sw state as well */ +	dmub->inbox1_last_wptr = 0; +	dmub->inbox1_rb.wrpt = 0; +	dmub->inbox1_rb.rptr = 0; +	dmub->outbox0_rb.wrpt = 0; +	dmub->outbox0_rb.rptr = 0; +	dmub->outbox1_rb.wrpt = 0; +	dmub->outbox1_rb.rptr = 0; +  	dmub->hw_init = false;  	return DMUB_STATUS_OK; |