Diffstat (limited to 'drivers/gpu/drm/amd/display')
109 files changed, 2414 insertions, 1235 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 91fb72c96545..718e123a3230 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -27,6 +27,10 @@  AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o +ifdef CONFIG_DRM_AMD_DC_DCN +AMDGPUDM += dc_fpu.o +endif +  ifneq ($(CONFIG_DRM_AMD_DC),)  AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o  endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b53f49a23ddc..816723691d51 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -618,6 +618,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)  }  #endif +#define DMUB_TRACE_MAX_READ 64  /**   * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt   * @interrupt_params: used for determining the Outbox instance @@ -625,7 +626,6 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)   * Handles the Outbox Interrupt   * event handler.   */ -#define DMUB_TRACE_MAX_READ 64  static void dm_dmub_outbox1_low_irq(void *interrupt_params)  {  	struct dmub_notification notify; @@ -1044,10 +1044,10 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_  }  #endif  #if defined(CONFIG_DRM_AMD_DC_DCN) -static void event_mall_stutter(struct work_struct *work) +static void vblank_control_worker(struct work_struct *work)  { - -	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work); +	struct vblank_control_work *vblank_work = +		container_of(work, struct vblank_control_work, work);  	struct amdgpu_display_manager *dm = vblank_work->dm;  	mutex_lock(&dm->dc_lock); @@ -1061,27 +1061,25 @@ static void event_mall_stutter(struct work_struct *work)  	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); -	mutex_unlock(&dm->dc_lock); -} - -static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc) -{ - -	int max_caps = dc->caps.max_links; -	struct vblank_workqueue *vblank_work; -	int i = 0; - -	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL); -	if (ZERO_OR_NULL_PTR(vblank_work)) { -		kfree(vblank_work); -		return NULL; +	/* Control PSR based on vblank requirements from OS */ +	if (vblank_work->stream && vblank_work->stream->link) { +		if (vblank_work->enable) { +			if (vblank_work->stream->link->psr_settings.psr_allow_active) +				amdgpu_dm_psr_disable(vblank_work->stream); +		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && +			   !vblank_work->stream->link->psr_settings.psr_allow_active && +			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) { +			amdgpu_dm_psr_enable(vblank_work->stream); +		}  	} -	for (i = 0; i < max_caps; i++) -		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter); +	mutex_unlock(&dm->dc_lock); + +	dc_stream_release(vblank_work->stream); -	return vblank_work; +	kfree(vblank_work);  } +  #endif  static int amdgpu_dm_init(struct amdgpu_device *adev)  { @@ -1224,12 +1222,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  #if defined(CONFIG_DRM_AMD_DC_DCN)  	if (adev->dm.dc->caps.max_links > 0) { -		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc); - -		if (!adev->dm.vblank_workqueue) +		
adev->dm.vblank_control_workqueue = +			create_singlethread_workqueue("dm_vblank_control_workqueue"); +		if (!adev->dm.vblank_control_workqueue)  			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); -		else -			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);  	}  #endif @@ -1302,6 +1298,13 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)  {  	int i; +#if defined(CONFIG_DRM_AMD_DC_DCN) +	if (adev->dm.vblank_control_workqueue) { +		destroy_workqueue(adev->dm.vblank_control_workqueue); +		adev->dm.vblank_control_workqueue = NULL; +	} +#endif +  	for (i = 0; i < adev->dm.display_indexes_num; i++) {  		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);  	} @@ -1325,14 +1328,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)  		dc_deinit_callbacks(adev->dm.dc);  #endif -#if defined(CONFIG_DRM_AMD_DC_DCN) -	if (adev->dm.vblank_workqueue) { -		adev->dm.vblank_workqueue->dm = NULL; -		kfree(adev->dm.vblank_workqueue); -		adev->dm.vblank_workqueue = NULL; -	} -#endif -  	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);  	if (dc_enable_dmub_notifications(adev->dm.dc)) { @@ -1548,6 +1543,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)  	}  	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; +	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);  	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {  		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = @@ -1561,7 +1557,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)  			 adev->dm.dmcub_fw_version);  	} -	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);  	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);  	dmub_srv = adev->dm.dmub_srv; @@ -2412,6 +2407,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)  	static const u8 pre_computed_values[] = {  		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,  		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; +	int i;  	if (!aconnector || !aconnector->dc_link)  		return; @@ -2423,7 +2419,13 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)  	conn_base = &aconnector->base;  	adev = drm_to_adev(conn_base->dev);  	dm = &adev->dm; -	caps = &dm->backlight_caps; +	for (i = 0; i < dm->num_of_edps; i++) { +		if (link == dm->backlight_link[i]) +			break; +	} +	if (i >= dm->num_of_edps) +		return; +	caps = &dm->backlight_caps[i];  	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;  	caps->aux_support = false;  	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; @@ -3423,35 +3425,36 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)  #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\  	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) -static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) +static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, +					    int bl_idx)  {  #if defined(CONFIG_ACPI)  	struct amdgpu_dm_backlight_caps caps;  	memset(&caps, 0, sizeof(caps)); -	if (dm->backlight_caps.caps_valid) +	if (dm->backlight_caps[bl_idx].caps_valid)  		return;  	amdgpu_acpi_get_backlight_caps(&caps);  	if (caps.caps_valid) { -		dm->backlight_caps.caps_valid = true; +		dm->backlight_caps[bl_idx].caps_valid = true;  		if (caps.aux_support)  			return; -		dm->backlight_caps.min_input_signal = caps.min_input_signal; -		dm->backlight_caps.max_input_signal = caps.max_input_signal; +		
dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; +		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;  	} else { -		dm->backlight_caps.min_input_signal = +		dm->backlight_caps[bl_idx].min_input_signal =  				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; -		dm->backlight_caps.max_input_signal = +		dm->backlight_caps[bl_idx].max_input_signal =  				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;  	}  #else -	if (dm->backlight_caps.aux_support) +	if (dm->backlight_caps[bl_idx].aux_support)  		return; -	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; -	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; +	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; +	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;  #endif  } @@ -3502,41 +3505,31 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap  }  static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, +					 int bl_idx,  					 u32 user_brightness)  {  	struct amdgpu_dm_backlight_caps caps; -	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP]; -	u32 brightness[AMDGPU_DM_MAX_NUM_EDP]; +	struct dc_link *link; +	u32 brightness;  	bool rc; -	int i; -	amdgpu_dm_update_backlight_caps(dm); -	caps = dm->backlight_caps; +	amdgpu_dm_update_backlight_caps(dm, bl_idx); +	caps = dm->backlight_caps[bl_idx]; -	for (i = 0; i < dm->num_of_edps; i++) { -		dm->brightness[i] = user_brightness; -		brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]); -		link[i] = (struct dc_link *)dm->backlight_link[i]; -	} +	dm->brightness[bl_idx] = user_brightness; +	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); +	link = (struct dc_link *)dm->backlight_link[bl_idx];  	/* Change brightness based on AUX property */  	if (caps.aux_support) { -		for (i = 0; i < dm->num_of_edps; i++) { -			rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i], -				AUX_BL_DEFAULT_TRANSITION_TIME_MS); -			if (!rc) { -				DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i); -				break; -			} -		} +		rc = dc_link_set_backlight_level_nits(link, true, brightness, +						      AUX_BL_DEFAULT_TRANSITION_TIME_MS); +		if (!rc) +			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);  	} else { -		for (i = 0; i < dm->num_of_edps; i++) { -			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0); -			if (!rc) { -				DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i); -				break; -			} -		} +		rc = dc_link_set_backlight_level(link, brightness, 0); +		if (!rc) +			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);  	}  	return rc ? 
0 : 1; @@ -3545,33 +3538,41 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,  static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)  {  	struct amdgpu_display_manager *dm = bl_get_data(bd); +	int i; -	amdgpu_dm_backlight_set_level(dm, bd->props.brightness); +	for (i = 0; i < dm->num_of_edps; i++) { +		if (bd == dm->backlight_dev[i]) +			break; +	} +	if (i >= AMDGPU_DM_MAX_NUM_EDP) +		i = 0; +	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);  	return 0;  } -static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm) +static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, +					 int bl_idx)  {  	struct amdgpu_dm_backlight_caps caps; +	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; -	amdgpu_dm_update_backlight_caps(dm); -	caps = dm->backlight_caps; +	amdgpu_dm_update_backlight_caps(dm, bl_idx); +	caps = dm->backlight_caps[bl_idx];  	if (caps.aux_support) { -		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];  		u32 avg, peak;  		bool rc;  		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);  		if (!rc) -			return dm->brightness[0]; +			return dm->brightness[bl_idx];  		return convert_brightness_to_user(&caps, avg);  	} else { -		int ret = dc_link_get_backlight_level(dm->backlight_link[0]); +		int ret = dc_link_get_backlight_level(link);  		if (ret == DC_ERROR_UNEXPECTED) -			return dm->brightness[0]; +			return dm->brightness[bl_idx];  		return convert_brightness_to_user(&caps, ret);  	}  } @@ -3579,8 +3580,15 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)  static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)  {  	struct amdgpu_display_manager *dm = bl_get_data(bd); +	int i; -	return amdgpu_dm_backlight_get_level(dm); +	for (i = 0; i < dm->num_of_edps; i++) { +		if (bd == dm->backlight_dev[i]) +			break; +	} +	if (i >= AMDGPU_DM_MAX_NUM_EDP) +		i = 0; +	return amdgpu_dm_backlight_get_level(dm, i);  }  static const struct backlight_ops amdgpu_dm_backlight_ops = { @@ -3594,31 +3602,28 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)  {  	char bl_name[16];  	struct backlight_properties props = { 0 }; -	int i; -	amdgpu_dm_update_backlight_caps(dm); -	for (i = 0; i < dm->num_of_edps; i++) -		dm->brightness[i] = AMDGPU_MAX_BL_LEVEL; +	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps); +	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;  	props.max_brightness = AMDGPU_MAX_BL_LEVEL;  	props.brightness = AMDGPU_MAX_BL_LEVEL;  	props.type = BACKLIGHT_RAW;  	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", -		 adev_to_drm(dm->adev)->primary->index); +		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps); -	dm->backlight_dev = backlight_device_register(bl_name, -						      adev_to_drm(dm->adev)->dev, -						      dm, -						      &amdgpu_dm_backlight_ops, -						      &props); +	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name, +								       adev_to_drm(dm->adev)->dev, +								       dm, +								       &amdgpu_dm_backlight_ops, +								       &props); -	if (IS_ERR(dm->backlight_dev)) +	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))  		DRM_ERROR("DM: Backlight registration failed!\n");  	else  		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);  } -  #endif  static int initialize_plane(struct amdgpu_display_manager *dm, @@ -3675,10 +3680,10 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,  		 * DM 
initialization because not having a backlight control  		 * is better then a black screen.  		 */ -		if (!dm->backlight_dev) +		if (!dm->backlight_dev[dm->num_of_edps])  			amdgpu_dm_register_backlight_device(dm); -		if (dm->backlight_dev) { +		if (dm->backlight_dev[dm->num_of_edps]) {  			dm->backlight_link[dm->num_of_edps] = link;  			dm->num_of_edps++;  		} @@ -4747,7 +4752,7 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,  					  const bool force_disable_dcc)  {  	const uint64_t modifier = afb->base.modifier; -	int ret; +	int ret = 0;  	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);  	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); @@ -4765,9 +4770,9 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,  	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);  	if (ret) -		return ret; +		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); -	return 0; +	return ret;  }  static int @@ -5994,7 +5999,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)  	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);  #if defined(CONFIG_DRM_AMD_DC_DCN)  	struct amdgpu_display_manager *dm = &adev->dm; -	unsigned long flags; +	struct vblank_control_work *work;  #endif  	int rc = 0; @@ -6019,12 +6024,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)  		return 0;  #if defined(CONFIG_DRM_AMD_DC_DCN) -	spin_lock_irqsave(&dm->vblank_lock, flags); -	dm->vblank_workqueue->dm = dm; -	dm->vblank_workqueue->otg_inst = acrtc->otg_inst; -	dm->vblank_workqueue->enable = enable; -	spin_unlock_irqrestore(&dm->vblank_lock, flags); -	schedule_work(&dm->vblank_workqueue->mall_work); +	work = kzalloc(sizeof(*work), GFP_ATOMIC); +	if (!work) +		return -ENOMEM; + +	INIT_WORK(&work->work, vblank_control_worker); +	work->dm = dm; +	work->acrtc = acrtc; +	work->enable = enable; + +	if (acrtc_state->stream) { +		dc_stream_retain(acrtc_state->stream); +		work->stream = acrtc_state->stream; +	} + +	queue_work(dm->vblank_control_workqueue, &work->work);  #endif  	return 0; @@ -6198,6 +6212,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)  	const struct dc_link *link = aconnector->dc_link;  	struct amdgpu_device *adev = drm_to_adev(connector->dev);  	struct amdgpu_display_manager *dm = &adev->dm; +	int i;  	/*  	 * Call only if mst_mgr was iniitalized before since it's not done @@ -6208,12 +6223,11 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)  #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\  	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - -	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && -	    link->type != dc_connection_none && -	    dm->backlight_dev) { -		backlight_device_unregister(dm->backlight_dev); -		dm->backlight_dev = NULL; +	for (i = 0; i < dm->num_of_edps; i++) { +		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) { +			backlight_device_unregister(dm->backlight_dev[i]); +			dm->backlight_dev[i] = NULL; +		}  	}  #endif @@ -7570,8 +7584,10 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)  	 * 60 	    - Commonly used  	 * 48,72,96 - Multiples of 24  	 */ -	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000, -					 48000, 50000, 60000, 72000, 96000 }; +	static const uint32_t common_rates[] = { +		23976, 24000, 25000, 29970, 30000, +		48000, 50000, 60000, 72000, 96000 +	};  	/*  	 * Find mode with highest 
refresh rate with the same resolution @@ -8627,6 +8643,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  	/* Update the planes if changed or disable if we don't have any. */  	if ((planes_count || acrtc_state->active_planes == 0) &&  		acrtc_state->stream) { +#if defined(CONFIG_DRM_AMD_DC_DCN) +		/* +		 * If PSR or idle optimizations are enabled then flush out +		 * any pending work before hardware programming. +		 */ +		flush_workqueue(dm->vblank_control_workqueue); +#endif +  		bundle->stream_update.stream = acrtc_state->stream;  		if (new_pcrtc_state->mode_changed) {  			bundle->stream_update.src = acrtc_state->stream->src; @@ -8695,16 +8719,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&  				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)  			amdgpu_dm_link_setup_psr(acrtc_state->stream); -		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && -				acrtc_state->stream->link->psr_settings.psr_feature_enabled && -				!acrtc_state->stream->link->psr_settings.psr_allow_active) { -			struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *) -					acrtc_state->stream->dm_stream_context; + +		/* Decrement skip count when PSR is enabled and we're doing fast updates. */ +		if (acrtc_state->update_type == UPDATE_TYPE_FAST && +		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) { +			struct amdgpu_dm_connector *aconn = +				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;  			if (aconn->psr_skip_count > 0)  				aconn->psr_skip_count--; -			else -				amdgpu_dm_psr_enable(acrtc_state->stream); + +			/* Allow PSR when skip count is 0. */ +			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; +		} else { +			acrtc_attach->dm_irq_params.allow_psr_entry = false;  		}  		mutex_unlock(&dm->dc_lock); @@ -8953,8 +8981,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  	if (dc_state) {  		/* if there mode set or reset, disable eDP PSR */ -		if (mode_set_reset_required) +		if (mode_set_reset_required) { +#if defined(CONFIG_DRM_AMD_DC_DCN) +			flush_workqueue(dm->vblank_control_workqueue); +#endif  			amdgpu_dm_psr_disable_all(dm); +		}  		dm_enable_per_frame_crtc_master_sync(dc_state);  		mutex_lock(&dm->dc_lock); @@ -9191,8 +9223,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\  	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)  	/* restore the backlight level */ -	if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0])) -		amdgpu_dm_backlight_set_level(dm, dm->brightness[0]); +	for (i = 0; i < dm->num_of_edps; i++) { +		if (dm->backlight_dev[i] && +		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i])) +			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); +	}  #endif  	/*  	 * send vblank event on all events not handled in flip and @@ -9605,7 +9640,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  		} else if (amdgpu_freesync_vid_mode && aconnector &&  			   is_freesync_video_mode(&new_crtc_state->mode,  						  aconnector)) { -			set_freesync_fixed_config(dm_new_crtc_state); +			struct drm_display_mode *high_mode; + +			high_mode = get_highest_refresh_rate_mode(aconnector, false); +			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) { +				set_freesync_fixed_config(dm_new_crtc_state); +			}  		}  		ret = 
dm_atomic_get_state(state, &dm_state); @@ -10549,13 +10589,68 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,  	return capable;  } -static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, +static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, +		unsigned int offset, +		unsigned int total_length, +		uint8_t *data, +		unsigned int length, +		struct amdgpu_hdmi_vsdb_info *vsdb) +{ +	bool res; +	union dmub_rb_cmd cmd; +	struct dmub_cmd_send_edid_cea *input; +	struct dmub_cmd_edid_cea_output *output; + +	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) +		return false; + +	memset(&cmd, 0, sizeof(cmd)); + +	input = &cmd.edid_cea.data.input; + +	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; +	cmd.edid_cea.header.sub_type = 0; +	cmd.edid_cea.header.payload_bytes = +		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); +	input->offset = offset; +	input->length = length; +	input->total_length = total_length; +	memcpy(input->payload, data, length); + +	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); +	if (!res) { +		DRM_ERROR("EDID CEA parser failed\n"); +		return false; +	} + +	output = &cmd.edid_cea.data.output; + +	if (output->type == DMUB_CMD__EDID_CEA_ACK) { +		if (!output->ack.success) { +			DRM_ERROR("EDID CEA ack failed at offset %d\n", +					output->ack.offset); +		} +	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { +		if (!output->amd_vsdb.vsdb_found) +			return false; + +		vsdb->freesync_supported = output->amd_vsdb.freesync_supported; +		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; +		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; +		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; +	} else { +		DRM_WARN("Unknown EDID CEA parser results\n"); +		return false; +	} + +	return true; +} + +static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,  		uint8_t *edid_ext, int len,  		struct amdgpu_hdmi_vsdb_info *vsdb_info)  {  	int i; -	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); -	struct dc *dc = adev->dm.dc;  	/* send extension block to DMCU for parsing */  	for (i = 0; i < len; i += 8) { @@ -10563,14 +10658,14 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,  		int offset;  		/* send 8 bytes a time */ -		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8)) +		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))  			return false;  		if (i+8 == len) {  			/* EDID block sent completed, expect result */  			int version, min_rate, max_rate; -			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate); +			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);  			if (res) {  				/* amd vsdb found */  				vsdb_info->freesync_supported = 1; @@ -10584,7 +10679,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,  		}  		/* check for ack*/ -		res = dc_edid_parser_recv_cea_ack(dc, &offset); +		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);  		if (!res)  			return false;  	} @@ -10592,6 +10687,34 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,  	return false;  } +static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, +		uint8_t *edid_ext, int len, +		struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ +	int i; + +	/* send extension block to DMCU for parsing */ +	for (i = 0; i < len; i += 8) { +		/* send 8 bytes a time */ +		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info)) +			return false; +	} + +	return 
vsdb_info->freesync_supported; +} + +static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, +		uint8_t *edid_ext, int len, +		struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ +	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + +	if (adev->dm.dmub_srv) +		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); +	else +		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); +} +  static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,  		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)  { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 9522d4ca299e..d1d353a7c77d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -60,6 +60,7 @@ enum aux_return_code_type;  /* Forward declarations */  struct amdgpu_device; +struct amdgpu_crtc;  struct drm_device;  struct dc;  struct amdgpu_bo; @@ -86,16 +87,18 @@ struct dm_compressor_info {  };  /** - * struct vblank_workqueue - Works to be executed in a separate thread during vblank - * @mall_work: work for mall stutter + * struct vblank_control_work - Work data for vblank control + * @work: Kernel work data for the work event   * @dm: amdgpu display manager device - * @otg_inst: otg instance of which vblank is being set - * @enable: true if enable vblank + * @acrtc: amdgpu CRTC instance for which the event has occurred + * @stream: DC stream for which the event has occurred + * @enable: true if enabling vblank   */ -struct vblank_workqueue { -	struct work_struct mall_work; +struct vblank_control_work { +	struct work_struct work;  	struct amdgpu_display_manager *dm; -	int otg_inst; +	struct amdgpu_crtc *acrtc; +	struct dc_stream_state *stream;  	bool enable;  }; @@ -365,13 +368,13 @@ struct amdgpu_display_manager {  	spinlock_t irq_handler_list_table_lock; -	struct backlight_device *backlight_dev; +	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];  	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];  	uint8_t num_of_edps; -	struct amdgpu_dm_backlight_caps backlight_caps; +	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];  	struct mod_freesync *freesync_module;  #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -380,11 +383,11 @@ struct amdgpu_display_manager {  #if defined(CONFIG_DRM_AMD_DC_DCN)  	/** -	 * @vblank_workqueue: +	 * @vblank_control_workqueue:  	 * -	 * amdgpu workqueue during vblank +	 * Deferred work for vblank control events.  	 
*/ -	struct vblank_workqueue *vblank_workqueue; +	struct workqueue_struct *vblank_control_workqueue;  #endif  	struct drm_atomic_state *cached_state; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index f1145086a468..87daa78a32b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -197,29 +197,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,  	rd_buf_ptr = rd_buf; -	str_len = strlen("Current:  %d  %d  %d  "); -	snprintf(rd_buf_ptr, str_len, "Current:  %d  %d  %d  ", +	str_len = strlen("Current:  %d  0x%x  %d  "); +	snprintf(rd_buf_ptr, str_len, "Current:  %d  0x%x  %d  ",  			link->cur_link_settings.lane_count,  			link->cur_link_settings.link_rate,  			link->cur_link_settings.link_spread);  	rd_buf_ptr += str_len; -	str_len = strlen("Verified:  %d  %d  %d  "); -	snprintf(rd_buf_ptr, str_len, "Verified:  %d  %d  %d  ", +	str_len = strlen("Verified:  %d  0x%x  %d  "); +	snprintf(rd_buf_ptr, str_len, "Verified:  %d  0x%x  %d  ",  			link->verified_link_cap.lane_count,  			link->verified_link_cap.link_rate,  			link->verified_link_cap.link_spread);  	rd_buf_ptr += str_len; -	str_len = strlen("Reported:  %d  %d  %d  "); -	snprintf(rd_buf_ptr, str_len, "Reported:  %d  %d  %d  ", +	str_len = strlen("Reported:  %d  0x%x  %d  "); +	snprintf(rd_buf_ptr, str_len, "Reported:  %d  0x%x  %d  ",  			link->reported_link_cap.lane_count,  			link->reported_link_cap.link_rate,  			link->reported_link_cap.link_spread);  	rd_buf_ptr += str_len; -	str_len = strlen("Preferred:  %d  %d  %d  "); -	snprintf(rd_buf_ptr, str_len, "Preferred:  %d  %d  %d\n", +	str_len = strlen("Preferred:  %d  0x%x  %d  "); +	snprintf(rd_buf_ptr, str_len, "Preferred:  %d  0x%x  %d\n",  			link->preferred_link_setting.lane_count,  			link->preferred_link_setting.link_rate,  			link->preferred_link_setting.link_spread); @@ -377,7 +377,7 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,  	if (!rd_buf)  		return -EINVAL; -	snprintf(rd_buf, rd_buf_size, "  %d  %d  %d  ", +	snprintf(rd_buf, rd_buf_size, "  %d  %d  %d\n",  			link->cur_lane_setting.VOLTAGE_SWING,  			link->cur_lane_setting.PRE_EMPHASIS,  			link->cur_lane_setting.POST_CURSOR2); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index e63c6885c757..c5f1dc3b5961 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -79,12 +79,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint  	struct ta_hdcp_shared_memory *hdcp_cmd; -	if (!psp->hdcp_context.hdcp_initialized) { +	if (!psp->hdcp_context.context.initialized) {  		DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");  		return NULL;  	} -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM; @@ -105,12 +105,12 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size,  	struct ta_hdcp_shared_memory *hdcp_cmd; -	if (!psp->hdcp_context.hdcp_initialized) { +	if (!psp->hdcp_context.context.initialized) {  		DRM_WARN("Failed to get hdcp srm. 
HDCP TA is not initialized.");  		return -EINVAL;  	} -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size); @@ -414,12 +414,12 @@ static bool enable_assr(void *handle, struct dc_link *link)  	struct ta_dtm_shared_memory *dtm_cmd;  	bool res = true; -	if (!psp->dtm_context.dtm_initialized) { +	if (!psp->dtm_context.context.initialized) {  		DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");  		return false;  	} -	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; +	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;  	mutex_lock(&psp->dtm_context.mutex);  	memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); @@ -655,10 +655,8 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct  		INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);  		hdcp_work[i].hdcp.config.psp.handle = &adev->psp; -		if (dc->ctx->dce_version == DCN_VERSION_3_1) { +		if (dc->ctx->dce_version == DCN_VERSION_3_1)  			hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; -			hdcp_work[i].hdcp.config.psp.caps.opm_state_query_supported = false; -		}  		hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);  		hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;  		hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 40f617bbb86f..4aba0e8c84f8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,  		handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);  		/*allocate a new amdgpu_dm_irq_handler_data*/ -		handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL); +		handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);  		if (!handler_data_add) {  			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");  			return; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h index f3b93ba69a27..79b5f9999fec 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h @@ -33,6 +33,7 @@ struct dm_irq_params {  	struct mod_vrr_params vrr_params;  	struct dc_stream_state *stream;  	int active_planes; +	bool allow_psr_entry;  	struct mod_freesync_config freesync_config;  #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 5568d4e518e6..1bcba6943fd7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -213,6 +213,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)  			drm_connector_update_edid_property(  				&aconnector->base,  				NULL); + +			DRM_DEBUG_KMS("Can't get EDID of %s. 
Add default remote sink.", connector->name); +			if (!aconnector->dc_sink) { +				struct dc_sink *dc_sink; +				struct dc_sink_init_data init_params = { +					.link = aconnector->dc_link, +					.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + +				dc_sink = dc_link_add_remote_sink( +					aconnector->dc_link, +					NULL, +					0, +					&init_params); + +				if (!dc_sink) { +					DRM_ERROR("Unable to add a remote sink\n"); +					return 0; +				} + +				dc_sink->priv = aconnector; +				aconnector->dc_sink = dc_sink; +			} +  			return ret;  		} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index 46a33f64cf8e..fdcaea22b456 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -637,6 +637,30 @@ TRACE_EVENT(amdgpu_refresh_rate_track,  		  __entry->refresh_rate_ns)  ); +TRACE_EVENT(dcn_fpu, +	    TP_PROTO(bool begin, const char *function, const int line, const int recursion_depth), +	    TP_ARGS(begin, function, line, recursion_depth), + +	    TP_STRUCT__entry( +			     __field(bool, begin) +			     __field(const char *, function) +			     __field(int, line) +			     __field(int, recursion_depth) +	    ), +	    TP_fast_assign( +			   __entry->begin = begin; +			   __entry->function = function; +			   __entry->line = line; +			   __entry->recursion_depth = recursion_depth; +	    ), +	    TP_printk("%s: recursion_depth: %d: %s()+%d:", +		      __entry->begin ? "begin" : "end", +		      __entry->recursion_depth, +		      __entry->function, +		      __entry->line +	    ) +); +  #endif /* _AMDGPU_DM_TRACE_H_ */  #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c new file mode 100644 index 000000000000..c9f47d167472 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc_trace.h" + +#if defined(CONFIG_X86) +#include <asm/fpu/api.h> +#elif defined(CONFIG_PPC64) +#include <asm/switch_to.h> +#include <asm/cputable.h> +#endif + +/** + * DOC: DC FPU manipulation overview + * + * DC core uses FPU operations in multiple parts of the code, which requires a + * more specialized way to manage these areas' entrance. 
To fulfill this + * requirement, we created some wrapper functions that encapsulate + * kernel_fpu_begin/end to better fit our need in the display component. In + * summary, in this file, you can find functions related to FPU operation + * management. + */ + +static DEFINE_PER_CPU(int, fpu_recursion_depth); + +/** + * dc_assert_fp_enabled - Check if FPU protection is enabled + * + * This function tells if the code is already under FPU protection or not. A + * function that works as an API for a set of FPU operations can use this + * function for checking if the caller invoked it after DC_FP_START(). For + * example, take a look at dcn2x.c file. + */ +inline void dc_assert_fp_enabled(void) +{ +	int *pcpu, depth = 0; + +	pcpu = get_cpu_ptr(&fpu_recursion_depth); +	depth = *pcpu; +	put_cpu_ptr(&fpu_recursion_depth); + +	ASSERT(depth > 1); +} + +/** + * dc_fpu_begin - Enables FPU protection + * @function_name: A string containing the function name for debug purposes + *   (usually __func__) + * + * @line: A line number where DC_FP_START was invoked for debug purpose + *   (usually __LINE__) + * + * This function is responsible for managing the use of kernel_fpu_begin() with + * the advantage of providing an event trace for debugging. + * + * Note: Do not call this function directly; always use DC_FP_START(). + */ +void dc_fpu_begin(const char *function_name, const int line) +{ +	int *pcpu; + +	pcpu = get_cpu_ptr(&fpu_recursion_depth); +	*pcpu += 1; + +	if (*pcpu == 1) { +#if defined(CONFIG_X86) +		kernel_fpu_begin(); +#elif defined(CONFIG_PPC64) +		if (cpu_has_feature(CPU_FTR_VSX_COMP)) { +			preempt_disable(); +			enable_kernel_vsx(); +		} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { +			preempt_disable(); +			enable_kernel_altivec(); +		} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { +			preempt_disable(); +			enable_kernel_fp(); +		} +#endif +	} + +	TRACE_DCN_FPU(true, function_name, line, *pcpu); +	put_cpu_ptr(&fpu_recursion_depth); +} + +/** + * dc_fpu_end - Disable FPU protection + * @function_name: A string containing the function name for debug purposes + * @line: A-line number where DC_FP_END was invoked for debug purpose + * + * This function is responsible for managing the use of kernel_fpu_end() with + * the advantage of providing an event trace for debugging. + * + * Note: Do not call this function directly; always use DC_FP_END(). + */ +void dc_fpu_end(const char *function_name, const int line) +{ +	int *pcpu; + +	pcpu = get_cpu_ptr(&fpu_recursion_depth); +	*pcpu -= 1; +	if (*pcpu <= 0) { +#if defined(CONFIG_X86) +		kernel_fpu_end(); +#elif defined(CONFIG_PPC64) +		if (cpu_has_feature(CPU_FTR_VSX_COMP)) { +			disable_kernel_vsx(); +			preempt_enable(); +		} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { +			disable_kernel_altivec(); +			preempt_enable(); +		} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { +			disable_kernel_fp(); +			preempt_enable(); +		} +#endif +	} + +	TRACE_DCN_FPU(false, function_name, line, *pcpu); +	put_cpu_ptr(&fpu_recursion_depth); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h new file mode 100644 index 000000000000..b8275b397920 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_FPU_H__ +#define __DC_FPU_H__ + +void dc_assert_fp_enabled(void); +void dc_fpu_begin(const char *function_name, const int line); +void dc_fpu_end(const char *function_name, const int line); + +#endif /* __DC_FPU_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index e133edc587d3..76ec8ec92efd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -264,9 +264,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,  		if (pp_smu->set_hard_min_fclk_by_freq &&  				pp_smu->set_hard_min_dcfclk_by_freq &&  				pp_smu->set_min_deep_sleep_dcfclk) { -			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000); -			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000); -			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000); +			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->fclk_khz)); +			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_khz)); +			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_deep_sleep_khz));  		}  	} @@ -284,9 +284,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,  		if (pp_smu->set_hard_min_fclk_by_freq &&  				pp_smu->set_hard_min_dcfclk_by_freq &&  				pp_smu->set_min_deep_sleep_dcfclk) { -			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000); -			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000); -			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000); +			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->fclk_khz)); +			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_khz)); +			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_deep_sleep_khz));  		}  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index dbc7cde00433..fe18bb9e19aa 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -130,7 +130,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di  	actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDispclkFreq, -			requested_dispclk_khz / 1000); +			khz_to_mhz_ceil(requested_dispclk_khz));  	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { @@ -150,7 +150,7 @@ int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)  	actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDprefclkFreq, -			clk_mgr->base.dprefclk_khz / 1000); +			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));  	/* TODO: add code for programing DP DTO, currently this is down by command table */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index a5331b96f551..0d01aa9f15a6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -253,20 +253,20 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,  	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {  		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;  		if (pp_smu && pp_smu->set_hard_min_dcfclk_by_freq) -			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dcfclk_khz / 1000); +			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));  	}  	if (should_set_clock(safe_to_lower,  			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {  		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;  		if (pp_smu && pp_smu->set_min_deep_sleep_dcfclk) -			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, clk_mgr_base->clks.dcfclk_deep_sleep_khz / 1000); +			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));  	}  	if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz)) {  		clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;  		if (pp_smu && pp_smu->set_hard_min_socclk_by_freq) -			pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000); +			pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.socclk_khz));  	}  	total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context); @@ -281,7 +281,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,  	if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {  		clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;  		if (pp_smu && pp_smu->set_hard_min_uclk_by_freq) -			pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000); +			pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));  	}  	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { @@ -306,7 +306,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,  		clk_mgr_base->clks.disp_dpp_voltage_level_khz = new_clocks->disp_dpp_voltage_level_khz;  		if (pp_smu && pp_smu->set_voltage_by_freq) -			pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.disp_dpp_voltage_level_khz / 1000); +			
pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.disp_dpp_voltage_level_khz));  	}  	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { @@ -502,7 +502,7 @@ static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc  	if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {  		clk_mgr_base->clks.phyclk_khz = max_phyclk_req; -		pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PHYCLK, clk_mgr_base->clks.phyclk_khz / 1000); +		pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PHYCLK, khz_to_mhz_ceil(clk_mgr_base->clks.phyclk_khz));  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index c6f494f0dcea..6185f9475fa2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -66,9 +66,11 @@ int rn_get_active_display_cnt_wa(  	for (i = 0; i < context->stream_count; i++) {  		const struct dc_stream_state *stream = context->streams[i]; +		/* Extend the WA to DP for Linux*/  		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||  				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || -				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) +				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK || +				stream->signal == SIGNAL_TYPE_DISPLAY_PORT)  			tmds_present = true;  	} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 7deeec9d1c7c..9f7eed6688c4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -126,7 +126,7 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis  	actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDispclkFreq, -			requested_dispclk_khz / 1000); +			khz_to_mhz_ceil(requested_dispclk_khz));  	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { @@ -138,7 +138,7 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis  	// pmfw always set clock more than or equal requested clock  	if (!IS_DIAG_DC(dc->ctx->dce_environment)) -		ASSERT(actual_dispclk_set_mhz >= requested_dispclk_khz / 1000); +		ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz));  	return actual_dispclk_set_mhz * 1000;  } @@ -150,7 +150,7 @@ int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)  	actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDprefclkFreq, -			clk_mgr->base.dprefclk_khz / 1000); +			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));  	/* TODO: add code for programing DP DTO, currently this is down by command table */ @@ -167,7 +167,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque  	actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetHardMinDcfclkByFreq, -			requested_dcfclk_khz / 1000); +			khz_to_mhz_ceil(requested_dcfclk_khz));  	return actual_dcfclk_set_mhz * 1000;  } @@ -182,7 +182,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int  	actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetMinDeepSleepDcfclk, -			requested_min_ds_dcfclk_khz / 1000); +			
khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));  	return actual_min_ds_dcfclk_mhz * 1000;  } @@ -192,7 +192,7 @@ void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phy  	rn_vbios_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetPhyclkVoltageByFreq, -			requested_phyclk_khz / 1000); +			khz_to_mhz_ceil(requested_phyclk_khz));  }  int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz) @@ -203,10 +203,10 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_  	actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDppclkFreq, -			requested_dpp_khz / 1000); +			khz_to_mhz_ceil(requested_dpp_khz));  	if (!IS_DIAG_DC(dc->ctx->dce_environment)) -		ASSERT(actual_dppclk_set_mhz >= requested_dpp_khz / 1000); +		ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz));  	return actual_dppclk_set_mhz * 1000;  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index af7004b770ae..1861a147a7fa 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -284,12 +284,12 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,  	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {  		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz; -		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, clk_mgr_base->clks.dcfclk_khz / 1000); +		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));  	}  	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {  		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; -		dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz / 1000); +		dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));  	}  	if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz)) @@ -317,20 +317,20 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,  	/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */  	if (clk_mgr_base->clks.p_state_change_support &&  			(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support)) -		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->clks.dramclk_khz / 1000); +		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));  	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {  		if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)  			dpp_clock_lowered = true;  		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; -		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PIXCLK, clk_mgr_base->clks.dppclk_khz / 1000); +		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PIXCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));  		update_dppclk = true;  	}  	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {  		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; -		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); +		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, 
khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz));  		update_dispclk = true;  	} @@ -396,12 +396,17 @@ static void dcn3_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_  	if (!clk_mgr->smu_present)  		return; -	if (current_mode) -		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, -				clk_mgr_base->clks.dramclk_khz / 1000); -	else +	if (current_mode) { +		if (clk_mgr_base->clks.p_state_change_support) +			dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, +					khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)); +		else +			dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, +					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz); +	} else {  		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,  				clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz); +	}  }  /* Set max memclk to highest DPM value */ @@ -489,7 +494,7 @@ static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct d  	if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {  		clk_mgr_base->clks.phyclk_khz = max_phyclk_req; -		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PHYCLK, clk_mgr_base->clks.phyclk_khz / 1000); +		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PHYCLK, khz_to_mhz_ceil(clk_mgr_base->clks.phyclk_khz));  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c index 07774fa2c2cf..6ea642615854 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c @@ -133,7 +133,7 @@ int dcn301_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispc  	actual_dispclk_set_mhz = dcn301_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDispclkFreq, -			requested_dispclk_khz / 1000); +			khz_to_mhz_ceil(requested_dispclk_khz));  	return actual_dispclk_set_mhz * 1000;  } @@ -147,7 +147,7 @@ int dcn301_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)  	actual_dprefclk_set_mhz = dcn301_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDprefclkFreq, -			clk_mgr->base.dprefclk_khz / 1000); +			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));  	/* TODO: add code for programing DP DTO, currently this is down by command table */ @@ -163,7 +163,7 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request  	actual_dcfclk_set_mhz = dcn301_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetHardMinDcfclkByFreq, -			requested_dcfclk_khz / 1000); +			khz_to_mhz_ceil(requested_dcfclk_khz));  	return actual_dcfclk_set_mhz * 1000;  } @@ -177,7 +177,7 @@ int dcn301_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int r  	actual_min_ds_dcfclk_mhz = dcn301_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetMinDeepSleepDcfclk, -			requested_min_ds_dcfclk_khz / 1000); +			khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));  	return actual_min_ds_dcfclk_mhz * 1000;  } @@ -191,7 +191,7 @@ int dcn301_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_kh  	actual_dppclk_set_mhz = dcn301_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDppclkFreq, -			requested_dpp_khz / 1000); +			khz_to_mhz_ceil(requested_dpp_khz));  	return actual_dppclk_set_mhz * 1000;  } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index c636b589d69d..7046da14bb2a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -114,7 +114,7 @@ void vg_update_clocks(struct clk_mgr *clk_mgr_base,  			display_count = vg_get_active_display_cnt_wa(dc, context);  			/* if we can go lower, go lower */ -			if (display_count == 0) { +			if (display_count == 0 && !IS_DIAG_DC(dc->ctx->dce_environment)) {  				union display_idle_optimization_u idle_info = { 0 };  				idle_info.idle_info.df_request_disabled = 1; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index dad4a4c18bcf..8c2b77eb9459 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -147,7 +147,7 @@ int dcn31_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispcl  	actual_dispclk_set_mhz = dcn31_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDispclkFreq, -			(requested_dispclk_khz + 999) / 1000); +			khz_to_mhz_ceil(requested_dispclk_khz));  	return actual_dispclk_set_mhz * 1000;  } @@ -162,7 +162,7 @@ int dcn31_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)  	actual_dprefclk_set_mhz = dcn31_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDprefclkFreq, -			(clk_mgr->base.dprefclk_khz + 999) / 1000); +			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));  	/* TODO: add code for programming DP DTO, currently this is done by command table */ @@ -182,7 +182,7 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste  	actual_dcfclk_set_mhz = dcn31_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetHardMinDcfclkByFreq, -			(requested_dcfclk_khz + 999) / 1000); +			khz_to_mhz_ceil(requested_dcfclk_khz));  	return actual_dcfclk_set_mhz * 1000;  } @@ -200,7 +200,7 @@ int dcn31_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int re  	actual_min_ds_dcfclk_mhz = dcn31_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetMinDeepSleepDcfclk, -			(requested_min_ds_dcfclk_khz + 999) / 1000); +			khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));  	return actual_min_ds_dcfclk_mhz * 1000;  } @@ -215,7 +215,7 @@ int dcn31_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz  	actual_dppclk_set_mhz = dcn31_smu_send_msg_with_param(  			clk_mgr,  			VBIOSSMC_MSG_SetDppclkFreq, -			(requested_dpp_khz + 999) / 1000); +			khz_to_mhz_ceil(requested_dpp_khz));  	return actual_dppclk_set_mhz * 1000;  } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 605e297b7a59..c798c65d4276 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1481,6 +1481,22 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,  	return true;  } +static inline bool should_update_pipe_for_stream( +		struct dc_state *context, +		struct pipe_ctx *pipe_ctx, +		struct dc_stream_state *stream) +{ +	return (pipe_ctx->stream && pipe_ctx->stream == stream); +} + +static inline bool should_update_pipe_for_plane( +		struct dc_state *context, +		struct pipe_ctx *pipe_ctx, +		struct dc_plane_state *plane_state) +{ +	return (pipe_ctx->plane_state == plane_state); +} +  void dc_enable_stereo(  	struct dc *dc,  	struct dc_state *context, @@ -1491,12 +1507,15 @@  	struct pipe_ctx *pipe;  	for (i = 0; i < MAX_PIPES; i++) { -		if (context != NULL) +		if (context != NULL) {  			pipe = &context->res_ctx.pipe_ctx[i]; -		else +		} else { +			context = dc->current_state; +			pipe = 
&dc->current_state->res_ctx.pipe_ctx[i]; -		for (j = 0 ; pipe && j < stream_count; j++)  { -			if (streams[j] && streams[j] == pipe->stream && +		} + +		for (j = 0; pipe && j < stream_count; j++)  { +			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&  				dc->hwss.setup_stereo)  				dc->hwss.setup_stereo(pipe, dc);  		} @@ -1530,6 +1549,12 @@ void dc_z10_restore(struct dc *dc)  	if (dc->hwss.z10_restore)  		dc->hwss.z10_restore(dc);  } + +void dc_z10_save_init(struct dc *dc) +{ +	if (dc->hwss.z10_save_init) +		dc->hwss.z10_save_init(dc); +}  #endif  /*   * Applies given context to HW and copy it into current context. @@ -2623,6 +2648,7 @@ static void commit_planes_for_stream(struct dc *dc,  {  	int i, j;  	struct pipe_ctx *top_pipe_to_program = NULL; +	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);  #if defined(CONFIG_DRM_AMD_DC_DCN)  	dc_z10_restore(dc); @@ -2694,7 +2720,7 @@ static void commit_planes_for_stream(struct dc *dc,  						top_pipe_to_program->stream_res.tg);  		} -	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) +	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)  		dc->hwss.interdependent_update_lock(dc, context, true);  	else  		/* Lock the top pipe while updating plane addrs, since freesync requires @@ -2717,7 +2743,7 @@ static void commit_planes_for_stream(struct dc *dc,  		if (dc->hwss.program_front_end_for_ctx)  			dc->hwss.program_front_end_for_ctx(dc, context); -		if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) +		if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)  			dc->hwss.interdependent_update_lock(dc, context, false);  		else  			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); @@ -2733,14 +2759,14 @@ static void commit_planes_for_stream(struct dc *dc,  				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];  				if (!pipe_ctx->plane_state)  					continue; -				if (pipe_ctx->plane_state != plane_state) +				if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))  					continue; -				plane_state->triplebuffer_flips = false; +				pipe_ctx->plane_state->triplebuffer_flips = false;  				if (update_type == UPDATE_TYPE_FAST &&  					dc->hwss.program_triplebuffer != NULL && -					!plane_state->flip_immediate && dc->debug.enable_tri_buf) { +					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {  						/*triple buffer for VUpdate  only*/ -						plane_state->triplebuffer_flips = true; +						pipe_ctx->plane_state->triplebuffer_flips = true;  				}  			}  			if (update_type == UPDATE_TYPE_FULL) { @@ -2756,8 +2782,7 @@ static void commit_planes_for_stream(struct dc *dc,  		if (!pipe_ctx->top_pipe &&  			!pipe_ctx->prev_odm_pipe && -			pipe_ctx->stream && -			pipe_ctx->stream == stream) { +			should_update_pipe_for_stream(context, pipe_ctx, stream)) {  			struct dc_stream_status *stream_status = NULL;  			if (!pipe_ctx->plane_state) @@ -2810,15 +2835,15 @@ static void commit_planes_for_stream(struct dc *dc,  				for (j = 0; j < dc->res_pool->pipe_count; j++) {  					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; -					if (pipe_ctx->stream != stream) +					if (!should_update_pipe_for_stream(context, pipe_ctx, stream))  						continue; -					if (pipe_ctx->plane_state != plane_state) +					if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))  						continue;  					// GSL has to be used for flip immediate  					dc->hwss.set_flip_control_gsl(pipe_ctx, -							
plane_state->flip_immediate); +							pipe_ctx->plane_state->flip_immediate);  				}  			} @@ -2829,25 +2854,26 @@ static void commit_planes_for_stream(struct dc *dc,  			for (j = 0; j < dc->res_pool->pipe_count; j++) {  				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; -				if (pipe_ctx->stream != stream) +				if (!should_update_pipe_for_stream(context, pipe_ctx, stream))  					continue; -				if (pipe_ctx->plane_state != plane_state) +				if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))  					continue; +  				/*program triple buffer after lock based on flip type*/  				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {  					/*only enable triplebuffer for  fast_update*/  					dc->hwss.program_triplebuffer( -						dc, pipe_ctx, plane_state->triplebuffer_flips); +						dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);  				} -				if (srf_updates[i].flip_addr) +				if (pipe_ctx->plane_state->update_flags.bits.addr_update)  					dc->hwss.update_plane_addr(dc, pipe_ctx);  			}  		}  	} -	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) +	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)  		dc->hwss.interdependent_update_lock(dc, context, false);  	else  		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); @@ -2891,7 +2917,7 @@ static void commit_planes_for_stream(struct dc *dc,  			continue;  		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || -				!pipe_ctx->stream || pipe_ctx->stream != stream || +				!pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||  				!pipe_ctx->plane_state->update_flags.bits.addr_update ||  				pipe_ctx->plane_state->skip_manual_trigger)  			continue; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 6132b645bfd1..8bd7f42a8053 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -541,6 +541,7 @@ static void link_disconnect_sink(struct dc_link *link)  	}  	link->dpcd_sink_count = 0; +	//link->dpcd_caps.dpcd_rev.raw = 0;  }  static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link) @@ -742,6 +743,7 @@ static bool detect_dp(struct dc_link *link,  								sink_caps,  								audio_support);  		link->dpcd_caps.dongle_type = sink_caps->dongle_type; +		link->dpcd_caps.dpcd_rev.raw = 0;  	}  	return true; @@ -1663,6 +1665,12 @@ struct dc_link *link_create(const struct link_init_data *init_params)  	if (false == dc_link_construct(link, init_params))  		goto construct_fail; +	/* +	 * Must use preferred_link_setting, not reported_link_cap or verified_link_cap, +	 * since struct preferred_link_setting won't be reset after S3. 
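+	 * The flag is cleared at runtime only if the sink rejects the
+	 * DP_SOURCE_MINIMUM_HBLANK_SUPPORTED (00340h) write; see
+	 * dpcd_set_source_specific_data().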
+	 */ +	link->preferred_link_setting.dpcd_source_device_specific_field_support = true; +  	return link;  construct_fail: @@ -3509,61 +3517,6 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)  	}  } -uint32_t dc_bandwidth_in_kbps_from_timing( -	const struct dc_crtc_timing *timing) -{ -	uint32_t bits_per_channel = 0; -	uint32_t kbps; - -#if defined(CONFIG_DRM_AMD_DC_DCN) -	if (timing->flags.DSC) -		return dc_dsc_stream_bandwidth_in_kbps(timing, -				timing->dsc_cfg.bits_per_pixel, -				timing->dsc_cfg.num_slices_h, -				timing->dsc_cfg.is_dp); -#endif - -	switch (timing->display_color_depth) { -	case COLOR_DEPTH_666: -		bits_per_channel = 6; -		break; -	case COLOR_DEPTH_888: -		bits_per_channel = 8; -		break; -	case COLOR_DEPTH_101010: -		bits_per_channel = 10; -		break; -	case COLOR_DEPTH_121212: -		bits_per_channel = 12; -		break; -	case COLOR_DEPTH_141414: -		bits_per_channel = 14; -		break; -	case COLOR_DEPTH_161616: -		bits_per_channel = 16; -		break; -	default: -		ASSERT(bits_per_channel != 0); -		bits_per_channel = 8; -		break; -	} - -	kbps = timing->pix_clk_100hz / 10; -	kbps *= bits_per_channel; - -	if (timing->flags.Y_ONLY != 1) { -		/*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ -		kbps *= 3; -		if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) -			kbps /= 2; -		else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) -			kbps = kbps * 2 / 3; -	} - -	return kbps; - -} -  void dc_link_set_drive_settings(struct dc *dc,  				struct link_training_settings *lt_settings,  				const struct dc_link *link) @@ -3769,3 +3722,58 @@ bool dc_link_should_enable_fec(const struct dc_link *link)  	return ret;  } + +uint32_t dc_bandwidth_in_kbps_from_timing( +		const struct dc_crtc_timing *timing) +{ +	uint32_t bits_per_channel = 0; +	uint32_t kbps; + +#if defined(CONFIG_DRM_AMD_DC_DCN) +	if (timing->flags.DSC) +		return dc_dsc_stream_bandwidth_in_kbps(timing, +				timing->dsc_cfg.bits_per_pixel, +				timing->dsc_cfg.num_slices_h, +				timing->dsc_cfg.is_dp); +#endif + +	switch (timing->display_color_depth) { +	case COLOR_DEPTH_666: +		bits_per_channel = 6; +		break; +	case COLOR_DEPTH_888: +		bits_per_channel = 8; +		break; +	case COLOR_DEPTH_101010: +		bits_per_channel = 10; +		break; +	case COLOR_DEPTH_121212: +		bits_per_channel = 12; +		break; +	case COLOR_DEPTH_141414: +		bits_per_channel = 14; +		break; +	case COLOR_DEPTH_161616: +		bits_per_channel = 16; +		break; +	default: +		ASSERT(bits_per_channel != 0); +		bits_per_channel = 8; +		break; +	} + +	kbps = timing->pix_clk_100hz / 10; +	kbps *= bits_per_channel; + +	if (timing->flags.Y_ONLY != 1) { +		/* Only Y-only encoding uses 1/3 the bandwidth of RGB */ +		kbps *= 3; +		if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) +			kbps /= 2; +		else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) +			kbps = kbps * 2 / 3; +	} + +	return kbps; + +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 9fb8c46dc606..cd025c12f17b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1241,29 +1241,15 @@ enum link_training_result dp_check_link_loss_status(  static inline void decide_8b_10b_training_settings(  	 struct dc_link *link,  	const struct dc_link_settings *link_setting, -	const struct dc_link_training_overrides *overrides,  	struct link_training_settings *lt_settings)  { -	uint32_t lane; -  	memset(lt_settings, '\0', sizeof(struct link_training_settings));  	
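	/*
	 * Training settings are now decided in two passes; a sketch of the
	 * call sequence used by the call sites later in this patch:
	 *
	 *   dp_decide_training_settings(link, link_settings, &lt_settings);
	 *   override_training_settings(link,
	 *           &link->preferred_training_settings, &lt_settings);
	 *
	 * decide_8b_10b_training_settings() fills in only the spec defaults
	 * (link rate/lane count, downspread, CR/EQ aux rd intervals, training
	 * patterns, enhanced framing, FEC ready); preferred and debug
	 * overrides are applied afterwards by override_training_settings().
	 */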
/* Initialize link settings */  	lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set;  	lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; - -	if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) -		lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate; -	else -		lt_settings->link_settings.link_rate = link_setting->link_rate; - -	if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN) -		lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count; -	else -		lt_settings->link_settings.lane_count = link_setting->lane_count; - -	/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/ - +	lt_settings->link_settings.link_rate = link_setting->link_rate; +	lt_settings->link_settings.lane_count = link_setting->lane_count;  	/* TODO hard coded to SS for now  	 * lt_settings.link_settings.link_spread =  	 * dal_display_path_is_ss_supported( @@ -1271,30 +1257,52 @@ static inline void decide_8b_10b_training_settings(  	 * LINK_SPREAD_05_DOWNSPREAD_30KHZ :  	 * LINK_SPREAD_DISABLED;  	 */ -	/* Initialize link spread */ -	if (link->dp_ss_off) -		lt_settings->link_settings.link_spread = LINK_SPREAD_DISABLED; -	else if (overrides->downspread != NULL) -		lt_settings->link_settings.link_spread -			= *overrides->downspread -			? LINK_SPREAD_05_DOWNSPREAD_30KHZ -			: LINK_SPREAD_DISABLED; -	else -		lt_settings->link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ; - +	lt_settings->link_settings.link_spread = link->dp_ss_off ? +			LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;  	lt_settings->lttpr_mode = link->lttpr_mode; +	lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); +	lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); +	lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); +	lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); +	lt_settings->enhanced_framing = 1; +	lt_settings->should_set_fec_ready = true; +} -	/* Initialize lane settings overrides */ +void dp_decide_training_settings( +		struct dc_link *link, +		const struct dc_link_settings *link_settings, +		struct link_training_settings *lt_settings) +{ +	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) +		decide_8b_10b_training_settings(link, link_settings, lt_settings); +} + +static void override_training_settings( +		struct dc_link *link, +		const struct dc_link_training_overrides *overrides, +		struct link_training_settings *lt_settings) +{ +	uint32_t lane; + +	/* Override link settings */ +	if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) +		lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate; +	if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN) +		lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count; + +	/* Override link spread */ +	if (!link->dp_ss_off && overrides->downspread != NULL) +		lt_settings->link_settings.link_spread = *overrides->downspread ? 
+				LINK_SPREAD_05_DOWNSPREAD_30KHZ +				: LINK_SPREAD_DISABLED; + +	/* Override lane settings */  	if (overrides->voltage_swing != NULL)  		lt_settings->voltage_swing = overrides->voltage_swing; -  	if (overrides->pre_emphasis != NULL)  		lt_settings->pre_emphasis = overrides->pre_emphasis; -  	if (overrides->post_cursor2 != NULL)  		lt_settings->post_cursor2 = overrides->post_cursor2; - -	/* Initialize lane settings (VS/PE/PC2) */  	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {  		lt_settings->lane_settings[lane].VOLTAGE_SWING =  			lt_settings->voltage_swing != NULL ? @@ -1313,45 +1321,22 @@ static inline void decide_8b_10b_training_settings(  	/* Initialize training timings */  	if (overrides->cr_pattern_time != NULL)  		lt_settings->cr_pattern_time = *overrides->cr_pattern_time; -	else -		lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);  	if (overrides->eq_pattern_time != NULL)  		lt_settings->eq_pattern_time = *overrides->eq_pattern_time; -	else -		lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);  	if (overrides->pattern_for_cr != NULL)  		lt_settings->pattern_for_cr = *overrides->pattern_for_cr; -	else -		lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);  	if (overrides->pattern_for_eq != NULL)  		lt_settings->pattern_for_eq = *overrides->pattern_for_eq; -	else -		lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);  	if (overrides->enhanced_framing != NULL)  		lt_settings->enhanced_framing = *overrides->enhanced_framing; -	else -		lt_settings->enhanced_framing = 1;  	if (link->preferred_training_settings.fec_enable != NULL)  		lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable; -	else -		lt_settings->should_set_fec_ready = true;  } -void dp_decide_training_settings( -		struct dc_link *link, -		const struct dc_link_settings *link_settings, -		const struct dc_link_training_overrides *overrides, -		struct link_training_settings *lt_settings) -{ -	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) -		decide_8b_10b_training_settings(link, link_settings, overrides, lt_settings); -} - -  uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)  {  	switch (lttpr_repeater_count) { @@ -1581,6 +1566,9 @@ bool dc_link_dp_perform_link_training_skip_aux(  	dp_decide_training_settings(  			link,  			link_setting, +			<_settings); +	override_training_settings( +			link,  			&link->preferred_training_settings,  			<_settings); @@ -1727,6 +1715,9 @@ enum link_training_result dc_link_dp_perform_link_training(  	dp_decide_training_settings(  			link,  			link_settings, +			<_settings); +	override_training_settings( +			link,  			&link->preferred_training_settings,  			<_settings); @@ -1939,11 +1930,13 @@ enum link_training_result dc_link_dp_sync_lt_attempt(  	bool fec_enable = false;  	dp_decide_training_settings( -		link, -		link_settings, -		lt_overrides, -		<_settings); - +			link, +			link_settings, +			<_settings); +	override_training_settings( +			link, +			lt_overrides, +			<_settings);  	/* Setup MST Mode */  	if (lt_overrides->mst_enable)  		set_dp_mst_mode(link, *lt_overrides->mst_enable); @@ -3602,29 +3595,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)  bool dp_retrieve_lttpr_cap(struct dc_link *link)  {  	uint8_t lttpr_dpcd_data[6]; -	bool vbios_lttpr_enable = false; -	bool vbios_lttpr_interop = false; -	struct dc_bios *bios = link->dc->ctx->dc_bios; +	bool vbios_lttpr_enable = 
link->dc->caps.vbios_lttpr_enable; +	bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;  	enum dc_status status = DC_ERROR_UNEXPECTED;  	bool is_lttpr_present = false;  	memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); -	/* Query BIOS to determine if LTTPR functionality is forced on by system */ -	if (bios->funcs->get_lttpr_caps) { -		enum bp_result bp_query_result; -		uint8_t is_vbios_lttpr_enable = 0; - -		bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable); -		vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; -	} - -	if (bios->funcs->get_lttpr_interop) { -		enum bp_result bp_query_result; -		uint8_t is_vbios_interop_enabled = 0; - -		bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled); -		vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; -	}  	/*  	 * Logic to determine LTTPR mode @@ -4793,10 +4769,18 @@ void dpcd_set_source_specific_data(struct dc_link *link)  			uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; -			result_write_min_hblank = core_link_write_dpcd(link, -				DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), -				sizeof(hblank_size)); +			if (link->preferred_link_setting.dpcd_source_device_specific_field_support) { +				result_write_min_hblank = core_link_write_dpcd(link, +					DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), +					sizeof(hblank_size)); + +				if (result_write_min_hblank == DC_ERROR_UNEXPECTED) +					link->preferred_link_setting.dpcd_source_device_specific_field_support = false; +			} else { +				DC_LOG_DC("Sink device does not support 00340h DPCD write. Skipping on purpose.\n"); +			}  		} +  		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,  							WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,  							"result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c index fe234760a0f5..72970e49800a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c @@ -1,3 +1,28 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +  #include <inc/core_status.h>  #include <dc_link.h>  #include <inc/link_hwss.h> diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 1a89d565c92e..de80a9ea4cfa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -305,7 +305,7 @@ struct link_encoder *link_enc_cfg_get_next_avail_link_enc(  	const struct dc_state *state)  {  	struct link_encoder *link_enc = NULL; -	enum engine_id eng_id = ENGINE_ID_UNKNOWN; +	enum engine_id eng_id;  	eng_id = find_first_avail_link_enc(dc->ctx, state);  	if (eng_id != ENGINE_ID_UNKNOWN) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 1596f6b7fed7..a60396d5be44 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1030,7 +1030,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)  	/* Timing borders are part of vactive that we are also supposed to skip in addition  	 * to any stream dst offset. Since dm logic assumes dst is in addressable -	 * space we need to add the the left and top borders to dst offsets temporarily. +	 * space we need to add the left and top borders to dst offsets temporarily.  	 * TODO: fix in DM, stream dst is supposed to be in vactive  	 */  	pipe_ctx->stream->dst.x += timing->h_border_left; @@ -1051,6 +1051,11 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)  	/* depends on scaling ratios and recout, does not calculate offset yet */  	calculate_viewport_size(pipe_ctx); +	/* Stopgap for validation of ODM + MPO on one side of screen case */ +	if (pipe_ctx->plane_res.scl_data.viewport.height < 1 || +			pipe_ctx->plane_res.scl_data.viewport.width < 1) +		return false; +  	/*  	 * LB calculations depend on vp size, h/v_active and scaling ratios  	 * Setting line buffer pixel depth to 24bpp yields banding diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c index 31761f3595a6..28ef9760fa34 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -62,3 +62,27 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification  	status = dmub_srv_stat_get_notification(dmub, notify);  	ASSERT(status == DMUB_STATUS_OK);  } + +/** + ***************************************************************************** + *  Function: dc_stat_get_dmub_dataout + * + *  @brief + *		Calls dmub layer to retrieve dmub gpint dataout + * + *  @param + *		[in] dc: dc structure + *		[in] dataout: dmub gpint dataout + * + *  @return + *     None + ***************************************************************************** + */ +void dc_stat_get_dmub_dataout(const struct dc *dc, uint32_t *dataout) +{ +	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub; +	enum dmub_status status; + +	status = dmub_srv_get_gpint_dataout(dmub, dataout); +	ASSERT(status == DMUB_STATUS_OK); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 45931ee14a6e..f0f54f4d3d9b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -221,6 +221,9 @@ struct dc_stream_status *dc_stream_get_status_from_state(  {  	uint8_t i; +	if (state == NULL) +		return NULL; +  	for (i = 0; i < state->stream_count; 
i++) {  		if (stream == state->streams[i])  			return &state->stream_status[i]; @@ -243,6 +246,40 @@ struct dc_stream_status *dc_stream_get_status(  	return dc_stream_get_status_from_state(dc->current_state, stream);  } +static void program_cursor_attributes( +	struct dc *dc, +	struct dc_stream_state *stream, +	const struct dc_cursor_attributes *attributes) +{ +	int i; +	struct resource_context *res_ctx; +	struct pipe_ctx *pipe_to_program = NULL; + +	if (!stream) +		return; + +	res_ctx = &dc->current_state->res_ctx; + +	for (i = 0; i < MAX_PIPES; i++) { +		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + +		if (pipe_ctx->stream != stream) +			continue; + +		if (!pipe_to_program) { +			pipe_to_program = pipe_ctx; +			dc->hwss.cursor_lock(dc, pipe_to_program, true); +		} + +		dc->hwss.set_cursor_attribute(pipe_ctx); +		if (dc->hwss.set_cursor_sdr_white_level) +			dc->hwss.set_cursor_sdr_white_level(pipe_ctx); +	} + +	if (pipe_to_program) +		dc->hwss.cursor_lock(dc, pipe_to_program, false); +} +  #ifndef TRIM_FSFT  /*   * dc_optimize_timing_for_fsft() - dc to optimize timing @@ -267,10 +304,7 @@ bool dc_stream_set_cursor_attributes(  	struct dc_stream_state *stream,  	const struct dc_cursor_attributes *attributes)  { -	int i;  	struct dc  *dc; -	struct resource_context *res_ctx; -	struct pipe_ctx *pipe_to_program = NULL;  #if defined(CONFIG_DRM_AMD_DC_DCN)  	bool reset_idle_optimizations = false;  #endif @@ -290,7 +324,6 @@ bool dc_stream_set_cursor_attributes(  	}  	dc = stream->ctx->dc; -	res_ctx = &dc->current_state->res_ctx;  	stream->cursor_attributes = *attributes;  #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -302,11 +335,39 @@ bool dc_stream_set_cursor_attributes(  	}  #endif +	program_cursor_attributes(dc, stream, attributes); + +#if defined(CONFIG_DRM_AMD_DC_DCN) +	/* re-enable idle optimizations if necessary */ +	if (reset_idle_optimizations) +		dc_allow_idle_optimizations(dc, true); + +#endif +	return true; +} + +static void program_cursor_position( +	struct dc *dc, +	struct dc_stream_state *stream, +	const struct dc_cursor_position *position) +{ +	int i; +	struct resource_context *res_ctx; +	struct pipe_ctx *pipe_to_program = NULL; + +	if (!stream) +		return; + +	res_ctx = &dc->current_state->res_ctx;  	for (i = 0; i < MAX_PIPES; i++) {  		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; -		if (pipe_ctx->stream != stream) +		if (pipe_ctx->stream != stream || +				(!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) || +				!pipe_ctx->plane_state || +				(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) || +				(!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp))  			continue;  		if (!pipe_to_program) { @@ -314,31 +375,18 @@ bool dc_stream_set_cursor_attributes(  			dc->hwss.cursor_lock(dc, pipe_to_program, true);  		} -		dc->hwss.set_cursor_attribute(pipe_ctx); -		if (dc->hwss.set_cursor_sdr_white_level) -			dc->hwss.set_cursor_sdr_white_level(pipe_ctx); +		dc->hwss.set_cursor_position(pipe_ctx);  	}  	if (pipe_to_program)  		dc->hwss.cursor_lock(dc, pipe_to_program, false); - -#if defined(CONFIG_DRM_AMD_DC_DCN) -	/* re-enable idle optimizations if necessary */ -	if (reset_idle_optimizations) -		dc_allow_idle_optimizations(dc, true); - -#endif -	return true;  }  bool dc_stream_set_cursor_position(  	struct dc_stream_state *stream,  	const struct dc_cursor_position *position)  { -	int i;  	struct dc  *dc; -	struct resource_context *res_ctx; -	struct pipe_ctx *pipe_to_program = NULL;  #if defined(CONFIG_DRM_AMD_DC_DCN)  	bool reset_idle_optimizations = false;  #endif @@ 
-354,7 +402,6 @@ bool dc_stream_set_cursor_position(  	}  	dc = stream->ctx->dc; -	res_ctx = &dc->current_state->res_ctx;  #if defined(CONFIG_DRM_AMD_DC_DCN)  	dc_z10_restore(dc); @@ -367,27 +414,7 @@ bool dc_stream_set_cursor_position(  #endif  	stream->cursor_position = *position; -	for (i = 0; i < MAX_PIPES; i++) { -		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; - -		if (pipe_ctx->stream != stream || -				(!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) || -				!pipe_ctx->plane_state || -				(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) || -				(!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp)) -			continue; - -		if (!pipe_to_program) { -			pipe_to_program = pipe_ctx; -			dc->hwss.cursor_lock(dc, pipe_to_program, true); -		} - -		dc->hwss.set_cursor_position(pipe_ctx); -	} - -	if (pipe_to_program) -		dc->hwss.cursor_lock(dc, pipe_to_program, false); - +	program_cursor_position(dc, stream, position);  #if defined(CONFIG_DRM_AMD_DC_DCN)  	/* re-enable idle optimizations if necessary */  	if (reset_idle_optimizations) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c index f2b39ec35c89..cde8ed2560b3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c @@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c  		 */  		memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));  		dc->vm_pa_config.valid = true; +#if defined(CONFIG_DRM_AMD_DC_DCN) +		dc_z10_save_init(dc); +#endif  	}  	return num_vmids; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 8dcea8ff5c5a..3ab52d9a82cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@  /* forward declaration */  struct aux_payload; -#define DC_VER "3.2.141" +#define DC_VER "3.2.149"  #define MAX_SURFACES 3  #define MAX_PLANES 6 @@ -183,6 +183,8 @@ struct dc_caps {  	unsigned int cursor_cache_size;  	struct dc_plane_cap planes[MAX_PLANES];  	struct dc_color_caps color; +	bool vbios_lttpr_aware; +	bool vbios_lttpr_enable;  };  struct dc_bug_wa { @@ -458,7 +460,65 @@ union mem_low_power_enable_options {  	uint32_t u32All;  }; +struct dc_debug_data { +	uint32_t ltFailCount; +	uint32_t i2cErrorCount; +	uint32_t auxErrorCount; +}; + +struct dc_phy_addr_space_config { +	struct { +		uint64_t start_addr; +		uint64_t end_addr; +		uint64_t fb_top; +		uint64_t fb_offset; +		uint64_t fb_base; +		uint64_t agp_top; +		uint64_t agp_bot; +		uint64_t agp_base; +	} system_aperture; + +	struct { +		uint64_t page_table_start_addr; +		uint64_t page_table_end_addr; +		uint64_t page_table_base_addr; +		bool base_addr_is_mc_addr; +	} gart_config; + +	bool valid; +	bool is_hvm_enabled; +	uint64_t page_table_default_page_addr; +}; + +struct dc_virtual_addr_space_config { +	uint64_t	page_table_base_addr; +	uint64_t	page_table_start_addr; +	uint64_t	page_table_end_addr; +	uint32_t	page_table_block_size_in_bytes; +	uint8_t		page_table_depth; // 1 = 1 level, 2 = 2 level, etc.  0 = invalid +}; + +struct dc_bounding_box_overrides { +	int sr_exit_time_ns; +	int sr_enter_plus_exit_time_ns; +	int urgent_latency_ns; +	int percent_of_ideal_drambw; +	int dram_clock_change_latency_ns; +	int dummy_clock_change_latency_ns; +	/* This forces a hard min on the DCFCLK we use +	 * for DML.  
Unlike the debug option for forcing +	 * DCFCLK, this override affects watermark calculations +	 */ +	int min_dcfclk_mhz; +}; + +struct dc_state; +struct resource_pool; +struct dce_hwseq; +  struct dc_debug_options { +	bool native422_support; +	bool disable_dsc;  	enum visual_confirm visual_confirm;  	bool sanity_checks;  	bool max_disp_clk; @@ -484,7 +544,6 @@ struct dc_debug_options {  	bool disable_dsc_power_gate;  	int dsc_min_slice_height_override;  	int dsc_bpp_increment_div; -	bool native422_support;  	bool disable_pplib_wm_range;  	enum wm_report_mode pplib_wm_report_mode;  	unsigned int min_disp_clk_khz; @@ -554,7 +613,6 @@ struct dc_debug_options {  	bool validate_dml_output;  	bool enable_dmcub_surface_flip;  	bool usbc_combo_phy_reset_wa; -	bool disable_dsc;  	bool enable_dram_clock_change_one_display_vactive;  	union mem_low_power_enable_options enable_mem_low_power;  	bool force_vblank_alignment; @@ -572,69 +630,13 @@ struct dc_debug_options {  #endif  }; -struct dc_debug_data { -	uint32_t ltFailCount; -	uint32_t i2cErrorCount; -	uint32_t auxErrorCount; -}; - -struct dc_phy_addr_space_config { -	struct { -		uint64_t start_addr; -		uint64_t end_addr; -		uint64_t fb_top; -		uint64_t fb_offset; -		uint64_t fb_base; -		uint64_t agp_top; -		uint64_t agp_bot; -		uint64_t agp_base; -	} system_aperture; - -	struct { -		uint64_t page_table_start_addr; -		uint64_t page_table_end_addr; -		uint64_t page_table_base_addr; -#if defined(CONFIG_DRM_AMD_DC_DCN) -		bool base_addr_is_mc_addr; -#endif -	} gart_config; - -	bool valid; -	bool is_hvm_enabled; -	uint64_t page_table_default_page_addr; -}; - -struct dc_virtual_addr_space_config { -	uint64_t	page_table_base_addr; -	uint64_t	page_table_start_addr; -	uint64_t	page_table_end_addr; -	uint32_t	page_table_block_size_in_bytes; -	uint8_t		page_table_depth; // 1 = 1 level, 2 = 2 level, etc.  0 = invalid -}; - -struct dc_bounding_box_overrides { -	int sr_exit_time_ns; -	int sr_enter_plus_exit_time_ns; -	int urgent_latency_ns; -	int percent_of_ideal_drambw; -	int dram_clock_change_latency_ns; -	int dummy_clock_change_latency_ns; -	/* This forces a hard min on the DCFCLK we use -	 * for DML.  
Unlike the debug option for forcing -	 * DCFCLK, this override affects watermark calculations -	 */ -	int min_dcfclk_mhz; -}; - -struct resource_pool; -struct dce_hwseq;  struct gpu_info_soc_bounding_box_v1_0;  struct dc { +	struct dc_debug_options debug;  	struct dc_versions versions;  	struct dc_caps caps;  	struct dc_cap_funcs cap_funcs;  	struct dc_config config; -	struct dc_debug_options debug;  	struct dc_bounding_box_overrides bb_overrides;  	struct dc_bug_wa work_arounds;  	struct dc_context *ctx; @@ -1336,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);  bool dc_set_psr_allow_active(struct dc *dc, bool enable);  #if defined(CONFIG_DRM_AMD_DC_DCN)  void dc_z10_restore(struct dc *dc); +void dc_z10_save_init(struct dc *dc);  #endif  bool dc_enable_dmub_notifications(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 1948cd9427d7..4f54bde1bb1c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -109,6 +109,7 @@ struct dc_link_settings {  	enum dc_link_spread link_spread;  	bool use_link_rate_set;  	uint8_t link_rate_set; +	bool dpcd_source_device_specific_field_support;  };  struct dc_lane_settings { diff --git a/drivers/gpu/drm/amd/display/dc/dc_stat.h b/drivers/gpu/drm/amd/display/dc/dc_stat.h index 2a000ba54ddb..aacbfd786c6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stat.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stat.h @@ -38,5 +38,6 @@  #include "dmub/dmub_srv.h"  void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification *notify); +void dc_stat_get_dmub_dataout(const struct dc *dc, uint32_t *dataout);  #endif /* _DC_STAT_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_trace.h b/drivers/gpu/drm/amd/display/dc/dc_trace.h index d2615357269b..c711797e5c9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_trace.h +++ b/drivers/gpu/drm/amd/display/dc/dc_trace.h @@ -37,3 +37,6 @@  #define TRACE_DCN_CLOCK_STATE(dcn_clocks) \  	trace_amdgpu_dm_dc_clocks_state(dcn_clocks) + +#define TRACE_DCN_FPU(begin, function, line, ref_count) \ +	trace_dcn_fpu(begin, function, line, ref_count) diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 8016e22114ce..c1532930169b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -75,18 +75,6 @@ enum dce_environment {  #define IS_DIAG_DC(dce_environment) \  	(IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG)) -struct hw_asic_id { -	uint32_t chip_id; -	uint32_t chip_family; -	uint32_t pci_revision_id; -	uint32_t hw_internal_rev; -	uint32_t vram_type; -	uint32_t vram_width; -	uint32_t feature_flags; -	uint32_t fake_paths_num; -	void *atombios_base_address; -}; -  struct dc_perf_trace {  	unsigned long read_count;  	unsigned long write_count; @@ -94,36 +82,7 @@ struct dc_perf_trace {  	unsigned long last_entry_write;  }; -struct dc_context { -	struct dc *dc; - -	void *driver_context; /* e.g. amdgpu_device */ -	struct dc_perf_trace *perf_trace; -	void *cgs_device; - -	enum dce_environment dce_environment; -	struct hw_asic_id asic_id; - -	/* todo: below should probably move to dc.  
to facilitate removal -	 * of AS we will store these here -	 */ -	enum dce_version dce_version; -	struct dc_bios *dc_bios; -	bool created_bios; -	struct gpio_service *gpio_service; -	uint32_t dc_sink_id_count; -	uint32_t dc_stream_id_count; -	uint32_t dc_edp_id_count; -	uint64_t fbc_gpu_addr; -	struct dc_dmub_srv *dmub_srv; - -#ifdef CONFIG_DRM_AMD_DC_HDCP -	struct cp_psp cp_psp; -#endif -}; - - -#define DC_MAX_EDID_BUFFER_SIZE 1280 +#define DC_MAX_EDID_BUFFER_SIZE 2048  #define DC_EDID_BLOCK_SIZE 128  #define MAX_SURFACE_NUM 4  #define NUM_PIXEL_FORMATS 10 @@ -836,6 +795,46 @@ struct dc_clock_config {  	uint32_t current_clock_khz;/*current clock in use*/  }; +struct hw_asic_id { +	uint32_t chip_id; +	uint32_t chip_family; +	uint32_t pci_revision_id; +	uint32_t hw_internal_rev; +	uint32_t vram_type; +	uint32_t vram_width; +	uint32_t feature_flags; +	uint32_t fake_paths_num; +	void *atombios_base_address; +}; + +struct dc_context { +	struct dc *dc; + +	void *driver_context; /* e.g. amdgpu_device */ +	struct dc_perf_trace *perf_trace; +	void *cgs_device; + +	enum dce_environment dce_environment; +	struct hw_asic_id asic_id; + +	/* todo: below should probably move to dc.  to facilitate removal +	 * of AS we will store these here +	 */ +	enum dce_version dce_version; +	struct dc_bios *dc_bios; +	bool created_bios; +	struct gpio_service *gpio_service; +	uint32_t dc_sink_id_count; +	uint32_t dc_stream_id_count; +	uint32_t dc_edp_id_count; +	uint64_t fbc_gpu_addr; +	struct dc_dmub_srv *dmub_srv; +#ifdef CONFIG_DRM_AMD_DC_HDCP +	struct cp_psp cp_psp; +#endif + +}; +  /* DSC DPCD capabilities */  union dsc_slice_caps1 {  	struct { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 2fb88e54a4bf..e14f99b4b0c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -42,6 +42,11 @@  #define DC_LOGGER \  	engine->ctx->logger +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ +#define IS_DC_I2CAUX_LOGGING_ENABLED() (false) +#define LOG_FLAG_Error_I2cAux LOG_ERROR +#define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX +  #include "reg_helper.h"  #undef FN @@ -71,6 +76,8 @@ enum {  #define DEFAULT_AUX_ENGINE_MULT   0  #define DEFAULT_AUX_ENGINE_LENGTH 69 +#define DC_TRACE_LEVEL_MESSAGE(...) 
/* do nothing */ +  static void release_engine(  	struct dce_aux *engine)  { @@ -621,6 +628,58 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc,  #define AUX_MAX_INVALID_REPLY_RETRIES 2  #define AUX_MAX_TIMEOUT_RETRIES 3 +static void dce_aux_log_payload(const char *payload_name, +	unsigned char *payload, uint32_t length, uint32_t max_length_to_log) +{ +	if (!IS_DC_I2CAUX_LOGGING_ENABLED()) +		return; + +	if (payload && length) { +		char hex_str[128] = {0}; +		char *hex_str_ptr = &hex_str[0]; +		uint32_t hex_str_remaining = sizeof(hex_str); +		unsigned char *payload_ptr = payload; +		unsigned char *payload_max_to_log_ptr = payload_ptr + min(max_length_to_log, length); +		unsigned int count; +		char *padding = ""; + +		while (payload_ptr < payload_max_to_log_ptr) { +			count = snprintf_count(hex_str_ptr, hex_str_remaining, "%s%02X", padding, *payload_ptr); +			padding = " "; +			hex_str_remaining -= count; +			hex_str_ptr += count; +			payload_ptr++; +		} + +		count = snprintf_count(hex_str_ptr, hex_str_remaining, "   "); +		hex_str_remaining -= count; +		hex_str_ptr += count; + +		payload_ptr = payload; +		while (payload_ptr < payload_max_to_log_ptr) { +			count = snprintf_count(hex_str_ptr, hex_str_remaining, "%c", +				*payload_ptr >= ' ' ? *payload_ptr : '.'); +			hex_str_remaining -= count; +			hex_str_ptr += count; +			payload_ptr++; +		} + +		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE, +					LOG_FLAG_I2cAux_DceAux, +					"dce_aux_log_payload: %s: length=%u: data: %s%s", +					payload_name, +					length, +					hex_str, +					(length > max_length_to_log ? " (...)" : " ")); +	} else { +		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE, +					LOG_FLAG_I2cAux_DceAux, +					"dce_aux_log_payload: %s: length=%u: data: <empty payload>", +					payload_name, +					length); +	} +} +  bool dce_aux_transfer_with_retries(struct ddc_service *ddc,  		struct aux_payload *payload)  { @@ -646,7 +705,34 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,  	}  	for (i = 0; i < AUX_MAX_RETRIES; i++) { +		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +					LOG_FLAG_I2cAux_DceAux, +					"dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d", +					ddc && ddc->link ? ddc->link->link_index : UINT_MAX, +					i + 1, +					(int)AUX_MAX_RETRIES, +					payload->address, +					payload->length, +					(unsigned int) payload->write, +					(unsigned int) payload->mot); +		if (payload->write) +			dce_aux_log_payload("  write", payload->data, payload->length, 16);  		ret = dce_aux_transfer_raw(ddc, payload, &operation_result); +		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +					LOG_FLAG_I2cAux_DceAux, +					"dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u", +					ddc && ddc->link ? ddc->link->link_index : UINT_MAX, +					i + 1, +					(int)AUX_MAX_RETRIES, +					payload->address, +					payload->length, +					(unsigned int) payload->write, +					(unsigned int) payload->mot, +					ret, +					(int)operation_result, +					(unsigned int) *payload->reply); +		if (!payload->write) +			dce_aux_log_payload("  read", payload->data, ret > 0 ? 
ret : 0, 16);  switch (operation_result) {  		case AUX_RET_SUCCESS: @@ -655,30 +741,64 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,  			switch (*payload->reply) {  			case AUX_TRANSACTION_REPLY_AUX_ACK: +				DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +							LOG_FLAG_I2cAux_DceAux, +							"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_ACK");  				if (!payload->write && payload->length != ret) { -					if (++aux_ack_retries >= AUX_MAX_RETRIES) +					if (++aux_ack_retries >= AUX_MAX_RETRIES) { +						DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +									LOG_FLAG_Error_I2cAux, +									"dce_aux_transfer_with_retries: FAILURE: aux_ack_retries=%d >= AUX_MAX_RETRIES=%d", +									aux_ack_retries, +									AUX_MAX_RETRIES);  						goto fail; -					else +					} else {  						udelay(300); +					}  				} else  					return true;  			break;  			case AUX_TRANSACTION_REPLY_AUX_DEFER: +				DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +							LOG_FLAG_I2cAux_DceAux, +							"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER"); +  				/* polling_timeout_period is in us */  				defer_time_in_ms += aux110->polling_timeout_period / 1000;  				++aux_defer_retries;  				fallthrough;  			case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: +				if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER) +					DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +								LOG_FLAG_I2cAux_DceAux, +								"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER"); +  				retry_on_defer = true;  				fallthrough;  			case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: +				if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK) +					DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +								LOG_FLAG_I2cAux_DceAux, +								"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK"); +  				if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES  						&& defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) { +					DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +								LOG_FLAG_Error_I2cAux, +								"dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d && defer_time_in_ms=%d >= AUX_MAX_DEFER_TIMEOUT_MS=%d", +								aux_defer_retries, +								AUX_MIN_DEFER_RETRIES, +								defer_time_in_ms, +								AUX_MAX_DEFER_TIMEOUT_MS);  					goto fail;  				} else {  					if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||  						(*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) { +						DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +									LOG_FLAG_I2cAux_DceAux, +									"dce_aux_transfer_with_retries: payload->defer_delay=%u", +									payload->defer_delay);  						if (payload->defer_delay > 1) {  							msleep(payload->defer_delay);  							defer_time_in_ms += payload->defer_delay; @@ -691,37 +811,86 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,  				break;  			case AUX_TRANSACTION_REPLY_I2C_DEFER: +				DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +							LOG_FLAG_I2cAux_DceAux, +							"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_DEFER"); +  				aux_defer_retries = 0; -				if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES) +				if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES) { +					DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +								LOG_FLAG_Error_I2cAux, +								"dce_aux_transfer_with_retries: FAILURE: aux_i2c_defer_retries=%d >= 
AUX_MAX_I2C_DEFER_RETRIES=%d", +								aux_i2c_defer_retries, +								AUX_MAX_I2C_DEFER_RETRIES);  					goto fail; +				}  				break;  			case AUX_TRANSACTION_REPLY_AUX_NACK: +				DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +							LOG_FLAG_I2cAux_DceAux, +							"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_NACK"); +				goto fail; +  			case AUX_TRANSACTION_REPLY_HPD_DISCON: +				DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +							LOG_FLAG_I2cAux_DceAux, +							"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_HPD_DISCON"); +				goto fail; +  			default: +				DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +							LOG_FLAG_Error_I2cAux, +							"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: FAILURE: AUX_TRANSACTION_REPLY_* unknown, default case.");  				goto fail;  			}  			break;  		case AUX_RET_ERROR_INVALID_REPLY: -			if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES) +			DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +						LOG_FLAG_I2cAux_DceAux, +						"dce_aux_transfer_with_retries: AUX_RET_ERROR_INVALID_REPLY"); +			if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES) { +				DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +							LOG_FLAG_Error_I2cAux, +							"dce_aux_transfer_with_retries: FAILURE: aux_invalid_reply_retries=%d >= AUX_MAX_INVALID_REPLY_RETRIES=%d", +							aux_invalid_reply_retries, +							AUX_MAX_INVALID_REPLY_RETRIES);  				goto fail; -			else +			} else  				udelay(400);  			break;  		case AUX_RET_ERROR_TIMEOUT: +			DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +						LOG_FLAG_I2cAux_DceAux, +						"dce_aux_transfer_with_retries: AUX_RET_ERROR_TIMEOUT");  			// Check whether a DEFER had occurred before the timeout.  			// If so, treat timeout as a DEFER.  			
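			// Two retry budgets apply below: a timeout that follows an
			// earlier DEFER is charged against aux_defer_retries (limit
			// AUX_MIN_DEFER_RETRIES, honoring payload->defer_delay), while
			// a plain timeout is charged against aux_timeout_retries
			// (limit AUX_MAX_TIMEOUT_RETRIES).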
if (retry_on_defer) { -				if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES) +				if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES) { +					DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +								LOG_FLAG_Error_I2cAux, +								"dce_aux_transfer_with_retries: FAILURE: aux_defer_retries=%d >= AUX_MIN_DEFER_RETRIES=%d", +								aux_defer_retries, +								AUX_MIN_DEFER_RETRIES);  					goto fail; -				else if (payload->defer_delay > 0) +				} else if (payload->defer_delay > 0) { +					DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +								LOG_FLAG_I2cAux_DceAux, +								"dce_aux_transfer_with_retries: payload->defer_delay=%u", +								payload->defer_delay);  					msleep(payload->defer_delay); +				}  			} else { -				if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) +				if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) { +					DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +								LOG_FLAG_Error_I2cAux, +								"dce_aux_transfer_with_retries: FAILURE: aux_timeout_retries=%d >= AUX_MAX_TIMEOUT_RETRIES=%d", +								aux_timeout_retries, +								AUX_MAX_TIMEOUT_RETRIES);  					goto fail; -				else { +				} else {  					/*  					 * DP 1.4, 2.8.2:  AUX Transaction Response/Reply Timeouts  					 * According to the DP spec there should be 3 retries total @@ -736,12 +905,25 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,  		case AUX_RET_ERROR_ENGINE_ACQUIRE:  		case AUX_RET_ERROR_UNKNOWN:  		default: +			DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, +						LOG_FLAG_I2cAux_DceAux, +						"dce_aux_transfer_with_retries: Failure: operation_result=%d", +						(int)operation_result);  			goto fail;  		}  	}  fail: +	DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +				LOG_FLAG_Error_I2cAux, +				"dce_aux_transfer_with_retries: FAILURE");  	if (!payload_reply)  		payload->reply = NULL; + +	DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +				WPP_BIT_FLAG_DC_ERROR, +				"AUX transaction failed. Result: %d", +				operation_result); +  	return false;  } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 10d42ae0cffe..aa8403bc4c83 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -29,6 +29,8 @@  #include "dmub/dmub_srv.h"  #include "core_types.h" +#define DC_TRACE_LEVEL_MESSAGE(...)	do {} while (0) /* do nothing */ +  #define MAX_PIPES 6  /* @@ -96,10 +98,19 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state,  			// Return invalid state when GPINT times out  			*state = PSR_STATE_INVALID; -		// Assert if max retry hit -		if (retry_count >= 1000) -			ASSERT(0);  	} while (++retry_count <= 1000 && *state == PSR_STATE_INVALID); + +	// Assert if max retry hit +	if (retry_count >= 1000 && *state == PSR_STATE_INVALID) { +		ASSERT(0); +		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, +				WPP_BIT_FLAG_Firmware_PsrState, +				"Unable to get PSR state from FW."); +	} else +		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE, +				WPP_BIT_FLAG_Firmware_PsrState, +				"Got PSR state from FW. 
PSR state: %d, Retry count: %d", +				*state, retry_count);  }  /* @@ -207,7 +218,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_  	cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;  	cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);  	cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; -	cmd.psr_set_level.psr_set_level_data.cmd_version = PSR_VERSION_1; +	cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;  	cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;  	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);  	dc_dmub_srv_cmd_execute(dc->dmub_srv); @@ -293,7 +304,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,  	copy_settings_data->debug.bitfields.use_hw_lock_mgr		= 1;  	copy_settings_data->fec_enable_status = (link->fec_state == dc_link_fec_enabled);  	copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us; -	copy_settings_data->cmd_version =  PSR_VERSION_1; +	copy_settings_data->cmd_version =  DMUB_CMD_PSR_CONTROL_VERSION_1;  	copy_settings_data->panel_inst = panel_inst;  	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 8d7e92d5d3e4..39485bdeb90e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -121,6 +121,10 @@ struct dcn_hubbub_registers {  	uint32_t DCN_VM_AGP_BASE;  	uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;  	uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB; +	uint32_t DCN_VM_FAULT_ADDR_MSB; +	uint32_t DCN_VM_FAULT_ADDR_LSB; +	uint32_t DCN_VM_FAULT_CNTL; +	uint32_t DCN_VM_FAULT_STATUS;  	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;  	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;  	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C; @@ -233,7 +237,19 @@ struct dcn_hubbub_registers {  		type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\  		type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;\  		type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;\ -		type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB +		type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\ +		type DCN_VM_FAULT_ADDR_MSB;\ +		type DCN_VM_FAULT_ADDR_LSB;\ +		type DCN_VM_ERROR_STATUS_CLEAR;\ +		type DCN_VM_ERROR_STATUS_MODE;\ +		type DCN_VM_ERROR_INTERRUPT_ENABLE;\ +		type DCN_VM_RANGE_FAULT_DISABLE;\ +		type DCN_VM_PRQ_FAULT_DISABLE;\ +		type DCN_VM_ERROR_STATUS;\ +		type DCN_VM_ERROR_VMID;\ +		type DCN_VM_ERROR_TABLE_LEVEL;\ +		type DCN_VM_ERROR_PIPE;\ +		type DCN_VM_ERROR_INTERRUPT_STATUS  #define HUBBUB_STUTTER_REG_FIELD_LIST(type) \  		type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\ @@ -303,6 +319,7 @@ struct dcn_hubbub_registers {  		type DET3_SIZE_CURRENT;\  		type COMPBUF_SIZE;\  		type COMPBUF_SIZE_CURRENT;\ +		type CONFIG_ERROR;\  		type COMPBUF_RESERVED_SPACE_64B;\  		type COMPBUF_RESERVED_SPACE_ZS;\  		type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 04303fe9c659..ea185c877323 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -871,6 +871,8 @@ void hubp1_read_state_common(struct hubp *hubp)  	struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;  	struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;  	struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; +	
uint32_t aperture_low_msb, aperture_low_lsb; +	uint32_t aperture_high_msb, aperture_high_lsb;  	/* Requester */  	REG_GET(HUBPRET_CONTROL, @@ -881,6 +883,22 @@ void hubp1_read_state_common(struct hubp *hubp)  			MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,  			CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode); +	REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, +			MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, &aperture_low_msb); + +	REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, +			MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, &aperture_low_lsb); + +	REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, +			MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, &aperture_high_msb); + +	REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, +			MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, &aperture_high_lsb); + +	// On DCN1, aperture is broken down into MSB and LSB; only keep bits [47:18] to match later DCN format +	rq_regs->aperture_low_addr = (aperture_low_msb << 26) | (aperture_low_lsb >> 6); +	rq_regs->aperture_high_addr = (aperture_high_msb << 26) | (aperture_high_lsb >> 6); +  	/* DLG - Per hubp */  	REG_GET_2(BLANK_OFFSET_0,  		REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end, @@ -1037,6 +1055,17 @@ void hubp1_read_state_common(struct hubp *hubp)  			QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,  			QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm); +	REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS, +			PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_lo); + +	REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, +			PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_hi); + +	REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, +			PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_lo); + +	REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, +			PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_hi);  }  void hubp1_read_state(struct hubp *hubp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index e2f2f6995935..9cb8c383d673 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -682,6 +682,10 @@ struct dcn_hubp_state {  	uint32_t min_ttu_vblank;  	uint32_t qos_level_low_wm;  	uint32_t qos_level_high_wm; +	uint32_t primary_surface_addr_lo; +	uint32_t primary_surface_addr_hi; +	uint32_t primary_meta_addr_lo; +	uint32_t primary_meta_addr_hi;  };  struct dcn10_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index c545eddabdcc..df8a7718a85f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1502,25 +1502,22 @@ void dcn10_init_hw(struct dc *dc)  void dcn10_power_down_on_boot(struct dc *dc)  {  	struct dc_link *edp_links[MAX_NUM_EDP]; -	struct dc_link *edp_link; +	struct dc_link *edp_link = NULL;  	int edp_num;  	int i = 0;  	get_edp_links(dc, edp_links, &edp_num); - -	if (edp_num) { -		for (i = 0; i < edp_num; i++) { -			edp_link = edp_links[i]; -			if (edp_link->link_enc->funcs->is_dig_enabled && -					edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && -					dc->hwseq->funcs.edp_backlight_control && -					dc->hwss.power_down && -					dc->hwss.edp_power_control) { -				dc->hwseq->funcs.edp_backlight_control(edp_link, false); -				dc->hwss.power_down(dc); -				dc->hwss.edp_power_control(edp_link, false); -			} -		} +	if (edp_num) +		edp_link = edp_links[0]; + +	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled && +			
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && +			dc->hwseq->funcs.edp_backlight_control && +			dc->hwss.power_down && +			dc->hwss.edp_power_control) { +		dc->hwseq->funcs.edp_backlight_control(edp_link, false); +		dc->hwss.power_down(dc); +		dc->hwss.edp_power_control(edp_link, false);  	} else {  		for (i = 0; i < dc->link_count; i++) {  			struct dc_link *link = dc->links[i]; @@ -3180,8 +3177,12 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)  static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)  {  	struct pipe_ctx *test_pipe; -	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2; +	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data; +	const struct rect *r1 = &scl_data->recout, *r2;  	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b; +	int cur_layer = pipe_ctx->plane_state->layer_index; +	bool upper_pipe_exists = false; +	struct fixed31_32 one = dc_fixpt_from_int(1);  	/**  	 * Disable the cursor if there's another pipe above this with a @@ -3199,8 +3200,17 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)  		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)  			return true; + +		if (test_pipe->plane_state->layer_index < cur_layer) +			upper_pipe_exists = true;  	} +	// if plane scaled, assume an upper plane can handle cursor if it exists. +	if (upper_pipe_exists && +			(scl_data->ratios.horz.value != one.value || +			scl_data->ratios.vert.value != one.value)) +		return true; +  	return false;  } @@ -3631,13 +3641,12 @@ enum dc_status dcn10_set_clock(struct dc *dc,  	struct dc_clock_config clock_cfg = {0};  	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk; -	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock) -				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, -						context, clock_type, &clock_cfg); - -	if (!dc->clk_mgr->funcs->get_clock) +	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)  		return DC_FAIL_UNSUPPORTED_1; +	dc->clk_mgr->funcs->get_clock(dc->clk_mgr, +		context, clock_type, &clock_cfg); +  	if (clk_khz > clock_cfg.max_clock_khz)  		return DC_FAIL_CLK_EXCEED_MAX; @@ -3655,7 +3664,7 @@ enum dc_status dcn10_set_clock(struct dc *dc,  	else  		return DC_ERROR_UNEXPECTED; -	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks) +	if (dc->clk_mgr->funcs->update_clocks)  				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,  				context, true);  	return DC_OK; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 3696faf12d86..37848f4577b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -1388,6 +1388,12 @@ void optc1_read_otg_state(struct optc *optc1,  	REG_GET(OPTC_INPUT_GLOBAL_CONTROL,  			OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status); + +	REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL, +			OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en); + +	REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION, +			OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line);  }  bool optc1_get_otg_active_size(struct timing_generator *optc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 29d6fbe0093a..c50c29984d51 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -578,6 +578,8 @@ struct dcn_otg_state {  	uint32_t 
underflow_occurred_status;
 	uint32_t otg_enabled;
 	uint32_t blank_enabled;
+	uint32_t vertical_interrupt2_en;
+	uint32_t vertical_interrupt2_line;
 };
 
 void optc1_read_otg_state(struct optc *optc1,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index f1a08a7736ac..cf364ae93138 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -522,16 +522,21 @@ void enc1_stream_encoder_hdmi_set_stream_attribute(
 	switch (crtc_timing->display_color_depth) {
 	case COLOR_DEPTH_888:
 		REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+		DC_LOG_DEBUG("HDMI source set to 24BPP deep color depth\n");
 		break;
 	case COLOR_DEPTH_101010:
 		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
 			REG_UPDATE_2(HDMI_CONTROL,
 					HDMI_DEEP_COLOR_DEPTH, 1,
 					HDMI_DEEP_COLOR_ENABLE, 0);
+			DC_LOG_DEBUG("HDMI source 30BPP deep color depth"
+				" disabled for YCBCR422 pixel encoding\n");
 		} else {
 			REG_UPDATE_2(HDMI_CONTROL,
 					HDMI_DEEP_COLOR_DEPTH, 1,
 					HDMI_DEEP_COLOR_ENABLE, 1);
+			DC_LOG_DEBUG("HDMI source 30BPP deep color depth"
+				" enabled for non-YCBCR422 pixel encoding\n");
 			}
 		break;
 	case COLOR_DEPTH_121212:
@@ -539,16 +544,22 @@ void enc1_stream_encoder_hdmi_set_stream_attribute(
 		if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
 			REG_UPDATE_2(HDMI_CONTROL,
 					HDMI_DEEP_COLOR_DEPTH, 2,
 					HDMI_DEEP_COLOR_ENABLE, 0);
+			DC_LOG_DEBUG("HDMI source 36BPP deep color depth"
+				" disabled for YCBCR422 pixel encoding\n");
 		} else {
 			REG_UPDATE_2(HDMI_CONTROL,
 					HDMI_DEEP_COLOR_DEPTH, 2,
 					HDMI_DEEP_COLOR_ENABLE, 1);
+			DC_LOG_DEBUG("HDMI source 36BPP deep color depth"
+				" enabled for non-YCBCR422 pixel encoding\n");
 			}
 		break;
 	case COLOR_DEPTH_161616:
 		REG_UPDATE_2(HDMI_CONTROL,
 				HDMI_DEEP_COLOR_DEPTH, 3,
 				HDMI_DEEP_COLOR_ENABLE, 1);
+		DC_LOG_DEBUG("HDMI source deep color depth enabled in"
+				" reserved mode\n");
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 91a9305d42e8..aacb1fb5c73e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -605,6 +605,26 @@ static bool hubbub2_program_watermarks(
 	return wm_pending;
 }
 
+void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_state)
+{
+	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+	if (REG(DCN_VM_FAULT_ADDR_MSB))
+		hubbub_state->vm_fault_addr_msb = REG_READ(DCN_VM_FAULT_ADDR_MSB);
+
+	if (REG(DCN_VM_FAULT_ADDR_LSB))
+		hubbub_state->vm_fault_addr_lsb = REG_READ(DCN_VM_FAULT_ADDR_LSB);
+
+	if (REG(DCN_VM_FAULT_CNTL))
+		REG_GET(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, &hubbub_state->vm_error_mode);
+
+	if (REG(DCN_VM_FAULT_STATUS)) {
+		REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, &hubbub_state->vm_error_status);
+		REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid);
+		REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe);
+	}
+}
+
 static const struct hubbub_funcs hubbub2_funcs = {
 	.update_dchub = hubbub2_update_dchub,
 	.init_dchub_sys_ctx = hubbub2_init_dchub_sys_ctx,
@@ -617,6 +637,7 @@ static const struct hubbub_funcs hubbub2_funcs = {
 	.program_watermarks = hubbub2_program_watermarks,
 	.is_allow_self_refresh_enabled = 
hubbub1_is_allow_self_refresh_enabled,  	.allow_self_refresh_control = hubbub1_allow_self_refresh_control, +	.hubbub_read_state = hubbub2_read_state,  };  void hubbub2_construct(struct dcn20_hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h index 10af257d90ef..2f6146bf1d32 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h @@ -29,16 +29,6 @@  #include "dcn10/dcn10_hubbub.h"  #include "dcn20_vmid.h" -#define HUBBUB_REG_LIST_DCN20_COMMON()\ -	HUBBUB_REG_LIST_DCN_COMMON(), \ -	SR(DCHUBBUB_CRC_CTRL), \ -	SR(DCN_VM_FB_LOCATION_BASE),\ -	SR(DCN_VM_FB_LOCATION_TOP),\ -	SR(DCN_VM_FB_OFFSET),\ -	SR(DCN_VM_AGP_BOT),\ -	SR(DCN_VM_AGP_TOP),\ -	SR(DCN_VM_AGP_BASE) -  #define TO_DCN20_HUBBUB(hubbub)\  	container_of(hubbub, struct dcn20_hubbub, base) @@ -50,7 +40,11 @@  	SR(DCN_VM_FB_OFFSET),\  	SR(DCN_VM_AGP_BOT),\  	SR(DCN_VM_AGP_TOP),\ -	SR(DCN_VM_AGP_BASE) +	SR(DCN_VM_AGP_BASE),\ +	SR(DCN_VM_FAULT_ADDR_MSB), \ +	SR(DCN_VM_FAULT_ADDR_LSB), \ +	SR(DCN_VM_FAULT_CNTL), \ +	SR(DCN_VM_FAULT_STATUS)  #define HUBBUB_REG_LIST_DCN20(id)\  	HUBBUB_REG_LIST_DCN20_COMMON(), \ @@ -71,7 +65,19 @@  	HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \  	HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \  	HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh), \ -	HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh) +	HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)  struct dcn20_hubbub {  	struct hubbub base; @@ -131,4 +137,7 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,  void hubbub2_wm_read_state(struct hubbub *hubbub,  		struct dcn_hubbub_wm *wm); +void hubbub2_read_state(struct hubbub *hubbub, +		struct dcn_hubbub_state *hubbub_state); +  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 7e54058715aa..5adf42a7cc27 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -1080,6 +1080,12 @@ void hubp2_read_state_common(struct hubp *hubp)  			MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,  			CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode); +	REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, +			MC_VM_SYSTEM_APERTURE_HIGH_ADDR, &rq_regs->aperture_high_addr); + +	REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, +			MC_VM_SYSTEM_APERTURE_LOW_ADDR, &rq_regs->aperture_low_addr); +  	/* DLG - Per hubp */  	REG_GET_2(BLANK_OFFSET_0,  		REFCYC_H_BLANK_END, 
&dlg_attr->refcyc_h_blank_end, @@ -1236,6 +1242,17 @@ void hubp2_read_state_common(struct hubp *hubp)  			QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,  			QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm); +	REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS, +			PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_lo); + +	REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, +			PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_hi); + +	REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, +			PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_lo); + +	REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, +			PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_hi);  }  void hubp2_read_state(struct hubp *hubp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 5c2853654cca..a47ba1d45be9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1723,13 +1723,15 @@ void dcn20_program_front_end_for_ctx(  				pipe = pipe->bottom_pipe;  			} -			/* Program secondary blending tree and writeback pipes */ -			pipe = &context->res_ctx.pipe_ctx[i]; -			if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0 -					&& (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw) -					&& hws->funcs.program_all_writeback_pipes_in_tree) -				hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);  		} +		/* Program secondary blending tree and writeback pipes */ +		pipe = &context->res_ctx.pipe_ctx[i]; +		if (!pipe->top_pipe && !pipe->prev_odm_pipe +				&& pipe->stream && pipe->stream->num_wb_info > 0 +				&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw) +					|| pipe->stream->update_flags.raw) +				&& hws->funcs.program_all_writeback_pipes_in_tree) +			hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 7fa9fc656b0c..f6e747f25ebe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -464,7 +464,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)  	REG_UPDATE_2(OTG_GLOBAL_CONTROL1,  			MASTER_UPDATE_LOCK_DB_X, -			h_blank_start - 200 - 1, +			(h_blank_start - 200 - 1) / optc1->opp_count,  			MASTER_UPDATE_LOCK_DB_Y,  			v_blank_start - 1);  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index b173fa3653b5..e3e01b17c164 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -35,6 +35,8 @@  #include "include/irq_service_interface.h"  #include "dcn20/dcn20_resource.h" +#include "dml/dcn2x/dcn2x.h" +  #include "dcn10/dcn10_hubp.h"  #include "dcn10/dcn10_ipp.h"  #include "dcn20_hubbub.h" @@ -1974,43 +1976,6 @@ void dcn20_split_stream_for_mpc(  	ASSERT(primary_pipe->plane_state);  } -void dcn20_populate_dml_writeback_from_context( -		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) -{ -	int pipe_cnt, i; - -	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { -		struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0]; - -		if (!res_ctx->pipe_ctx[i].stream) -			continue; - -		/* Set writeback information */ -		pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == 
true) ? 1 : 0; -		pipes[pipe_cnt].dout.num_active_wb++; -		pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height; -		pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width; -		pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width; -		pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height; -		pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1; -		pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1; -		pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c; -		pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c; -		pipes[pipe_cnt].dout.wb.wb_hratio = 1.0; -		pipes[pipe_cnt].dout.wb.wb_vratio = 1.0; -		if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) { -			if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC) -				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8; -			else -				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10; -		} else -			pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32; - -		pipe_cnt++; -	} - -} -  int dcn20_populate_dml_pipes_from_context(  		struct dc *dc,  		struct dc_state *context, @@ -2392,7 +2357,9 @@ int dcn20_populate_dml_pipes_from_context(  	}  	/* populate writeback information */ +	DC_FP_START();  	dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes); +	DC_FP_END();  	return pipe_cnt;  } @@ -2462,7 +2429,7 @@ void dcn20_set_mcif_arb_params(  				wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  				wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;  			} -			wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */ +			wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */  			wb_arb_params->slice_lines = 32;  			wb_arb_params->arbitration_slice = 2;  			wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel, @@ -2531,16 +2498,16 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,  		 * pick that pipe as secondary  		 * Same logic applies for ODM pipes  		 */ -		if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) { -			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx; +		if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { +			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;  			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {  				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];  				secondary_pipe->pipe_idx = preferred_pipe_idx;  			}  		}  		if (secondary_pipe == NULL && -				dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { -			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx; +				dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) { +			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;  			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {  				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];  				secondary_pipe->pipe_idx = preferred_pipe_idx; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index c8f3127bbcdf..6ec8ff45f0f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -58,8 +58,6 @@ struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(  		struct dc_state *state,  		const struct resource_pool *pool,  		struct dc_stream_state *stream); -void dcn20_populate_dml_writeback_from_context( -		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);  struct stream_encoder *dcn20_stream_encoder_create(  	enum engine_id eng_id, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index 42fbb5e6d505..36044cb8ec83 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -701,6 +701,7 @@ static const struct hubbub_funcs hubbub21_funcs = {  	.program_watermarks = hubbub21_program_watermarks,  	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,  	.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa, +	.hubbub_read_state = hubbub2_read_state,  };  void hubbub21_construct(struct dcn20_hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h index ef3ef28509ed..d8eb2bb7282c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h @@ -108,7 +108,19 @@  	HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \  	HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \  	HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ -	HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh) +	HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)  void dcn21_dchvm_init(struct hubbub *hubbub);  int hubbub21_init_dchub(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index b0c9180b808f..3de1bcf9b3d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -833,7 +833,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {  	.dmdata_set_attributes = hubp2_dmdata_set_attributes,  	.dmdata_load = hubp2_dmdata_load,  	.dmdata_status_done = hubp2_dmdata_status_done, -	.hubp_read_state = hubp1_read_state, +	.hubp_read_state = hubp2_read_state,  	.hubp_clear_underflow = hubp1_clear_underflow,  	.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,  	.hubp_init = hubp21_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 
bf0a198eae15..fbbdf9976183 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -35,6 +35,8 @@  #include "include/irq_service_interface.h"  #include "dcn20/dcn20_resource.h" +#include "dml/dcn2x/dcn2x.h" +  #include "clk_mgr.h"  #include "dcn10/dcn10_hubp.h"  #include "dcn10/dcn10_ipp.h" @@ -884,7 +886,8 @@ static const struct dc_debug_options debug_defaults_drv = {  		.disable_48mhz_pwrdwn = false,  		.usbc_combo_phy_reset_wa = true,  		.dmub_command_table = true, -		.use_max_lb = true +		.use_max_lb = true, +		.optimize_edp_link_rate = true  };  static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c index 3fe9e41e4dbd..6a3d3a0ec0a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c @@ -49,6 +49,11 @@  static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,  	struct dcn3_xfer_func_reg *reg)  { +	reg->shifts.field_region_start_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B; +	reg->masks.field_region_start_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_BASE_B; +	reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B; +	reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B; +  	reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;  	reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;  	reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; @@ -66,8 +71,6 @@ static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,  	reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;  	reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;  	reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B; -	reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B; -	reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;  	reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;  	reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;  	reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B; @@ -147,18 +150,19 @@ static enum dc_lut_mode dwb3_get_ogam_current(  	uint32_t state_mode;  	uint32_t ram_select; -	REG_GET(DWB_OGAM_CONTROL, -		DWB_OGAM_MODE, &state_mode); -	REG_GET(DWB_OGAM_CONTROL, -		DWB_OGAM_SELECT, &ram_select); +	REG_GET_2(DWB_OGAM_CONTROL, +		DWB_OGAM_MODE_CURRENT, &state_mode, +		DWB_OGAM_SELECT_CURRENT, &ram_select);  	if (state_mode == 0) {  		mode = LUT_BYPASS;  	} else if (state_mode == 2) {  		if (ram_select == 0)  			mode = LUT_RAM_A; -		else +		else if (ram_select == 1)  			mode = LUT_RAM_B; +		else +			mode = LUT_BYPASS;  	} else {  		// Reserved value  		mode = LUT_BYPASS; @@ -172,10 +176,10 @@ static void dwb3_configure_ogam_lut(  	struct dcn30_dwbc *dwbc30,  	bool is_ram_a)  { -	REG_UPDATE(DWB_OGAM_LUT_CONTROL, -		DWB_OGAM_LUT_READ_COLOR_SEL, 7); -	REG_UPDATE(DWB_OGAM_CONTROL, -		DWB_OGAM_SELECT, is_ram_a == true ? 0 : 1); +	REG_UPDATE_2(DWB_OGAM_LUT_CONTROL, +		DWB_OGAM_LUT_WRITE_COLOR_MASK, 7, +		DWB_OGAM_LUT_HOST_SEL, (is_ram_a == true) ? 
0 : 1); +  	REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);  } @@ -185,17 +189,45 @@ static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,  {  	uint32_t i; -    // triple base implementation -	for (i = 0; i < num/2; i++) { -		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].red_reg); -		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].green_reg); -		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].blue_reg); -		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].red_reg); -		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].green_reg); -		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].blue_reg); -		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].red_reg); -		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].green_reg); -		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].blue_reg); +	uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; +	uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; +	uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; + +	if (is_rgb_equal(rgb,  num)) { +		for (i = 0 ; i < num; i++) +			REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg); + +		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red); + +	} else { + +		REG_UPDATE(DWB_OGAM_LUT_CONTROL, +				DWB_OGAM_LUT_WRITE_COLOR_MASK, 4); + +		for (i = 0 ; i < num; i++) +			REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].red_reg); + +		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_red); + +		REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0); + +		REG_UPDATE(DWB_OGAM_LUT_CONTROL, +				DWB_OGAM_LUT_WRITE_COLOR_MASK, 2); + +		for (i = 0 ; i < num; i++) +			REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].green_reg); + +		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_green); + +		REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0); + +		REG_UPDATE(DWB_OGAM_LUT_CONTROL, +				DWB_OGAM_LUT_WRITE_COLOR_MASK, 1); + +		for (i = 0 ; i < num; i++) +			REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[i].blue_reg); + +		REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, last_base_value_blue);  	}  } @@ -211,6 +243,8 @@ static bool dwb3_program_ogam_lut(  		return false;  	} +	REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2); +  	current_mode = dwb3_get_ogam_current(dwbc30);  	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)  		next_mode = LUT_RAM_B; @@ -227,8 +261,7 @@ static bool dwb3_program_ogam_lut(  	dwb3_program_ogam_pwl(  		dwbc30, params->rgb_resulted, params->hw_points_num); -	REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2); -	REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1); +	REG_UPDATE(DWB_OGAM_CONTROL, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 
0 : 1);  	return true;  } @@ -271,14 +304,19 @@ static void dwb3_program_gamut_remap(  	struct color_matrices_reg gam_regs; -	REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format); -  	if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {  		REG_SET(DWB_GAMUT_REMAP_MODE, 0,  				DWB_GAMUT_REMAP_MODE, 0);  		return;  	} +	REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format); + +	gam_regs.shifts.csc_c11 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C11; +	gam_regs.masks.csc_c11  = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C11; +	gam_regs.shifts.csc_c12 = dwbc30->dwbc_shift->DWB_GAMUT_REMAPA_C12; +	gam_regs.masks.csc_c12 = dwbc30->dwbc_mask->DWB_GAMUT_REMAPA_C12; +  	switch (select) {  	case CM_GAMUT_REMAP_MODE_RAMA_COEFF:  		gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c index c0980da6dc49..f4414de96acc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c @@ -451,6 +451,7 @@ static const struct hubbub_funcs hubbub30_funcs = {  	.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,  	.force_pstate_change_control = hubbub3_force_pstate_change_control,  	.init_watermarks = hubbub3_init_watermarks, +	.hubbub_read_state = hubbub2_read_state,  };  void hubbub3_construct(struct dcn20_hubbub *hubbub3, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h index c0bd0fb09455..7b597908b937 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h @@ -87,7 +87,19 @@  	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \  	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \  	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ -	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh) +	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)  void hubbub3_construct(struct dcn20_hubbub *hubbub3,  	struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index c68e3a708a33..fafed1e4a998 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -398,12 +398,22 @@ void dcn30_program_all_writeback_pipes_in_tree(  			for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {  				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe]; +				if (!pipe_ctx->plane_state) +					continue; +  				if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {  					wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;  					break;  				}  			} -			ASSERT(wb_info.mpcc_inst != -1); + +			if (wb_info.mpcc_inst == -1) { +				/* Disable writeback pipe and disconnect from MPCC +				 * if source plane has been removed +				 */ +				dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst); +				continue; +			}  			ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);  			dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst]; @@ -580,22 +590,19 @@ void dcn30_init_hw(struct dc *dc)  	 */  	if (dc->config.power_down_display_on_boot) {  		struct dc_link *edp_links[MAX_NUM_EDP]; -		struct dc_link *edp_link; +		struct dc_link *edp_link = NULL;  		get_edp_links(dc, edp_links, &edp_num); -		if (edp_num) { -			for (i = 0; i < edp_num; i++) { -				edp_link = edp_links[i]; -				if (edp_link->link_enc->funcs->is_dig_enabled && -						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && -						dc->hwss.edp_backlight_control && -						dc->hwss.power_down && -						dc->hwss.edp_power_control) { -					dc->hwss.edp_backlight_control(edp_link, false); -					dc->hwss.power_down(dc); -					dc->hwss.edp_power_control(edp_link, false); -				} -			} +		if (edp_num) +			edp_link = edp_links[0]; +		if (edp_link && edp_link->link_enc->funcs->is_dig_enabled && +				edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && +				dc->hwss.edp_backlight_control && +				dc->hwss.power_down && +				dc->hwss.edp_power_control) { +			dc->hwss.edp_backlight_control(edp_link, false); +			dc->hwss.power_down(dc); +			dc->hwss.edp_power_control(edp_link, false);  		} else {  			for (i = 0; i < dc->link_count; i++) {  				struct dc_link *link = dc->links[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index f37e8254df21..089be7347591 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -109,11 +109,9 @@ void optc3_lock(struct timing_generator *optc)  	REG_SET(OTG_MASTER_UPDATE_LOCK, 0,  		OTG_MASTER_UPDATE_LOCK, 1); -	/* Should be fast, status does not update on maximus */ -	if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) -		REG_WAIT(OTG_MASTER_UPDATE_LOCK, -				UPDATE_LOCK_STATUS, 1, -				1, 10); +	REG_WAIT(OTG_MASTER_UPDATE_LOCK, +			UPDATE_LOCK_STATUS, 1, +			1, 10);  }  void optc3_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 596c97dce67e..a0de309475a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -92,7 +92,7 @@  #define DC_LOGGER_INIT(logger)  struct _vcs_dpi_ip_params_st dcn3_0_ip = { -	.use_min_dcfclk = 1, +	.use_min_dcfclk = 0,  	.clamp_min_dcfclk = 0,  	.odm_capable = 1,  	.gpuvm_enable = 0, @@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(  		}  		pri_pipe->next_odm_pipe = sec_pipe;  		sec_pipe->prev_odm_pipe = pri_pipe; -		ASSERT(sec_pipe->top_pipe == NULL);  		if (!sec_pipe->top_pipe)  			
sec_pipe->stream_res.opp = pool->opps[pipe_idx]; @@ -2399,16 +2398,37 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params  	dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;  	if (bw_params->clk_table.entries[0].memclk_mhz) { +		int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; + +		for (i = 0; i < MAX_NUM_DPM_LVL; i++) { +			if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) +				max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; +			if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) +				max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; +			if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) +				max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; +			if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) +				max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; +		} + +		if (!max_dcfclk_mhz) +			max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz; +		if (!max_dispclk_mhz) +			max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz; +		if (!max_dppclk_mhz) +			max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz; +		if (!max_phyclk_mhz) +			max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz; -		if (bw_params->clk_table.entries[1].dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { +		if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {  			// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array -			dcfclk_sta_targets[num_dcfclk_sta_targets] = bw_params->clk_table.entries[1].dcfclk_mhz; +			dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;  			num_dcfclk_sta_targets++; -		} else if (bw_params->clk_table.entries[1].dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { +		} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {  			// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates  			for (i = 0; i < num_dcfclk_sta_targets; i++) { -				if (dcfclk_sta_targets[i] > bw_params->clk_table.entries[1].dcfclk_mhz) { -					dcfclk_sta_targets[i] = bw_params->clk_table.entries[1].dcfclk_mhz; +				if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { +					dcfclk_sta_targets[i] = max_dcfclk_mhz;  					break;  				}  			} @@ -2448,7 +2468,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params  				dcfclk_mhz[num_states] = dcfclk_sta_targets[i];  				dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];  			} else { -				if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) { +				if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {  					dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];  					dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;  				} else { @@ -2463,11 +2483,12 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params  		}  		while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && -				optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) { +				optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {  			dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];  			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;  		} +		dcn3_0_soc.num_states = num_states;  		for (i = 0; i < dcn3_0_soc.num_states; i++) {  			
dcn3_0_soc.clock_limits[i].state = i;  			dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; @@ -2475,9 +2496,9 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params  			dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];  			/* Fill all states with max values of all other clocks */ -			dcn3_0_soc.clock_limits[i].dispclk_mhz = bw_params->clk_table.entries[1].dispclk_mhz; -			dcn3_0_soc.clock_limits[i].dppclk_mhz  = bw_params->clk_table.entries[1].dppclk_mhz; -			dcn3_0_soc.clock_limits[i].phyclk_mhz  = bw_params->clk_table.entries[1].phyclk_mhz; +			dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; +			dcn3_0_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz; +			dcn3_0_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;  			dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;  			/* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */  			/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */ @@ -2490,11 +2511,6 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params  		if (dc->current_state)  			dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);  	} - -	/* re-init DML with updated bb */ -	dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30); -	if (dc->current_state) -		dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);  }  static const struct resource_funcs dcn30_res_pool_funcs = { @@ -2617,6 +2633,26 @@ static bool dcn30_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	/* read VBIOS LTTPR caps */ +	{ +		if (ctx->dc_bios->funcs->get_lttpr_caps) { +			enum bp_result bp_query_result; +			uint8_t is_vbios_lttpr_enable = 0; + +			bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); +			dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; +		} + +		if (ctx->dc_bios->funcs->get_lttpr_interop) { +			enum bp_result bp_query_result; +			uint8_t is_vbios_interop_enabled = 0; + +			bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, +					&is_vbios_interop_enabled); +			dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; +		} +	} +  	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)  		dc->debug = debug_defaults_drv;  	else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c index a0b96b3c083f..1e3bd2e9cdcc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c @@ -62,6 +62,7 @@ static const struct hubbub_funcs hubbub301_funcs = {  	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,  	.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,  	.force_pstate_change_control = hubbub3_force_pstate_change_control, +	.hubbub_read_state = hubbub2_read_state,  };  void hubbub301_construct(struct dcn20_hubbub *hubbub3, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 9776d1737818..912285fdce18 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct 
dc *dc, struct clk_bw_params *b  	dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);  } -static void calculate_wm_set_for_vlevel( -		int vlevel, -		struct wm_range_table_entry *table_entry, -		struct dcn_watermarks *wm_set, -		struct display_mode_lib *dml, -		display_e2e_pipe_params_st *pipes, -		int pipe_cnt) -{ -	double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; - -	ASSERT(vlevel < dml->soc.num_states); -	/* only pipe 0 is read for voltage and dcf/soc clocks */ -	pipes[0].clks_cfg.voltage = vlevel; -	pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; -	pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; - -	dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; -	dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; -	dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; - -	wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; -	wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; -	wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; -	wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; -	wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; -	wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; -	wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; -	wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; -	dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; - -} - -static void dcn301_calculate_wm_and_dlg( -		struct dc *dc, struct dc_state *context, -		display_e2e_pipe_params_st *pipes, -		int pipe_cnt, -		int vlevel_req) -{ -	int i, pipe_idx; -	int vlevel, vlevel_max; -	struct wm_range_table_entry *table_entry; -	struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; - -	ASSERT(bw_params); - -	vlevel_max = bw_params->clk_table.num_entries - 1; - -	/* WM Set D */ -	table_entry = &bw_params->wm_table.entries[WM_D]; -	if (table_entry->wm_type == WM_TYPE_RETRAINING) -		vlevel = 0; -	else -		vlevel = vlevel_max; -	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, -						&context->bw_ctx.dml, pipes, pipe_cnt); -	/* WM Set C */ -	table_entry = &bw_params->wm_table.entries[WM_C]; -	vlevel = min(max(vlevel_req, 2), vlevel_max); -	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, -						&context->bw_ctx.dml, pipes, pipe_cnt); -	/* WM Set B */ -	table_entry = &bw_params->wm_table.entries[WM_B]; -	vlevel = min(max(vlevel_req, 1), vlevel_max); -	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, -						&context->bw_ctx.dml, pipes, pipe_cnt); - -	/* WM Set A */ -	table_entry = &bw_params->wm_table.entries[WM_A]; -	vlevel = min(vlevel_req, vlevel_max); -	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, -						&context->bw_ctx.dml, pipes, pipe_cnt); - -	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { -		if (!context->res_ctx.pipe_ctx[i].stream) -			continue; - -		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); -		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - -		if (dc->config.forced_clocks) { 
-			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; -			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; -		} -		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) -			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; -		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) -			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; - -		pipe_idx++; -	} - -	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); -} -  static struct resource_funcs dcn301_res_pool_funcs = {  	.destroy = dcn301_destroy_resource_pool,  	.link_enc_create = dcn301_link_encoder_create,  	.panel_cntl_create = dcn301_panel_cntl_create,  	.validate_bandwidth = dcn30_validate_bandwidth, -	.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, +	.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,  	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,  	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,  	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 833ab13fa834..dc7823d23ba8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {  		.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */  		.num_states = 1, -		.sr_exit_time_us = 26.5, -		.sr_enter_plus_exit_time_us = 31, +		.sr_exit_time_us = 35.5, +		.sr_enter_plus_exit_time_us = 40,  		.urgent_latency_us = 4.0,  		.urgent_latency_pixel_data_only_us = 4.0,  		.urgent_latency_pixel_mixed_with_vm_data_us = 4.0, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index bb9648488900..90c73a1cb986 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -124,8 +124,8 @@ static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compb  		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size  				+ hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs);  		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); -		REG_WAIT(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, compbuf_size_segments, 1, 100);  		hubbub2->compbuf_size_segments = compbuf_size_segments; +		ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments);  	}  } @@ -876,7 +876,33 @@ static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,  static int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,  		struct dcn_hubbub_phys_addr_config *pa_config)  { -	hubbub3_init_dchub_sys_ctx(hubbub, pa_config); +	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); +	struct dcn_vmid_page_table_config phys_config; + +	REG_SET(DCN_VM_FB_LOCATION_BASE, 0, +			FB_BASE, pa_config->system_aperture.fb_base >> 24); +	REG_SET(DCN_VM_FB_LOCATION_TOP, 0, +			FB_TOP, pa_config->system_aperture.fb_top >> 24); +	REG_SET(DCN_VM_FB_OFFSET, 0, +			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); +	REG_SET(DCN_VM_AGP_BOT, 0, +			AGP_BOT, pa_config->system_aperture.agp_bot >> 24); +	REG_SET(DCN_VM_AGP_TOP, 0, +			AGP_TOP, pa_config->system_aperture.agp_top >> 24); +	REG_SET(DCN_VM_AGP_BASE, 
0, +			AGP_BASE, pa_config->system_aperture.agp_base >> 24); + +	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { +		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; +		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; +		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; +		phys_config.depth = 0; +		phys_config.block_size = 0; +		// Init VMID 0 based on PA config +		dcn20_vmid_setup(&hubbub2->vmid[0], &phys_config); + +		dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config); +	}  	dcn21_dchvm_init(hubbub); @@ -934,7 +960,8 @@ static const struct hubbub_funcs hubbub31_funcs = {  	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,  	.program_det_size = dcn31_program_det_size,  	.program_compbuf_size = dcn31_program_compbuf_size, -	.init_crb = dcn31_init_crb +	.init_crb = dcn31_init_crb, +	.hubbub_read_state = hubbub2_read_state,  };  void hubbub31_construct(struct dcn20_hubbub *hubbub31, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h index 8ec98cbcbd47..e3a654bf04e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h @@ -98,6 +98,7 @@  	HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, mask_sh),\  	HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, mask_sh),\  	HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, mask_sh),\ +	HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, mask_sh),\  	HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_64B, mask_sh),\  	HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_ZS, mask_sh),\  	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, mask_sh), \ @@ -107,7 +108,19 @@  	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, mask_sh), \  	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, mask_sh), \  	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, mask_sh), \ -	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, mask_sh) +	HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ +	HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)  void hubbub31_construct(struct dcn20_hubbub *hubbub3, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 6ac6faf0c533..3f2333ec67e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -226,6 +226,7 @@ void dcn31_init_hw(struct dc *dc)  	if (dc->config.power_down_display_on_boot) {  		struct dc_link *edp_links[MAX_NUM_EDP];  		struct dc_link *edp_link; +		bool power_down = false;  		get_edp_links(dc, edp_links, &edp_num);  		if (edp_num) { @@ -239,9 +240,11 @@ void dcn31_init_hw(struct dc *dc)  					dc->hwss.edp_backlight_control(edp_link, false);  					dc->hwss.power_down(dc);  					dc->hwss.edp_power_control(edp_link, false); +					power_down = true;  				}  			} -		} else { +		} +		if (!power_down) {  			for (i = 0; i < dc->link_count; i++) {  				struct dc_link *link = dc->links[i]; @@ -404,6 +407,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)  			&pipe_ctx->stream_res.encoder_info_frame);  	}  } +void dcn31_z10_save_init(struct dc *dc) +{ +	union dmub_rb_cmd cmd; + +	memset(&cmd, 0, sizeof(cmd)); +	cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; +	cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; + +	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); +	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); +	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); +}  void dcn31_z10_restore(struct dc *dc)  { @@ -595,20 +610,3 @@ bool dcn31_is_abm_supported(struct dc *dc,  	}  	return false;  } - -static void apply_riommu_invalidation_wa(struct dc *dc) -{ -	struct dce_hwseq *hws = dc->hwseq; - -	if (!hws->wa.early_riommu_invalidation) -		return; - -	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0); -} - -void dcn31_init_pipes(struct dc *dc, struct dc_state *context) -{ -	dcn10_init_pipes(dc, context); -	apply_riommu_invalidation_wa(dc); - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index 40dfebe78fdd..140435e4f7ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(  void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);  void dcn31_z10_restore(struct dc *dc); +void dcn31_z10_save_init(struct dc *dc);  void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);  int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index aaf2dbd095fe..40011cd3c8ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -97,13 +97,14 @@ static const struct hw_sequencer_funcs dcn31_funcs = {  	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,  	.set_pipe = dcn21_set_pipe,  	.z10_restore = dcn31_z10_restore, +	.z10_save_init = dcn31_z10_save_init,  	.is_abm_supported = dcn31_is_abm_supported,  	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,  	.update_visual_confirm_color = dcn20_update_visual_confirm_color,  };  static const struct hwseq_private_funcs dcn31_private_funcs = { -	.init_pipes = dcn31_init_pipes, +	.init_pipes = dcn10_init_pipes,  	.update_plane_addr = dcn20_update_plane_addr,  	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,  	.update_mpcc = dcn20_update_mpcc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 38c010afade1..a7702d3c75cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -1302,7 +1302,6 @@ static struct dce_hwseq *dcn31_hwseq_create(  		hws->regs = &hwseq_reg;  		hws->shifts = &hwseq_shift;  		hws->masks = &hwseq_mask; -		hws->wa.early_riommu_invalidation = true;  	}  	return hws;  } @@ -1968,6 +1967,22 @@ static bool dcn31_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	/* read VBIOS LTTPR caps */ +	{ +		if (ctx->dc_bios->funcs->get_lttpr_caps) { +			enum bp_result bp_query_result; +			uint8_t is_vbios_lttpr_enable = 0; + +			bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); +			dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; +		} + +		/* interop bit is implicit */ +		{ +			dc->caps.vbios_lttpr_aware = true; +		} +	} +  	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)  		dc->debug = debug_defaults_drv;  	else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 45862167e6ce..56055df2e8d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -58,6 +58,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)  ifdef CONFIG_DRM_AMD_DC_DCN  CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) @@ -70,6 +72,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(fram  CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)  CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_rcflags)  CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_rcflags) @@ -91,6 +94,7 @@ DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \  ifdef CONFIG_DRM_AMD_DC_DCN  DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o +DML += dcn2x/dcn2x.o  DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o  DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o  DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 799bae229e67..2091dd8c252d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -488,7 +488,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,  	log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element  	// each 64b meta request for dcn is 8x8 meta elements and -	// a meta element covers one 256b block of the the data surface. 
+	// a meta element covers one 256b block of the data surface.  	log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256  	log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element  			- log2_meta_req_height; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 6a6d5970d1d5..1a0c14e465fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -488,7 +488,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,  	log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element  	// each 64b meta request for dcn is 8x8 meta elements and -	// a meta element covers one 256b block of the the data surface. +	// a meta element covers one 256b block of the data surface.  	log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256  	log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element  			- log2_meta_req_height; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 6655bb99fdfd..4136eb8256cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -2270,7 +2270,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  					&locals->UrgentBurstFactorLumaPre[k],  					&locals->UrgentBurstFactorChroma[k],  					&locals->UrgentBurstFactorChromaPre[k], -					&locals->NotEnoughUrgentLatencyHiding, +					&locals->NotEnoughUrgentLatencyHiding[0][0],  					&locals->NotEnoughUrgentLatencyHidingPre);  			if (mode_lib->vba.UseUrgentBurstBandwidth == false) { @@ -2303,7 +2303,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  		}  		mode_lib->vba.FractionOfUrgentBandwidth = MaxTotalRDBandwidthNoUrgentBurst / mode_lib->vba.ReturnBW; -		if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && locals->NotEnoughUrgentLatencyHiding == 0 && locals->NotEnoughUrgentLatencyHidingPre == 0 && !VRatioPrefetchMoreThan4 +		if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && locals->NotEnoughUrgentLatencyHiding[0][0] == 0 && +				locals->NotEnoughUrgentLatencyHidingPre == 0 && !VRatioPrefetchMoreThan4  				&& !DestinationLineTimesForPrefetchLessThan2)  			mode_lib->vba.PrefetchModeSupported = true;  		else { @@ -4824,7 +4825,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  							&locals->UrgentBurstFactorLumaPre[k],  							&locals->UrgentBurstFactorChroma[k],  							&locals->UrgentBurstFactorChromaPre[k], -							&locals->NotEnoughUrgentLatencyHiding, +							&locals->NotEnoughUrgentLatencyHiding[0][0],  							&locals->NotEnoughUrgentLatencyHidingPre);  					if (mode_lib->vba.UseUrgentBurstBandwidth == false) { @@ -4851,13 +4852,13 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				}  				locals->BandwidthWithoutPrefetchSupported[i][0] = true;  				if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i][0] -						|| locals->NotEnoughUrgentLatencyHiding == 1) { +						|| locals->NotEnoughUrgentLatencyHiding[0][0] == 1) {  					locals->BandwidthWithoutPrefetchSupported[i][0] = false;  				}  				locals->PrefetchSupported[i][j] = true;  
				if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i][0]
-						|| locals->NotEnoughUrgentLatencyHiding == 1
+						|| locals->NotEnoughUrgentLatencyHiding[0][0] == 1
						|| locals->NotEnoughUrgentLatencyHidingPre == 1) {
					locals->PrefetchSupported[i][j] = false;
				}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index dc1c81a6e377..287e31052b30 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -482,7 +482,7 @@ static void get_meta_and_pte_attr(
 	log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
 	// each 64b meta request for dcn is 8x8 meta elements and
-	// a meta element covers one 256b block of the the data surface.
+	// a meta element covers one 256b block of the data surface.
 	log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
 	log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
 			- log2_meta_req_height;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c b/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c
new file mode 100644
index 000000000000..c58522436291
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "resource.h"
+
+#include "dcn2x.h"
+
+/**
+ * DOC: DCN2x FPU manipulation Overview
+ *
+ * The DCN architecture relies on FPU operations, which require special
+ * compilation flags and the use of kernel_fpu_begin/end functions; ideally, we
+ * want to avoid spreading FPU access across multiple files. With this idea in
+ * mind, this file aims to centralize all DCN20 and DCN2.1 (DCN2x) functions
+ * that require FPU access in a single place. Code in this file follows this
+ * pattern:
+ *
+ * 1. Functions that use FPU operations should be isolated in static functions.
+ * 2. The FPU functions should have the noinline attribute to ensure anything
+ *    that deals with FP registers is contained within this call.
+ * 3. All functions that need to be accessed outside this file require a
+ *    public interface that does not use any FPU reference.
+ * 4. 
Developers **must not** use DC_FP_START/END in this file, but they need
+ *    to ensure that the caller invokes it before accessing any function available
+ *    in this file. For this reason, public functions in this file must invoke
+ *    dc_assert_fp_enabled();
+ *
+ * Let's expand a little more on the idea behind this code pattern. To fully
+ * isolate FPU operations in a single place, we must avoid situations where
+ * compilers spill FP values to registers due to FP enable in a specific C
+ * file. Note that even if we isolate all FPU functions in a single file and
+ * call its interface from other files, the compiler might enable the use of
+ * FPU before we call DC_FP_START. Nevertheless, it is the programmer's
+ * responsibility to invoke DC_FP_START/END in the correct place. To highlight
+ * situations where developers forget to use the FP protection before calling
+ * the DC FPU interface functions, we introduce a helper that checks if the
+ * function is invoked under FP protection. If not, it will trigger a kernel
+ * warning.
+ */
+
+void dcn20_populate_dml_writeback_from_context(struct dc *dc,
+					       struct resource_context *res_ctx,
+					       display_e2e_pipe_params_st *pipes)
+{
+	int pipe_cnt, i;
+
+	dc_assert_fp_enabled();
+
+	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+		struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
+
+		if (!res_ctx->pipe_ctx[i].stream)
+			continue;
+
+		/* Set writeback information */
+		pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
+		pipes[pipe_cnt].dout.num_active_wb++;
+		pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
+		pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
+		pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
+		pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
+		pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
+		pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
+		pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
+		pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
+		pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
+		pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
+		if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
+			if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
+				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
+			else
+				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
+		} else {
+			pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
+		}
+
+		pipe_cnt++;
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h b/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h
new file mode 100644
index 000000000000..331547ba0713
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc. 
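In practice, the calling convention the DOC block above prescribes looks like the sketch below. The caller is hypothetical; DC_FP_START()/DC_FP_END() are the wrappers this series points at dc_fpu_begin()/dc_fpu_end() in os_types.h, and the populate function is the public interface added above.

/* Hedged sketch: a hypothetical caller honoring the dcn2x FPU pattern. */
static void example_fpu_caller(struct dc *dc,
			       struct resource_context *res_ctx,
			       display_e2e_pipe_params_st *pipes)
{
	DC_FP_START();	/* expands to dc_fpu_begin(__func__, __LINE__) */
	dcn20_populate_dml_writeback_from_context(dc, res_ctx, pipes);
	DC_FP_END();	/* expands to dc_fpu_end(__func__, __LINE__) */
}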
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN2X_H__ +#define __DCN2X_H__ + +void dcn20_populate_dml_writeback_from_context(struct dc *dc, +					       struct resource_context *res_ctx, +					       display_e2e_pipe_params_st *pipes); + +#endif /* __DCN2X_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 9d2016d8fafe..e3d9f1decdfc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -2596,7 +2596,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  			}  		} -		v->NotEnoughUrgentLatencyHiding = false; +		v->NotEnoughUrgentLatencyHiding[0][0] = false;  		v->NotEnoughUrgentLatencyHidingPre = false;  		for (k = 0; k < v->NumberOfActivePlanes; ++k) { @@ -2681,7 +2681,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  			if (v->VRatioPrefetchY[k] > 4 || v->VRatioPrefetchC[k] > 4)  				VRatioPrefetchMoreThan4 = true;  			if (v->NoUrgentLatencyHiding[k] == true) -				v->NotEnoughUrgentLatencyHiding = true; +				v->NotEnoughUrgentLatencyHiding[0][0] = true;  			if (v->NoUrgentLatencyHidingPre[k] == true)  				v->NotEnoughUrgentLatencyHidingPre = true; @@ -2689,7 +2689,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  		v->FractionOfUrgentBandwidth = MaxTotalRDBandwidthNoUrgentBurst / v->ReturnBW; -		if (MaxTotalRDBandwidth <= v->ReturnBW && v->NotEnoughUrgentLatencyHiding == 0 && v->NotEnoughUrgentLatencyHidingPre == 0 && !VRatioPrefetchMoreThan4 +		if (MaxTotalRDBandwidth <= v->ReturnBW && v->NotEnoughUrgentLatencyHiding[0][0] == 0 +				&& v->NotEnoughUrgentLatencyHidingPre == 0 && !VRatioPrefetchMoreThan4  				&& !DestinationLineTimesForPrefetchLessThan2)  			v->PrefetchModeSupported = true;  		else { @@ -2794,8 +2795,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  		}  		v->VStartupLines = v->VStartupLines + 1; -		v->PrefetchAndImmediateFlipSupported = (v->PrefetchModeSupported == true && ((!v->ImmediateFlipSupport && !v->HostVMEnable && v->ImmediateFlipRequirement != dm_immediate_flip_required) || v->ImmediateFlipSupported)) ? 
true : false; - +		v->PrefetchModeSupported = (v->PrefetchModeSupported == true && ((!v->ImmediateFlipSupport && +				!v->HostVMEnable && v->ImmediateFlipRequirement[0] != dm_immediate_flip_required) || +				v->ImmediateFlipSupported)) ? true : false;  	} while (!v->PrefetchModeSupported && v->VStartupLines <= v->MaximumMaxVStartupLines);  	ASSERT(v->PrefetchModeSupported); @@ -3642,8 +3644,7 @@ static double TruncToValidBPP(  void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)  {  	struct vba_vars_st *v = &mode_lib->vba; -	int MinPrefetchMode = 0; -	int MaxPrefetchMode = 2; +	int MinPrefetchMode, MaxPrefetchMode;  	int i;  	unsigned int j, k, m;  	bool   EnoughWritebackUnits = true; @@ -3655,6 +3656,10 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  	/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ +	CalculateMinAndMaxPrefetchMode( +		mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, +		&MinPrefetchMode, &MaxPrefetchMode); +  	/*Scale Ratio, taps Support Check*/  	v->ScaleRatioAndTapsSupport = true; @@ -4753,7 +4758,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				v->HostVMMinPageSize,  				v->HostVMMaxNonCachedPageTableLevels,  				v->DynamicMetadataVMEnabled, -				v->ImmediateFlipRequirement, +				v->ImmediateFlipRequirement[0],  				v->ProgressiveToInterlaceUnitInOPP,  				v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,  				v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, @@ -5164,7 +5169,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					v->NextMaxVStartup = v->NextMaxVStartup - 1;  				}  			} while (!((v->PrefetchSupported[i][j] == true && v->DynamicMetadataSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true -					&& ((v->HostVMEnable == false && v->ImmediateFlipRequirement != dm_immediate_flip_required) +					&& ((v->HostVMEnable == false && v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)  							|| v->ImmediateFlipSupportedForState[i][j] == true))  					|| (v->NextMaxVStartup == v->MaxMaxVStartup[i][j] && NextPrefetchModeState > MaxPrefetchMode))); @@ -5305,7 +5310,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					&& ViewportExceedsSurface == 0 && v->PrefetchSupported[i][j] == 1 && v->DynamicMetadataSupported[i][j] == 1  					&& v->TotalVerticalActiveBandwidthSupport[i][j] == 1 && v->VRatioInPrefetchSupported[i][j] == 1  					&& v->PTEBufferSizeNotExceeded[i][j] == 1 && v->NonsupportedDSCInputBPC == 0 -					&& ((v->HostVMEnable == 0 && v->ImmediateFlipRequirement != dm_immediate_flip_required) +					&& ((v->HostVMEnable == 0 && v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)  							|| v->ImmediateFlipSupportedForState[i][j] == true)) {  				v->ModeSupport[i][j] = true;  			} else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index 04601a767a8f..0d934fae1c3a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -549,7 +549,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,  	log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element  				 // each 64b meta request for dcn is 8x8 meta elements and -				 // a meta element covers one 
256b block of the the data surface. +				 // a meta element covers one 256b block of the data surface.  	log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256  	log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element  		- log2_meta_req_height; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index a9667068c690..ce55c9caf9a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3036,10 +3036,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman  		}  		v->PrefetchAndImmediateFlipSupported = -				(v->PrefetchModeSupported == true -						&& ((!v->ImmediateFlipSupport && !v->HostVMEnable -								&& v->ImmediateFlipRequirement != dm_immediate_flip_required) || v->ImmediateFlipSupported)) ? -						true : false; +				(v->PrefetchModeSupported == true && ((!v->ImmediateFlipSupport && !v->HostVMEnable +				&& v->ImmediateFlipRequirement[0] != dm_immediate_flip_required) || +				v->ImmediateFlipSupported)) ? true : false;  #ifdef __DML_VBA_DEBUG__  		dml_print("DML::%s: PrefetchModeSupported %d\n", __func__, v->PrefetchModeSupported);  		dml_print("DML::%s: ImmediateFlipRequirement %d\n", __func__, v->ImmediateFlipRequirement == dm_immediate_flip_required); @@ -5103,7 +5102,7 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				v->HostVMMinPageSize,  				v->HostVMMaxNonCachedPageTableLevels,  				v->DynamicMetadataVMEnabled, -				v->ImmediateFlipRequirement, +				v->ImmediateFlipRequirement[0],  				v->ProgressiveToInterlaceUnitInOPP,  				v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation,  				v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, @@ -5542,7 +5541,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  				}  				v->NextPrefetchMode = v->NextPrefetchMode + 1;  			} while (!((v->PrefetchSupported[i][j] == true && v->DynamicMetadataSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true -					&& ((v->HostVMEnable == false && v->ImmediateFlipRequirement != dm_immediate_flip_required) +					&& ((v->HostVMEnable == false && +							v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)  							|| v->ImmediateFlipSupportedForState[i][j] == true))  					|| (v->NextMaxVStartup == v->MaxMaxVStartup[i][j] && NextPrefetchModeState > MaxPrefetchMode))); @@ -5702,7 +5702,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  					&& v->PrefetchSupported[i][j] == true && v->DynamicMetadataSupported[i][j] == true  					&& v->TotalVerticalActiveBandwidthSupport[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true  					&& v->PTEBufferSizeNotExceeded[i][j] == true && v->NonsupportedDSCInputBPC == false -					&& ((v->HostVMEnable == false && v->ImmediateFlipRequirement != dm_immediate_flip_required) +					&& ((v->HostVMEnable == false +					&& v->ImmediateFlipRequirement[0] != dm_immediate_flip_required)  							|| v->ImmediateFlipSupportedForState[i][j] == true)  					&& FMTBufferExceeded == false) {  				v->ModeSupport[i][j] = true; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index 3def093ef88e..c23905bc733a 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -563,7 +563,7 @@ static void get_meta_and_pte_attr(  	log2_meta_req_bytes = 6;	// meta request is 64b and is 8x8byte meta element  	// each 64b meta request for dcn is 8x8 meta elements and -	// a meta element covers one 256b block of the the data surface. +	// a meta element covers one 256b block of the data surface.  	log2_meta_req_height = log2_blk256_height + 3;	// meta req is 8x8 byte, each byte represent 1 blk256  	log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height;  	meta_req_width = 1 << log2_meta_req_width; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 64f9c735f74d..1051ca1a23b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -109,7 +109,9 @@ enum clock_change_support {  };  enum output_standard { -	dm_std_uninitialized = 0, dm_std_cvtr2, dm_std_cvt +	dm_std_uninitialized = 0, +	dm_std_cvtr2, +	dm_std_cvt  };  enum mpc_combine_affinity { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 64daa0507393..d46a2733024c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -536,6 +536,8 @@ struct _vcs_dpi_display_rq_regs_st {  	unsigned int mrq_expansion_mode;  	unsigned int crq_expansion_mode;  	unsigned int plane1_base_address; +	unsigned int aperture_low_addr;   // bits [47:18] +	unsigned int aperture_high_addr;  // bits [47:18]  };  struct _vcs_dpi_display_dlg_sys_params_st { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index d3b1b6d4ce2f..0fad15020c74 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -244,6 +244,8 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)  	mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||  			mode_lib->vba.DummyPStateCheck;  	mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive; +	mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank = +		soc->allow_dram_self_refresh_or_dram_clock_change_in_vblank;  	mode_lib->vba.Downspreading = soc->downspread_percent;  	mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes;   // new! 
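For readers tracking the dml30 change above: MinPrefetchMode/MaxPrefetchMode are now derived from the SoC policy fetched here rather than hardcoded to 0..2. A plausible sketch of that mapping, assuming the self_refresh_affinity enum from display_mode_enums.h; the exact body of CalculateMinAndMaxPrefetchMode in display_mode_vba.c may differ.

/* Sketch only, not copied from the tree: map the vblank policy to the
 * prefetch-mode search range used by dml30_ModeSupportAndSystemConfigurationFull. */
static void sketch_min_max_prefetch_mode(enum self_refresh_affinity policy,
					 int *min_mode, int *max_mode)
{
	if (policy == dm_try_to_allow_self_refresh_and_mclk_switch) {
		*min_mode = 0;	/* search the full 0..2 range, as the old default did */
		*max_mode = 2;
	} else {
		*min_mode = 2;	/* conservative: neither self-refresh nor mclk switch */
		*max_mode = 2;
	}
}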
@@ -396,7 +398,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)  	mode_lib->vba.NumberOfActivePlanes = 0;  	mode_lib->vba.ImmediateFlipSupport = false; -	mode_lib->vba.ImmediateFlipRequirement = dm_immediate_flip_not_required;  	for (j = 0; j < mode_lib->vba.cache_num_pipes; ++j) {  		display_pipe_source_params_st *src = &pipes[j].pipe.src;  		display_pipe_dest_params_st *dst = &pipes[j].pipe.dest; @@ -409,6 +410,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)  			continue;  		visited[j] = true; +		mode_lib->vba.ImmediateFlipRequirement[j] = dm_immediate_flip_not_required;  		mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;  		mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;  		mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] = @@ -667,9 +669,9 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)  				mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_max / vdiv_c;  		} -		if (pipes[k].pipe.src.immediate_flip) { +		if (pipes[j].pipe.src.immediate_flip) {  			mode_lib->vba.ImmediateFlipSupport = true; -			mode_lib->vba.ImmediateFlipRequirement = dm_immediate_flip_required; +			mode_lib->vba.ImmediateFlipRequirement[j] = dm_immediate_flip_required;  		}  		mode_lib->vba.NumberOfActivePlanes++; @@ -733,8 +735,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)  						mode_lib->vba.OverrideHostVMPageTableLevels;  	} -	mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank = dm_try_to_allow_self_refresh_and_mclk_switch; -  	if (mode_lib->vba.OverrideGPUVMPageTableLevels)  		mode_lib->vba.GPUVMMaxPageTableLevels = mode_lib->vba.OverrideGPUVMPageTableLevels; @@ -845,9 +845,10 @@ void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *  	//Progressive To Interlace Unit Effect  	for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { +		mode_lib->vba.PixelClockBackEnd[k] = mode_lib->vba.PixelClock[k];  		if (mode_lib->vba.Interlace[k] == 1  				&& mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true) { -			mode_lib->vba.PixelClock[k] = 2 * mode_lib->vba.PixelClockBackEnd[k]; +			mode_lib->vba.PixelClock[k] = 2 * mode_lib->vba.PixelClock[k];  		}  	}  } @@ -890,8 +891,9 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)  		mode_lib->vba.DISPCLK = soc->clock_limits[mode_lib->vba.VoltageLevel].dispclk_mhz;  	// Total Available Pipes Support Check -	for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) +	for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {  		total_pipes += mode_lib->vba.DPPPerPlane[k]; +	}  	ASSERT(total_pipes <= DC__NUM_DPP__MAX);  } diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index d18a021d4d32..90e87961fe3e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -676,7 +676,7 @@ struct vba_vars_st {  	double         AlignedDCCMetaPitchY[DC__NUM_DPP__MAX];  	double         AlignedDCCMetaPitchC[DC__NUM_DPP__MAX]; -	unsigned int NotEnoughUrgentLatencyHiding; +	unsigned int NotEnoughUrgentLatencyHiding[DC__VOLTAGE_STATES][2];  	unsigned int NotEnoughUrgentLatencyHidingPre;  	int PTEBufferSizeInRequestsForLuma;  	int PTEBufferSizeInRequestsForChroma; @@ -877,7 +877,7 @@ struct vba_vars_st {  	int PercentMarginOverMinimumRequiredDCFCLK;  	bool DynamicMetadataSupported[DC__VOLTAGE_STATES][2]; -	enum 
immediate_flip_requirement ImmediateFlipRequirement;
+	enum immediate_flip_requirement ImmediateFlipRequirement[DC__NUM_DPP__MAX];
 	unsigned int DETBufferSizeYThisState[DC__NUM_DPP__MAX];
 	unsigned int DETBufferSizeCThisState[DC__NUM_DPP__MAX];
 	bool NoUrgentLatencyHiding[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 414da64f5734..8f2b1684c231 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -456,7 +456,7 @@ static void dml1_rq_dlg_get_row_heights(
 	log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
 	/* each 64b meta request for dcn is 8x8 meta elements and
-	 * a meta element covers one 256b block of the the data surface.
+	 * a meta element covers one 256b block of the data surface.
 	 */
 	log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 */
 	log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
@@ -718,7 +718,7 @@ static void get_surf_rq_param(
 	log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
 	/* each 64b meta request for dcn is 8x8 meta elements and
-	 * a meta element covers one 256b block of the the data surface.
+	 * a meta element covers one 256b block of the data surface.
 	 */
 	log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 byte, each byte represent 1 blk256 */
 	log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index f403d8e84a8c..f5b7da0e64c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -28,6 +28,7 @@
 #include <drm/drm_dp_helper.h>
 #include "dc.h"
 #include "rc_calc.h"
+#include "fixed31_32.h"
 
 /* This module's internal functions */
@@ -39,6 +40,47 @@ static bool dsc_policy_enable_dsc_when_not_needed;
 
 static bool dsc_policy_disable_dsc_stream_overhead;
 
+/* Forward Declarations */
+static void get_dsc_bandwidth_range(
+		const uint32_t min_bpp_x16,
+		const uint32_t max_bpp_x16,
+		const uint32_t num_slices_h,
+		const struct dsc_enc_caps *dsc_caps,
+		const struct dc_crtc_timing *timing,
+		struct dc_dsc_bw_range *range);
+
+static uint32_t compute_bpp_x16_from_target_bandwidth(
+		const uint32_t bandwidth_in_kbps,
+		const struct dc_crtc_timing *timing,
+		const uint32_t num_slices_h,
+		const uint32_t bpp_increment_div,
+		const bool is_dp);
+
+static void get_dsc_enc_caps(
+		const struct display_stream_compressor *dsc,
+		struct dsc_enc_caps *dsc_enc_caps,
+		int pixel_clock_100Hz);
+
+static bool intersect_dsc_caps(
+		const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+		const struct dsc_enc_caps *dsc_enc_caps,
+		enum dc_pixel_encoding pixel_encoding,
+		struct dsc_enc_caps *dsc_common_caps);
+
+static bool setup_dsc_config(
+		const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+		const struct dsc_enc_caps *dsc_enc_caps,
+		int target_bandwidth_kbps,
+		const struct dc_crtc_timing *timing,
+		int min_slice_height_override,
+		int max_dsc_target_bpp_limit_override_x16,
+		struct dc_dsc_config *dsc_cfg);
+
+static struct fixed31_32 compute_dsc_max_bandwidth_overhead(
+		const struct dc_crtc_timing *timing,
+		const int num_slices_h,
+		const bool is_dp);
+
 static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
 {
@@ -171,10 
+213,164 @@ static bool dsc_bpp_increment_div_from_dpcd(uint8_t bpp_increment_dpcd, uint32_t
 	return true;
 }
+
+
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
+		const uint8_t *dpcd_dsc_basic_data,
+		const uint8_t *dpcd_dsc_branch_decoder_caps,
+		struct dsc_dec_dpcd_caps *dsc_sink_caps)
+{
+	if (!dpcd_dsc_basic_data)
+		return false;
+
+	dsc_sink_caps->is_dsc_supported =
+		(dpcd_dsc_basic_data[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & DP_DSC_DECOMPRESSION_IS_SUPPORTED) != 0;
+	if (!dsc_sink_caps->is_dsc_supported)
+		return false;
+
+	dsc_sink_caps->dsc_version = dpcd_dsc_basic_data[DP_DSC_REV - DP_DSC_SUPPORT];
+
+	{
+		int buff_block_size;
+		int buff_size;
+
+		if (!dsc_buff_block_size_from_dpcd(dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT],
+										   &buff_block_size))
+			return false;
+
+		buff_size = dpcd_dsc_basic_data[DP_DSC_RC_BUF_SIZE - DP_DSC_SUPPORT] + 1;
+		dsc_sink_caps->rc_buffer_size = buff_size * buff_block_size;
+	}
+
+	dsc_sink_caps->slice_caps1.raw = dpcd_dsc_basic_data[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
+	if (!dsc_line_buff_depth_from_dpcd(dpcd_dsc_basic_data[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT],
+									   &dsc_sink_caps->lb_bit_depth))
+		return false;
+
+	dsc_sink_caps->is_block_pred_supported =
+		(dpcd_dsc_basic_data[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+		 DP_DSC_BLK_PREDICTION_IS_SUPPORTED) != 0;
+
+	dsc_sink_caps->edp_max_bits_per_pixel =
+		dpcd_dsc_basic_data[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+		dpcd_dsc_basic_data[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] << 8;
+
+	dsc_sink_caps->color_formats.raw = dpcd_dsc_basic_data[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT];
+	dsc_sink_caps->color_depth.raw = dpcd_dsc_basic_data[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
+
+	{
+		int dpcd_throughput = dpcd_dsc_basic_data[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT];
+
+		if (!dsc_throughput_from_dpcd(dpcd_throughput & DP_DSC_THROUGHPUT_MODE_0_MASK,
+									  &dsc_sink_caps->throughput_mode_0_mps))
+			return false;
+
+		dpcd_throughput = (dpcd_throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> DP_DSC_THROUGHPUT_MODE_1_SHIFT;
+		if (!dsc_throughput_from_dpcd(dpcd_throughput, &dsc_sink_caps->throughput_mode_1_mps))
+			return false;
+	}
+
+	dsc_sink_caps->max_slice_width = dpcd_dsc_basic_data[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * 320;
+	dsc_sink_caps->slice_caps2.raw = dpcd_dsc_basic_data[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
+
+	if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT],
+										 &dsc_sink_caps->bpp_increment_div))
+		return false;
+
+	if (dc->debug.dsc_bpp_increment_div) {
+		/* dsc_bpp_increment_div should only be 1, 2, 4, 8 or 16, but rather than rejecting invalid values,
+		 * we'll accept all and get it into range. This also makes the above check against 0 redundant,
+		 * but that one stresses that the override will only be used if it's not 0. 
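The override cascade this comment describes (its body continues just below) amounts to rounding the debug value down to the nearest supported divisor; a standalone restatement for clarity, not the committed code:

/* Equivalent effect of the dsc_bpp_increment_div override cascade:
 * round a nonzero override down to the nearest value in {1, 2, 4, 8, 16}. */
static uint32_t round_down_bpp_increment_div(uint32_t override)
{
	uint32_t div = 1;

	while (div * 2 <= override && div < 16)
		div *= 2;
	return div;	/* override == 0 never reaches here; the caller guards it */
}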
+		 */
+		if (dc->debug.dsc_bpp_increment_div >= 1)
+			dsc_sink_caps->bpp_increment_div = 1;
+		if (dc->debug.dsc_bpp_increment_div >= 2)
+			dsc_sink_caps->bpp_increment_div = 2;
+		if (dc->debug.dsc_bpp_increment_div >= 4)
+			dsc_sink_caps->bpp_increment_div = 4;
+		if (dc->debug.dsc_bpp_increment_div >= 8)
+			dsc_sink_caps->bpp_increment_div = 8;
+		if (dc->debug.dsc_bpp_increment_div >= 16)
+			dsc_sink_caps->bpp_increment_div = 16;
+	}
+
+	/* Extended caps */
+	if (dpcd_dsc_branch_decoder_caps == NULL) { // branch decoder DPCD DSC data can be null for a non-branch device
+		dsc_sink_caps->branch_overall_throughput_0_mps = 0;
+		dsc_sink_caps->branch_overall_throughput_1_mps = 0;
+		dsc_sink_caps->branch_max_line_width = 0;
+		return true;
+	}
+
+	dsc_sink_caps->branch_overall_throughput_0_mps =
+		dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+	if (dsc_sink_caps->branch_overall_throughput_0_mps == 0)
+		dsc_sink_caps->branch_overall_throughput_0_mps = 0;
+	else if (dsc_sink_caps->branch_overall_throughput_0_mps == 1)
+		dsc_sink_caps->branch_overall_throughput_0_mps = 680;
+	else {
+		dsc_sink_caps->branch_overall_throughput_0_mps *= 50;
+		dsc_sink_caps->branch_overall_throughput_0_mps += 600;
+	}
+
+	dsc_sink_caps->branch_overall_throughput_1_mps =
+		dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+	if (dsc_sink_caps->branch_overall_throughput_1_mps == 0)
+		dsc_sink_caps->branch_overall_throughput_1_mps = 0;
+	else if (dsc_sink_caps->branch_overall_throughput_1_mps == 1)
+		dsc_sink_caps->branch_overall_throughput_1_mps = 680;
+	else {
+		dsc_sink_caps->branch_overall_throughput_1_mps *= 50;
+		dsc_sink_caps->branch_overall_throughput_1_mps += 600;
+	}
+
+	dsc_sink_caps->branch_max_line_width =
+		dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320;
+	ASSERT(dsc_sink_caps->branch_max_line_width == 0 || dsc_sink_caps->branch_max_line_width >= 5120);
+
+	dsc_sink_caps->is_dp = true;
+	return true;
+}
+
+
+/* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and
+ * timing's pixel clock and uncompressed bandwidth.
+ * If DSC is not possible, leave '*range' untouched. 
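A hypothetical call site for the relocated range query, to make the in/out contract concrete; the dsc, sink_caps and timing variables are assumptions, and the signature is the one declared just below.

/* Hedged usage sketch for dc_dsc_compute_bandwidth_range(). */
struct dc_dsc_bw_range range = {0};

if (dc_dsc_compute_bandwidth_range(dsc, 0 /* no slice height override */,
				   8 << 4 /* min 8.0 bpp, in 1/16 bpp units */,
				   16 << 4 /* max 16.0 bpp, in 1/16 bpp units */,
				   &sink_caps, &timing, &range)) {
	/* DSC feasible: range bounds the compressed stream bandwidth */
} else {
	/* DSC not possible for this timing; range was left untouched */
}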
+ */ +bool dc_dsc_compute_bandwidth_range( +		const struct display_stream_compressor *dsc, +		uint32_t dsc_min_slice_height_override, +		uint32_t min_bpp_x16, +		uint32_t max_bpp_x16, +		const struct dsc_dec_dpcd_caps *dsc_sink_caps, +		const struct dc_crtc_timing *timing, +		struct dc_dsc_bw_range *range) +{ +	bool is_dsc_possible = false; +	struct dsc_enc_caps dsc_enc_caps; +	struct dsc_enc_caps dsc_common_caps; +	struct dc_dsc_config config; + +	get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); + +	is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, +			timing->pixel_encoding, &dsc_common_caps); + +	if (is_dsc_possible) +		is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, +				dsc_min_slice_height_override, max_bpp_x16, &config); + +	if (is_dsc_possible) +		get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, +				config.num_slices_h, &dsc_common_caps, timing, range); + +	return is_dsc_possible; +} +  static void get_dsc_enc_caps( -	const struct display_stream_compressor *dsc, -	struct dsc_enc_caps *dsc_enc_caps, -	int pixel_clock_100Hz) +		const struct display_stream_compressor *dsc, +		struct dsc_enc_caps *dsc_enc_caps, +		int pixel_clock_100Hz)  {  	// This is a static HW query, so we can use any DSC @@ -187,14 +383,14 @@ static void get_dsc_enc_caps(  	}  } -/* Returns 'false' if no intersection was found for at least one capablity. +/* Returns 'false' if no intersection was found for at least one capability.   * It also implicitly validates some sink caps against invalid value of zero.   */  static bool intersect_dsc_caps( -	const struct dsc_dec_dpcd_caps *dsc_sink_caps, -	const struct dsc_enc_caps *dsc_enc_caps, -	enum dc_pixel_encoding pixel_encoding, -	struct dsc_enc_caps *dsc_common_caps) +		const struct dsc_dec_dpcd_caps *dsc_sink_caps, +		const struct dsc_enc_caps *dsc_enc_caps, +		enum dc_pixel_encoding pixel_encoding, +		struct dsc_enc_caps *dsc_common_caps)  {  	int32_t max_slices;  	int32_t total_sink_throughput; @@ -205,10 +401,14 @@ static bool intersect_dsc_caps(  	if (!dsc_common_caps->dsc_version)  		return false; -	dsc_common_caps->slice_caps.bits.NUM_SLICES_1 = dsc_sink_caps->slice_caps1.bits.NUM_SLICES_1 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; -	dsc_common_caps->slice_caps.bits.NUM_SLICES_2 = dsc_sink_caps->slice_caps1.bits.NUM_SLICES_2 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; -	dsc_common_caps->slice_caps.bits.NUM_SLICES_4 = dsc_sink_caps->slice_caps1.bits.NUM_SLICES_4 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; -	dsc_common_caps->slice_caps.bits.NUM_SLICES_8 = dsc_sink_caps->slice_caps1.bits.NUM_SLICES_8 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_8; +	dsc_common_caps->slice_caps.bits.NUM_SLICES_1 = +		dsc_sink_caps->slice_caps1.bits.NUM_SLICES_1 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; +	dsc_common_caps->slice_caps.bits.NUM_SLICES_2 = +		dsc_sink_caps->slice_caps1.bits.NUM_SLICES_2 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; +	dsc_common_caps->slice_caps.bits.NUM_SLICES_4 = +		dsc_sink_caps->slice_caps1.bits.NUM_SLICES_4 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; +	dsc_common_caps->slice_caps.bits.NUM_SLICES_8 = +		dsc_sink_caps->slice_caps1.bits.NUM_SLICES_8 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_8;  	if (!dsc_common_caps->slice_caps.raw)  		return false; @@ -216,7 +416,8 @@ static bool intersect_dsc_caps(  	if (!dsc_common_caps->lb_bit_depth)  		return false; -	dsc_common_caps->is_block_pred_supported = dsc_sink_caps->is_block_pred_supported && 
dsc_enc_caps->is_block_pred_supported; +	dsc_common_caps->is_block_pred_supported = +		dsc_sink_caps->is_block_pred_supported && dsc_enc_caps->is_block_pred_supported;  	dsc_common_caps->color_formats.raw = dsc_sink_caps->color_formats.raw & dsc_enc_caps->color_formats.raw;  	if (!dsc_common_caps->color_formats.raw) @@ -288,11 +489,11 @@ static struct fixed31_32 compute_dsc_max_bandwidth_overhead(  }  static uint32_t compute_bpp_x16_from_target_bandwidth( -		const uint32_t bandwidth_in_kbps, -		const struct dc_crtc_timing *timing, -		const uint32_t num_slices_h, -		const uint32_t bpp_increment_div, -		const bool is_dp) +	const uint32_t bandwidth_in_kbps, +	const struct dc_crtc_timing *timing, +	const uint32_t num_slices_h, +	const uint32_t bpp_increment_div, +	const bool is_dp)  {  	struct fixed31_32 overhead_in_kbps;  	struct fixed31_32 effective_bandwidth_in_kbps; @@ -769,146 +970,6 @@ done:  	return is_dsc_possible;  } -bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_branch_decoder_caps, struct dsc_dec_dpcd_caps *dsc_sink_caps) -{ -	if (!dpcd_dsc_basic_data) -		return false; - -	dsc_sink_caps->is_dsc_supported = (dpcd_dsc_basic_data[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & DP_DSC_DECOMPRESSION_IS_SUPPORTED) != 0; -	if (!dsc_sink_caps->is_dsc_supported) -		return false; - -	dsc_sink_caps->dsc_version = dpcd_dsc_basic_data[DP_DSC_REV - DP_DSC_SUPPORT]; - -	{ -		int buff_block_size; -		int buff_size; - -		if (!dsc_buff_block_size_from_dpcd(dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT], &buff_block_size)) -			return false; - -		buff_size = dpcd_dsc_basic_data[DP_DSC_RC_BUF_SIZE - DP_DSC_SUPPORT] + 1; -		dsc_sink_caps->rc_buffer_size = buff_size * buff_block_size; -	} - -	dsc_sink_caps->slice_caps1.raw = dpcd_dsc_basic_data[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT]; -	if (!dsc_line_buff_depth_from_dpcd(dpcd_dsc_basic_data[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT], &dsc_sink_caps->lb_bit_depth)) -		return false; - -	dsc_sink_caps->is_block_pred_supported = -		(dpcd_dsc_basic_data[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & DP_DSC_BLK_PREDICTION_IS_SUPPORTED) != 0; - -	dsc_sink_caps->edp_max_bits_per_pixel = -		dpcd_dsc_basic_data[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] | -		dpcd_dsc_basic_data[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] << 8; - -	dsc_sink_caps->color_formats.raw = dpcd_dsc_basic_data[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT]; -	dsc_sink_caps->color_depth.raw = dpcd_dsc_basic_data[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT]; - -	{ -		int dpcd_throughput = dpcd_dsc_basic_data[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT]; - -		if (!dsc_throughput_from_dpcd(dpcd_throughput & DP_DSC_THROUGHPUT_MODE_0_MASK, &dsc_sink_caps->throughput_mode_0_mps)) -			return false; - -		dpcd_throughput = (dpcd_throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> DP_DSC_THROUGHPUT_MODE_1_SHIFT; -		if (!dsc_throughput_from_dpcd(dpcd_throughput, &dsc_sink_caps->throughput_mode_1_mps)) -			return false; -	} - -	dsc_sink_caps->max_slice_width = dpcd_dsc_basic_data[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * 320; -	dsc_sink_caps->slice_caps2.raw = dpcd_dsc_basic_data[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT]; - -	if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT], &dsc_sink_caps->bpp_increment_div)) -		return false; - -	if (dc->debug.dsc_bpp_increment_div) { -		/* dsc_bpp_increment_div should onl be 1, 2, 4, 8 or 16, but rather than rejecting invalid values, -		 * we'll accept all and 
get it into range. This also makes the above check against 0 redundant, -		 * but that one stresses out the override will be only used if it's not 0. -		 */ -		if (dc->debug.dsc_bpp_increment_div >= 1) -			dsc_sink_caps->bpp_increment_div = 1; -		if (dc->debug.dsc_bpp_increment_div >= 2) -			dsc_sink_caps->bpp_increment_div = 2; -		if (dc->debug.dsc_bpp_increment_div >= 4) -			dsc_sink_caps->bpp_increment_div = 4; -		if (dc->debug.dsc_bpp_increment_div >= 8) -			dsc_sink_caps->bpp_increment_div = 8; -		if (dc->debug.dsc_bpp_increment_div >= 16) -			dsc_sink_caps->bpp_increment_div = 16; -	} - -	/* Extended caps */ -	if (dpcd_dsc_branch_decoder_caps == NULL) { // branch decoder DPCD DSC data can be null for non branch device -		dsc_sink_caps->branch_overall_throughput_0_mps = 0; -		dsc_sink_caps->branch_overall_throughput_1_mps = 0; -		dsc_sink_caps->branch_max_line_width = 0; -		return true; -	} - -	dsc_sink_caps->branch_overall_throughput_0_mps = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0]; -	if (dsc_sink_caps->branch_overall_throughput_0_mps == 0) -		dsc_sink_caps->branch_overall_throughput_0_mps = 0; -	else if (dsc_sink_caps->branch_overall_throughput_0_mps == 1) -		dsc_sink_caps->branch_overall_throughput_0_mps = 680; -	else { -		dsc_sink_caps->branch_overall_throughput_0_mps *= 50; -		dsc_sink_caps->branch_overall_throughput_0_mps += 600; -	} - -	dsc_sink_caps->branch_overall_throughput_1_mps = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0]; -	if (dsc_sink_caps->branch_overall_throughput_1_mps == 0) -		dsc_sink_caps->branch_overall_throughput_1_mps = 0; -	else if (dsc_sink_caps->branch_overall_throughput_1_mps == 1) -		dsc_sink_caps->branch_overall_throughput_1_mps = 680; -	else { -		dsc_sink_caps->branch_overall_throughput_1_mps *= 50; -		dsc_sink_caps->branch_overall_throughput_1_mps += 600; -	} - -	dsc_sink_caps->branch_max_line_width = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320; -	ASSERT(dsc_sink_caps->branch_max_line_width == 0 || dsc_sink_caps->branch_max_line_width >= 5120); - -	dsc_sink_caps->is_dp = true; -	return true; -} - - -/* If DSC is possbile, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and - * timing's pixel clock and uncompressed bandwidth. - * If DSC is not possible, leave '*range' untouched. 
- */ -bool dc_dsc_compute_bandwidth_range( -		const struct display_stream_compressor *dsc, -		uint32_t dsc_min_slice_height_override, -		uint32_t min_bpp_x16, -		uint32_t max_bpp_x16, -		const struct dsc_dec_dpcd_caps *dsc_sink_caps, -		const struct dc_crtc_timing *timing, -		struct dc_dsc_bw_range *range) -{ -	bool is_dsc_possible = false; -	struct dsc_enc_caps dsc_enc_caps; -	struct dsc_enc_caps dsc_common_caps; -	struct dc_dsc_config config; - -	get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); - -	is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, -			timing->pixel_encoding, &dsc_common_caps); - -	if (is_dsc_possible) -		is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, -				dsc_min_slice_height_override, max_bpp_x16, &config); - -	if (is_dsc_possible) -		get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, -				config.num_slices_h, &dsc_common_caps, timing, range); - -	return is_dsc_possible; -} -  bool dc_dsc_compute_config(  		const struct display_stream_compressor *dsc,  		const struct dsc_dec_dpcd_caps *dsc_sink_caps, @@ -923,22 +984,22 @@ bool dc_dsc_compute_config(  	get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);  	is_dsc_possible = setup_dsc_config(dsc_sink_caps, -			&dsc_enc_caps, -			target_bandwidth_kbps, -			timing, dsc_min_slice_height_override, -			max_target_bpp_limit_override * 16, dsc_cfg); +		&dsc_enc_caps, +		target_bandwidth_kbps, +		timing, dsc_min_slice_height_override, +		max_target_bpp_limit_override * 16, dsc_cfg);  	return is_dsc_possible;  }  uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, -		uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp) +	uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp)  {  	struct fixed31_32 overhead_in_kbps;  	struct fixed31_32 bpp;  	struct fixed31_32 actual_bandwidth_in_kbps;  	overhead_in_kbps = compute_dsc_max_bandwidth_overhead( -			timing, num_slices_h, is_dp); +		timing, num_slices_h, is_dp);  	bpp = dc_fixpt_from_fraction(bpp_x16, 16);  	actual_bandwidth_in_kbps = dc_fixpt_from_fraction(timing->pix_clk_100hz, 10);  	actual_bandwidth_in_kbps = dc_fixpt_mul(actual_bandwidth_in_kbps, bpp); @@ -946,7 +1007,9 @@ uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,  	return dc_fixpt_ceil(actual_bandwidth_in_kbps);  } -void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, struct dc_dsc_policy *policy) +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, +		uint32_t max_target_bpp_limit_override_x16, +		struct dc_dsc_policy *policy)  {  	uint32_t bpc = 0; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index e2b58ec9912d..01c3a31be191 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -174,7 +174,6 @@ bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);  void dp_decide_training_settings(  	struct dc_link *link,  	const struct dc_link_settings *link_setting, -	const struct dc_link_training_overrides *overrides,  	struct link_training_settings *lt_settings);  /* Convert PHY repeater count read from DPCD uint8_t. 
*/ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 316301fc1e30..a262f3278c21 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -318,6 +318,11 @@ static inline bool should_update_pstate_support(bool safe_to_lower, bool calc_su  	return false;  } +static inline int khz_to_mhz_ceil(int khz) +{ +	return (khz + 999) / 1000; +} +  int clk_mgr_helper_get_active_display_cnt(  		struct dc *dc,  		struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 0638b337f143..713f5558f5e1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -102,6 +102,15 @@ struct hubbub_addr_config {  	} default_addrs;  }; +struct dcn_hubbub_state { +	uint32_t vm_fault_addr_msb; +	uint32_t vm_fault_addr_lsb; +	uint32_t vm_error_status; +	uint32_t vm_error_vmid; +	uint32_t vm_error_pipe; +	uint32_t vm_error_mode; +}; +  struct hubbub_funcs {  	void (*update_dchub)(  			struct hubbub *hubbub, @@ -149,6 +158,8 @@ struct hubbub_funcs {  	void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub); +	void (*hubbub_read_state)(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_state); +  	void (*force_pstate_change_control)(struct hubbub *hubbub, bool force, bool allow);  	void (*init_watermarks)(struct hubbub *hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 5ab008e62b82..ad5f2adcc40d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -237,6 +237,7 @@ struct hw_sequencer_funcs {  			int width, int height, int offset);  	void (*z10_restore)(struct dc *dc); +	void (*z10_save_init)(struct dc *dc);  	void (*update_visual_confirm_color)(struct dc *dc,  			struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index 082549f75978..f7f7e4fff0c2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -41,7 +41,6 @@ struct dce_hwseq_wa {  	bool DEGVIDCN10_254;  	bool DEGVIDCN21;  	bool disallow_self_refresh_during_multi_plane_transition; -	bool early_riommu_invalidation;  };  struct hwseq_wa_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h index d4d52ef1b165..3f12b1600d2a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h @@ -1,3 +1,28 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
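One worked case for the khz_to_mhz_ceil() helper added in the clk_mgr_internal.h hunk above: (570001 + 999) / 1000 = 571 MHz, where plain integer division would truncate to 570 and understate the requested clock. The caller below is illustrative, not taken from the tree.

int dppclk_mhz = khz_to_mhz_ceil(new_clocks->dppclk_khz);	/* new_clocks is assumed */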
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +  #ifndef __LINK_DPCD_H__  #define __LINK_DPCD_H__  #include <inc/core_status.h> diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h index 1139b9eb9f6f..530c2578db40 100644 --- a/drivers/gpu/drm/amd/display/dc/irq_types.h +++ b/drivers/gpu/drm/amd/display/dc/irq_types.h @@ -152,7 +152,7 @@ enum dc_irq_source {  	DC_IRQ_SOURCE_DC6_VLINE1,  	DC_IRQ_SOURCE_DMCUB_OUTBOX,  	DC_IRQ_SOURCE_DMCUB_OUTBOX0, - +	DC_IRQ_SOURCE_DMCUB_GENERAL_DATAOUT,  	DAL_IRQ_SOURCES_NUMBER  }; diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 126c2f3a4dd3..f50cae252de4 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -51,38 +51,9 @@  #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)  #if defined(CONFIG_DRM_AMD_DC_DCN) -#if defined(CONFIG_X86) -#include <asm/fpu/api.h> -#define DC_FP_START() kernel_fpu_begin() -#define DC_FP_END() kernel_fpu_end() -#elif defined(CONFIG_PPC64) -#include <asm/switch_to.h> -#include <asm/cputable.h> -#define DC_FP_START() { \ -	if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ -		preempt_disable(); \ -		enable_kernel_vsx(); \ -	} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ -		preempt_disable(); \ -		enable_kernel_altivec(); \ -	} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ -		preempt_disable(); \ -		enable_kernel_fp(); \ -	} \ -} -#define DC_FP_END() { \ -	if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ -		disable_kernel_vsx(); \ -		preempt_enable(); \ -	} else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ -		disable_kernel_altivec(); \ -		preempt_enable(); \ -	} else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ -		disable_kernel_fp(); \ -		preempt_enable(); \ -	} \ -} -#endif +#include "amdgpu_dm/dc_fpu.h" +#define DC_FP_START() dc_fpu_begin(__func__, __LINE__) +#define DC_FP_END() dc_fpu_end(__func__, __LINE__)  #endif  /* diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index abbf7ae584c9..caf961bb633f 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -352,6 +352,8 @@ struct dmub_srv_hw_funcs {  	uint32_t (*get_gpint_response)(struct dmub_srv *dmub); +	uint32_t (*get_gpint_dataout)(struct dmub_srv *dmub); +  	void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);  	uint32_t (*get_current_time)(struct dmub_srv *dmub); @@ -677,6 +679,22 @@ enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,  					     uint32_t *response);  /** + * dmub_srv_get_gpint_dataout() - Queries the GPINT DATAOUT. + * @dmub: the dmub service + * @dataout: the data for the GPINT DATAOUT + * + * Returns the response code for the last GPINT DATAOUT interrupt. + * + * Can be called after software initialization. 
+ * + * Return: + *   DMUB_STATUS_OK - success + *   DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_get_gpint_dataout(struct dmub_srv *dmub, +					     uint32_t *dataout); + +/**   * dmub_flush_buffer_mem() - Read back entire frame buffer region.   * This ensures that the write from x86 has been flushed and will not   * hang the DMCUB. diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7c4734f905d9..7b684e7f60df 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -23,8 +23,8 @@   *   */ -#ifndef _DMUB_CMD_H_ -#define _DMUB_CMD_H_ +#ifndef DMUB_CMD_H +#define DMUB_CMD_H  #if defined(_TEST_HARNESS) || defined(FPGA_USB4)  #include "dmub_fw_types.h" @@ -47,10 +47,10 @@  /* Firmware versioning. */  #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0xf3da2b656 +#define DMUB_FW_VERSION_GIT_HASH 0x7383caadc  #define DMUB_FW_VERSION_MAJOR 0  #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 71 +#define DMUB_FW_VERSION_REVISION 79  #define DMUB_FW_VERSION_TEST 0  #define DMUB_FW_VERSION_VBIOS 0  #define DMUB_FW_VERSION_HOTFIX 0 @@ -322,6 +322,10 @@ union dmub_fw_boot_status {  		uint32_t mailbox_rdy : 1; /**< 1 if mailbox ready */  		uint32_t optimized_init_done : 1; /**< 1 if optimized init done */  		uint32_t restore_required : 1; /**< 1 if driver should call restore */ +		uint32_t defer_load : 1; /**< 1 if VBIOS data programming is deferred */ +		uint32_t reserved : 1; +		uint32_t detection_required: 1; /**< 1 if detection needs to be triggered by driver */ +  	} bits; /**< status bits */  	uint32_t all; /**< 32-bit access to status bits */  }; @@ -334,6 +338,8 @@ enum dmub_fw_boot_status_bit {  	DMUB_FW_BOOT_STATUS_BIT_MAILBOX_READY = (1 << 1), /**< 1 if mailbox ready */  	DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if init done */  	DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */ +	DMUB_FW_BOOT_STATUS_BIT_DEFERRED_LOADED = (1 << 4), /**< 1 if VBIOS data programming is deferred */ +	DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection needs to be triggered by driver */  };  /* Register bit definition for SCRATCH5 */ @@ -352,7 +358,7 @@ enum dmub_lvtma_status_bit {  };  /** - * union dmub_fw_boot_options - Boot option definitions for SCRATCH15 + * union dmub_fw_boot_options - Boot option definitions for SCRATCH14  */  union dmub_fw_boot_options {  	struct { @@ -363,7 +369,10 @@ union dmub_fw_boot_options {  		uint32_t disable_clk_gate: 1; /**< 1 if clock gating should be disabled */  		uint32_t skip_phy_init_panel_sequence: 1; /**< 1 to skip panel init seq */  		uint32_t z10_disable: 1; /**< 1 to disable z10 */ -		uint32_t reserved : 25; /**< reserved */ +		uint32_t reserved2: 1; /**< reserved for an unreleased feature */ +		uint32_t reserved_unreleased1: 1; /**< reserved for an unreleased feature */ +		uint32_t invalid_vbios_data: 1; /**< 1 if VBIOS data table is invalid */ +		uint32_t reserved : 23; /**< reserved */  	} bits; /**< boot bits */  	uint32_t all; /**< 32-bit access to bits */  }; @@ -485,6 +494,11 @@ enum dmub_gpint_command {  	 * RETURN: PSR residency in milli-percent.  	 */  	DMUB_GPINT__PSR_RESIDENCY = 9, + +	/** +	 * DESC: Notifies DMCUB that detection is done so the detection_required status bit can be cleared.
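+	 *
+	 * Hypothetical driver-side sketch, assuming the command is sent
+	 * through the generic GPINT helper and that a 30 us timeout is
+	 * acceptable (both are illustrative choices):
+	 *
+	 *   if (boot_status.bits.detection_required)
+	 *           dmub_srv_send_gpint_command(dmub,
+	 *                   DMUB_GPINT__NOTIFY_DETECTION_DONE, 0, 30);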
+	 */ +	DMUB_GPINT__NOTIFY_DETECTION_DONE = 12,  };  /** @@ -856,6 +870,11 @@ enum dmub_cmd_idle_opt_type {  	 * DCN hardware restore.  	 */  	DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0, + +	/** +	 * DCN hardware save. +	 */ +	DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1  };  /** @@ -1406,6 +1425,10 @@ struct dmub_cmd_psr_copy_settings_data {  	 * Currently the support is only for 0 or 1  	 */  	uint8_t panel_inst; +	/** +	 * Explicit padding to 4 byte boundary. +	 */ +	uint8_t pad3[4];  };  /** @@ -1430,7 +1453,7 @@ struct dmub_cmd_psr_set_level_data {  	 * 16-bit value dictated by driver that will enable/disable different functionality.  	 */  	uint16_t psr_level; -		/** +	/**  	 * PSR control version.  	 */  	uint8_t cmd_version; @@ -2462,16 +2485,14 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)  static inline bool dmub_rb_push_front(struct dmub_rb *rb,  				      const union dmub_rb_cmd *cmd)  { -	uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t); -	const uint64_t *src = (const uint64_t *)cmd; -	uint8_t i; +	uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt; +	const uint8_t *src = (const uint8_t *)cmd;  	if (dmub_rb_full(rb))  		return false;  	// copying data -	for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) -		*dst++ = *src++; +	dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE);  	rb->wrpt += DMUB_RB_CMD_SIZE; @@ -2493,7 +2514,7 @@ static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,  				      const union dmub_rb_out_cmd *cmd)  {  	uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt; -	const uint8_t *src = (uint8_t *)cmd; +	const uint8_t *src = (const uint8_t *)cmd;  	if (dmub_rb_full(rb))  		return false; @@ -2578,18 +2599,16 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb,   * @return false otherwise   */  static inline bool dmub_rb_out_front(struct dmub_rb *rb, -				 union dmub_rb_out_cmd  *cmd) +				 union dmub_rb_out_cmd *cmd)  { -	const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t); -	uint64_t *dst = (uint64_t *)cmd; -	uint8_t i; +	const uint8_t *src = (const uint8_t *)(rb->base_address) + rb->rptr; +	uint8_t *dst = (uint8_t *)cmd;  	if (dmub_rb_empty(rb))  		return false;  	// copying data -	for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) -		*dst++ = *src++; +	dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE);  	return true;  } @@ -2624,15 +2643,14 @@ static inline bool dmub_rb_pop_front(struct dmub_rb *rb)   */  static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)  { +	uint8_t buf[DMUB_RB_CMD_SIZE];  	uint32_t rptr = rb->rptr;  	uint32_t wptr = rb->wrpt;  	while (rptr != wptr) { -		uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t); -		uint8_t i; +		const uint8_t *data = (const uint8_t *)rb->base_address + rptr; -		for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) -			*data++; +		dmub_memcpy(buf, data, DMUB_RB_CMD_SIZE);  		rptr += DMUB_RB_CMD_SIZE;  		if (rptr >= rb->capacity) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 973de346410d..fc667cb17eb0 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -38,7 +38,10 @@  const struct dmub_srv_dcn31_regs dmub_srv_dcn31_regs = {  #define DMUB_SR(reg) REG_OFFSET_EXP(reg), -	{ DMUB_DCN31_REGS() }, +	{ +		DMUB_DCN31_REGS() +		DMCUB_INTERNAL_REGS() +	},  #undef DMUB_SR  #define DMUB_SF(reg, field) FD_MASK(reg,
field), @@ -80,7 +83,7 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,  void dmub_dcn31_reset(struct dmub_srv *dmub)  {  	union dmub_gpint_data_register cmd; -	const uint32_t timeout = 30; +	const uint32_t timeout = 100;  	uint32_t in_reset, scratch, i;  	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); @@ -95,26 +98,22 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)  		/**  		 * Timeout covers both the ACK and the wait  		 * for remaining work to finish. -		 * -		 * This is mostly bound by the PHY disable sequence. -		 * Each register check will be greater than 1us, so -		 * don't bother using udelay.  		 */  		for (i = 0; i < timeout; ++i) {  			if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))  				break; + +			udelay(1);  		}  		for (i = 0; i < timeout; ++i) {  			scratch = dmub->hw_funcs.get_gpint_response(dmub);  			if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)  				break; -		} -		/* Clear the GPINT command manually so we don't reset again. */ -		cmd.all = 0; -		dmub->hw_funcs.set_gpint(dmub, cmd); +			udelay(1); +		}  		/* Force reset in case we timed out, DMCUB is likely hung. */  	} @@ -127,6 +126,10 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)  	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);  	REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);  	REG_WRITE(DMCUB_SCRATCH0, 0); + +	/* Clear the GPINT command manually so we don't send anything during boot. */ +	cmd.all = 0; +	dmub->hw_funcs.set_gpint(dmub, cmd);  }  void dmub_dcn31_reset_release(struct dmub_srv *dmub) @@ -267,11 +270,13 @@ void dmub_dcn31_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)  bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub)  { -	uint32_t is_hw_init; +	union dmub_fw_boot_status status; +	uint32_t is_enable; -	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init); +	status.all = REG_READ(DMCUB_SCRATCH0); +	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable); -	return is_hw_init != 0; +	return is_enable != 0 && status.bits.dal_fw;  }  bool dmub_dcn31_is_supported(struct dmub_srv *dmub) @@ -305,6 +310,21 @@ uint32_t dmub_dcn31_get_gpint_response(struct dmub_srv *dmub)  	return REG_READ(DMCUB_SCRATCH7);  } +uint32_t dmub_dcn31_get_gpint_dataout(struct dmub_srv *dmub) +{ +	uint32_t dataout = REG_READ(DMCUB_GPINT_DATAOUT); + +	REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 0); + +	REG_WRITE(DMCUB_GPINT_DATAOUT, 0); +	REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 1); +	REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 0); + +	REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 1); + +	return dataout; +} +  union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub)  {  	union dmub_fw_boot_status status; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 9456a6a2d518..bb62605d2ac8 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -114,7 +114,9 @@ struct dmub_srv;  	DMUB_SR(DMCUB_TIMER_CURRENT) \  	DMUB_SR(DMCUB_INST_FETCH_FAULT_ADDR) \  	DMUB_SR(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR) \ -	DMUB_SR(DMCUB_DATA_WRITE_FAULT_ADDR) +	DMUB_SR(DMCUB_DATA_WRITE_FAULT_ADDR) \ +	DMUB_SR(DMCUB_INTERRUPT_ENABLE) \ +	DMUB_SR(DMCUB_INTERRUPT_ACK)  #define DMUB_DCN31_FIELDS() \  	DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ @@ -147,7 +149,9 @@ struct dmub_srv;  	DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \  	DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \  	DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \ -	DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) +	
DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \ +	DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \ +	DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK)  struct dmub_srv_dcn31_reg_offset {  #define DMUB_SR(reg) uint32_t reg; @@ -222,6 +226,8 @@ bool dmub_dcn31_is_gpint_acked(struct dmub_srv *dmub,  uint32_t dmub_dcn31_get_gpint_response(struct dmub_srv *dmub); +uint32_t dmub_dcn31_get_gpint_dataout(struct dmub_srv *dmub); +  void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params);  void dmub_dcn31_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 2bdbd7406f56..75a91cfaf036 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -224,6 +224,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)  		funcs->set_gpint = dmub_dcn31_set_gpint;  		funcs->is_gpint_acked = dmub_dcn31_is_gpint_acked;  		funcs->get_gpint_response = dmub_dcn31_get_gpint_response; +		funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout;  		funcs->get_fw_status = dmub_dcn31_get_fw_boot_status;  		funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options;  		funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence; @@ -719,6 +720,22 @@ enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,  	return DMUB_STATUS_OK;  } +enum dmub_status dmub_srv_get_gpint_dataout(struct dmub_srv *dmub, +					     uint32_t *dataout) +{ +	*dataout = 0; + +	if (!dmub->sw_init) +		return DMUB_STATUS_INVALID; + +	if (!dmub->hw_funcs.get_gpint_dataout) +		return DMUB_STATUS_INVALID; + +	*dataout = dmub->hw_funcs.get_gpint_dataout(dmub); + +	return DMUB_STATUS_OK; +} +  enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,  					     union dmub_fw_boot_status *status)  { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index b963226e8af4..3e81850a7ffe 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -39,8 +39,12 @@ static void push_error_status(struct mod_hdcp *hdcp,  	if (is_hdcp1(hdcp)) {  		hdcp->connection.hdcp1_retry_count++; +		if (hdcp->connection.hdcp1_retry_count == MAX_NUM_OF_ATTEMPTS) +			hdcp->connection.link.adjust.hdcp1.disable = 1;  	} else if (is_hdcp2(hdcp)) {  		hdcp->connection.hdcp2_retry_count++; +		if (hdcp->connection.hdcp2_retry_count == MAX_NUM_OF_ATTEMPTS) +			hdcp->connection.link.adjust.hdcp2.disable = 1;  	}  } @@ -59,8 +63,7 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)  		}  	} -	return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) && -			is_auth_needed && +	return is_auth_needed &&  			!hdcp->connection.link.adjust.hdcp1.disable &&  			!hdcp->connection.is_hdcp1_revoked;  } @@ -80,8 +83,7 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)  		}  	} -	return (hdcp->connection.hdcp2_retry_count < MAX_NUM_OF_ATTEMPTS) && -			is_auth_needed && +	return is_auth_needed &&  			!hdcp->connection.link.adjust.hdcp2.disable &&  			!hdcp->connection.is_hdcp2_revoked;  } @@ -143,6 +145,7 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,  			} else {  				callback_in_ms(0, output);  				set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); +				set_auth_complete(hdcp, output);  			}  		else if 
(is_hdmi_dvi_sl_hdcp(hdcp))  			if (is_cp_desired_hdcp2(hdcp)) { @@ -154,10 +157,12 @@ static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,  			} else {  				callback_in_ms(0, output);  				set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); +				set_auth_complete(hdcp, output);  			}  		else {  			callback_in_ms(0, output);  			set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); +			set_auth_complete(hdcp, output);  		}  	} else if (is_in_cp_not_desired_state(hdcp)) {  		increment_stay_counter(hdcp); @@ -313,9 +318,6 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,  		goto out;  	} -	/* save current encryption states to restore after next authentication */ -	mod_hdcp_save_current_encryption_states(hdcp); -  	/* reset existing authentication status */  	status = reset_authentication(hdcp, output);  	if (status != MOD_HDCP_STATUS_SUCCESS) @@ -362,9 +364,6 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,  		goto out;  	} -	/* save current encryption states to restore after next authentication */ -	mod_hdcp_save_current_encryption_states(hdcp); -  	/* stop current authentication */  	status = reset_authentication(hdcp, output);  	if (status != MOD_HDCP_STATUS_SUCCESS) @@ -392,6 +391,60 @@ out:  	return status;  } +enum mod_hdcp_status mod_hdcp_update_authentication(struct mod_hdcp *hdcp, +		uint8_t index, +		struct mod_hdcp_link_adjustment *link_adjust, +		struct mod_hdcp_display_adjustment *display_adjust, +		struct mod_hdcp_output *output) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; +	struct mod_hdcp_display *display = NULL; + +	HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index); +	memset(output, 0, sizeof(struct mod_hdcp_output)); + +	/* find display in connection */ +	display = get_active_display_at_index(hdcp, index); +	if (!display) { +		status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; +		goto out; +	} + +	/* skip if no changes */ +	if (memcmp(link_adjust, &hdcp->connection.link.adjust, +			sizeof(struct mod_hdcp_link_adjustment)) == 0 && +			memcmp(display_adjust, &display->adjust, +					sizeof(struct mod_hdcp_display_adjustment)) == 0) { +		status = MOD_HDCP_STATUS_SUCCESS; +		goto out; +	} + +	/* stop current authentication */ +	status = reset_authentication(hdcp, output); +	if (status != MOD_HDCP_STATUS_SUCCESS) +		goto out; + +	/* clear retry counters */ +	reset_retry_counts(hdcp); + +	/* reset error trace */ +	memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + +	/* set new adjustment */ +	hdcp->connection.link.adjust = *link_adjust; +	display->adjust = *display_adjust; + +	/* request authentication when connection is not reset */ +	if (current_state(hdcp) != HDCP_UNINITIALIZED) +		/* wait 100ms to debounce simultaneous updates for different indices */ +		callback_in_ms(100, output); + +out: +	if (status != MOD_HDCP_STATUS_SUCCESS) +		push_error_status(hdcp, status); +	return status; +} +  enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,  		uint8_t index, struct mod_hdcp_display_query *query)  { @@ -470,7 +523,7 @@ enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,  	/* reset authentication if needed */  	if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) { -		HDCP_FULL_DDC_TRACE(hdcp); +		mod_hdcp_log_ddc_trace(hdcp);  		reset_status = reset_authentication(hdcp, output);  		if (reset_status != MOD_HDCP_STATUS_SUCCESS)  			push_error_status(hdcp, reset_status); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index 
3ce91db560d1..399fbca8947b 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -324,6 +324,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,  /* log functions */  void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,  		uint8_t *buf, uint32_t buf_size); +void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp);  /* TODO: add adjustment log */  /* psp functions */ @@ -331,8 +332,6 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(  		struct mod_hdcp *hdcp, struct mod_hdcp_display *display);  enum mod_hdcp_status mod_hdcp_remove_display_from_topology(  		struct mod_hdcp *hdcp, uint8_t index); -bool mod_hdcp_is_link_encryption_enabled(struct mod_hdcp *hdcp); -void mod_hdcp_save_current_encryption_states(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);  enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp); @@ -496,6 +495,13 @@ static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,  	output->watchdog_timer_delay = time;  } +static inline void set_auth_complete(struct mod_hdcp *hdcp, +		struct mod_hdcp_output *output) +{ +	output->auth_complete = 1; +	mod_hdcp_log_ddc_trace(hdcp); +} +  /* connection topology helpers */  static inline uint8_t is_display_active(struct mod_hdcp_display *display)  { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index de872e7958b0..6ec918af3bff 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -266,9 +266,6 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,  	mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,  			&input->link_maintenance, &status,  			hdcp, "link_maintenance"); - -	if (status != MOD_HDCP_STATUS_SUCCESS) -		mod_hdcp_save_current_encryption_states(hdcp);  out:  	return status;  } @@ -447,9 +444,6 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,  		mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,  				&input->reauth_request_check, &status,  				hdcp, "reauth_request_check"); - -	if (status != MOD_HDCP_STATUS_SUCCESS) -		mod_hdcp_save_current_encryption_states(hdcp);  out:  	return status;  } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c index 3dda8c1d83fc..7f011196ce98 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -89,7 +89,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,  		} else {  			callback_in_ms(0, output);  			set_state_id(hdcp, output, H1_A45_AUTHENTICATED); -			HDCP_FULL_DDC_TRACE(hdcp); +			set_auth_complete(hdcp, output);  		}  		break;  	case H1_A45_AUTHENTICATED: @@ -137,7 +137,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,  		}  		callback_in_ms(0, output);  		set_state_id(hdcp, output, H1_A45_AUTHENTICATED); -		HDCP_FULL_DDC_TRACE(hdcp); +		set_auth_complete(hdcp, output);  		break;  	default:  		status = MOD_HDCP_STATUS_INVALID_STATE; @@ -239,7 +239,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,  			set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);  		} else {  			
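			/*
			 * Every path that finishes authentication now funnels
			 * through the same pattern: enter the authenticated
			 * state, then raise auth_complete and log the DDC
			 * trace via the helper, as the next two lines do:
			 *
			 *   set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
			 *   set_auth_complete(hdcp, output);
			 */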
set_state_id(hdcp, output, D1_A4_AUTHENTICATED); -			HDCP_FULL_DDC_TRACE(hdcp); +			set_auth_complete(hdcp, output);  		}  		break;  	case D1_A4_AUTHENTICATED: @@ -311,7 +311,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,  			break;  		}  		set_state_id(hdcp, output, D1_A4_AUTHENTICATED); -		HDCP_FULL_DDC_TRACE(hdcp); +		set_auth_complete(hdcp, output);  		break;  	default:  		fail_and_restart_in_ms(0, &status, output); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 117c6b45f718..91c22b96ebde 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -571,9 +571,6 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,  	}  	process_rxstatus(hdcp, event_ctx, input, &status); - -	if (status != MOD_HDCP_STATUS_SUCCESS) -		mod_hdcp_save_current_encryption_states(hdcp);  out:  	return status;  } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c index 70cb230d8f56..1f4095b26409 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -242,7 +242,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,  		}  		callback_in_ms(0, output);  		set_state_id(hdcp, output, H2_A5_AUTHENTICATED); -		HDCP_FULL_DDC_TRACE(hdcp); +		set_auth_complete(hdcp, output);  		break;  	case H2_A5_AUTHENTICATED:  		if (input->rxstatus_read == FAIL || @@ -559,7 +559,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,  			break;  		}  		set_state_id(hdcp, output, D2_A5_AUTHENTICATED); -		HDCP_FULL_DDC_TRACE(hdcp); +		set_auth_complete(hdcp, output);  		break;  	case D2_A5_AUTHENTICATED:  		if (input->rxstatus_read == FAIL || diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c index 1a0f7c3dc964..6b3b5f610907 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -51,6 +51,80 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,  	}  } +void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp) +{ +	if (is_hdcp1(hdcp)) { +		HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, +				sizeof(hdcp->auth.msg.hdcp1.bksv)); +		HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, +				sizeof(hdcp->auth.msg.hdcp1.bcaps)); +		HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", +				(uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, +				sizeof(hdcp->auth.msg.hdcp1.bstatus)); +		HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, +				sizeof(hdcp->auth.msg.hdcp1.an)); +		HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, +				sizeof(hdcp->auth.msg.hdcp1.aksv)); +		HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, +				sizeof(hdcp->auth.msg.hdcp1.ainfo)); +		HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", +				(uint8_t *)&hdcp->auth.msg.hdcp1.r0p, +				sizeof(hdcp->auth.msg.hdcp1.r0p)); +		HDCP_DDC_READ_TRACE(hdcp, "BINFO", +				(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, +				sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); +		HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, +				hdcp->auth.msg.hdcp1.ksvlist_size); +		HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, +				sizeof(hdcp->auth.msg.hdcp1.vp)); +	} else if (is_hdcp2(hdcp)) { 
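+		/*
+		 * The HDCP 2.x branch below replays the AKE -> LC -> SKE
+		 * message exchange; each entry pairs a DDC/AUX message name
+		 * with the buffer captured during authentication, e.g.:
+		 *
+		 *   HDCP_DDC_READ_TRACE(hdcp, "AKE Cert",
+		 *           hdcp->auth.msg.hdcp2.ake_cert,
+		 *           sizeof(hdcp->auth.msg.hdcp2.ake_cert));
+		 */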
+		HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", +				&hdcp->auth.msg.hdcp2.hdcp2version_hdmi, +				sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); +		HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, +				sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); +		HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, +				sizeof(hdcp->auth.msg.hdcp2.ake_init)); +		HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, +				sizeof(hdcp->auth.msg.hdcp2.ake_cert)); +		HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", +				hdcp->auth.msg.hdcp2.ake_stored_km, +				sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); +		HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", +				hdcp->auth.msg.hdcp2.ake_no_stored_km, +				sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); +		HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, +				sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); +		HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", +				hdcp->auth.msg.hdcp2.ake_pairing_info, +				sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); +		HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, +				sizeof(hdcp->auth.msg.hdcp2.lc_init)); +		HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, +				sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); +		HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, +				sizeof(hdcp->auth.msg.hdcp2.ske_eks)); +		HDCP_DDC_READ_TRACE(hdcp, "Rx Status", +				(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, +				sizeof(hdcp->auth.msg.hdcp2.rxstatus)); +		HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", +				hdcp->auth.msg.hdcp2.rx_id_list, +				hdcp->auth.msg.hdcp2.rx_id_list_size); +		HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", +				hdcp->auth.msg.hdcp2.repeater_auth_ack, +				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); +		HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", +				hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, +				hdcp->auth.msg.hdcp2.stream_manage_size); +		HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", +				hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, +				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); +		HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", +				hdcp->auth.msg.hdcp2.content_stream_type_dp, +				sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); +	} +} +  char *mod_hdcp_status_to_str(int32_t status)  {  	switch (status) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index 47f8ee2832ff..eb6f9b9c504a 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -106,78 +106,6 @@  				hdcp->config.index, msg_name,\  				hdcp->buf); \  } while (0) -#define HDCP_FULL_DDC_TRACE(hdcp) do { \ -	if (is_hdcp1(hdcp)) { \ -		HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \ -				sizeof(hdcp->auth.msg.hdcp1.bksv)); \ -		HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ -				sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ -		HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \ -				(uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \ -				sizeof(hdcp->auth.msg.hdcp1.bstatus)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ -				sizeof(hdcp->auth.msg.hdcp1.an)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ -				sizeof(hdcp->auth.msg.hdcp1.aksv)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \ -				sizeof(hdcp->auth.msg.hdcp1.ainfo)); \ -		HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \ -				(uint8_t 
*)&hdcp->auth.msg.hdcp1.r0p, \ -				sizeof(hdcp->auth.msg.hdcp1.r0p)); \ -		HDCP_DDC_READ_TRACE(hdcp, "BINFO", \ -				(uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \ -				sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \ -		HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \ -				hdcp->auth.msg.hdcp1.ksvlist_size); \ -		HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \ -				sizeof(hdcp->auth.msg.hdcp1.vp)); \ -	} else { \ -		HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \ -				&hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \ -				sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \ -		HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \ -				sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \ -				sizeof(hdcp->auth.msg.hdcp2.ake_init)); \ -		HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \ -				sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \ -				hdcp->auth.msg.hdcp2.ake_stored_km, \ -				sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \ -				hdcp->auth.msg.hdcp2.ake_no_stored_km, \ -				sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \ -		HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \ -				sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \ -		HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \ -				hdcp->auth.msg.hdcp2.ake_pairing_info, \ -				sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \ -				sizeof(hdcp->auth.msg.hdcp2.lc_init)); \ -		HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \ -				sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \ -				sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \ -		HDCP_DDC_READ_TRACE(hdcp, "Rx Status", \ -				(uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \ -				sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \ -		HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \ -				hdcp->auth.msg.hdcp2.rx_id_list, \ -				hdcp->auth.msg.hdcp2.rx_id_list_size); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \ -				hdcp->auth.msg.hdcp2.repeater_auth_ack, \ -				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \ -				hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \ -				hdcp->auth.msg.hdcp2.stream_manage_size); \ -		HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \ -				hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \ -				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \ -		HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \ -				hdcp->auth.msg.hdcp2.content_stream_type_dp, \ -				sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \ -	} \ -} while (0)  #define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \  		HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \  				hdcp->config.index, i) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 1b02056bc8bd..e9bd84ec027d 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -45,7 +45,7 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp,  	in->process.msg3_desc.msg_size = 0;  } -static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v2( +static enum mod_hdcp_status remove_display_from_topology_v2(  		struct mod_hdcp *hdcp, uint8_t index)  {  	struct psp_context *psp = 
hdcp->config.psp.handle; @@ -54,7 +54,7 @@ static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v2(  			get_active_display_at_index(hdcp, index);  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; -	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; +	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;  	if (!display || !is_display_active(display))  		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; @@ -81,7 +81,7 @@ static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v2(  	return status;  } -static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v3( +static enum mod_hdcp_status remove_display_from_topology_v3(  		struct mod_hdcp *hdcp, uint8_t index)  {  	struct psp_context *psp = hdcp->config.psp.handle; @@ -90,7 +90,7 @@ static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v3(  		get_active_display_at_index(hdcp, index);  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; -	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; +	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;  	if (!display || !is_display_active(display))  		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; @@ -107,7 +107,7 @@ static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v3(  	psp_dtm_invoke(psp, dtm_cmd->cmd_id);  	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { -		status = mod_hdcp_remove_display_from_topology_v2(hdcp, index); +		status = remove_display_from_topology_v2(hdcp, index);  		if (status != MOD_HDCP_STATUS_SUCCESS)  			display->state = MOD_HDCP_DISPLAY_INACTIVE;  	} else { @@ -120,20 +120,7 @@ static enum mod_hdcp_status mod_hdcp_remove_display_from_topology_v3(  	return status;  } -enum mod_hdcp_status mod_hdcp_remove_display_from_topology( -		struct mod_hdcp *hdcp, uint8_t index) -{ -	enum mod_hdcp_status status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; - -	if (hdcp->config.psp.caps.dtm_v3_supported) -		status = mod_hdcp_remove_display_from_topology_v3(hdcp, index); -	else -		status = mod_hdcp_remove_display_from_topology_v2(hdcp, index); - -	return status; -} - -static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v2( +static enum mod_hdcp_status add_display_to_topology_v2(  		struct mod_hdcp *hdcp, struct mod_hdcp_display *display)  {  	struct psp_context *psp = hdcp->config.psp.handle; @@ -141,13 +128,13 @@ static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v2(  	struct mod_hdcp_link *link = &hdcp->connection.link;  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; -	if (!psp->dtm_context.dtm_initialized) { +	if (!psp->dtm_context.context.initialized) {  		DRM_INFO("Failed to add display topology, DTM TA is not initialized.");  		display->state = MOD_HDCP_DISPLAY_INACTIVE;  		return MOD_HDCP_STATUS_FAILURE;  	} -	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; +	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;  	mutex_lock(&psp->dtm_context.mutex);  	memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); @@ -180,7 +167,7 @@ static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v2(  	return status;  } -static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v3( +static enum mod_hdcp_status add_display_to_topology_v3(  		struct mod_hdcp *hdcp, struct mod_hdcp_display *display)  {  	struct psp_context *psp = hdcp->config.psp.handle; @@ -188,13 +175,13 @@ static enum mod_hdcp_status 
mod_hdcp_add_display_to_topology_v3(  	struct mod_hdcp_link *link = &hdcp->connection.link;  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; -	if (!psp->dtm_context.dtm_initialized) { +	if (!psp->dtm_context.context.initialized) {  		DRM_INFO("Failed to add display topology, DTM TA is not initialized.");  		display->state = MOD_HDCP_DISPLAY_INACTIVE;  		return MOD_HDCP_STATUS_FAILURE;  	} -	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; +	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;  	mutex_lock(&psp->dtm_context.mutex);  	memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); @@ -220,7 +207,7 @@ static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v3(  	psp_dtm_invoke(psp, dtm_cmd->cmd_id);  	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { -		status = mod_hdcp_add_display_to_topology_v2(hdcp, display); +		status = add_display_to_topology_v2(hdcp, display);  		if (status != MOD_HDCP_STATUS_SUCCESS)  			display->state = MOD_HDCP_DISPLAY_INACTIVE;  	} else { @@ -232,15 +219,28 @@ static enum mod_hdcp_status mod_hdcp_add_display_to_topology_v3(  	return status;  } +enum mod_hdcp_status mod_hdcp_remove_display_from_topology( +		struct mod_hdcp *hdcp, uint8_t index) +{ +	enum mod_hdcp_status status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + +	if (hdcp->config.psp.caps.dtm_v3_supported) +		status = remove_display_from_topology_v3(hdcp, index); +	else +		status = remove_display_from_topology_v2(hdcp, index); + +	return status; +} +  enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,  					       struct mod_hdcp_display *display)  {  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	if (hdcp->config.psp.caps.dtm_v3_supported) -		status = mod_hdcp_add_display_to_topology_v3(hdcp, display); +		status = add_display_to_topology_v3(hdcp, display);  	else -		status = mod_hdcp_add_display_to_topology_v2(hdcp, display); +		status = add_display_to_topology_v2(hdcp, display);  	return status;  } @@ -253,12 +253,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)  	struct ta_hdcp_shared_memory *hdcp_cmd;  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; -	if (!psp->hdcp_context.hdcp_initialized) { +	if (!psp->hdcp_context.context.initialized) {  		DRM_ERROR("Failed to create hdcp session. 
HDCP TA is not initialized.");  		return MOD_HDCP_STATUS_FAILURE;  	} -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	mutex_lock(&psp->hdcp_context.mutex);  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -293,7 +293,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id; @@ -325,7 +325,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id; @@ -367,7 +367,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id; @@ -393,7 +393,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id; @@ -436,7 +436,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { @@ -471,7 +471,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -498,7 +498,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; -	if (!psp->hdcp_context.hdcp_initialized) { +	if (!psp->hdcp_context.context.initialized) {  		DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");  		return 
MOD_HDCP_STATUS_FAILURE;  	} @@ -508,7 +508,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; @@ -545,7 +545,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id; @@ -579,7 +579,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -611,7 +611,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -671,7 +671,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -717,7 +717,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -750,7 +750,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -785,7 +785,7 @@ enum mod_hdcp_status 
mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -833,7 +833,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id; @@ -862,7 +862,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -914,7 +914,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -958,7 +958,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -994,7 +994,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)  	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;  	mutex_lock(&psp->hdcp_context.mutex); -	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; +	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;  	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));  	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; @@ -1021,14 +1021,3 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)  	mutex_unlock(&psp->hdcp_context.mutex);  	return status;  } - -bool mod_hdcp_is_link_encryption_enabled(struct mod_hdcp *hdcp) -{ -	/* unsupported */ -	return true; -} - -void mod_hdcp_save_current_encryption_states(struct mod_hdcp *hdcp) -{ -	/* unsupported */ -} diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index c590493fd293..f37101f5a777 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -123,7 +123,6 
@@ enum mod_hdcp_display_state {  struct mod_hdcp_psp_caps {  	uint8_t dtm_v3_supported; -	uint8_t opm_state_query_supported;  };  enum mod_hdcp_display_disable_option { @@ -226,6 +225,7 @@ struct mod_hdcp_output {  	uint8_t watchdog_timer_stop;  	uint16_t callback_delay;  	uint16_t watchdog_timer_delay; +	uint8_t auth_complete;  };  /* used to represent per display info */ @@ -282,15 +282,22 @@ enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,  /* called per link on link destroy */  enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp); -/* called per display on cp_desired set to true */ +/* called per display after stream is enabled */  enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,  		struct mod_hdcp_link *link, struct mod_hdcp_display *display,  		struct mod_hdcp_output *output); -/* called per display on cp_desired set to false */ +/* called per display before stream is disabled */  enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,  		uint8_t index, struct mod_hdcp_output *output); +/* called per display to apply new authentication adjustment */ +enum mod_hdcp_status mod_hdcp_update_authentication(struct mod_hdcp *hdcp, +		uint8_t index, +		struct mod_hdcp_link_adjustment *link_adjust, +		struct mod_hdcp_display_adjustment *display_adjust, +		struct mod_hdcp_output *output); +  /* called to query hdcp information on a specific index */  enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,  		uint8_t index, struct mod_hdcp_display_query *query);
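The mod_hdcp_update_authentication() entry point declared above lets a DM driver apply new link or display adjustments without open-coding a remove/add cycle. A minimal caller sketch; the index, the chosen adjustment, and process_output() are illustrative placeholders, not part of this patch:

	struct mod_hdcp_output output;
	struct mod_hdcp_link_adjustment link_adjust = { 0 };
	struct mod_hdcp_display_adjustment display_adjust = { 0 };

	/* Example adjustment: force HDCP 1.x off for this link. */
	link_adjust.hdcp1.disable = 1;

	if (mod_hdcp_update_authentication(hdcp, 0, &link_adjust,
			&display_adjust, &output) == MOD_HDCP_STATUS_SUCCESS)
		process_output(&output);	/* honor callback/watchdog delays */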
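On the clock-manager side, the khz_to_mhz_ceil() helper added to clk_mgr_internal.h earlier in this diff rounds up rather than truncating, so a converted clock is never programmed below the requested rate. A short illustration (values are examples only):

	/* (khz + 999) / 1000: exact multiples pass through unchanged... */
	int exact = khz_to_mhz_ceil(25000);	/* == 25 */
	/* ...anything else rounds up to the next MHz. */
	int rounded = khz_to_mhz_ceil(25175);	/* == 26, not 25 */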