Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')

 drivers/gpu/drm/drm_dp_mst_topology.c | 271 ++++++++++++++++++++++++--------
 1 file changed, 213 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d3fc7e4e85b7..41b492f99955 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -31,6 +31,8 @@
 #include <drm/drmP.h>
 
 #include <drm/drm_fixed.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 
 /**
  * DOC: dp mst helper
@@ -330,6 +332,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
 			return false;
 		}
 
+		/*
+		 * ignore out-of-order messages or messages that are part of a
+		 * failed transaction
+		 */
+		if (!recv_hdr.somt && !msg->have_somt)
+			return false;
+
 		/* get length contained in this portion */
 		msg->curchunk_len = recv_hdr.msg_len;
 		msg->curchunk_hdrlen = hdrlen;
@@ -737,16 +746,16 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
 			      struct drm_dp_sideband_msg_tx *txmsg)
 {
-	bool ret;
+	unsigned int state;
 
 	/*
 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
 	 * cases we check here are terminal states. For those the barriers
 	 * provided by the wake_up/wait_event pair are enough.
 	 */
-	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
-	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
-	return ret;
+	state = READ_ONCE(txmsg->state);
+	return (state == DRM_DP_SIDEBAND_TX_RX ||
+		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
 }
 
 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
@@ -855,7 +864,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 	mutex_unlock(&mstb->mgr->qlock);
 
 	if (wake_tx)
-		wake_up(&mstb->mgr->tx_waitq);
+		wake_up_all(&mstb->mgr->tx_waitq);
 
 	kref_put(kref, drm_dp_free_mst_branch_device);
 }
@@ -1335,15 +1344,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
 				 u8 *guid)
 {
-	static u8 zero_guid[16];
+	u64 salt;
 
-	if (!memcmp(guid, zero_guid, 16)) {
-		u64 salt = get_jiffies_64();
-		memcpy(&guid[0], &salt, sizeof(u64));
-		memcpy(&guid[8], &salt, sizeof(u64));
-		return false;
-	}
-	return true;
+	if (memchr_inv(guid, 0, 16))
+		return true;
+
+	salt = get_jiffies_64();
+
+	memcpy(&guid[0], &salt, sizeof(u64));
+	memcpy(&guid[8], &salt, sizeof(u64));
+
+	return false;
 }
 
 #if 0
@@ -1510,7 +1521,7 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
-		wake_up(&mgr->tx_waitq);
+		wake_up_all(&mgr->tx_waitq);
 	}
 }
@@ -2164,7 +2175,7 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 {
 	int len;
 	u8 replyblock[32];
@@ -2179,12 +2190,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 			       replyblock, len);
 	if (ret != len) {
 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
-		return;
+		return false;
 	}
 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
 	if (!ret) {
 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
-		return;
+		return false;
 	}
 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
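A note on the check_txmsg_state() change above: the function is used as the condition of a wait_event_timeout() call, so it runs outside mgr->qlock while the transmit path updates txmsg->state under that lock. A minimal sketch of the hazard the single READ_ONCE() snapshot avoids (hypothetical names and types, not the kernel's):

#include <linux/compiler.h>

enum tx_state { TX_QUEUED, TX_RX, TX_TIMEOUT };

struct msg { enum tx_state state; };

/*
 * Racy: two independent loads of ->state. If the writer moves the
 * message from TX_QUEUED to TX_RX between them, the first comparison
 * sees TX_QUEUED and the second sees TX_RX, so both fail and the
 * waiter treats a completed message as still pending.
 */
static bool done_racy(struct msg *m)
{
	return m->state == TX_RX || m->state == TX_TIMEOUT;
}

/* Fixed: one snapshot, compared twice. */
static bool done(struct msg *m)
{
	enum tx_state s = READ_ONCE(m->state);

	return s == TX_RX || s == TX_TIMEOUT;
}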
@@ -2196,21 +2207,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
 				    replyblock, len);
 		if (ret != len) {
-			DRM_DEBUG_KMS("failed to read a chunk\n");
+			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+				      len, ret);
+			return false;
 		}
+
 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
-		if (ret == false)
+		if (!ret) {
 			DRM_DEBUG_KMS("failed to build sideband msg\n");
+			return false;
+		}
+
 		curreply += len;
 		replylen -= len;
 	}
+	return true;
 }
 
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
-	drm_dp_get_one_sb_msg(mgr, false);
+	if (!drm_dp_get_one_sb_msg(mgr, false)) {
+		memset(&mgr->down_rep_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->down_rep_recv.have_eomt) {
 		struct drm_dp_sideband_msg_tx *txmsg;
@@ -2258,7 +2280,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 		mstb->tx_slots[slot] = NULL;
 		mutex_unlock(&mgr->qlock);
 
-		wake_up(&mgr->tx_waitq);
+		wake_up_all(&mgr->tx_waitq);
 	}
 	return ret;
 }
@@ -2266,7 +2288,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
-	drm_dp_get_one_sb_msg(mgr, true);
+
+	if (!drm_dp_get_one_sb_msg(mgr, true)) {
+		memset(&mgr->up_req_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->up_req_recv.have_eomt) {
 		struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2345,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
 		}
 
-		drm_dp_put_mst_branch_device(mstb);
+		if (mstb)
+			drm_dp_put_mst_branch_device(mstb);
+
 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 	}
 	return ret;
@@ -2498,6 +2527,81 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 }
 
 /**
+ * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @port: port to find VCPI slots for
+ * @pbn: bandwidth required for the mode in PBN
+ *
+ * RETURNS:
+ * Total slots in the atomic state assigned for this port, or a negative error code
+ */
+int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+				  struct drm_dp_mst_topology_mgr *mgr,
+				  struct drm_dp_mst_port *port, int pbn)
+{
+	struct drm_dp_mst_topology_state *topology_state;
+	int req_slots;
+
+	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+	if (IS_ERR(topology_state))
+		return PTR_ERR(topology_state);
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (port == NULL)
+		return -EINVAL;
+	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+	DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
+			req_slots, topology_state->avail_slots);
+
+	if (req_slots > topology_state->avail_slots) {
+		drm_dp_put_port(port);
+		return -ENOSPC;
+	}
+
+	topology_state->avail_slots -= req_slots;
+	DRM_DEBUG_KMS("vcpi slots avail=%d\n", topology_state->avail_slots);
+
+	drm_dp_put_port(port);
+	return req_slots;
+}
+EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
+
+/**
+ * drm_dp_atomic_release_vcpi_slots() - Release allocated VCPI slots
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @slots: number of VCPI slots to release
+ *
+ * RETURNS:
+ * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots, or a
+ * negative error code
+ */
+int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+				     struct drm_dp_mst_topology_mgr *mgr,
+				     int slots)
+{
+	struct drm_dp_mst_topology_state *topology_state;
+
+	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+	if (IS_ERR(topology_state))
+		return PTR_ERR(topology_state);
+
+	/* We cannot rely on port->vcpi.num_slots to update
+	 * topology_state->avail_slots as the port may not exist if the parent
+	 * branch device was unplugged. This should be fixed by tracking
+	 * per-port slot allocation in drm_dp_mst_topology_state instead of
+	 * depending on the caller to tell us how many slots to release.
+	 */
+	topology_state->avail_slots += slots;
+	DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
+			slots, topology_state->avail_slots);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
+
+/**
  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
  * @mgr: manager for this port
  * @port: port to allocate a virtual channel for.
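The drm_dp_atomic_find_vcpi_slots() / drm_dp_atomic_release_vcpi_slots() pair added above is meant to run in a driver's atomic check phase, so slot exhaustion is reported as -ENOSPC before any hardware is touched. A hedged usage sketch (hypothetical driver function; how the driver derives mgr, port and pbn is omitted):

/* Hypothetical check-phase fragment, not part of this patch. */
static int example_mst_check_bw(struct drm_atomic_state *state,
				struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port,
				int pbn, int old_slots, bool enable)
{
	int slots;

	if (!enable)
		/* disabling: hand the previous allocation back */
		return drm_dp_atomic_release_vcpi_slots(state, mgr,
							old_slots);

	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;

	/* stash 'slots' in driver state for the eventual release */
	return 0;
}

Because the topology state does not yet track per-port allocations (see the comment in drm_dp_atomic_release_vcpi_slots()), the caller must remember how many slots it took; if the overall atomic check fails, the duplicated topology state is simply thrown away, so nothing needs unwinding.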
@@ -2761,16 +2865,15 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
 				  char *buf)
 {
-	int ret;
 	int i;
-	for (i = 0; i < 4; i++) {
-		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
-		if (ret != 16)
-			break;
+
+	for (i = 0; i < 64; i += 16) {
+		if (drm_dp_dpcd_read(mgr->aux,
+				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+				     &buf[i], 16) != 16)
+			return false;
 	}
-	if (i == 4)
-		return true;
-	return false;
+	return true;
 }
 
 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
@@ -2834,42 +2937,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 	mutex_lock(&mgr->lock);
 	if (mgr->mst_primary) {
 		u8 buf[64];
-		bool bret;
 		int ret;
+
 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
-		seq_printf(m, "dpcd: ");
-		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
-			seq_printf(m, "%02x ", buf[i]);
-		seq_printf(m, "\n");
+		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
-		seq_printf(m, "faux/mst: ");
-		for (i = 0; i < 2; i++)
-			seq_printf(m, "%02x ", buf[i]);
-		seq_printf(m, "\n");
+		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
-		seq_printf(m, "mst ctrl: ");
-		for (i = 0; i < 1; i++)
-			seq_printf(m, "%02x ", buf[i]);
-		seq_printf(m, "\n");
+		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
 
 		/* dump the standard OUI branch header */
 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
-		seq_printf(m, "branch oui: ");
-		for (i = 0; i < 0x3; i++)
-			seq_printf(m, "%02x", buf[i]);
-		seq_printf(m, " devid: ");
+		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
 		for (i = 0x3; i < 0x8 && buf[i]; i++)
 			seq_printf(m, "%c", buf[i]);
-
-		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
-		seq_printf(m, "\n");
-		bret = dump_dp_payload_table(mgr, buf);
-		if (bret == true) {
-			seq_printf(m, "payload table: ");
-			for (i = 0; i < 63; i++)
-				seq_printf(m, "%02x ", buf[i]);
-			seq_printf(m, "\n");
-		}
+		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
+			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
+		if (dump_dp_payload_table(mgr, buf))
+			seq_printf(m, "payload table: %*ph\n", 63, buf);
 	}
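The seq_printf() conversions above lean on the kernel's %*ph printf extension, which hex-dumps a small buffer (at most 64 bytes) in a single call; the %*phN variant prints the bytes without separators. For example:

u8 buf[4] = { 0x12, 0x34, 0x56, 0x78 };

seq_printf(m, "dpcd: %*ph\n", 4, buf);		/* "dpcd: 12 34 56 78" */
seq_printf(m, "oui: %*phN\n", 3, buf);		/* "oui: 123456" */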
@@ -2936,6 +3021,59 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 		(*mgr->cbs->hotplug)(mgr);
 }
 
+static struct drm_private_state *
+drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
+{
+	struct drm_dp_mst_topology_state *state;
+
+	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+	return &state->base;
+}
+
+static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
+				     struct drm_private_state *state)
+{
+	struct drm_dp_mst_topology_state *mst_state =
+		to_dp_mst_topology_state(state);
+
+	kfree(mst_state);
+}
+
+static const struct drm_private_state_funcs mst_state_funcs = {
+	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
+	.atomic_destroy_state = drm_dp_mst_destroy_state,
+};
+
+/**
+ * drm_atomic_get_mst_topology_state: get MST topology state
+ *
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
+ * atomic state vtable so that the private object state returned is that of an
+ * MST topology object. The caller is expected to take care of the locking, so
+ * warn if we don't hold the connection_mutex.
+ *
+ * RETURNS:
+ *
+ * The MST topology state or error pointer.
+ */
+struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+								    struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_device *dev = mgr->dev;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
+}
+EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
+
 /**
  * drm_dp_mst_topology_mgr_init - initialise a topology manager
  * @mgr: manager struct to initialise
@@ -2952,6 +3090,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 				 int max_dpcd_transaction_bytes,
 				 int max_payloads, int conn_base_id)
 {
+	struct drm_dp_mst_topology_state *mst_state;
+
 	mutex_init(&mgr->lock);
 	mutex_init(&mgr->qlock);
 	mutex_init(&mgr->payload_lock);
@@ -2980,6 +3120,19 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	if (test_calc_pbn_mode() < 0)
 		DRM_ERROR("MST PBN self-test failed\n");
 
+	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
+	if (mst_state == NULL)
+		return -ENOMEM;
+
+	mst_state->mgr = mgr;
+
+	/* max. time slots - one slot for MTP header */
+	mst_state->avail_slots = 63;
+
+	drm_atomic_private_obj_init(&mgr->base,
+				    &mst_state->base,
+				    &mst_state_funcs);
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
@@ -3000,6 +3153,8 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_unlock(&mgr->payload_lock);
 	mgr->dev = NULL;
 	mgr->aux = NULL;
+	drm_atomic_private_obj_fini(&mgr->base);
+	mgr->funcs = NULL;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
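The duplicate/destroy hooks registered above are what let the atomic core treat the topology manager as a private object: every drm_atomic_state that touches it gets its own kmemdup() copy of the topology state, so avail_slots arithmetic during a failed check never leaks into the committed state. A sketch of reading that per-state copy directly (hypothetical fragment; connection_mutex must already be held, since the helper WARNs otherwise):

/* Hypothetical fragment inside an atomic check function. */
struct drm_dp_mst_topology_state *mst_state;

mst_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(mst_state))
	return PTR_ERR(mst_state);

/* this is the duplicated copy, not the committed state */
DRM_DEBUG_KMS("avail_slots=%d\n", mst_state->avail_slots);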