Diffstat (limited to 'drivers/infiniband/core/mad.c')
-rw-r--r--	drivers/infiniband/core/mad.c	52
1 file changed, 44 insertions, 8 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 192ee3dafb80..f8f53bb90837 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -40,9 +40,11 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/security.h>
 #include <rdma/ib_cache.h>
 
 #include "mad_priv.h"
+#include "core_priv.h"
 #include "mad_rmpp.h"
 #include "smi.h"
 #include "opa_smi.h"
@@ -369,6 +371,12 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	atomic_set(&mad_agent_priv->refcount, 1);
 	init_completion(&mad_agent_priv->comp);
 
+	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
+	if (ret2) {
+		ret = ERR_PTR(ret2);
+		goto error4;
+	}
+
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
 
@@ -386,7 +394,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 				if (method) {
 					if (method_in_use(&method,
							   mad_reg_req))
-						goto error4;
+						goto error5;
 				}
 			}
 			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -402,14 +410,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 					if (is_vendor_method_in_use(
 							vendor_class,
 							mad_reg_req))
-						goto error4;
+						goto error5;
 				}
 			}
 			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
 		}
 		if (ret2) {
 			ret = ERR_PTR(ret2);
-			goto error4;
+			goto error5;
 		}
 	}
 
@@ -418,9 +426,10 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
 	return &mad_agent_priv->agent;
-
-error4:
+error5:
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
+error4:
 	kfree(reg_req);
 error3:
 	kfree(mad_agent_priv);
@@ -491,6 +500,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 	struct ib_mad_agent *ret;
 	struct ib_mad_snoop_private *mad_snoop_priv;
 	int qpn;
+	int err;
 
 	/* Validate parameters */
 	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
@@ -525,17 +535,25 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 	mad_snoop_priv->agent.port_num = port_num;
 	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
 	init_completion(&mad_snoop_priv->comp);
+
+	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
+	if (err) {
+		ret = ERR_PTR(err);
+		goto error2;
+	}
+
 	mad_snoop_priv->snoop_index = register_snoop_agent(
 						&port_priv->qp_info[qpn],
 						mad_snoop_priv);
 	if (mad_snoop_priv->snoop_index < 0) {
 		ret = ERR_PTR(mad_snoop_priv->snoop_index);
-		goto error2;
+		goto error3;
 	}
 
 	atomic_set(&mad_snoop_priv->refcount, 1);
 	return &mad_snoop_priv->agent;
-
+error3:
+	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
 error2:
 	kfree(mad_snoop_priv);
 error1:
@@ -581,6 +599,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	deref_mad_agent(mad_agent_priv);
 	wait_for_completion(&mad_agent_priv->comp);
 
+	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
+
 	kfree(mad_agent_priv->reg_req);
 	kfree(mad_agent_priv);
 }
@@ -599,6 +619,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
 	deref_snoop_agent(mad_snoop_priv);
 	wait_for_completion(&mad_snoop_priv->comp);
 
+	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
+
 	kfree(mad_snoop_priv);
 }
 
@@ -1215,12 +1237,16 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 
 	/* Walk list of send WRs and post each on send list */
 	for (; send_buf; send_buf = next_send_buf) {
-
 		mad_send_wr = container_of(send_buf,
 					   struct ib_mad_send_wr_private,
 					   send_buf);
 		mad_agent_priv = mad_send_wr->mad_agent_priv;
 
+		ret = ib_mad_enforce_security(mad_agent_priv,
+					      mad_send_wr->send_wr.pkey_index);
+		if (ret)
+			goto error;
+
 		if (!send_buf->mad_agent->send_handler ||
 		    (send_buf->timeout_ms &&
 		     !send_buf->mad_agent->recv_handler)) {
@@ -1946,6 +1972,14 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags;
+	int ret;
+
+	ret = ib_mad_enforce_security(mad_agent_priv,
+				      mad_recv_wc->wc->pkey_index);
+	if (ret) {
+		ib_free_recv_mad(mad_recv_wc);
+		deref_mad_agent(mad_agent_priv);
+	}
 
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -2003,6 +2037,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 						   mad_recv_wc);
 		deref_mad_agent(mad_agent_priv);
 	}
+
+	return;
 }
 
 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,