author    Dmitry Torokhov <dtor@insightbb.com>  2007-05-01 00:24:54 -0400
committer Dmitry Torokhov <dtor@insightbb.com>  2007-05-01 00:24:54 -0400
commit    bc95f3669f5e6f63cf0b84fe4922c3c6dd4aa775
tree      427fcf2a7287c16d4b5aa6cbf494d59579a6a8b1 /drivers/infiniband/core
parent    3d29cdff999c37b3876082278a8134a0642a02cd
parent    dc87c3985e9b442c60994308a96f887579addc39
Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/usb/input/Makefile
drivers/usb/input/gtco.c
Diffstat (limited to 'drivers/infiniband/core')
 drivers/infiniband/core/Makefile     |   2
 drivers/infiniband/core/cm.c         |  10
 drivers/infiniband/core/cma.c        | 363
 drivers/infiniband/core/fmr_pool.c   |   4
 drivers/infiniband/core/iwcm.c       |  47
 drivers/infiniband/core/mad.c        |  34
 drivers/infiniband/core/multicast.c  | 837
 drivers/infiniband/core/sa.h         |  66
 drivers/infiniband/core/sa_query.c   |  54
 drivers/infiniband/core/smi.c        |  86
 drivers/infiniband/core/smi.h        |  34
 drivers/infiniband/core/sysfs.c      |   3
 drivers/infiniband/core/ucm.c        |  23
 drivers/infiniband/core/ucma.c       | 228
 drivers/infiniband/core/user_mad.c   |  20
 drivers/infiniband/core/uverbs_cmd.c |   4
 drivers/infiniband/core/verbs.c      |   2
 17 files changed, 1606 insertions(+), 211 deletions(-)
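The most visible addition in this merge is the rdma_cm multicast join/leave API (rdma_join_multicast()/rdma_leave_multicast() in the cma.c hunks below, built on the new ib_multicast module in multicast.c). What follows is a minimal consumer sketch, not code from this commit: it assumes only the signatures visible in this diff, the id must already be bound or resolved to a local address before joining, and the rdma_bind_addr()/rdma_resolve_addr() steps are elided. Join completion is reported asynchronously through the cm event handler as RDMA_CM_EVENT_MULTICAST_JOIN, with the caller's context pointer echoed back in param.ud.private_data.

    #include <linux/err.h>
    #include <rdma/rdma_cm.h>

    static int mc_event_handler(struct rdma_cm_id *id,
                                struct rdma_cm_event *event)
    {
            switch (event->event) {
            case RDMA_CM_EVENT_MULTICAST_JOIN:
                    /* param.ud carries what is needed to address the
                     * group: ah_attr, the 0xFFFFFF multicast QPN, and
                     * the qkey reported by the SA. */
                    printk(KERN_INFO "joined group, qkey 0x%x\n",
                           event->param.ud.qkey);
                    break;
            case RDMA_CM_EVENT_MULTICAST_ERROR:
                    printk(KERN_ERR "multicast join failed: %d\n",
                           event->status);
                    break;
            default:
                    break;
            }
            return 0;       /* nonzero would destroy the id */
    }

    /* Hypothetical helper; mcast_addr is the IP multicast address. */
    static struct rdma_cm_id *join_group(struct sockaddr *mcast_addr)
    {
            struct rdma_cm_id *id;
            int ret;

            id = rdma_create_id(mc_event_handler, NULL, RDMA_PS_UDP);
            if (IS_ERR(id))
                    return id;

            /* rdma_bind_addr()/rdma_resolve_addr() on a local address
             * must complete first; elided here. */

            ret = rdma_join_multicast(id, mcast_addr, NULL);
            if (ret) {
                    rdma_destroy_id(id);
                    return ERR_PTR(ret);
            }
            return id;
    }

rdma_leave_multicast(id, mcast_addr) undoes the join; as the cma.c hunk below shows, it also detaches any attached QP from the group before freeing the SA multicast state, and rdma_destroy_id() leaves any remaining groups automatically.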
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 50fb1cd447b7..189e5d4b9b17 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -12,7 +12,7 @@ ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ ib_mad-y := mad.o smi.o agent.o mad_rmpp.o -ib_sa-y := sa_query.o +ib_sa-y := sa_query.o multicast.o ib_cm-y := cm.o diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index d446998b12a4..842cd0b53e91 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -88,7 +88,6 @@ struct cm_port { struct cm_device { struct list_head list; struct ib_device *device; - __be64 ca_guid; struct cm_port port[0]; }; @@ -739,8 +738,8 @@ retest: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); spin_unlock_irqrestore(&cm_id_priv->lock, flags); ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, - &cm_id_priv->av.port->cm_dev->ca_guid, - sizeof cm_id_priv->av.port->cm_dev->ca_guid, + &cm_id_priv->id.device->node_guid, + sizeof cm_id_priv->id.device->node_guid, NULL, 0); break; case IB_CM_REQ_RCVD: @@ -883,7 +882,7 @@ static void cm_format_req(struct cm_req_msg *req_msg, req_msg->local_comm_id = cm_id_priv->id.local_id; req_msg->service_id = param->service_id; - req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid; + req_msg->local_ca_guid = cm_id_priv->id.device->node_guid; cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num)); cm_req_set_resp_res(req_msg, param->responder_resources); cm_req_set_init_depth(req_msg, param->initiator_depth); @@ -1442,7 +1441,7 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg, cm_rep_set_flow_ctrl(rep_msg, param->flow_control); cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count); cm_rep_set_srq(rep_msg, param->srq); - rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid; + rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid; if (param->private_data && param->private_data_len) memcpy(rep_msg->private_data, param->private_data, @@ -3385,7 +3384,6 @@ static void cm_add_one(struct ib_device *device) return; cm_dev->device = device; - cm_dev->ca_guid = device->node_guid; set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); for (i = 1; i <= device->phys_port_cnt; i++) { diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index db88e609bf42..fde92ce45153 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -71,12 +71,12 @@ static struct workqueue_struct *cma_wq; static DEFINE_IDR(sdp_ps); static DEFINE_IDR(tcp_ps); static DEFINE_IDR(udp_ps); +static DEFINE_IDR(ipoib_ps); static int next_port; struct cma_device { struct list_head list; struct ib_device *device; - __be64 node_guid; struct completion comp; atomic_t refcount; struct list_head id_list; @@ -116,6 +116,7 @@ struct rdma_id_private { struct list_head list; struct list_head listen_list; struct cma_device *cma_dev; + struct list_head mc_list; enum cma_state state; spinlock_t lock; @@ -134,10 +135,23 @@ struct rdma_id_private { } cm_id; u32 seq_num; + u32 qkey; u32 qp_num; u8 srq; }; +struct cma_multicast { + struct rdma_id_private *id_priv; + union { + struct ib_sa_multicast *ib; + } multicast; + struct list_head list; + void *context; + struct sockaddr addr; + u8 pad[sizeof(struct sockaddr_in6) - + sizeof(struct sockaddr)]; +}; + struct cma_work { struct work_struct work; struct rdma_id_private *id; @@ -243,6 +257,11 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver) hh->ip_version = (ip_ver << 4) | 
(hh->ip_version & 0xF); } +static inline int cma_is_ud_ps(enum rdma_port_space ps) +{ + return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB); +} + static void cma_attach_to_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev) { @@ -265,19 +284,41 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv) id_priv->cma_dev = NULL; } +static int cma_set_qkey(struct ib_device *device, u8 port_num, + enum rdma_port_space ps, + struct rdma_dev_addr *dev_addr, u32 *qkey) +{ + struct ib_sa_mcmember_rec rec; + int ret = 0; + + switch (ps) { + case RDMA_PS_UDP: + *qkey = RDMA_UDP_QKEY; + break; + case RDMA_PS_IPOIB: + ib_addr_get_mgid(dev_addr, &rec.mgid); + ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec); + *qkey = be32_to_cpu(rec.qkey); + break; + default: + break; + } + return ret; +} + static int cma_acquire_dev(struct rdma_id_private *id_priv) { - enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type; + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct cma_device *cma_dev; union ib_gid gid; int ret = -ENODEV; - switch (rdma_node_get_transport(dev_type)) { + switch (rdma_node_get_transport(dev_addr->dev_type)) { case RDMA_TRANSPORT_IB: - ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); + ib_addr_get_sgid(dev_addr, &gid); break; case RDMA_TRANSPORT_IWARP: - iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); + iw_addr_get_sgid(dev_addr, &gid); break; default: return -ENODEV; @@ -287,7 +328,12 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv) ret = ib_find_cached_gid(cma_dev->device, &gid, &id_priv->id.port_num, NULL); if (!ret) { - cma_attach_to_dev(id_priv, cma_dev); + ret = cma_set_qkey(cma_dev->device, + id_priv->id.port_num, + id_priv->id.ps, dev_addr, + &id_priv->qkey); + if (!ret) + cma_attach_to_dev(id_priv, cma_dev); break; } } @@ -325,40 +371,50 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, init_waitqueue_head(&id_priv->wait_remove); atomic_set(&id_priv->dev_remove, 0); INIT_LIST_HEAD(&id_priv->listen_list); + INIT_LIST_HEAD(&id_priv->mc_list); get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); return &id_priv->id; } EXPORT_SYMBOL(rdma_create_id); -static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) +static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) { struct ib_qp_attr qp_attr; - struct rdma_dev_addr *dev_addr; - int ret; + int qp_attr_mask, ret; - dev_addr = &id_priv->id.route.addr.dev_addr; - ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, - ib_addr_get_pkey(dev_addr), - &qp_attr.pkey_index); + qp_attr.qp_state = IB_QPS_INIT; + ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) return ret; - qp_attr.qp_state = IB_QPS_INIT; - qp_attr.qp_access_flags = 0; - qp_attr.port_num = id_priv->id.port_num; - return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS | - IB_QP_PKEY_INDEX | IB_QP_PORT); + ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); + if (ret) + return ret; + + qp_attr.qp_state = IB_QPS_RTR; + ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); + if (ret) + return ret; + + qp_attr.qp_state = IB_QPS_RTS; + qp_attr.sq_psn = 0; + ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); + + return ret; } -static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) +static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) { struct ib_qp_attr qp_attr; + int qp_attr_mask, ret; qp_attr.qp_state = 
IB_QPS_INIT; - qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE; + ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); + if (ret) + return ret; - return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS); + return ib_modify_qp(qp, &qp_attr, qp_attr_mask); } int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, @@ -376,18 +432,10 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, if (IS_ERR(qp)) return PTR_ERR(qp); - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: - ret = cma_init_ib_qp(id_priv, qp); - break; - case RDMA_TRANSPORT_IWARP: - ret = cma_init_iw_qp(id_priv, qp); - break; - default: - ret = -ENOSYS; - break; - } - + if (cma_is_ud_ps(id_priv->id.ps)) + ret = cma_init_ud_qp(id_priv, qp); + else + ret = cma_init_conn_qp(id_priv, qp); if (ret) goto err; @@ -460,23 +508,55 @@ static int cma_modify_qp_err(struct rdma_cm_id *id) return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE); } +static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, + struct ib_qp_attr *qp_attr, int *qp_attr_mask) +{ + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; + int ret; + + ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, + ib_addr_get_pkey(dev_addr), + &qp_attr->pkey_index); + if (ret) + return ret; + + qp_attr->port_num = id_priv->id.port_num; + *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; + + if (cma_is_ud_ps(id_priv->id.ps)) { + qp_attr->qkey = id_priv->qkey; + *qp_attr_mask |= IB_QP_QKEY; + } else { + qp_attr->qp_access_flags = 0; + *qp_attr_mask |= IB_QP_ACCESS_FLAGS; + } + return 0; +} + int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct rdma_id_private *id_priv; - int ret; + int ret = 0; id_priv = container_of(id, struct rdma_id_private, id); switch (rdma_node_get_transport(id_priv->id.device->node_type)) { case RDMA_TRANSPORT_IB: - ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, - qp_attr_mask); + if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps)) + ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); + else + ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, + qp_attr_mask); if (qp_attr->qp_state == IB_QPS_RTR) qp_attr->rq_psn = id_priv->seq_num; break; case RDMA_TRANSPORT_IWARP: - ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, - qp_attr_mask); + if (!id_priv->cm_id.iw) { + qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE; + *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; + } else + ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, + qp_attr_mask); break; default: ret = -ENOSYS; @@ -698,6 +778,19 @@ static void cma_release_port(struct rdma_id_private *id_priv) mutex_unlock(&lock); } +static void cma_leave_mc_groups(struct rdma_id_private *id_priv) +{ + struct cma_multicast *mc; + + while (!list_empty(&id_priv->mc_list)) { + mc = container_of(id_priv->mc_list.next, + struct cma_multicast, list); + list_del(&mc->list); + ib_sa_free_multicast(mc->multicast.ib); + kfree(mc); + } +} + void rdma_destroy_id(struct rdma_cm_id *id) { struct rdma_id_private *id_priv; @@ -722,6 +815,7 @@ void rdma_destroy_id(struct rdma_cm_id *id) default: break; } + cma_leave_mc_groups(id_priv); mutex_lock(&lock); cma_detach_from_dev(id_priv); } @@ -972,7 +1066,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) memset(&event, 0, sizeof event); offset = cma_user_data_offset(listen_id->id.ps); event.event = RDMA_CM_EVENT_CONNECT_REQUEST; - if (listen_id->id.ps == RDMA_PS_UDP) { + 
if (cma_is_ud_ps(listen_id->id.ps)) { conn_id = cma_new_udp_id(&listen_id->id, ib_event); event.param.ud.private_data = ib_event->private_data + offset; event.param.ud.private_data_len = @@ -1397,11 +1491,13 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, ib_addr_get_dgid(addr, &path_rec.dgid); path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr)); path_rec.numb_path = 1; + path_rec.reversible = 1; id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, id_priv->id.port_num, &path_rec, IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | - IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH, + IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | + IB_SA_PATH_REC_REVERSIBLE, timeout_ms, GFP_KERNEL, cma_query_handler, work, &id_priv->query); @@ -1847,6 +1943,9 @@ static int cma_get_port(struct rdma_id_private *id_priv) case RDMA_PS_UDP: ps = &udp_ps; break; + case RDMA_PS_IPOIB: + ps = &ipoib_ps; + break; default: return -EPROTONOSUPPORT; } @@ -1961,7 +2060,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, event.status = ib_event->param.sidr_rep_rcvd.status; break; } - if (rep->qkey != RDMA_UD_QKEY) { + if (id_priv->qkey != rep->qkey) { event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = -EINVAL; break; @@ -2160,7 +2259,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) switch (rdma_node_get_transport(id->device->node_type)) { case RDMA_TRANSPORT_IB: - if (id->ps == RDMA_PS_UDP) + if (cma_is_ud_ps(id->ps)) ret = cma_resolve_ib_udp(id_priv, conn_param); else ret = cma_connect_ib(id_priv, conn_param); @@ -2256,7 +2355,7 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv, rep.status = status; if (status == IB_SIDR_SUCCESS) { rep.qp_num = id_priv->qp_num; - rep.qkey = RDMA_UD_QKEY; + rep.qkey = id_priv->qkey; } rep.private_data = private_data; rep.private_data_len = private_data_len; @@ -2280,7 +2379,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) switch (rdma_node_get_transport(id->device->node_type)) { case RDMA_TRANSPORT_IB: - if (id->ps == RDMA_PS_UDP) + if (cma_is_ud_ps(id->ps)) ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, conn_param->private_data, conn_param->private_data_len); @@ -2341,7 +2440,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, switch (rdma_node_get_transport(id->device->node_type)) { case RDMA_TRANSPORT_IB: - if (id->ps == RDMA_PS_UDP) + if (cma_is_ud_ps(id->ps)) ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, private_data, private_data_len); else @@ -2392,6 +2491,178 @@ out: } EXPORT_SYMBOL(rdma_disconnect); +static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) +{ + struct rdma_id_private *id_priv; + struct cma_multicast *mc = multicast->context; + struct rdma_cm_event event; + int ret; + + id_priv = mc->id_priv; + atomic_inc(&id_priv->dev_remove); + if (!cma_comp(id_priv, CMA_ADDR_BOUND) && + !cma_comp(id_priv, CMA_ADDR_RESOLVED)) + goto out; + + if (!status && id_priv->id.qp) + status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, + multicast->rec.mlid); + + memset(&event, 0, sizeof event); + event.status = status; + event.param.ud.private_data = mc->context; + if (!status) { + event.event = RDMA_CM_EVENT_MULTICAST_JOIN; + ib_init_ah_from_mcmember(id_priv->id.device, + id_priv->id.port_num, &multicast->rec, + &event.param.ud.ah_attr); + event.param.ud.qp_num = 0xFFFFFF; + event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); + } else + event.event = RDMA_CM_EVENT_MULTICAST_ERROR; + + ret = 
id_priv->id.event_handler(&id_priv->id, &event); + if (ret) { + cma_exch(id_priv, CMA_DESTROYING); + cma_release_remove(id_priv); + rdma_destroy_id(&id_priv->id); + return 0; + } +out: + cma_release_remove(id_priv); + return 0; +} + +static void cma_set_mgid(struct rdma_id_private *id_priv, + struct sockaddr *addr, union ib_gid *mgid) +{ + unsigned char mc_map[MAX_ADDR_LEN]; + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; + struct sockaddr_in *sin = (struct sockaddr_in *) addr; + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; + + if (cma_any_addr(addr)) { + memset(mgid, 0, sizeof *mgid); + } else if ((addr->sa_family == AF_INET6) && + ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) == + 0xFF10A01B)) { + /* IPv6 address is an SA assigned MGID. */ + memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); + } else { + ip_ib_mc_map(sin->sin_addr.s_addr, mc_map); + if (id_priv->id.ps == RDMA_PS_UDP) + mc_map[7] = 0x01; /* Use RDMA CM signature */ + mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8; + mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr); + *mgid = *(union ib_gid *) (mc_map + 4); + } +} + +static int cma_join_ib_multicast(struct rdma_id_private *id_priv, + struct cma_multicast *mc) +{ + struct ib_sa_mcmember_rec rec; + struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; + ib_sa_comp_mask comp_mask; + int ret; + + ib_addr_get_mgid(dev_addr, &rec.mgid); + ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, + &rec.mgid, &rec); + if (ret) + return ret; + + cma_set_mgid(id_priv, &mc->addr, &rec.mgid); + if (id_priv->id.ps == RDMA_PS_UDP) + rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); + ib_addr_get_sgid(dev_addr, &rec.port_gid); + rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); + rec.join_state = 1; + + comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | + IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | + IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | + IB_SA_MCMEMBER_REC_FLOW_LABEL | + IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; + + mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, + id_priv->id.port_num, &rec, + comp_mask, GFP_KERNEL, + cma_ib_mc_handler, mc); + if (IS_ERR(mc->multicast.ib)) + return PTR_ERR(mc->multicast.ib); + + return 0; +} + +int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, + void *context) +{ + struct rdma_id_private *id_priv; + struct cma_multicast *mc; + int ret; + + id_priv = container_of(id, struct rdma_id_private, id); + if (!cma_comp(id_priv, CMA_ADDR_BOUND) && + !cma_comp(id_priv, CMA_ADDR_RESOLVED)) + return -EINVAL; + + mc = kmalloc(sizeof *mc, GFP_KERNEL); + if (!mc) + return -ENOMEM; + + memcpy(&mc->addr, addr, ip_addr_size(addr)); + mc->context = context; + mc->id_priv = id_priv; + + spin_lock(&id_priv->lock); + list_add(&mc->list, &id_priv->mc_list); + spin_unlock(&id_priv->lock); + + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: + ret = cma_join_ib_multicast(id_priv, mc); + break; + default: + ret = -ENOSYS; + break; + } + + if (ret) { + spin_lock_irq(&id_priv->lock); + list_del(&mc->list); + spin_unlock_irq(&id_priv->lock); + kfree(mc); + } + return ret; +} +EXPORT_SYMBOL(rdma_join_multicast); + +void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) +{ + struct rdma_id_private *id_priv; + struct cma_multicast *mc; + + id_priv = container_of(id, struct rdma_id_private, id); + spin_lock_irq(&id_priv->lock); + list_for_each_entry(mc, &id_priv->mc_list, list) { + if 
(!memcmp(&mc->addr, addr, ip_addr_size(addr))) { + list_del(&mc->list); + spin_unlock_irq(&id_priv->lock); + + if (id->qp) + ib_detach_mcast(id->qp, + &mc->multicast.ib->rec.mgid, + mc->multicast.ib->rec.mlid); + ib_sa_free_multicast(mc->multicast.ib); + kfree(mc); + return; + } + } + spin_unlock_irq(&id_priv->lock); +} +EXPORT_SYMBOL(rdma_leave_multicast); + static void cma_add_one(struct ib_device *device) { struct cma_device *cma_dev; @@ -2402,7 +2673,6 @@ static void cma_add_one(struct ib_device *device) return; cma_dev->device = device; - cma_dev->node_guid = device->node_guid; init_completion(&cma_dev->comp); atomic_set(&cma_dev->refcount, 1); @@ -2522,6 +2792,7 @@ static void cma_cleanup(void) idr_destroy(&sdp_ps); idr_destroy(&tcp_ps); idr_destroy(&udp_ps); + idr_destroy(&ipoib_ps); } module_init(cma_init); diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 8926a2bd4a87..1d796e7c8199 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c @@ -301,7 +301,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, { struct ib_pool_fmr *fmr; - struct ib_fmr_attr attr = { + struct ib_fmr_attr fmr_attr = { .max_pages = params->max_pages_per_fmr, .max_maps = pool->max_remaps, .page_shift = params->page_shift @@ -321,7 +321,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, fmr->ref_count = 0; INIT_HLIST_NODE(&fmr->cache_node); - fmr->fmr = ib_alloc_fmr(pd, params->access, &attr); + fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr); if (IS_ERR(fmr->fmr)) { printk(KERN_WARNING "fmr_create failed for FMR %d", i); kfree(fmr); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 1039ad57d53b..891d1fa7b2eb 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -146,6 +146,12 @@ static int copy_private_data(struct iw_cm_event *event) return 0; } +static void free_cm_id(struct iwcm_id_private *cm_id_priv) +{ + dealloc_work_entries(cm_id_priv); + kfree(cm_id_priv); +} + /* * Release a reference on cm_id. If the last reference is being * released, enable the waiting thread (in iw_destroy_cm_id) to @@ -153,21 +159,14 @@ static int copy_private_data(struct iw_cm_event *event) */ static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv) { - int ret = 0; - BUG_ON(atomic_read(&cm_id_priv->refcount)==0); if (atomic_dec_and_test(&cm_id_priv->refcount)) { BUG_ON(!list_empty(&cm_id_priv->work_list)); - if (waitqueue_active(&cm_id_priv->destroy_comp.wait)) { - BUG_ON(cm_id_priv->state != IW_CM_STATE_DESTROYING); - BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, - &cm_id_priv->flags)); - ret = 1; - } complete(&cm_id_priv->destroy_comp); + return 1; } - return ret; + return 0; } static void add_ref(struct iw_cm_id *cm_id) @@ -181,7 +180,11 @@ static void rem_ref(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); - iwcm_deref_id(cm_id_priv); + if (iwcm_deref_id(cm_id_priv) && + test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) { + BUG_ON(!list_empty(&cm_id_priv->work_list)); + free_cm_id(cm_id_priv); + } } static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event); @@ -355,7 +358,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) case IW_CM_STATE_CONN_RECV: /* * App called destroy before/without calling accept after - * receiving connection request event notification. 
+ * receiving connection request event notification or + * returned non zero from the event callback function. + * In either case, must tell the provider to reject. */ cm_id_priv->state = IW_CM_STATE_DESTROYING; break; @@ -391,9 +396,7 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id) wait_for_completion(&cm_id_priv->destroy_comp); - dealloc_work_entries(cm_id_priv); - - kfree(cm_id_priv); + free_cm_id(cm_id_priv); } EXPORT_SYMBOL(iw_destroy_cm_id); @@ -647,10 +650,11 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, /* Call the client CM handler */ ret = cm_id->cm_handler(cm_id, iw_event); if (ret) { + iw_cm_reject(cm_id, NULL, 0); set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); destroy_cm_id(cm_id); if (atomic_read(&cm_id_priv->refcount)==0) - kfree(cm_id); + free_cm_id(cm_id_priv); } out: @@ -854,13 +858,12 @@ static void cm_work_handler(struct work_struct *_work) destroy_cm_id(&cm_id_priv->id); } BUG_ON(atomic_read(&cm_id_priv->refcount)==0); - if (iwcm_deref_id(cm_id_priv)) - return; - - if (atomic_read(&cm_id_priv->refcount)==0 && - test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) { - dealloc_work_entries(cm_id_priv); - kfree(cm_id_priv); + if (iwcm_deref_id(cm_id_priv)) { + if (test_bit(IWCM_F_CALLBACK_DESTROY, + &cm_id_priv->flags)) { + BUG_ON(!list_empty(&cm_id_priv->work_list)); + free_cm_id(cm_id_priv); + } return; } spin_lock_irqsave(&cm_id_priv->lock, flags); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 13efd4170349..6edfecf1be72 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. * @@ -31,7 +31,6 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $ */ #include <linux/dma-mapping.h> #include <rdma/ib_cache.h> @@ -668,7 +667,7 @@ static void build_smp_wc(struct ib_qp *qp, static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr) { - int ret; + int ret = 0; struct ib_smp *smp = mad_send_wr->send_buf.mad; unsigned long flags; struct ib_mad_local_private *local; @@ -688,14 +687,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, */ if ((ib_get_smp_direction(smp) ? 
smp->dr_dlid : smp->dr_slid) == IB_LID_PERMISSIVE && - !smi_handle_dr_smp_send(smp, device->node_type, port_num)) { + smi_handle_dr_smp_send(smp, device->node_type, port_num) == + IB_SMI_DISCARD) { ret = -EINVAL; printk(KERN_ERR PFX "Invalid directed route\n"); goto out; } + /* Check to post send on QP or process locally */ - ret = smi_check_local_smp(smp, device); - if (!ret) + if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD) goto out; local = kmalloc(sizeof *local, GFP_ATOMIC); @@ -1874,18 +1874,22 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { - if (!smi_handle_dr_smp_recv(&recv->mad.smp, - port_priv->device->node_type, - port_priv->port_num, - port_priv->device->phys_port_cnt)) + if (smi_handle_dr_smp_recv(&recv->mad.smp, + port_priv->device->node_type, + port_priv->port_num, + port_priv->device->phys_port_cnt) == + IB_SMI_DISCARD) goto out; - if (!smi_check_forward_dr_smp(&recv->mad.smp)) + + if (smi_check_forward_dr_smp(&recv->mad.smp) == IB_SMI_LOCAL) goto local; - if (!smi_handle_dr_smp_send(&recv->mad.smp, - port_priv->device->node_type, - port_priv->port_num)) + + if (smi_handle_dr_smp_send(&recv->mad.smp, + port_priv->device->node_type, + port_priv->port_num) == IB_SMI_DISCARD) goto out; - if (!smi_check_local_smp(&recv->mad.smp, port_priv->device)) + + if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD) goto out; } diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c new file mode 100644 index 000000000000..4a579b3a1c90 --- /dev/null +++ b/drivers/infiniband/core/multicast.c @@ -0,0 +1,837 @@ +/* + * Copyright (c) 2006 Intel Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/completion.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/bitops.h> +#include <linux/random.h> + +#include <rdma/ib_cache.h> +#include "sa.h" + +static void mcast_add_one(struct ib_device *device); +static void mcast_remove_one(struct ib_device *device); + +static struct ib_client mcast_client = { + .name = "ib_multicast", + .add = mcast_add_one, + .remove = mcast_remove_one +}; + +static struct ib_sa_client sa_client; +static struct workqueue_struct *mcast_wq; +static union ib_gid mgid0; + +struct mcast_device; + +struct mcast_port { + struct mcast_device *dev; + spinlock_t lock; + struct rb_root table; + atomic_t refcount; + struct completion comp; + u8 port_num; +}; + +struct mcast_device { + struct ib_device *device; + struct ib_event_handler event_handler; + int start_port; + int end_port; + struct mcast_port port[0]; +}; + +enum mcast_state { + MCAST_IDLE, + MCAST_JOINING, + MCAST_MEMBER, + MCAST_BUSY, + MCAST_ERROR +}; + +struct mcast_member; + +struct mcast_group { + struct ib_sa_mcmember_rec rec; + struct rb_node node; + struct mcast_port *port; + spinlock_t lock; + struct work_struct work; + struct list_head pending_list; + struct list_head active_list; + struct mcast_member *last_join; + int members[3]; + atomic_t refcount; + enum mcast_state state; + struct ib_sa_query *query; + int query_id; +}; + +struct mcast_member { + struct ib_sa_multicast multicast; + struct ib_sa_client *client; + struct mcast_group *group; + struct list_head list; + enum mcast_state state; + atomic_t refcount; + struct completion comp; +}; + +static void join_handler(int status, struct ib_sa_mcmember_rec *rec, + void *context); +static void leave_handler(int status, struct ib_sa_mcmember_rec *rec, + void *context); + +static struct mcast_group *mcast_find(struct mcast_port *port, + union ib_gid *mgid) +{ + struct rb_node *node = port->table.rb_node; + struct mcast_group *group; + int ret; + + while (node) { + group = rb_entry(node, struct mcast_group, node); + ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); + if (!ret) + return group; + + if (ret < 0) + node = node->rb_left; + else + node = node->rb_right; + } + return NULL; +} + +static struct mcast_group *mcast_insert(struct mcast_port *port, + struct mcast_group *group, + int allow_duplicates) +{ + struct rb_node **link = &port->table.rb_node; + struct rb_node *parent = NULL; + struct mcast_group *cur_group; + int ret; + + while (*link) { + parent = *link; + cur_group = rb_entry(parent, struct mcast_group, node); + + ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, + sizeof group->rec.mgid); + if (ret < 0) + link = &(*link)->rb_left; + else if (ret > 0) + link = &(*link)->rb_right; + else if (allow_duplicates) + link = &(*link)->rb_left; + else + return cur_group; + } + rb_link_node(&group->node, parent, link); + rb_insert_color(&group->node, &port->table); + return NULL; +} + +static void deref_port(struct mcast_port *port) +{ + if (atomic_dec_and_test(&port->refcount)) + complete(&port->comp); +} + +static void release_group(struct mcast_group *group) +{ + struct mcast_port *port = group->port; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + if (atomic_dec_and_test(&group->refcount)) { + rb_erase(&group->node, &port->table); + spin_unlock_irqrestore(&port->lock, flags); + kfree(group); + deref_port(port); + } else + spin_unlock_irqrestore(&port->lock, flags); +} + +static void deref_member(struct 
mcast_member *member) +{ + if (atomic_dec_and_test(&member->refcount)) + complete(&member->comp); +} + +static void queue_join(struct mcast_member *member) +{ + struct mcast_group *group = member->group; + unsigned long flags; + + spin_lock_irqsave(&group->lock, flags); + list_add(&member->list, &group->pending_list); + if (group->state == MCAST_IDLE) { + group->state = MCAST_BUSY; + atomic_inc(&group->refcount); + queue_work(mcast_wq, &group->work); + } + spin_unlock_irqrestore(&group->lock, flags); +} + +/* + * A multicast group has three types of members: full member, non member, and + * send only member. We need to keep track of the number of members of each + * type based on their join state. Adjust the number of members the belong to + * the specified join states. + */ +static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) +{ + int i; + + for (i = 0; i < 3; i++, join_state >>= 1) + if (join_state & 0x1) + group->members[i] += inc; +} + +/* + * If a multicast group has zero members left for a particular join state, but + * the group is still a member with the SA, we need to leave that join state. + * Determine which join states we still belong to, but that do not have any + * active members. + */ +static u8 get_leave_state(struct mcast_group *group) +{ + u8 leave_state = 0; + int i; + + for (i = 0; i < 3; i++) + if (!group->members[i]) + leave_state |= (0x1 << i); + + return leave_state & group->rec.join_state; +} + +static int check_selector(ib_sa_comp_mask comp_mask, + ib_sa_comp_mask selector_mask, + ib_sa_comp_mask value_mask, + u8 selector, u8 src_value, u8 dst_value) +{ + int err; + + if (!(comp_mask & selector_mask) || !(comp_mask & value_mask)) + return 0; + + switch (selector) { + case IB_SA_GT: + err = (src_value <= dst_value); + break; + case IB_SA_LT: + err = (src_value >= dst_value); + break; + case IB_SA_EQ: + err = (src_value != dst_value); + break; + default: + err = 0; + break; + } + + return err; +} + +static int cmp_rec(struct ib_sa_mcmember_rec *src, + struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask) +{ + /* MGID must already match */ + + if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID && + memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid)) + return -EINVAL; + if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey) + return -EINVAL; + if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid) + return -EINVAL; + if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR, + IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector, + src->mtu, dst->mtu)) + return -EINVAL; + if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS && + src->traffic_class != dst->traffic_class) + return -EINVAL; + if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey) + return -EINVAL; + if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR, + IB_SA_MCMEMBER_REC_RATE, dst->rate_selector, + src->rate, dst->rate)) + return -EINVAL; + if (check_selector(comp_mask, + IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR, + IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME, + dst->packet_life_time_selector, + src->packet_life_time, dst->packet_life_time)) + return -EINVAL; + if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl) + return -EINVAL; + if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL && + src->flow_label != dst->flow_label) + return -EINVAL; + if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT && + src->hop_limit != dst->hop_limit) + return -EINVAL; + if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope) + return -EINVAL; + 
+ /* join_state checked separately, proxy_join ignored */ + + return 0; +} + +static int send_join(struct mcast_group *group, struct mcast_member *member) +{ + struct mcast_port *port = group->port; + int ret; + + group->last_join = member; + ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, + port->port_num, IB_MGMT_METHOD_SET, + &member->multicast.rec, + member->multicast.comp_mask, + 3000, GFP_KERNEL, join_handler, group, + &group->query); + if (ret >= 0) { + group->query_id = ret; + ret = 0; + } + return ret; +} + +static int send_leave(struct mcast_group *group, u8 leave_state) +{ + struct mcast_port *port = group->port; + struct ib_sa_mcmember_rec rec; + int ret; + + rec = group->rec; + rec.join_state = leave_state; + + ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, + port->port_num, IB_SA_METHOD_DELETE, &rec, + IB_SA_MCMEMBER_REC_MGID | + IB_SA_MCMEMBER_REC_PORT_GID | + IB_SA_MCMEMBER_REC_JOIN_STATE, + 3000, GFP_KERNEL, leave_handler, + group, &group->query); + if (ret >= 0) { + group->query_id = ret; + ret = 0; + } + return ret; +} + +static void join_group(struct mcast_group *group, struct mcast_member *member, + u8 join_state) +{ + member->state = MCAST_MEMBER; + adjust_membership(group, join_state, 1); + group->rec.join_state |= join_state; + member->multicast.rec = group->rec; + member->multicast.rec.join_state = join_state; + list_move(&member->list, &group->active_list); +} + +static int fail_join(struct mcast_group *group, struct mcast_member *member, + int status) +{ + spin_lock_irq(&group->lock); + list_del_init(&member->list); + spin_unlock_irq(&group->lock); + return member->multicast.callback(status, &member->multicast); +} + +static void process_group_error(struct mcast_group *group) +{ + struct mcast_member *member; + int ret; + + spin_lock_irq(&group->lock); + while (!list_empty(&group->active_list)) { + member = list_entry(group->active_list.next, + struct mcast_member, list); + atomic_inc(&member->refcount); + list_del_init(&member->list); + adjust_membership(group, member->multicast.rec.join_state, -1); + member->state = MCAST_ERROR; + spin_unlock_irq(&group->lock); + + ret = member->multicast.callback(-ENETRESET, + &member->multicast); + deref_member(member); + if (ret) + ib_sa_free_multicast(&member->multicast); + spin_lock_irq(&group->lock); + } + + group->rec.join_state = 0; + group->state = MCAST_BUSY; + spin_unlock_irq(&group->lock); +} + +static void mcast_work_handler(struct work_struct *work) +{ + struct mcast_group *group; + struct mcast_member *member; + struct ib_sa_multicast *multicast; + int status, ret; + u8 join_state; + + group = container_of(work, typeof(*group), work); +retest: + spin_lock_irq(&group->lock); + while (!list_empty(&group->pending_list) || + (group->state == MCAST_ERROR)) { + + if (group->state == MCAST_ERROR) { + spin_unlock_irq(&group->lock); + process_group_error(group); + goto retest; + } + + member = list_entry(group->pending_list.next, + struct mcast_member, list); + multicast = &member->multicast; + join_state = multicast->rec.join_state; + atomic_inc(&member->refcount); + + if (join_state == (group->rec.join_state & join_state)) { + status = cmp_rec(&group->rec, &multicast->rec, + multicast->comp_mask); + if (!status) + join_group(group, member, join_state); + else + list_del_init(&member->list); + spin_unlock_irq(&group->lock); + ret = multicast->callback(status, multicast); + } else { + spin_unlock_irq(&group->lock); + status = send_join(group, member); + if (!status) { + deref_member(member); + 
return; + } + ret = fail_join(group, member, status); + } + + deref_member(member); + if (ret) + ib_sa_free_multicast(&member->multicast); + spin_lock_irq(&group->lock); + } + + join_state = get_leave_state(group); + if (join_state) { + group->rec.join_state &= ~join_state; + spin_unlock_irq(&group->lock); + if (send_leave(group, join_state)) + goto retest; + } else { + group->state = MCAST_IDLE; + spin_unlock_irq(&group->lock); + release_group(group); + } +} + +/* + * Fail a join request if it is still active - at the head of the pending queue. + */ +static void process_join_error(struct mcast_group *group, int status) +{ + struct mcast_member *member; + int ret; + + spin_lock_irq(&group->lock); + member = list_entry(group->pending_list.next, + struct mcast_member, list); + if (group->last_join == member) { + atomic_inc(&member->refcount); + list_del_init(&member->list); + spin_unlock_irq(&group->lock); + ret = member->multicast.callback(status, &member->multicast); + deref_member(member); + if (ret) + ib_sa_free_multicast(&member->multicast); + } else + spin_unlock_irq(&group->lock); +} + +static void join_handler(int status, struct ib_sa_mcmember_rec *rec, + void *context) +{ + struct mcast_group *group = context; + + if (status) + process_join_error(group, status); + else { + spin_lock_irq(&group->port->lock); + group->rec = *rec; + if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) { + rb_erase(&group->node, &group->port->table); + mcast_insert(group->port, group, 1); + } + spin_unlock_irq(&group->port->lock); + } + mcast_work_handler(&group->work); +} + +static void leave_handler(int status, struct ib_sa_mcmember_rec *rec, + void *context) +{ + struct mcast_group *group = context; + + mcast_work_handler(&group->work); +} + +static struct mcast_group *acquire_group(struct mcast_port *port, + union ib_gid *mgid, gfp_t gfp_mask) +{ + struct mcast_group *group, *cur_group; + unsigned long flags; + int is_mgid0; + + is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0); + if (!is_mgid0) { + spin_lock_irqsave(&port->lock, flags); + group = mcast_find(port, mgid); + if (group) + goto found; + spin_unlock_irqrestore(&port->lock, flags); + } + + group = kzalloc(sizeof *group, gfp_mask); + if (!group) + return NULL; + + group->port = port; + group->rec.mgid = *mgid; + INIT_LIST_HEAD(&group->pending_list); + INIT_LIST_HEAD(&group->active_list); + INIT_WORK(&group->work, mcast_work_handler); + spin_lock_init(&group->lock); + + spin_lock_irqsave(&port->lock, flags); + cur_group = mcast_insert(port, group, is_mgid0); + if (cur_group) { + kfree(group); + group = cur_group; + } else + atomic_inc(&port->refcount); +found: + atomic_inc(&group->refcount); + spin_unlock_irqrestore(&port->lock, flags); + return group; +} + +/* + * We serialize all join requests to a single group to make our lives much + * easier. Otherwise, two users could try to join the same group + * simultaneously, with different configurations, one could leave while the + * join is in progress, etc., which makes locking around error recovery + * difficult. 
+ */ +struct ib_sa_multicast * +ib_sa_join_multicast(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, + struct ib_sa_mcmember_rec *rec, + ib_sa_comp_mask comp_mask, gfp_t gfp_mask, + int (*callback)(int status, + struct ib_sa_multicast *multicast), + void *context) +{ + struct mcast_device *dev; + struct mcast_member *member; + struct ib_sa_multicast *multicast; + int ret; + + dev = ib_get_client_data(device, &mcast_client); + if (!dev) + return ERR_PTR(-ENODEV); + + member = kmalloc(sizeof *member, gfp_mask); + if (!member) + return ERR_PTR(-ENOMEM); + + ib_sa_client_get(client); + member->client = client; + member->multicast.rec = *rec; + member->multicast.comp_mask = comp_mask; + member->multicast.callback = callback; + member->multicast.context = context; + init_completion(&member->comp); + atomic_set(&member->refcount, 1); + member->state = MCAST_JOINING; + + member->group = acquire_group(&dev->port[port_num - dev->start_port], + &rec->mgid, gfp_mask); + if (!member->group) { + ret = -ENOMEM; + goto err; + } + + /* + * The user will get the multicast structure in their callback. They + * could then free the multicast structure before we can return from + * this routine. So we save the pointer to return before queuing + * any callback. + */ + multicast = &member->multicast; + queue_join(member); + return multicast; + +err: + ib_sa_client_put(client); + kfree(member); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(ib_sa_join_multicast); + +void ib_sa_free_multicast(struct ib_sa_multicast *multicast) +{ + struct mcast_member *member; + struct mcast_group *group; + + member = container_of(multicast, struct mcast_member, multicast); + group = member->group; + + spin_lock_irq(&group->lock); + if (member->state == MCAST_MEMBER) + adjust_membership(group, multicast->rec.join_state, -1); + + list_del_init(&member->list); + + if (group->state == MCAST_IDLE) { + group->state = MCAST_BUSY; + spin_unlock_irq(&group->lock); + /* Continue to hold reference on group until callback */ + queue_work(mcast_wq, &group->work); + } else { + spin_unlock_irq(&group->lock); + release_group(group); + } + + deref_member(member); + wait_for_completion(&member->comp); + ib_sa_client_put(member->client); + kfree(member); +} +EXPORT_SYMBOL(ib_sa_free_multicast); + +int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num, + union ib_gid *mgid, struct ib_sa_mcmember_rec *rec) +{ + struct mcast_device *dev; + struct mcast_port *port; + struct mcast_group *group; + unsigned long flags; + int ret = 0; + + dev = ib_get_client_data(device, &mcast_client); + if (!dev) + return -ENODEV; + + port = &dev->port[port_num - dev->start_port]; + spin_lock_irqsave(&port->lock, flags); + group = mcast_find(port, mgid); + if (group) + *rec = group->rec; + else + ret = -EADDRNOTAVAIL; + spin_unlock_irqrestore(&port->lock, flags); + + return ret; +} +EXPORT_SYMBOL(ib_sa_get_mcmember_rec); + +int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, + struct ib_sa_mcmember_rec *rec, + struct ib_ah_attr *ah_attr) +{ + int ret; + u16 gid_index; + u8 p; + + ret = ib_find_cached_gid(device, &rec->port_gid, &p, &gid_index); + if (ret) + return ret; + + memset(ah_attr, 0, sizeof *ah_attr); + ah_attr->dlid = be16_to_cpu(rec->mlid); + ah_attr->sl = rec->sl; + ah_attr->port_num = port_num; + ah_attr->static_rate = rec->rate; + + ah_attr->ah_flags = IB_AH_GRH; + ah_attr->grh.dgid = rec->mgid; + + ah_attr->grh.sgid_index = (u8) gid_index; + ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label); + 
ah_attr->grh.hop_limit = rec->hop_limit; + ah_attr->grh.traffic_class = rec->traffic_class; + + return 0; +} +EXPORT_SYMBOL(ib_init_ah_from_mcmember); + +static void mcast_groups_lost(struct mcast_port *port) +{ + struct mcast_group *group; + struct rb_node *node; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + for (node = rb_first(&port->table); node; node = rb_next(node)) { + group = rb_entry(node, struct mcast_group, node); + spin_lock(&group->lock); + if (group->state == MCAST_IDLE) { + atomic_inc(&group->refcount); + queue_work(mcast_wq, &group->work); + } + group->state = MCAST_ERROR; + spin_unlock(&group->lock); + } + spin_unlock_irqrestore(&port->lock, flags); +} + +static void mcast_event_handler(struct ib_event_handler *handler, + struct ib_event *event) +{ + struct mcast_device *dev; + + dev = container_of(handler, struct mcast_device, event_handler); + + switch (event->event) { + case IB_EVENT_PORT_ERR: + case IB_EVENT_LID_CHANGE: + case IB_EVENT_SM_CHANGE: + case IB_EVENT_CLIENT_REREGISTER: + mcast_groups_lost(&dev->port[event->element.port_num - + dev->start_port]); + break; + default: + break; + } +} + +static void mcast_add_one(struct ib_device *device) +{ + struct mcast_device *dev; + struct mcast_port *port; + int i; + + if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) + return; + + dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port, + GFP_KERNEL); + if (!dev) + return; + + if (device->node_type == RDMA_NODE_IB_SWITCH) + dev->start_port = dev->end_port = 0; + else { + dev->start_port = 1; + dev->end_port = device->phys_port_cnt; + } + + for (i = 0; i <= dev->end_port - dev->start_port; i++) { + port = &dev->port[i]; + port->dev = dev; + port->port_num = dev->start_port + i; + spin_lock_init(&port->lock); + port->table = RB_ROOT; + init_completion(&port->comp); + atomic_set(&port->refcount, 1); + } + + dev->device = device; + ib_set_client_data(device, &mcast_client, dev); + + INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler); + ib_register_event_handler(&dev->event_handler); +} + +static void mcast_remove_one(struct ib_device *device) +{ + struct mcast_device *dev; + struct mcast_port *port; + int i; + + dev = ib_get_client_data(device, &mcast_client); + if (!dev) + return; + + ib_unregister_event_handler(&dev->event_handler); + flush_workqueue(mcast_wq); + + for (i = 0; i <= dev->end_port - dev->start_port; i++) { + port = &dev->port[i]; + deref_port(port); + wait_for_completion(&port->comp); + } + + kfree(dev); +} + +int mcast_init(void) +{ + int ret; + + mcast_wq = create_singlethread_workqueue("ib_mcast"); + if (!mcast_wq) + return -ENOMEM; + + ib_sa_register_client(&sa_client); + + ret = ib_register_client(&mcast_client); + if (ret) + goto err; + return 0; + +err: + ib_sa_unregister_client(&sa_client); + destroy_workqueue(mcast_wq); + return ret; +} + +void mcast_cleanup(void) +{ + ib_unregister_client(&mcast_client); + ib_sa_unregister_client(&sa_client); + destroy_workqueue(mcast_wq); +} diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h new file mode 100644 index 000000000000..24c93fd320fb --- /dev/null +++ b/drivers/infiniband/core/sa.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2006 Intel Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef SA_H +#define SA_H + +#include <rdma/ib_sa.h> + +static inline void ib_sa_client_get(struct ib_sa_client *client) +{ + atomic_inc(&client->users); +} + +static inline void ib_sa_client_put(struct ib_sa_client *client) +{ + if (atomic_dec_and_test(&client->users)) + complete(&client->comp); +} + +int ib_sa_mcmember_rec_query(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, + u8 method, + struct ib_sa_mcmember_rec *rec, + ib_sa_comp_mask comp_mask, + int timeout_ms, gfp_t gfp_mask, + void (*callback)(int status, + struct ib_sa_mcmember_rec *resp, + void *context), + void *context, + struct ib_sa_query **sa_query); + +int mcast_init(void); +void mcast_cleanup(void); + +#endif /* SA_H */ diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index e45afba75341..9a7eaadb1688 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -47,8 +47,8 @@ #include <linux/workqueue.h> #include <rdma/ib_pack.h> -#include <rdma/ib_sa.h> #include <rdma/ib_cache.h> +#include "sa.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand subnet administration query support"); @@ -57,6 +57,7 @@ MODULE_LICENSE("Dual BSD/GPL"); struct ib_sa_sm_ah { struct ib_ah *ah; struct kref ref; + u8 src_path_mask; }; struct ib_sa_port { @@ -380,6 +381,7 @@ static void update_sm_ah(struct work_struct *work) } kref_init(&new_ah->ref); + new_ah->src_path_mask = (1 << port_attr.lmc) - 1; memset(&ah_attr, 0, sizeof ah_attr); ah_attr.dlid = port_attr.sm_lid; @@ -425,17 +427,6 @@ void ib_sa_register_client(struct ib_sa_client *client) } EXPORT_SYMBOL(ib_sa_register_client); -static inline void ib_sa_client_get(struct ib_sa_client *client) -{ - atomic_inc(&client->users); -} - -static inline void ib_sa_client_put(struct ib_sa_client *client) -{ - if (atomic_dec_and_test(&client->users)) - complete(&client->comp); -} - void ib_sa_unregister_client(struct ib_sa_client *client) { ib_sa_client_put(client); @@ -471,6 +462,25 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query) } EXPORT_SYMBOL(ib_sa_cancel_query); +static u8 get_src_path_mask(struct ib_device *device, u8 port_num) +{ + struct ib_sa_device *sa_dev; + struct ib_sa_port *port; + unsigned long flags; + u8 src_path_mask; + + sa_dev = 
ib_get_client_data(device, &sa_client); + if (!sa_dev) + return 0x7f; + + port = &sa_dev->port[port_num - sa_dev->start_port]; + spin_lock_irqsave(&port->ah_lock, flags); + src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f; + spin_unlock_irqrestore(&port->ah_lock, flags); + + return src_path_mask; +} + int ib_init_ah_from_path(struct ib_device *device, u8 port_num, struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr) { @@ -480,8 +490,10 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, memset(ah_attr, 0, sizeof *ah_attr); ah_attr->dlid = be16_to_cpu(rec->dlid); ah_attr->sl = rec->sl; - ah_attr->src_path_bits = be16_to_cpu(rec->slid) & 0x7f; + ah_attr->src_path_bits = be16_to_cpu(rec->slid) & + get_src_path_mask(device, port_num); ah_attr->port_num = port_num; + ah_attr->static_rate = rec->rate; if (rec->hop_limit > 1) { ah_attr->ah_flags = IB_AH_GRH; @@ -901,7 +913,6 @@ err1: kfree(query); return ret; } -EXPORT_SYMBOL(ib_sa_mcmember_rec_query); static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) @@ -1053,14 +1064,27 @@ static int __init ib_sa_init(void) get_random_bytes(&tid, sizeof tid); ret = ib_register_client(&sa_client); - if (ret) + if (ret) { printk(KERN_ERR "Couldn't register ib_sa client\n"); + goto err1; + } + + ret = mcast_init(); + if (ret) { + printk(KERN_ERR "Couldn't initialize multicast handling\n"); + goto err2; + } + return 0; +err2: + ib_unregister_client(&sa_client); +err1: return ret; } static void __exit ib_sa_cleanup(void) { + mcast_cleanup(); ib_unregister_client(&sa_client); idr_destroy(&query_idr); } diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 54b81e17ad50..2bca753eb622 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -3,7 +3,7 @@ * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. - * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. + * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -34,7 +34,6 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $ */ #include <rdma/ib_smi.h> @@ -44,9 +43,8 @@ * Fixup a directed route SMP for sending * Return 0 if the SMP should be discarded */ -int smi_handle_dr_smp_send(struct ib_smp *smp, - u8 node_type, - int port_num) +enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, + u8 node_type, int port_num) { u8 hop_ptr, hop_cnt; @@ -59,18 +57,18 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, if (hop_cnt && hop_ptr == 0) { smp->hop_ptr++; return (smp->initial_path[smp->hop_ptr] == - port_num); + port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:2 */ if (hop_ptr && hop_ptr < hop_cnt) { if (node_type != RDMA_NODE_IB_SWITCH) - return 0; + return IB_SMI_DISCARD; /* smp->return_path set when received */ smp->hop_ptr++; return (smp->initial_path[smp->hop_ptr] == - port_num); + port_num ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:3 -- We're at the end of the DR segment of path */ @@ -78,29 +76,30 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, /* smp->return_path set when received */ smp->hop_ptr++; return (node_type == RDMA_NODE_IB_SWITCH || - smp->dr_dlid == IB_LID_PERMISSIVE); + smp->dr_dlid == IB_LID_PERMISSIVE ? + IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ /* C14-9:5 -- Fail unreasonable hop pointer */ - return (hop_ptr == hop_cnt + 1); + return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD); } else { /* C14-13:1 */ if (hop_cnt && hop_ptr == hop_cnt + 1) { smp->hop_ptr--; return (smp->return_path[smp->hop_ptr] == - port_num); + port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:2 */ if (2 <= hop_ptr && hop_ptr <= hop_cnt) { if (node_type != RDMA_NODE_IB_SWITCH) - return 0; + return IB_SMI_DISCARD; smp->hop_ptr--; return (smp->return_path[smp->hop_ptr] == - port_num); + port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:3 -- at the end of the DR segment of path */ @@ -108,15 +107,16 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, smp->hop_ptr--; /* C14-13:3 -- SMPs destined for SM shouldn't be here */ return (node_type == RDMA_NODE_IB_SWITCH || - smp->dr_slid == IB_LID_PERMISSIVE); + smp->dr_slid == IB_LID_PERMISSIVE ? + IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */ if (hop_ptr == 0) - return 1; + return IB_SMI_HANDLE; /* C14-13:5 -- Check for unreasonable hop pointer */ - return 0; + return IB_SMI_DISCARD; } } @@ -124,10 +124,8 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, * Adjust information for a received SMP * Return 0 if the SMP should be dropped */ -int smi_handle_dr_smp_recv(struct ib_smp *smp, - u8 node_type, - int port_num, - int phys_port_cnt) +enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, + int port_num, int phys_port_cnt) { u8 hop_ptr, hop_cnt; @@ -138,16 +136,17 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, if (!ib_get_smp_direction(smp)) { /* C14-9:1 -- sender should have incremented hop_ptr */ if (hop_cnt && hop_ptr == 0) - return 0; + return IB_SMI_DISCARD; /* C14-9:2 -- intermediate hop */ if (hop_ptr && hop_ptr < hop_cnt) { if (node_type != RDMA_NODE_IB_SWITCH) - return 0; + return IB_SMI_DISCARD; smp->return_path[hop_ptr] = port_num; /* smp->hop_ptr updated when sending */ - return (smp->initial_path[hop_ptr+1] <= phys_port_cnt); + return (smp->initial_path[hop_ptr+1] <= phys_port_cnt ? + IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:3 -- We're at the end of the DR segment of path */ @@ -157,12 +156,13 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, /* smp->hop_ptr updated when sending */ return (node_type == RDMA_NODE_IB_SWITCH || - smp->dr_dlid == IB_LID_PERMISSIVE); + smp->dr_dlid == IB_LID_PERMISSIVE ? + IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ /* C14-9:5 -- fail unreasonable hop pointer */ - return (hop_ptr == hop_cnt + 1); + return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD); } else { @@ -170,16 +170,17 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, if (hop_cnt && hop_ptr == hop_cnt + 1) { smp->hop_ptr--; return (smp->return_path[smp->hop_ptr] == - port_num); + port_num ? 
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index 3011bfd86dc5..9a4b349efc30 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -3,7 +3,7 @@
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ * Copyright (c) 2004-2007 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -33,7 +33,6 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
- * $Id: smi.h 1389 2004-12-27 22:56:47Z roland $
 */

#ifndef __SMI_H_
@@ -41,26 +40,33 @@

#include <rdma/ib_smi.h>

-int smi_handle_dr_smp_recv(struct ib_smp *smp,
-			   u8 node_type,
-			   int port_num,
-			   int phys_port_cnt);
-extern int smi_check_forward_dr_smp(struct ib_smp *smp);
-extern int smi_handle_dr_smp_send(struct ib_smp *smp,
-				  u8 node_type,
-				  int port_num);
+enum smi_action {
+	IB_SMI_DISCARD,
+	IB_SMI_HANDLE
+};
+
+enum smi_forward_action {
+	IB_SMI_LOCAL,	/* SMP should be completed up the stack */
+	IB_SMI_SEND,	/* received DR SMP should be forwarded to the send queue */
+};
+
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+				       int port_num, int phys_port_cnt);
+extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
+extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
+					      u8 node_type, int port_num);

/*
 * Return 1 if the SMP should be handled by the local SMA/SM via process_mad
 */
-static inline int smi_check_local_smp(struct ib_smp *smp,
-				      struct ib_device *device)
+static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
+						  struct ib_device *device)
{
	/* C14-9:3 -- We're at the end of the DR segment of path */
	/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
	return ((device->process_mad &&
		!ib_get_smp_direction(smp) &&
-		(smp->hop_ptr == smp->hop_cnt + 1)));
+		(smp->hop_ptr == smp->hop_cnt + 1)) ?
+		IB_SMI_HANDLE : IB_SMI_DISCARD);
}
-
#endif	/* __SMI_H_ */
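With the smi.h declarations above, smi_check_forward_dr_smp() now answers IB_SMI_SEND or IB_SMI_LOCAL by name instead of 1 or 0. A minimal sketch of the receive-path dispatch this enables (a local mock, not the mad.c dispatcher):

	#include <stdio.h>

	enum smi_forward_action { IB_SMI_LOCAL, IB_SMI_SEND };

	/* Route a directed-route SMP based on the forward decision. */
	static void dispatch(enum smi_forward_action fwd)
	{
		switch (fwd) {
		case IB_SMI_SEND:
			printf("re-post on the send queue\n");
			break;
		case IB_SMI_LOCAL:
			printf("complete up the stack\n");
			break;
		}
	}

	int main(void)
	{
		dispatch(IB_SMI_SEND);
		dispatch(IB_SMI_LOCAL);
		return 0;
	}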
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 709323c14c5d..08c299ebf4a8 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -683,6 +683,7 @@ int ib_device_register_sysfs(struct ib_device *device)

	class_dev->class      = &ib_class;
	class_dev->class_data = device;
+	class_dev->dev	      = device->dma_device;
	strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE);

	INIT_LIST_HEAD(&device->port_list);
@@ -714,8 +715,6 @@ int ib_device_register_sysfs(struct ib_device *device)
		if (ret)
			goto err_put;
	} else {
-		int i;
-
		for (i = 1; i <= device->phys_port_cnt; ++i) {
			ret = add_port(device, i);
			if (ret)
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index ee51d79a7ad5..2586a3ee8eba 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -407,29 +407,18 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,

	mutex_lock(&file->file_mutex);
	while (list_empty(&file->events)) {
+		mutex_unlock(&file->file_mutex);

-		if (file->filp->f_flags & O_NONBLOCK) {
-			result = -EAGAIN;
-			break;
-		}
+		if (file->filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;

-		if (signal_pending(current)) {
-			result = -ERESTARTSYS;
-			break;
-		}
+		if (wait_event_interruptible(file->poll_wait,
+					     !list_empty(&file->events)))
+			return -ERESTARTSYS;

-		prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
-
-		mutex_unlock(&file->file_mutex);
-		schedule();
		mutex_lock(&file->file_mutex);
-
-		finish_wait(&file->poll_wait, &wait);
	}

-	if (result)
-		goto done;
-
	uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);

	if (ib_ucm_new_cm_id(uevent->resp.event)) {
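The event-wait loop above in ib_ucm_event(), like ucma_get_event() below, swaps an open-coded prepare_to_wait()/schedule()/finish_wait() sequence for wait_event_interruptible(), always dropping the mutex before sleeping and retaking it before re-checking the list. The same shape in userspace, sketched with pthreads, where the condition variable plays the role of the kernel wait queue (illustrative only):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static int nevents;

	static void post_event(void)
	{
		pthread_mutex_lock(&lock);
		nevents++;
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
	}

	/* The condition is always re-checked with the lock held; the
	 * wait primitive drops the lock while asleep, just as the
	 * patched loops do explicitly around their sleep. */
	static void get_event(void)
	{
		pthread_mutex_lock(&lock);
		while (nevents == 0)
			pthread_cond_wait(&cond, &lock);
		nevents--;
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		post_event();
		get_event();
		printf("got one event\n");
		return 0;
	}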
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 6b81b98961c7..53b4c94a7eb5 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -70,10 +70,24 @@ struct ucma_context {
	u64			uid;

	struct list_head	list;
+	struct list_head	mc_list;
+};
+
+struct ucma_multicast {
+	struct ucma_context	*ctx;
+	int			id;
+	int			events_reported;
+
+	u64			uid;
+	struct list_head	list;
+	struct sockaddr		addr;
+	u8			pad[sizeof(struct sockaddr_in6) -
+				    sizeof(struct sockaddr)];
};

struct ucma_event {
	struct ucma_context	*ctx;
+	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
@@ -81,6 +95,7 @@

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
+static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
@@ -124,6 +139,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
+	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
@@ -147,6 +163,37 @@ error:
	return NULL;
}

+static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
+{
+	struct ucma_multicast *mc;
+	int ret;
+
+	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
+	if (!mc)
+		return NULL;
+
+	do {
+		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
+		if (!ret)
+			goto error;
+
+		mutex_lock(&mut);
+		ret = idr_get_new(&multicast_idr, mc, &mc->id);
+		mutex_unlock(&mut);
+	} while (ret == -EAGAIN);
+
+	if (ret)
+		goto error;
+
+	mc->ctx = ctx;
+	list_add_tail(&mc->list, &ctx->mc_list);
+	return mc;
+
+error:
+	kfree(mc);
+	return NULL;
+}
+
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
@@ -180,8 +227,19 @@ static void ucma_set_event_context(struct ucma_context *ctx,
+				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
-	uevent->resp.uid = ctx->uid;
-	uevent->resp.id = ctx->id;
+	switch (event->event) {
+	case RDMA_CM_EVENT_MULTICAST_JOIN:
+	case RDMA_CM_EVENT_MULTICAST_ERROR:
+		uevent->mc = (struct ucma_multicast *)
+			     event->param.ud.private_data;
+		uevent->resp.uid = uevent->mc->uid;
+		uevent->resp.id = uevent->mc->id;
+		break;
+	default:
+		uevent->resp.uid = ctx->uid;
+		uevent->resp.id = ctx->id;
+		break;
+	}
}

static int ucma_event_handler(struct rdma_cm_id *cm_id,
@@ -199,7 +257,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
-	if (cm_id->ps == RDMA_PS_UDP)
+	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
@@ -208,7 +266,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
-			ret = -EDQUOT;
+			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
@@ -248,26 +306,18 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
-		if (file->filp->f_flags & O_NONBLOCK) {
-			ret = -EAGAIN;
-			break;
-		}
+		mutex_unlock(&file->mut);

-		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			break;
-		}
+		if (file->filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		if (wait_event_interruptible(file->poll_wait,
+					     !list_empty(&file->event_list)))
+			return -ERESTARTSYS;

-		prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
-		mutex_unlock(&file->mut);
-		schedule();
		mutex_lock(&file->mut);
-		finish_wait(&file->poll_wait, &wait);
	}

-	if (ret)
-		goto done;
-
	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
@@ -290,6 +340,8 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
+	if (uevent->mc)
+		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
@@ -342,6 +394,19 @@ err1:
	return ret;
}

+static void ucma_cleanup_multicast(struct ucma_context *ctx)
+{
+	struct ucma_multicast *mc, *tmp;
+
+	mutex_lock(&mut);
+	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
+		list_del(&mc->list);
+		idr_remove(&multicast_idr, mc->id);
+		kfree(mc);
+	}
+	mutex_unlock(&mut);
+}
+
static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;
@@ -360,6 +425,19 @@ static void ucma_cleanup_events(struct ucma_context *ctx)
	}
}

+static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
+{
+	struct ucma_event *uevent, *tmp;
+
+	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
+		if (uevent->mc != mc)
+			continue;
+
+		list_del(&uevent->list);
+		kfree(uevent);
+	}
+}
+
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
@@ -367,6 +445,8 @@ static int ucma_free_ctx(struct ucma_context *ctx)
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

+	ucma_cleanup_multicast(ctx);
+
	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
@@ -731,6 +811,114 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
	return ret;
}

+static ssize_t ucma_join_multicast(struct ucma_file *file,
+				   const char __user *inbuf,
+				   int in_len, int out_len)
+{
+	struct rdma_ucm_join_mcast cmd;
+	struct rdma_ucm_create_id_resp resp;
+	struct ucma_context *ctx;
+	struct ucma_multicast *mc;
+	int ret;
+
+	if (out_len < sizeof(resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	mutex_lock(&file->mut);
+	mc = ucma_alloc_multicast(ctx);
+	if (!mc) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	mc->uid = cmd.uid;
+	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
+	ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
+	if (ret)
+		goto err2;
+
+	resp.id = mc->id;
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &resp, sizeof(resp))) {
+		ret = -EFAULT;
+		goto err3;
+	}
+
+	mutex_unlock(&file->mut);
+	ucma_put_ctx(ctx);
+	return 0;
+
+err3:
+	rdma_leave_multicast(ctx->cm_id, &mc->addr);
+	ucma_cleanup_mc_events(mc);
+err2:
+	mutex_lock(&mut);
+	idr_remove(&multicast_idr, mc->id);
+	mutex_unlock(&mut);
+	list_del(&mc->list);
+	kfree(mc);
+err1:
+	mutex_unlock(&file->mut);
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_leave_multicast(struct ucma_file *file,
+				    const char __user *inbuf,
+				    int in_len, int out_len)
+{
+	struct rdma_ucm_destroy_id cmd;
+	struct rdma_ucm_destroy_id_resp resp;
+	struct ucma_multicast *mc;
+	int ret = 0;
+
+	if (out_len < sizeof(resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	mutex_lock(&mut);
+	mc = idr_find(&multicast_idr, cmd.id);
+	if (!mc)
+		mc = ERR_PTR(-ENOENT);
+	else if (mc->ctx->file != file)
+		mc = ERR_PTR(-EINVAL);
+	else {
+		idr_remove(&multicast_idr, mc->id);
+		atomic_inc(&mc->ctx->ref);
+	}
+	mutex_unlock(&mut);
+
+	if (IS_ERR(mc)) {
+		ret = PTR_ERR(mc);
+		goto out;
+	}
+
+	rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
+	mutex_lock(&mc->ctx->file->mut);
+	ucma_cleanup_mc_events(mc);
+	list_del(&mc->list);
+	mutex_unlock(&mc->ctx->file->mut);
+
+	ucma_put_ctx(mc->ctx);
+	resp.events_reported = mc->events_reported;
+	kfree(mc);
+
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &resp, sizeof(resp)))
+		ret = -EFAULT;
+out:
+	return ret;
+}
+
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
@@ -750,6 +938,8 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
+	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
+	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
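ucma_leave_multicast() above follows a common teardown order: look the object up and unlink it from the id table under the global mutex, take a reference so it outlives the lock, and only then do the blocking leave and event cleanup. The skeleton of that pattern in plain C, with a fixed array standing in for the idr (hypothetical names, illustrative only):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_MC 64

	struct mc { int refs; };

	static pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;
	static struct mc *table[MAX_MC];

	/* Unlink under the lock before any blocking teardown, so no
	 * new lookup can return the object while it is being torn down. */
	static struct mc *grab_and_unlink(int id)
	{
		struct mc *m = NULL;

		pthread_mutex_lock(&mut);
		if (id >= 0 && id < MAX_MC && table[id]) {
			m = table[id];
			table[id] = NULL;	/* the idr_remove() step */
			m->refs++;		/* keep it alive past the lock */
		}
		pthread_mutex_unlock(&mut);
		return m;
	}

	int main(void)
	{
		struct mc *m;

		table[3] = calloc(1, sizeof(struct mc));
		m = grab_and_unlink(3);
		printf("first lookup %p, second lookup %p\n",
		       (void *)m, (void *)grab_and_unlink(3));
		free(m);
		return 0;
	}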
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c069ebeba8e3..8199b83052a9 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -135,7 +135,7 @@ static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
-static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);
+static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
@@ -231,12 +231,17 @@ static void recv_handler(struct ib_mad_agent *agent,
	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
-		/* XXX parse GRH */
-		packet->mad.hdr.gid_index	= 0;
-		packet->mad.hdr.hop_limit	= 0;
-		packet->mad.hdr.traffic_class	= 0;
-		memset(packet->mad.hdr.gid, 0, 16);
-		packet->mad.hdr.flow_label	= 0;
+		struct ib_ah_attr ah_attr;
+
+		ib_init_ah_from_wc(agent->device, agent->port_num,
+				   mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
+				   &ah_attr);
+
+		packet->mad.hdr.gid_index = ah_attr.grh.sgid_index;
+		packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit;
+		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
+		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
+		packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
	}

	if (queue_packet(file, agent, packet))
@@ -473,6 +478,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
+		ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
		ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit  = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index df1efbc10882..4fd75afa6a3a 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -622,8 +622,10 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
	obj->umem.virt_base = cmd.hca_va;

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
-	if (!pd)
+	if (!pd) {
+		ret = -EINVAL;
		goto err_release;
+	}

	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
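The user_mad.c hunks above replace the zero-filled GRH fields with values recovered from the work completion, and the write path now carries sgid_index back down, so a userspace MAD agent can emit a routable reply. The core of building a reply GRH, sketched in plain C (the struct is illustrative, not the kernel's wire layout):

	#include <stdint.h>
	#include <string.h>

	struct grh_info {
		uint8_t  dgid[16];	/* destination GID of the reply */
		uint8_t  sgid_index;	/* which local GID to send from */
		uint8_t  hop_limit;
		uint8_t  traffic_class;
		uint32_t flow_label;
	};

	/* A reply goes back to the sender: its source GID becomes our
	 * destination, while traffic class and flow label carry over. */
	static void reply_grh(struct grh_info *reply, const uint8_t *peer_sgid,
			      uint8_t sgid_index, uint8_t tclass, uint32_t flow)
	{
		memcpy(reply->dgid, peer_sgid, 16);
		reply->sgid_index = sgid_index;
		reply->hop_limit = 0xff;	/* see the verbs.c hunk below */
		reply->traffic_class = tclass;
		reply->flow_label = flow & 0xfffff;	/* 20-bit field */
	}

	int main(void)
	{
		static const uint8_t peer_sgid[16] = { 0xfe, 0x80 };
		struct grh_info g;

		reply_grh(&g, peer_sgid, 0, 0, 0x12345);
		return g.dgid[0] == 0xfe ? 0 : 1;
	}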
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 8b5dd3649bbf..ccdf93d30b01 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -167,7 +167,7 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
-		ah_attr->grh.hop_limit = grh->hop_limit;
+		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
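On the verbs.c change that closes the diff: the hop limit field of a received GRH holds the hops remaining after the forward trip, so copying it into the reply's address handle could leave too small a budget for the return trip; using the maximum value is the safe choice. A one-function illustration of that reasoning (my paraphrase of the change, since the commit message is not shown here):

	#include <stdint.h>
	#include <stdio.h>

	/* The received hop limit is what was *left* after the forward
	 * trip; a reply needs headroom for the whole return trip, so
	 * use the maximum rather than echoing the leftover. */
	static uint8_t reply_hop_limit(uint8_t received_hop_limit)
	{
		(void)received_hop_limit;	/* deliberately ignored */
		return 0xff;
	}

	int main(void)
	{
		printf("reply hop limit: 0x%x\n", reply_hop_limit(2));
		return 0;
	}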