diff options
Diffstat (limited to 'drivers/infiniband/core/cache.c')
-rw-r--r-- | drivers/infiniband/core/cache.c | 179 |
1 file changed, 91 insertions(+), 88 deletions(-)
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index e337b08de2ff..81d66f56e38f 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -125,6 +125,16 @@ const char *ib_cache_gid_type_str(enum ib_gid_type gid_type) } EXPORT_SYMBOL(ib_cache_gid_type_str); +/** rdma_is_zero_gid - Check if given GID is zero or not. + * @gid: GID to check + * Returns true if given GID is zero, returns false otherwise. + */ +bool rdma_is_zero_gid(const union ib_gid *gid) +{ + return !memcmp(gid, &zgid, sizeof(*gid)); +} +EXPORT_SYMBOL(rdma_is_zero_gid); + int ib_cache_gid_parse_type_str(const char *buf) { unsigned int i; @@ -149,6 +159,11 @@ int ib_cache_gid_parse_type_str(const char *buf) } EXPORT_SYMBOL(ib_cache_gid_parse_type_str); +static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port) +{ + return device->cache.ports[port - rdma_start_port(device)].gid; +} + static void del_roce_gid(struct ib_device *device, u8 port_num, struct ib_gid_table *table, int ix) { @@ -231,7 +246,7 @@ static int add_modify_gid(struct ib_gid_table *table, * So ignore such behavior for IB link layer and don't * fail the call, but don't add such entry to GID cache. */ - if (!memcmp(gid, &zgid, sizeof(*gid))) + if (rdma_is_zero_gid(gid)) return 0; } @@ -264,7 +279,7 @@ static void del_gid(struct ib_device *ib_dev, u8 port, if (rdma_protocol_roce(ib_dev, port)) del_roce_gid(ib_dev, port, table, ix); - memcpy(&table->data_vec[ix].gid, &zgid, sizeof(zgid)); + memset(&table->data_vec[ix].gid, 0, sizeof(table->data_vec[ix].gid)); memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr)); table->data_vec[ix].context = NULL; } @@ -291,14 +306,18 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, * so lookup free slot only if requested. 
*/ if (pempty && empty < 0) { - if (data->props & GID_TABLE_ENTRY_INVALID) { - /* Found an invalid (free) entry; allocate it */ - if (data->props & GID_TABLE_ENTRY_DEFAULT) { - if (default_gid) - empty = curr_index; - } else { - empty = curr_index; - } + if (data->props & GID_TABLE_ENTRY_INVALID && + (default_gid == + !!(data->props & GID_TABLE_ENTRY_DEFAULT))) { + /* + * Found an invalid (free) entry; allocate it. + * If default GID is requested, then our + * found slot must be one of the DEFAULT + * reserved slots or we fail. + * This ensures that only DEFAULT reserved + * slots are used for default property GIDs. + */ + empty = curr_index; } } @@ -359,10 +378,10 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port, * IB spec version 1.3 section 4.1.1 point (6) and * section 12.7.10 and section 12.7.20 */ - if (!memcmp(gid, &zgid, sizeof(*gid))) + if (rdma_is_zero_gid(gid)) return -EINVAL; - table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; + table = rdma_gid_table(ib_dev, port); mutex_lock(&table->lock); @@ -420,22 +439,20 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, return ret; } -int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, - union ib_gid *gid, struct ib_gid_attr *attr) +static int +_ib_cache_gid_del(struct ib_device *ib_dev, u8 port, + union ib_gid *gid, struct ib_gid_attr *attr, + unsigned long mask, bool default_gid) { struct ib_gid_table *table; int ret = 0; int ix; - table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; + table = rdma_gid_table(ib_dev, port); mutex_lock(&table->lock); - ix = find_gid(table, gid, attr, false, - GID_ATTR_FIND_MASK_GID | - GID_ATTR_FIND_MASK_GID_TYPE | - GID_ATTR_FIND_MASK_NETDEV, - NULL); + ix = find_gid(table, gid, attr, default_gid, mask, NULL); if (ix < 0) { ret = -EINVAL; goto out_unlock; @@ -452,6 +469,17 @@ out_unlock: return ret; } +int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, + union ib_gid *gid, struct ib_gid_attr *attr) +{ + unsigned long 
mask = GID_ATTR_FIND_MASK_GID | + GID_ATTR_FIND_MASK_GID_TYPE | + GID_ATTR_FIND_MASK_DEFAULT | + GID_ATTR_FIND_MASK_NETDEV; + + return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false); +} + int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, struct net_device *ndev) { @@ -459,7 +487,7 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, int ix; bool deleted = false; - table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; + table = rdma_gid_table(ib_dev, port); mutex_lock(&table->lock); @@ -483,13 +511,13 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index, { struct ib_gid_table *table; - table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; + table = rdma_gid_table(ib_dev, port); if (index < 0 || index >= table->sz) return -EINVAL; if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) - return -EAGAIN; + return -EINVAL; memcpy(gid, &table->data_vec[index].gid, sizeof(*gid)); if (attr) { @@ -576,7 +604,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev, if (!rdma_is_port_valid(ib_dev, port)) return -ENOENT; - table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; + table = rdma_gid_table(ib_dev, port); if (ndev) mask |= GID_ATTR_FIND_MASK_NETDEV; @@ -634,7 +662,7 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev, !rdma_protocol_roce(ib_dev, port)) return -EPROTONOSUPPORT; - table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; + table = rdma_gid_table(ib_dev, port); read_lock_irqsave(&table->rwlock, flags); for (i = 0; i < table->sz; i++) { @@ -711,8 +739,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port, mutex_lock(&table->lock); for (i = 0; i < table->sz; ++i) { - if (memcmp(&table->data_vec[i].gid, &zgid, - sizeof(table->data_vec[i].gid))) { + if (!rdma_is_zero_gid(&table->data_vec[i].gid)) { del_gid(ib_dev, port, table, i); deleted = true; } @@ -728,15 +755,17 @@ void 
ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, unsigned long gid_type_mask, enum ib_cache_gid_default_mode mode) { - union ib_gid gid; + union ib_gid gid = { }; struct ib_gid_attr gid_attr; struct ib_gid_table *table; unsigned int gid_type; unsigned long mask; - table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid; + table = rdma_gid_table(ib_dev, port); - make_default_gid(ndev, &gid); + mask = GID_ATTR_FIND_MASK_GID_TYPE | + GID_ATTR_FIND_MASK_DEFAULT | + GID_ATTR_FIND_MASK_NETDEV; memset(&gid_attr, 0, sizeof(gid_attr)); gid_attr.ndev = ndev; @@ -747,18 +776,18 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, gid_attr.gid_type = gid_type; if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) { - mask = GID_ATTR_FIND_MASK_GID_TYPE | - GID_ATTR_FIND_MASK_DEFAULT; + make_default_gid(ndev, &gid); __ib_cache_gid_add(ib_dev, port, &gid, &gid_attr, mask, true); } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) { - ib_cache_gid_del(ib_dev, port, &gid, &gid_attr); + _ib_cache_gid_del(ib_dev, port, &gid, + &gid_attr, mask, true); } } } -static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port, - struct ib_gid_table *table) +static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port, + struct ib_gid_table *table) { unsigned int i; unsigned long roce_gid_type_mask; @@ -768,8 +797,7 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port, roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port); num_default_gids = hweight_long(roce_gid_type_mask); for (i = 0; i < num_default_gids && i < table->sz; i++) { - struct ib_gid_table_entry *entry = - &table->data_vec[i]; + struct ib_gid_table_entry *entry = &table->data_vec[i]; entry->props |= GID_TABLE_ENTRY_DEFAULT; current_gid = find_next_bit(&roce_gid_type_mask, @@ -777,59 +805,42 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port, current_gid); entry->attr.gid_type = current_gid++; } +} - return 0; + +static void 
gid_table_release_one(struct ib_device *ib_dev) +{ + struct ib_gid_table *table; + u8 port; + + for (port = 0; port < ib_dev->phys_port_cnt; port++) { + table = ib_dev->cache.ports[port].gid; + release_gid_table(table); + ib_dev->cache.ports[port].gid = NULL; + } } static int _gid_table_setup_one(struct ib_device *ib_dev) { u8 port; struct ib_gid_table *table; - int err = 0; for (port = 0; port < ib_dev->phys_port_cnt; port++) { u8 rdma_port = port + rdma_start_port(ib_dev); - table = - alloc_gid_table( + table = alloc_gid_table( ib_dev->port_immutable[rdma_port].gid_tbl_len); - if (!table) { - err = -ENOMEM; + if (!table) goto rollback_table_setup; - } - err = gid_table_reserve_default(ib_dev, - port + rdma_start_port(ib_dev), - table); - if (err) - goto rollback_table_setup; + gid_table_reserve_default(ib_dev, rdma_port, table); ib_dev->cache.ports[port].gid = table; } - return 0; rollback_table_setup: - for (port = 0; port < ib_dev->phys_port_cnt; port++) { - table = ib_dev->cache.ports[port].gid; - - cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev), - table); - release_gid_table(table); - } - - return err; -} - -static void gid_table_release_one(struct ib_device *ib_dev) -{ - struct ib_gid_table *table; - u8 port; - - for (port = 0; port < ib_dev->phys_port_cnt; port++) { - table = ib_dev->cache.ports[port].gid; - release_gid_table(table); - ib_dev->cache.ports[port].gid = NULL; - } + gid_table_release_one(ib_dev); + return -ENOMEM; } static void gid_table_cleanup_one(struct ib_device *ib_dev) @@ -871,7 +882,7 @@ int ib_get_cached_gid(struct ib_device *device, if (!rdma_is_port_valid(device, port_num)) return -EINVAL; - table = device->cache.ports[port_num - rdma_start_port(device)].gid; + table = rdma_gid_table(device, port_num); read_lock_irqsave(&table->rwlock, flags); res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr); read_unlock_irqrestore(&table->rwlock, flags); @@ -1089,7 +1100,7 @@ static int config_non_roce_gid_cache(struct 
ib_device *device, gid_attr.device = device; gid_attr.port_num = port; - table = device->cache.ports[port - rdma_start_port(device)].gid; + table = rdma_gid_table(device, port); mutex_lock(&table->lock); for (i = 0; i < gid_tbl_len; ++i) { @@ -1122,7 +1133,7 @@ static void ib_cache_update(struct ib_device *device, if (!rdma_is_port_valid(device, port)) return; - table = device->cache.ports[port - rdma_start_port(device)].gid; + table = rdma_gid_table(device, port); tprops = kmalloc(sizeof *tprops, GFP_KERNEL); if (!tprops) @@ -1142,8 +1153,9 @@ static void ib_cache_update(struct ib_device *device, goto err; } - pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len * - sizeof *pkey_cache->table, GFP_KERNEL); + pkey_cache = kmalloc(struct_size(pkey_cache, table, + tprops->pkey_tbl_len), + GFP_KERNEL); if (!pkey_cache) goto err; @@ -1233,8 +1245,9 @@ int ib_cache_setup_one(struct ib_device *device) rwlock_init(&device->cache.lock); device->cache.ports = - kzalloc(sizeof(*device->cache.ports) * - (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); + kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1, + sizeof(*device->cache.ports), + GFP_KERNEL); if (!device->cache.ports) return -ENOMEM; @@ -1284,13 +1297,3 @@ void ib_cache_cleanup_one(struct ib_device *device) flush_workqueue(ib_wq); gid_table_cleanup_one(device); } - -void __init ib_cache_setup(void) -{ - roce_gid_mgmt_init(); -} - -void __exit ib_cache_cleanup(void) -{ - roce_gid_mgmt_cleanup(); -} |