Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/resource_tracker.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 237
1 file changed, 214 insertions, 23 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 9813d34f3e5b..b46dbe29ef6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -222,6 +222,13 @@ enum res_fs_rule_states {
 struct res_fs_rule {
 	struct res_common	com;
 	int			qpn;
+	/* VF DMFS mbox with port flipped */
+	void			*mirr_mbox;
+	/* > 0 --> apply mirror when getting into HA mode      */
+	/* = 0 --> un-apply mirror when getting out of HA mode */
+	u32			mirr_mbox_size;
+	struct list_head	mirr_list;
+	u64			mirr_rule_id;
 };
 
 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -4284,6 +4291,22 @@ err_mac:
 	return err;
 }
 
+static u32 qp_attach_mbox_size(void *mbox)
+{
+	u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+	struct _rule_hw *rule_header;
+
+	rule_header = (struct _rule_hw *)(mbox + size);
+
+	while (rule_header->size) {
+		size += rule_header->size * sizeof(u32);
+		rule_header += 1;
+	}
+	return size;
+}
+
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_vhcr *vhcr,
 					 struct mlx4_cmd_mailbox *inbox,
@@ -4300,15 +4323,18 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
 	struct _rule_hw *rule_header;
 	int header_id;
+	struct res_fs_rule *rrule;
+	u32 mbox_size;
 
 	if (dev->caps.steering_mode !=
 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
 		return -EOPNOTSUPP;
 
 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
-	ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
-	if (ctrl->port <= 0)
+	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
+	if (err <= 0)
 		return -EINVAL;
+	ctrl->port = err;
 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
 	if (err) {
@@ -4328,7 +4354,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	case MLX4_NET_TRANS_RULE_ID_ETH:
 		if (validate_eth_header_mac(slave, rule_header, rlist)) {
 			err = -EINVAL;
-			goto err_put;
+			goto err_put_qp;
 		}
 		break;
 	case MLX4_NET_TRANS_RULE_ID_IB:
@@ -4339,7 +4365,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
 		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
 			err = -EINVAL;
-			goto err_put;
+			goto err_put_qp;
 		}
 		vhcr->in_modifier +=
 			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
@@ -4347,7 +4373,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	default:
 		pr_err("Corrupted mailbox\n");
 		err = -EINVAL;
-		goto err_put;
+		goto err_put_qp;
 	}
 
 execute:
@@ -4356,23 +4382,69 @@ execute:
 			MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
 			MLX4_CMD_NATIVE);
 	if (err)
-		goto err_put;
+		goto err_put_qp;
+
 	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
 	if (err) {
 		mlx4_err(dev, "Fail to add flow steering resources\n");
-		/* detach rule*/
+		goto err_detach;
+	}
+
+	err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
+	if (err)
+		goto err_detach;
+
+	mbox_size = qp_attach_mbox_size(inbox->buf);
+	rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
+	if (!rrule->mirr_mbox) {
+		err = -ENOMEM;
+		goto err_put_rule;
+	}
+	rrule->mirr_mbox_size = mbox_size;
+	rrule->mirr_rule_id = 0;
+	memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
+
+	/* set different port */
+	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
+	if (ctrl->port == 1)
+		ctrl->port = 2;
+	else
+		ctrl->port = 1;
+
+	if (mlx4_is_bonded(dev))
+		mlx4_do_mirror_rule(dev, rrule);
+
+	atomic_inc(&rqp->ref_count);
+
+err_put_rule:
+	put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
+err_detach:
+	/* detach rule on error */
+	if (err)
 		mlx4_cmd(dev, vhcr->out_param, 0, 0,
 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
 			 MLX4_CMD_NATIVE);
-		goto err_put;
-	}
-	atomic_inc(&rqp->ref_count);
-err_put:
+err_put_qp:
 	put_res(dev, slave, qpn, RES_QP);
 	return err;
 }
 
+static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+	int err;
+
+	err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
+	if (err) {
+		mlx4_err(dev, "Fail to remove flow steering resources\n");
+		return err;
+	}
+
+	mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+	return 0;
+}
+
 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_vhcr *vhcr,
 					 struct mlx4_cmd_mailbox *inbox,
@@ -4382,6 +4454,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 	int err;
 	struct res_qp *rqp;
 	struct res_fs_rule *rrule;
+	u64 mirr_reg_id;
 
 	if (dev->caps.steering_mode !=
 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4390,12 +4463,30 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
 	if (err)
 		return err;
+
+	if (!rrule->mirr_mbox) {
+		mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
+		put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
+		return -EINVAL;
+	}
+	mirr_reg_id = rrule->mirr_rule_id;
+	kfree(rrule->mirr_mbox);
+	/* Release the rule form busy state before removal */
 	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
 	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
 	if (err)
 		return err;
 
+	if (mirr_reg_id && mlx4_is_bonded(dev)) {
+		err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
+		if (err) {
+			mlx4_err(dev, "Fail to get resource of mirror rule\n");
+		} else {
+			put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
+			mlx4_undo_mirror_rule(dev, rrule);
+		}
+	}
 	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
 	if (err) {
 		mlx4_err(dev, "Fail to remove flow steering resources\n");
@@ -4833,6 +4924,91 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
 	spin_unlock_irq(mlx4_tlock(dev));
 }
 
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+	struct res_fs_rule *mirr_rule;
+	u64 reg_id;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	if (!fs_rule->mirr_mbox) {
+		mlx4_err(dev, "rule mirroring mailbox is null\n");
+		return -EINVAL;
+	}
+	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
+	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
+			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+			   MLX4_CMD_NATIVE);
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	if (err)
+		goto err;
+
+	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
+	if (err)
+		goto err_detach;
+
+	err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
+	if (err)
+		goto err_rem;
+
+	fs_rule->mirr_rule_id = reg_id;
+	mirr_rule->mirr_rule_id = 0;
+	mirr_rule->mirr_mbox_size = 0;
+	mirr_rule->mirr_mbox = NULL;
+	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
+
+	return 0;
+err_rem:
+	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
+err_detach:
+	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+err:
+	return err;
+}
+
+static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker =
+		&priv->mfunc.master.res_tracker;
+	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
+	struct rb_node *p;
+	struct res_fs_rule *fs_rule;
+	int err = 0;
+	LIST_HEAD(mirr_list);
+
+	for (p = rb_first(root); p; p = rb_next(p)) {
+		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
+		if ((bond && fs_rule->mirr_mbox_size) ||
+		    (!bond && !fs_rule->mirr_mbox_size))
+			list_add_tail(&fs_rule->mirr_list, &mirr_list);
+	}
+
+	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
+		if (bond)
+			err += mlx4_do_mirror_rule(dev, fs_rule);
+		else
+			err += mlx4_undo_mirror_rule(dev, fs_rule);
+	}
+	return err;
+}
+
+int mlx4_bond_fs_rules(struct mlx4_dev *dev)
+{
+	return mlx4_mirror_fs_rules(dev, true);
+}
+
+int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
+{
+	return mlx4_mirror_fs_rules(dev, false);
+}
+
 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -4952,26 +5128,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
 	struct res_counter *counter;
 	struct res_counter *tmp;
 	int err;
-	int index;
+	int *counters_arr = NULL;
+	int i, j;
 
 	err = move_all_busy(dev, slave, RES_COUNTER);
 	if (err)
 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);
 
-	spin_lock_irq(mlx4_tlock(dev));
-	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
-		if (counter->com.owner == slave) {
-			index = counter->com.res_id;
-			rb_erase(&counter->com.node,
-				 &tracker->res_tree[RES_COUNTER]);
-			list_del(&counter->com.list);
-			kfree(counter);
-			__mlx4_counter_free(dev, index);
+	counters_arr = kmalloc_array(dev->caps.max_counters,
+				     sizeof(*counters_arr), GFP_KERNEL);
+	if (!counters_arr)
+		return;
+
+	do {
+		i = 0;
+		j = 0;
+		spin_lock_irq(mlx4_tlock(dev));
+		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+			if (counter->com.owner == slave) {
+				counters_arr[i++] = counter->com.res_id;
+				rb_erase(&counter->com.node,
					 &tracker->res_tree[RES_COUNTER]);
+				list_del(&counter->com.list);
+				kfree(counter);
+			}
+		}
+		spin_unlock_irq(mlx4_tlock(dev));
+
+		while (j < i) {
+			__mlx4_counter_free(dev, counters_arr[j++]);
 			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
 		}
-	}
-	spin_unlock_irq(mlx4_tlock(dev));
+	} while (i);
+
+	kfree(counters_arr);
 }
 
 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
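
For readers tracing the mirroring path above, the standalone sketch below models, outside the kernel and with simplified stand-in types, the two ideas the patch relies on: walking the chained rule headers after the control segment to find the attach mailbox size (as qp_attach_mbox_size does), and duplicating that mailbox with the port flipped before the rule is replayed on the other port. The demo_* names and struct layouts are illustrative assumptions, not the real mlx4 definitions, and the header-walk stride here is a simplified model of the in-kernel one.

/* Toy model of the DMFS attach-mailbox walk and port flip.
 * The structs are stand-ins: only the fields the walk needs are kept.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_rule_hw_ctrl {      /* stand-in for the 16-byte control segment */
        uint8_t port;           /* 1 or 2 */
        uint8_t pad[15];
};

struct demo_rule_hw {           /* stand-in for a chained rule header */
        uint8_t id;
        uint8_t size;           /* header size in 32-bit words; 0 terminates */
        uint8_t data[14];
};

/* Total mailbox size: control segment plus each chained header's declared
 * size, stopping at a zero-sized header (same idea as qp_attach_mbox_size). */
static uint32_t demo_attach_mbox_size(const void *mbox)
{
        uint32_t size = sizeof(struct demo_rule_hw_ctrl);
        const struct demo_rule_hw *hdr =
                (const struct demo_rule_hw *)((const char *)mbox + size);

        while (hdr->size) {
                size += hdr->size * sizeof(uint32_t);
                hdr = (const struct demo_rule_hw *)
                        ((const char *)hdr + hdr->size * sizeof(uint32_t));
        }
        return size;
}

/* Duplicate the mailbox and flip port 1 <-> 2, as the attach wrapper does
 * before the mirror rule is applied on the other port. */
static void *demo_mirror_mbox(const void *mbox, uint32_t size)
{
        struct demo_rule_hw_ctrl *ctrl;
        void *copy = malloc(size);

        if (!copy)
                return NULL;
        memcpy(copy, mbox, size);
        ctrl = copy;
        ctrl->port = (ctrl->port == 1) ? 2 : 1;
        return copy;
}

int main(void)
{
        /* Toy mailbox: control segment + one 16-byte header + terminator. */
        unsigned char mbox[64] = {0};
        struct demo_rule_hw_ctrl *ctrl = (struct demo_rule_hw_ctrl *)mbox;
        struct demo_rule_hw *hdr = (struct demo_rule_hw *)(mbox + sizeof(*ctrl));
        uint32_t size;
        void *mirror;

        ctrl->port = 1;
        hdr->size = 4;          /* 4 * sizeof(u32) == 16 bytes */

        size = demo_attach_mbox_size(mbox);
        printf("mailbox size: %u bytes\n", size);

        mirror = demo_mirror_mbox(mbox, size);
        if (mirror) {
                printf("mirror port: %u\n",
                       ((struct demo_rule_hw_ctrl *)mirror)->port);
                free(mirror);
        }
        return 0;
}

In the patch itself, the saved, port-flipped copy lives in rrule->mirr_mbox and is replayed through MLX4_QP_FLOW_STEERING_ATTACH by mlx4_do_mirror_rule() either immediately (if the device is already bonded) or later via mlx4_bond_fs_rules() when HA mode is entered.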