22 files changed, 878 insertions, 573 deletions
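Before the patch body: one part of this series adds `rx[i]_hds_nosplit_packets` / `rx[i]_hds_nosplit_bytes` ethtool counters next to the existing `rx[i]_hds_nodata_*` ones for header/data split mode. The following is a minimal standalone C sketch of that accounting split, mirroring the en_rx.c and counters.rst hunks below; the struct and function names here (`rq_stats_sketch`, `account_hds`) are illustrative placeholders, not the driver's actual types, and error/ring handling is omitted.

/*
 * Illustrative sketch only (not driver code): per-ring accounting for
 * header/data split outcomes. A packet the hardware cannot split (for
 * example ICMPv4/v6; only TCP and UDP over IPv4/IPv6 are split today)
 * is counted as "nosplit"; a split packet that carries no payload is
 * counted as "nodata".
 */
struct rq_stats_sketch {
	unsigned long long hds_nodata_packets;   /* split, header only      */
	unsigned long long hds_nodata_bytes;
	unsigned long long hds_nosplit_packets;  /* hardware did not split  */
	unsigned long long hds_nosplit_bytes;
};

static void account_hds(struct rq_stats_sketch *stats, int split,
			unsigned int head_size, unsigned int data_bcnt)
{
	if (split) {
		if (!data_bcnt) {
			/* header/data split left no data part */
			stats->hds_nodata_packets++;
			stats->hds_nodata_bytes += head_size;
		}
	} else {
		/* protocol not supported for splitting */
		stats->hds_nosplit_packets++;
		stats->hds_nosplit_bytes += data_bcnt;
	}
}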
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst index 3bd72577af9a..99d95be4d159 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst @@ -218,6 +218,22 @@ the software port. [#accel]_. - Informative + * - `rx[i]_hds_nosplit_packets` + - Number of packets that were not split in header/data split mode. A + packet will not get split when the hardware does not support its + protocol splitting. An example such a protocol is ICMPv4/v6. Currently + TCP and UDP with IPv4/IPv6 are supported for header/data split + [#accel]_. + - Informative + + * - `rx[i]_hds_nosplit_bytes` + - Number of bytes for packets that were not split in header/data split + mode. A packet will not get split when the hardware does not support its + protocol splitting. An example such a protocol is ICMPv4/v6. Currently + TCP and UDP with IPv4/IPv6 are supported for header/data split + [#accel]_. + - Informative + * - `rx[i]_lro_packets` - The number of LRO packets received on ring i [#accel]_. - Acceleration diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 20768ef2e9d2..9af8ddb4a78f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -754,6 +754,8 @@ static const char *cmd_status_str(u8 status) return "bad resource"; case MLX5_CMD_STAT_RES_BUSY: return "resource busy"; + case MLX5_CMD_STAT_NOT_READY: + return "FW not ready"; case MLX5_CMD_STAT_LIM_ERR: return "limits exceeded"; case MLX5_CMD_STAT_BAD_RES_STATE_ERR: @@ -787,6 +789,7 @@ static int cmd_status_to_err(u8 status) case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; + case MLX5_CMD_STAT_NOT_READY: return -EAGAIN; case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; case MLX5_CMD_STAT_IX_ERR: return -EINVAL; @@ -815,14 +818,16 @@ EXPORT_SYMBOL(mlx5_cmd_out_err); static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) { u16 opcode, op_mod; + u8 status; u16 uid; opcode = in_to_opcode(in); op_mod = MLX5_GET(mbox_in, in, op_mod); uid = MLX5_GET(mbox_in, in, uid); + status = MLX5_GET(mbox_out, out, status); if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY && - opcode != MLX5_CMD_OP_CREATE_UCTX) + opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY) mlx5_cmd_out_err(dev, opcode, op_mod, out); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index ddf1b87f1bc0..9aed29fa4900 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -203,10 +203,10 @@ TRACE_EVENT(mlx5_fs_set_fte, fs_get_obj(__entry->fg, fte->node.parent); __entry->group_index = __entry->fg->id; __entry->index = fte->index; - __entry->action = fte->action.action; + __entry->action = fte->act_dests.action.action; __entry->mask_enable = __entry->fg->mask.match_criteria_enable; - __entry->flow_tag = fte->flow_context.flow_tag; - __entry->flow_source = fte->flow_context.flow_source; + __entry->flow_tag = fte->act_dests.flow_context.flow_tag; + __entry->flow_source = fte->act_dests.flow_context.flow_source; 
memcpy(__entry->mask_outer, MLX5_ADDR_OF(fte_match_param, &__entry->fg->mask.match_criteria, @@ -284,7 +284,7 @@ TRACE_EVENT(mlx5_fs_add_rule, TP_fast_assign( __entry->rule = rule; fs_get_obj(__entry->fte, rule->node.parent); - __entry->index = __entry->fte->dests_size - 1; + __entry->index = __entry->fte->act_dests.dests_size - 1; __entry->sw_action = rule->sw_action; memcpy(__entry->destination, &rule->dest_attr, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 47e7a80d221b..a5659c0c4236 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1016,30 +1016,31 @@ err_rq_xdp_prog: static void mlx5e_free_rq(struct mlx5e_rq *rq) { - struct bpf_prog *old_prog; - - if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) { - old_prog = rcu_dereference_protected(rq->xdp_prog, - lockdep_is_held(&rq->priv->state_lock)); - if (old_prog) - bpf_prog_put(old_prog); - } + kvfree(rq->dim); + page_pool_destroy(rq->page_pool); switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + mlx5e_rq_free_shampo(rq); kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be)); mlx5e_free_mpwqe_rq_drop_page(rq); - mlx5e_rq_free_shampo(rq); break; default: /* MLX5_WQ_TYPE_CYCLIC */ mlx5e_free_wqe_alloc_info(rq); } - kvfree(rq->dim); - xdp_rxq_info_unreg(&rq->xdp_rxq); - page_pool_destroy(rq->page_pool); mlx5_wq_destroy(&rq->wq_ctrl); + + if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) { + struct bpf_prog *old_prog; + + old_prog = rcu_dereference_protected(rq->xdp_prog, + lockdep_is_held(&rq->priv->state_lock)); + if (old_prog) + bpf_prog_put(old_prog); + } + xdp_rxq_info_unreg(&rq->xdp_rxq); } int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index de9d01036c28..8e24ba96c779 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -2346,6 +2346,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq stats->hds_nodata_packets++; stats->hds_nodata_bytes += head_size; } + } else { + stats->hds_nosplit_packets++; + stats->hds_nosplit_bytes += data_bcnt; } mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index e7a3290a708a..611ec4b6f370 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -144,6 +144,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, @@ -347,6 +349,8 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_gro_large_hds += rq_stats->gro_large_hds; s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets; s->rx_hds_nodata_bytes += 
rq_stats->hds_nodata_bytes; + s->rx_hds_nosplit_packets += rq_stats->hds_nosplit_packets; + s->rx_hds_nosplit_bytes += rq_stats->hds_nosplit_bytes; s->rx_ecn_mark += rq_stats->ecn_mark; s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; @@ -2062,6 +2066,8 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 4c5858c1dd82..5961c569cfe0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -156,6 +156,8 @@ struct mlx5e_sw_stats { u64 rx_gro_large_hds; u64 rx_hds_nodata_packets; u64 rx_hds_nodata_bytes; + u64 rx_hds_nosplit_packets; + u64 rx_hds_nosplit_bytes; u64 rx_mcast_packets; u64 rx_ecn_mark; u64 rx_removed_vlan_packets; @@ -356,6 +358,8 @@ struct mlx5e_rq_stats { u64 gro_large_hds; u64 hds_nodata_packets; u64 hds_nodata_bytes; + u64 hds_nosplit_packets; + u64 hds_nosplit_bytes; u64 mcast_packets; u64 ecn_mark; u64 removed_vlan_packets; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index f15ecaef1331..2505f90c0b39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -896,7 +896,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx) if (!mlx5_irq_pool_is_sf_pool(pool)) return comp_irq_request_pci(dev, vecidx); - af_desc.is_managed = 1; + af_desc.is_managed = false; cpumask_copy(&af_desc.mask, cpu_online_mask); cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus); irq = mlx5_irq_affinity_request(dev, pool, &af_desc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 9b8599c200e2..676005854dad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -463,7 +463,7 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev, int num_encap = 0; *extended_dest = false; - if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return 0; list_for_each_entry(dst, &fte->node.children, node.list) { @@ -502,17 +502,17 @@ mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context) execute_aso[0]); MLX5_SET(execute_aso, execute_aso, valid, 1); MLX5_SET(execute_aso, execute_aso, aso_object_id, - fte->action.exe_aso.object_id); + fte->act_dests.action.exe_aso.object_id); exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl); MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id, - fte->action.exe_aso.return_reg_id); + fte->act_dests.action.exe_aso.return_reg_id); MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type, - fte->action.exe_aso.type); + fte->act_dests.action.exe_aso.type); MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, 
init_color, - fte->action.exe_aso.flow_meter.init_color); + fte->act_dests.action.exe_aso.flow_meter.init_color); MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id, - fte->action.exe_aso.flow_meter.meter_idx); + fte->act_dests.action.exe_aso.flow_meter.meter_idx); } static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, @@ -541,7 +541,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, else dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format); - inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size; + inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -553,7 +553,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(set_fte_in, in, table_id, ft->id); MLX5_SET(set_fte_in, in, flow_index, fte->index); MLX5_SET(set_fte_in, in, ignore_flow_level, - !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL)); + !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL)); MLX5_SET(set_fte_in, in, vport_number, ft->vport); MLX5_SET(set_fte_in, in, other_vport, @@ -563,23 +563,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, group_id, group_id); MLX5_SET(flow_context, in_flow_context, flow_tag, - fte->flow_context.flow_tag); + fte->act_dests.flow_context.flow_tag); MLX5_SET(flow_context, in_flow_context, flow_source, - fte->flow_context.flow_source); + fte->act_dests.flow_context.flow_source); MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en, - !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN)); + !!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN)); MLX5_SET(flow_context, in_flow_context, extended_destination, extended_dest); - action = fte->action.action; + action = fte->act_dests.action.action; if (extended_dest) action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; MLX5_SET(flow_context, in_flow_context, action, action); - if (!extended_dest && fte->action.pkt_reformat) { - struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat; + if (!extended_dest && fte->act_dests.action.pkt_reformat) { + struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat; if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat); @@ -591,46 +591,46 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, goto err_out; } } else { - reformat_id = fte->action.pkt_reformat->id; + reformat_id = fte->act_dests.action.pkt_reformat->id; } } MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id); - if (fte->action.modify_hdr) { - if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { + if (fte->act_dests.action.modify_hdr) { + if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n"); err = -EOPNOTSUPP; goto err_out; } MLX5_SET(flow_context, in_flow_context, modify_header_id, - fte->action.modify_hdr->id); + fte->act_dests.action.modify_hdr->id); } MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type, - fte->action.crypto.type); + fte->act_dests.action.crypto.type); MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id, - fte->action.crypto.obj_id); + fte->act_dests.action.crypto.obj_id); vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); - MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype); - MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid); - 
MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio); + MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype); + MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid); + MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio); vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2); - MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype); - MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid); - MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio); + MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype); + MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid); + MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); memcpy(in_match_value, &fte->val, sizeof(fte->val)); in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { int list_size = 0; list_for_each_entry(dst, &fte->node.children, node.list) { @@ -706,7 +706,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, list_size); } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_flow_counter, ft->type)); @@ -731,8 +731,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, list_size); } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { - if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { + if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) { mlx5_cmd_set_fte_flow_meter(fte, in_flow_context); } else { err = -EOPNOTSUPP; @@ -1071,7 +1071,7 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns, static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns, enum fs_flow_table_type ft_type) { - return 0; + return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH; } static const struct mlx5_flow_cmds mlx5_flow_cmds = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 53e0e5137d3f..7eb7b3ffe3d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -124,4 +124,12 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void); int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode); int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect); + +static inline bool mlx5_fs_cmd_is_fw_term_table(struct mlx5_flow_table *ft) +{ + if (ft->flags & MLX5_FLOW_TABLE_TERMINATION) + return true; + + return false; +} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index a47d6419160d..8505d5e241e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -605,12 +605,37 @@ static void modify_fte(struct fs_fte *fte) dev = get_dev(&fte->node); root = find_root(&ft->node); - err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte); + err = root->cmds->update_fte(root, ft, fg, fte->act_dests.modify_mask, fte); if (err) mlx5_core_warn(dev, "%s can't del rule fg id=%d fte_index=%d\n", __func__, fg->id, fte->index); - fte->modify_mask = 0; + 
fte->act_dests.modify_mask = 0; +} + +static void del_sw_hw_dup_rule(struct fs_node *node) +{ + struct mlx5_flow_rule *rule; + struct fs_fte *fte; + + fs_get_obj(rule, node); + fs_get_obj(fte, rule->node.parent); + trace_mlx5_fs_del_rule(rule); + + if (is_fwd_next_action(rule->sw_action)) { + mutex_lock(&rule->dest_attr.ft->lock); + list_del(&rule->next_ft); + mutex_unlock(&rule->dest_attr.ft->lock); + } + + /* If a pending rule is being deleted it means + * this is a NO APPEND rule, so there are no partial deletions, + * all the rules of the mlx5_flow_handle are going to be deleted + * and the rules aren't shared with any other mlx5_flow_handle instance + * so no need to do any bookkeeping like in del_sw_hw_rule(). + */ + + kfree(rule); } static void del_sw_hw_rule(struct fs_node *node) @@ -628,29 +653,29 @@ static void del_sw_hw_rule(struct fs_node *node) } if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) { - --fte->dests_size; - fte->modify_mask |= + --fte->act_dests.dests_size; + fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); - fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; + fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; goto out; } if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) { - --fte->dests_size; - fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); - fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW; + --fte->act_dests.dests_size; + fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); + fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW; goto out; } if (is_fwd_dest_type(rule->dest_attr.type)) { - --fte->dests_size; - --fte->fwd_dests; + --fte->act_dests.dests_size; + --fte->act_dests.fwd_dests; - if (!fte->fwd_dests) - fte->action.action &= + if (!fte->act_dests.fwd_dests) + fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - fte->modify_mask |= + fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); goto out; } @@ -658,12 +683,33 @@ out: kfree(rule); } +static void switch_to_pending_act_dests(struct fs_fte *fte) +{ + struct fs_node *iter; + + memcpy(&fte->act_dests, &fte->dup->act_dests, sizeof(fte->act_dests)); + + list_bulk_move_tail(&fte->node.children, + fte->dup->children.next, + fte->dup->children.prev); + + list_for_each_entry(iter, &fte->node.children, list) + iter->del_sw_func = del_sw_hw_rule; + + /* Make sure the fte isn't deleted + * as mlx5_del_flow_rules() decreases the refcount + * of the fte to trigger deletion. 
+ */ + tree_get_node(&fte->node); +} + static void del_hw_fte(struct fs_node *node) { struct mlx5_flow_root_namespace *root; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct mlx5_core_dev *dev; + bool pending_used = false; struct fs_fte *fte; int err; @@ -672,16 +718,35 @@ static void del_hw_fte(struct fs_node *node) fs_get_obj(ft, fg->node.parent); trace_mlx5_fs_del_fte(fte); - WARN_ON(fte->dests_size); + WARN_ON(fte->act_dests.dests_size); dev = get_dev(&ft->node); root = find_root(&ft->node); + + if (fte->dup && !list_empty(&fte->dup->children)) { + switch_to_pending_act_dests(fte); + pending_used = true; + } else { + /* Avoid double call to del_hw_fte */ + node->del_hw_func = NULL; + } + if (node->active) { - err = root->cmds->delete_fte(root, ft, fte); - if (err) - mlx5_core_warn(dev, - "flow steering can't delete fte in index %d of flow group id %d\n", - fte->index, fg->id); - node->active = false; + if (pending_used) { + err = root->cmds->update_fte(root, ft, fg, + fte->act_dests.modify_mask, fte); + if (err) + mlx5_core_warn(dev, + "flow steering can't update to pending rule in index %d of flow group id %d\n", + fte->index, fg->id); + fte->act_dests.modify_mask = 0; + } else { + err = root->cmds->delete_fte(root, ft, fte); + if (err) + mlx5_core_warn(dev, + "flow steering can't delete fte in index %d of flow group id %d\n", + fte->index, fg->id); + node->active = false; + } } } @@ -700,6 +765,7 @@ static void del_sw_fte(struct fs_node *node) rhash_fte); WARN_ON(err); ida_free(&fg->fte_allocator, fte->index - fg->start_index); + kvfree(fte->dup); kmem_cache_free(steering->ftes_cache, fte); } @@ -782,8 +848,8 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, memcpy(fte->val, &spec->match_value, sizeof(fte->val)); fte->node.type = FS_TYPE_FLOW_ENTRY; - fte->action = *flow_act; - fte->flow_context = spec->flow_context; + fte->act_dests.action = *flow_act; + fte->act_dests.flow_context = spec->flow_context; tree_init_node(&fte->node, del_hw_fte, del_sw_fte); @@ -1103,18 +1169,45 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio return err; } +static bool rule_is_pending(struct fs_fte *fte, struct mlx5_flow_rule *rule) +{ + struct mlx5_flow_rule *tmp_rule; + struct fs_node *iter; + + if (!fte->dup || list_empty(&fte->dup->children)) + return false; + + list_for_each_entry(iter, &fte->dup->children, list) { + tmp_rule = container_of(iter, struct mlx5_flow_rule, node); + + if (tmp_rule == rule) + return true; + } + + return false; +} + static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, struct mlx5_flow_destination *dest) { struct mlx5_flow_root_namespace *root; + struct fs_fte_action *act_dests; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; + bool pending = false; struct fs_fte *fte; int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); int err = 0; fs_get_obj(fte, rule->node.parent); - if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + + pending = rule_is_pending(fte, rule); + if (pending) + act_dests = &fte->dup->act_dests; + else + act_dests = &fte->act_dests; + + if (!(act_dests->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return -EINVAL; down_write_ref_node(&fte->node, false); fs_get_obj(fg, fte->node.parent); @@ -1122,8 +1215,9 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, memcpy(&rule->dest_attr, dest, sizeof(*dest)); root = find_root(&ft->node); - err = root->cmds->update_fte(root, ft, fg, - modify_mask, fte); + if (!pending) + err 
= root->cmds->update_fte(root, ft, fg, + modify_mask, fte); up_write_ref_node(&fte->node, false); return err; @@ -1453,6 +1547,16 @@ static struct mlx5_flow_handle *alloc_handle(int num_rules) return handle; } +static void destroy_flow_handle_dup(struct mlx5_flow_handle *handle, + int i) +{ + for (; --i >= 0;) { + list_del(&handle->rule[i]->node.list); + kfree(handle->rule[i]); + } + kfree(handle); +} + static void destroy_flow_handle(struct fs_fte *fte, struct mlx5_flow_handle *handle, struct mlx5_flow_destination *dest, @@ -1460,7 +1564,7 @@ static void destroy_flow_handle(struct fs_fte *fte, { for (; --i >= 0;) { if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) { - fte->dests_size--; + fte->act_dests.dests_size--; list_del(&handle->rule[i]->node.list); kfree(handle->rule[i]); } @@ -1469,6 +1573,61 @@ static void destroy_flow_handle(struct fs_fte *fte, } static struct mlx5_flow_handle * +create_flow_handle_dup(struct list_head *children, + struct mlx5_flow_destination *dest, + int dest_num, + struct fs_fte_action *act_dests) +{ + static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); + static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); + struct mlx5_flow_rule *rule = NULL; + struct mlx5_flow_handle *handle; + int i = 0; + int type; + + handle = alloc_handle((dest_num) ? dest_num : 1); + if (!handle) + return NULL; + + do { + rule = alloc_rule(dest + i); + if (!rule) + goto free_rules; + + /* Add dest to dests list- we need flow tables to be in the + * end of the list for forward to next prio rules. + */ + tree_init_node(&rule->node, NULL, del_sw_hw_dup_rule); + if (dest && + dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) + list_add(&rule->node.list, children); + else + list_add_tail(&rule->node.list, children); + + if (dest) { + act_dests->dests_size++; + + if (is_fwd_dest_type(dest[i].type)) + act_dests->fwd_dests++; + + type = dest[i].type == + MLX5_FLOW_DESTINATION_TYPE_COUNTER; + act_dests->modify_mask |= type ? 
count : dst; + } + handle->rule[i] = rule; + } while (++i < dest_num); + + return handle; + +free_rules: + destroy_flow_handle_dup(handle, i); + act_dests->dests_size = 0; + act_dests->fwd_dests = 0; + + return NULL; +} + +static struct mlx5_flow_handle * create_flow_handle(struct fs_fte *fte, struct mlx5_flow_destination *dest, int dest_num, @@ -1510,10 +1669,10 @@ create_flow_handle(struct fs_fte *fte, else list_add_tail(&rule->node.list, &fte->node.children); if (dest) { - fte->dests_size++; + fte->act_dests.dests_size++; if (is_fwd_dest_type(dest[i].type)) - fte->fwd_dests++; + fte->act_dests.fwd_dests++; type = dest[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER; @@ -1774,17 +1933,17 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_context *flow_context, const struct mlx5_flow_act *flow_act) { - if (check_conflicting_actions(flow_act, &fte->action)) { + if (check_conflicting_actions(flow_act, &fte->act_dests.action)) { mlx5_core_warn(get_dev(&fte->node), "Found two FTEs with conflicting actions\n"); return -EEXIST; } if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) && - fte->flow_context.flow_tag != flow_context->flow_tag) { + fte->act_dests.flow_context.flow_tag != flow_context->flow_tag) { mlx5_core_warn(get_dev(&fte->node), "FTE flow tag %u already exists with different flow tag %u\n", - fte->flow_context.flow_tag, + fte->act_dests.flow_context.flow_tag, flow_context->flow_tag); return -EEXIST; } @@ -1808,12 +1967,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, if (ret) return ERR_PTR(ret); - old_action = fte->action.action; - fte->action.action |= flow_act->action; + old_action = fte->act_dests.action.action; + fte->act_dests.action.action |= flow_act->action; handle = add_rule_fte(fte, fg, dest, dest_num, old_action != flow_act->action); if (IS_ERR(handle)) { - fte->action.action = old_action; + fte->act_dests.action.action = old_action; return handle; } trace_mlx5_fs_set_fte(fte, false); @@ -1961,6 +2120,62 @@ out: return fte_tmp; } +/* Native capability lacks support for adding an additional match with the same value + * to the same flow group. To accommodate the NO APPEND flag in these scenarios, + * we include the new rule in the existing flow table entry (fte) without immediate + * hardware commitment. When a request is made to delete the corresponding hardware rule, + * we then commit the pending rule to hardware. 
+ */ +static struct mlx5_flow_handle * +add_rule_dup_match_fte(struct fs_fte *fte, + const struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int dest_num) +{ + struct mlx5_flow_handle *handle; + struct fs_fte_dup *dup; + int i = 0; + + if (!fte->dup) { + dup = kvzalloc(sizeof(*dup), GFP_KERNEL); + if (!dup) + return ERR_PTR(-ENOMEM); + /* dup will be freed when the fte is freed + * this way we don't allocate / free dup on every rule deletion + * or creation + */ + INIT_LIST_HEAD(&dup->children); + fte->dup = dup; + } + + if (!list_empty(&fte->dup->children)) { + mlx5_core_warn(get_dev(&fte->node), + "Can have only a single duplicate rule\n"); + + return ERR_PTR(-EEXIST); + } + + fte->dup->act_dests.action = *flow_act; + fte->dup->act_dests.flow_context = spec->flow_context; + fte->dup->act_dests.dests_size = 0; + fte->dup->act_dests.fwd_dests = 0; + fte->dup->act_dests.modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); + + handle = create_flow_handle_dup(&fte->dup->children, + dest, dest_num, + &fte->dup->act_dests); + if (!handle) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < handle->num_rules; i++) { + tree_add_node(&handle->rule[i]->node, &fte->node); + trace_mlx5_fs_add_rule(handle->rule[i]); + } + + return handle; +} + static struct mlx5_flow_handle * try_add_to_existing_fg(struct mlx5_flow_table *ft, struct list_head *match_head, @@ -1971,6 +2186,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, int ft_version) { struct mlx5_flow_steering *steering = get_steering(&ft->node); + struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct mlx5_flow_group *g; struct mlx5_flow_handle *rule; struct match_list *iter; @@ -1984,7 +2200,9 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, return ERR_PTR(-ENOMEM); search_again_locked: - if (flow_act->flags & FLOW_ACT_NO_APPEND) + if (flow_act->flags & FLOW_ACT_NO_APPEND && + (root->cmds->get_capabilities(root, root->table_type) & + MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH)) goto skip_search; version = matched_fgs_get_version(match_head); /* Try to find an fte with identical match value and attempt update its @@ -1997,7 +2215,10 @@ search_again_locked: fte_tmp = lookup_fte_locked(g, spec->match_value, take_write); if (!fte_tmp) continue; - rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); + if (flow_act->flags & FLOW_ACT_NO_APPEND) + rule = add_rule_dup_match_fte(fte_tmp, spec, flow_act, dest, dest_num); + else + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); /* No error check needed here, because insert_fte() is not called */ up_write_ref_node(&fte_tmp->node, false); tree_put_node(&fte_tmp->node, false); @@ -2265,12 +2486,10 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) tree_remove_node(&handle->rule[i]->node, true); if (list_empty(&fte->node.children)) { fte->node.del_hw_func(&fte->node); - /* Avoid double call to del_hw_fte */ - fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); - } else if (fte->dests_size) { - if (fte->modify_mask) + } else if (fte->act_dests.dests_size) { + if (fte->act_dests.modify_mask) modify_fte(fte); up_write_ref_node(&fte->node, false); } else { @@ -3590,8 +3809,8 @@ out: } EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn); -static struct mlx5_flow_root_namespace -*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type) +struct mlx5_flow_root_namespace * +mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum 
mlx5_flow_namespace_type ns_type) { struct mlx5_flow_namespace *ns; @@ -3614,7 +3833,7 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr; int err; - root = get_root_namespace(dev, ns_type); + root = mlx5_get_root_namespace(dev, ns_type); if (!root) return ERR_PTR(-EOPNOTSUPP); @@ -3639,7 +3858,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root; - root = get_root_namespace(dev, modify_hdr->ns_type); + root = mlx5_get_root_namespace(dev, modify_hdr->ns_type); if (WARN_ON(!root)) return; root->cmds->modify_header_dealloc(root, modify_hdr); @@ -3655,7 +3874,7 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, struct mlx5_flow_root_namespace *root; int err; - root = get_root_namespace(dev, ns_type); + root = mlx5_get_root_namespace(dev, ns_type); if (!root) return ERR_PTR(-EOPNOTSUPP); @@ -3681,7 +3900,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root; - root = get_root_namespace(dev, pkt_reformat->ns_type); + root = mlx5_get_root_namespace(dev, pkt_reformat->ns_type); if (WARN_ON(!root)) return; root->cmds->packet_reformat_dealloc(root, pkt_reformat); @@ -3703,7 +3922,7 @@ mlx5_create_match_definer(struct mlx5_core_dev *dev, struct mlx5_flow_definer *definer; int id; - root = get_root_namespace(dev, ns_type); + root = mlx5_get_root_namespace(dev, ns_type); if (!root) return ERR_PTR(-EOPNOTSUPP); @@ -3727,7 +3946,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root; - root = get_root_namespace(dev, definer->ns_type); + root = mlx5_get_root_namespace(dev, definer->ns_type); if (WARN_ON(!root)) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 6201647d6156..964937f17cf5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -133,6 +133,7 @@ enum mlx5_flow_steering_capabilty { MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0, MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1, MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2, + MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH = 1UL << 3, }; struct mlx5_flow_steering { @@ -230,20 +231,29 @@ struct mlx5_ft_underlay_qp { MLX5_BYTE_OFF(fte_match_param, \ MLX5_FTE_MATCH_PARAM_RESERVED))) +struct fs_fte_action { + int modify_mask; + u32 dests_size; + u32 fwd_dests; + struct mlx5_flow_context flow_context; + struct mlx5_flow_act action; +}; + +struct fs_fte_dup { + struct list_head children; + struct fs_fte_action act_dests; +}; + /* Type of children is mlx5_flow_rule */ struct fs_fte { struct fs_node node; struct mlx5_fs_dr_rule fs_dr_rule; u32 val[MLX5_ST_SZ_DW_MATCH_PARAM]; - u32 dests_size; - u32 fwd_dests; + struct fs_fte_action act_dests; + struct fs_fte_dup *dup; u32 index; - struct mlx5_flow_context flow_context; - struct mlx5_flow_act action; enum fs_fte_status status; - struct mlx5_fc *counter; struct rhash_head hash; - int modify_mask; }; /* Type of children is mlx5_flow_table/namespace */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index b43ca0b762c3..4f55e55ecb55 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -26,6 +26,7 @@ struct mlx5_fw_reset { struct work_struct reset_now_work; struct work_struct 
reset_abort_work; unsigned long reset_flags; + u8 reset_method; struct timer_list timer; struct completion done; int ret; @@ -95,7 +96,7 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level, } static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, - u8 *reset_type, u8 *reset_state) + u8 *reset_type, u8 *reset_state, u8 *reset_method) { u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {}; u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {}; @@ -111,13 +112,26 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, *reset_type = MLX5_GET(mfrl_reg, out, reset_type); if (reset_state) *reset_state = MLX5_GET(mfrl_reg, out, reset_state); + if (reset_method) + *reset_method = MLX5_GET(mfrl_reg, out, pci_reset_req_method); return 0; } int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type) { - return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL); + return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL); +} + +static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev, + u8 *reset_method) +{ + if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) { + *reset_method = MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE; + return 0; + } + + return mlx5_reg_mfrl_query(dev, NULL, NULL, NULL, reset_method); } static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev, @@ -125,7 +139,7 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev, { u8 reset_state; - if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state)) + if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state, NULL)) goto out; if (!reset_state) @@ -398,7 +412,8 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id) return 0; } -static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev) +static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev, + u8 reset_method) { u16 dev_id; int err; @@ -409,9 +424,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev) } #if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) - err = mlx5_check_hotplug_interrupt(dev); - if (err) - return false; + if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) { + err = mlx5_check_hotplug_interrupt(dev); + if (err) + return false; + } #endif err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); @@ -427,8 +444,12 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) struct mlx5_core_dev *dev = fw_reset->dev; int err; - if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) || - !mlx5_is_reset_now_capable(dev)) { + err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method); + if (err) + mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err); + + if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) || + !mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) { err = mlx5_fw_reset_set_reset_sync_nack(dev); mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s", err ? "Failed" : "Sent"); @@ -444,21 +465,15 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. 
Device reset is expected.\n"); } -static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) +static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id) { struct pci_bus *bridge_bus = dev->pdev->bus; struct pci_dev *bridge = bridge_bus->self; unsigned long timeout; struct pci_dev *sdev; - u16 reg16, dev_id; int cap, err; + u16 reg16; - err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); - if (err) - return pcibios_err_to_errno(err); - err = mlx5_check_dev_ids(dev, dev_id); - if (err) - return err; cap = pci_find_capability(bridge, PCI_CAP_ID_EXP); if (!cap) return -EOPNOTSUPP; @@ -528,6 +543,44 @@ restore: return err; } +static int mlx5_pci_reset_bus(struct mlx5_core_dev *dev) +{ + if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) + return -EOPNOTSUPP; + + return pci_reset_bus(dev->pdev); +} + +static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method) +{ + u16 dev_id; + int err; + + err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); + if (err) + return pcibios_err_to_errno(err); + err = mlx5_check_dev_ids(dev, dev_id); + if (err) + return err; + + switch (reset_method) { + case MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE: + err = mlx5_pci_link_toggle(dev, dev_id); + if (err) + mlx5_core_warn(dev, "mlx5_pci_link_toggle failed\n"); + break; + case MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET: + err = mlx5_pci_reset_bus(dev); + if (err) + mlx5_core_warn(dev, "mlx5_pci_reset_bus failed\n"); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + static void mlx5_sync_reset_now_event(struct work_struct *work) { struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, @@ -546,9 +599,9 @@ static void mlx5_sync_reset_now_event(struct work_struct *work) goto done; } - err = mlx5_pci_link_toggle(dev); + err = mlx5_sync_pci_reset(dev, fw_reset->reset_method); if (err) { - mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err); + mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, no reset done, err %d\n", err); set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags); } @@ -610,9 +663,9 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work) mlx5_core_warn(dev, "Sync Reset, got reset action. 
rst_state = %u\n", rst_state); if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) { - err = mlx5_pci_link_toggle(dev); + err = mlx5_sync_pci_reset(dev, fw_reset->reset_method); if (err) { - mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err); + mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err); fw_reset->ret = err; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c6e951b8ebdb..8b0abd61eca6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -619,6 +619,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload)) MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_with_driver_unload, 1); + if (MLX5_CAP_GEN_MAX(dev, pcie_reset_using_hotreset_method)) + MLX5_SET(cmd_hca_cap, set_hca_cap, + pcie_reset_using_hotreset_method, 1); if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports)) MLX5_SET(cmd_hca_cap, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 50c2554c9ccf..833cb68c744f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -9,14 +9,6 @@ #include "fs_dr.h" #include "dr_types.h" -static bool dr_is_fw_term_table(struct mlx5_flow_table *ft) -{ - if (ft->flags & MLX5_FLOW_TABLE_TERMINATION) - return true; - - return false; -} - static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, @@ -70,7 +62,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns, u32 flags; int err; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr, next_ft); @@ -110,7 +102,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5dr_action *action = ft->fs_dr_table.miss_action; int err; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft); err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table); @@ -135,7 +127,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft); return set_miss_action(ns, ft, next_ft); @@ -154,7 +146,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns, match_criteria_enable); struct mlx5dr_match_parameters mask; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg); @@ -179,7 +171,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_group *fg) { - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg); return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher); @@ -279,7 +271,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, int err = 0; int i; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte); actions = 
kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions), @@ -306,12 +298,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, match_sz = sizeof(fte->val); /* Drop reformat action bit if destination vport set with reformat */ - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { list_for_each_entry(dst, &fte->node.children, node.list) { if (!contain_vport_reformat_action(dst)) continue; - fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; break; } } @@ -321,7 +313,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, * TX: modify header -> push vlan -> encap * RX: decap -> pop vlan -> modify header */ - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { enum mlx5dr_action_reformat_type decap_type = DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2; @@ -337,26 +329,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { bool is_decap; - if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) { + if (fte->act_dests.action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) { err = -EINVAL; mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n"); goto free_actions; } - is_decap = fte->action.pkt_reformat->reformat_type == + is_decap = fte->act_dests.action.pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; if (is_decap) actions[num_actions++] = - fte->action.pkt_reformat->action.dr_action; + fte->act_dests.action.pkt_reformat->action.dr_action; else delay_encap_set = true; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { tmp_action = mlx5dr_action_create_pop_vlan(); if (!tmp_action) { @@ -367,7 +359,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) { tmp_action = mlx5dr_action_create_pop_vlan(); if (!tmp_action) { @@ -378,12 +370,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) actions[num_actions++] = - fte->action.modify_hdr->action.dr_action; + fte->act_dests.action.modify_hdr->action.dr_action; - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]); + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { + tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]); if (!tmp_action) { err = -ENOMEM; goto free_actions; @@ -392,8 +384,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]); + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { + 
tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[1]); if (!tmp_action) { err = -ENOMEM; goto free_actions; @@ -404,11 +396,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, if (delay_encap_set) actions[num_actions++] = - fte->action.pkt_reformat->action.dr_action; + fte->act_dests.action.pkt_reformat->action.dr_action; /* The order of the actions below is not important */ - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { tmp_action = mlx5dr_action_create_drop(); if (!tmp_action) { err = -ENOMEM; @@ -418,9 +410,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, term_actions[num_term_actions++].dest = tmp_action; } - if (fte->flow_context.flow_tag) { + if (fte->act_dests.flow_context.flow_tag) { tmp_action = - mlx5dr_action_create_tag(fte->flow_context.flow_tag); + mlx5dr_action_create_tag(fte->act_dests.flow_context.flow_tag); if (!tmp_action) { err = -ENOMEM; goto free_actions; @@ -429,7 +421,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = tmp_action; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { list_for_each_entry(dst, &fte->node.children, node.list) { enum mlx5_flow_destination_type type = dst->dest_attr.type; u32 id; @@ -510,7 +502,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { list_for_each_entry(dst, &fte->node.children, node.list) { u32 id; @@ -537,19 +529,21 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { - if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) { + if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { + struct mlx5_flow_act *action = &fte->act_dests.action; + + if (fte->act_dests.action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) { err = -EOPNOTSUPP; goto free_actions; } tmp_action = mlx5dr_action_create_aso(domain, - fte->action.exe_aso.object_id, - fte->action.exe_aso.return_reg_id, - fte->action.exe_aso.type, - fte->action.exe_aso.flow_meter.init_color, - fte->action.exe_aso.flow_meter.meter_idx); + action->exe_aso.object_id, + action->exe_aso.return_reg_id, + action->exe_aso.type, + action->exe_aso.flow_meter.init_color, + action->exe_aso.flow_meter.meter_idx); if (!tmp_action) { err = -ENOMEM; goto free_actions; @@ -576,8 +570,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = term_actions->dest; } else if (num_term_actions > 1) { bool ignore_flow_level = - !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); - u32 flow_source = fte->flow_context.flow_source; + !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); + u32 flow_source = fte->act_dests.flow_context.flow_source; if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { @@ -601,7 +595,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, ¶ms, num_actions, actions, - fte->flow_context.flow_source); + fte->act_dests.flow_context.flow_source); if (!rule) { err = -EINVAL; goto free_actions; @@ -740,7 +734,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, int err; int i; - if 
(dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte); err = mlx5dr_rule_destroy(rule->dr_rule); @@ -765,7 +759,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, struct fs_fte fte_tmp = {}; int ret; - if (dr_is_fw_term_table(ft)) + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte); /* Backup current dr rule details */ @@ -819,11 +813,11 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns) static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, enum fs_flow_table_type ft_type) { - u32 steering_caps = 0; + u32 steering_caps = MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH; if (ft_type != FS_FT_FDB || MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5) - return 0; + return steering_caps; steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX; steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h index 9a85d4e12f77..f39d636ff39a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h @@ -214,11 +214,12 @@ struct mlx5hws_action_dest_attr { struct mlx5hws_action *reformat; }; -/* Check whether HWS is supported +/** + * mlx5hws_is_supported - Check whether HWS is supported * - * @param[in] mdev - * The device to check. - * @return true if supported, false otherwise. + * @mdev: The device to check. + * + * Return: true if supported, false otherwise. */ static inline bool mlx5hws_is_supported(struct mlx5_core_dev *mdev) { @@ -234,92 +235,94 @@ static inline bool mlx5hws_is_supported(struct mlx5_core_dev *mdev) return wqe_based_flow_table_update && ignore_flow_level_rtc_valid; } -/* Open a context used for direct rule insertion using hardware steering. - * Each context can contain multiple tables of different types. +/** + * mlx5hws_context_open - Open a context used for direct rule insertion + * using hardware steering. + * + * @mdev: The device to be used for HWS. + * @attr: Attributes used for context open. * - * @param[in] mdev - * The device to be used for HWS. - * @param[in] attr - * Attributes used for context open. - * @return pointer to mlx5hws_context on success NULL otherwise. + * Return: pointer to mlx5hws_context on success NULL otherwise. */ struct mlx5hws_context * mlx5hws_context_open(struct mlx5_core_dev *mdev, struct mlx5hws_context_attr *attr); -/* Close a context used for direct hardware steering. +/** + * mlx5hws_context_close - Close a context used for direct hardware steering. * - * @param[in] ctx - * mlx5hws context to close. - * @return zero on success non zero otherwise. + * @ctx: mlx5hws context to close. + * + * Return: zero on success non zero otherwise. */ int mlx5hws_context_close(struct mlx5hws_context *ctx); -/* Set a peer context, each context can have multiple contexts as peers. +/** + * mlx5hws_context_set_peer - Set a peer context. + * Each context can have multiple contexts as peers. * - * @param[in] ctx - * The context in which the peer_ctx will be peered to it. - * @param[in] peer_ctx - * The peer context. - * @param[in] peer_vhca_id - * The peer context vhca id. + * @ctx: The context in which the peer_ctx will be peered to it. + * @peer_ctx: The peer context. + * @peer_vhca_id: The peer context vhca id. 
*/ void mlx5hws_context_set_peer(struct mlx5hws_context *ctx, struct mlx5hws_context *peer_ctx, u16 peer_vhca_id); -/* Create a new direct rule table. Each table can contain multiple matchers. +/** + * mlx5hws_table_create - Create a new direct rule table. + * Each table can contain multiple matchers. + * + * @ctx: The context in which the new table will be opened. + * @attr: Attributes used for table creation. * - * @param[in] ctx - * The context in which the new table will be opened. - * @param[in] attr - * Attributes used for table creation. - * @return pointer to mlx5hws_table on success NULL otherwise. + * Return: pointer to mlx5hws_table on success NULL otherwise. */ struct mlx5hws_table * mlx5hws_table_create(struct mlx5hws_context *ctx, struct mlx5hws_table_attr *attr); -/* Destroy direct rule table. +/** + * mlx5hws_table_destroy - Destroy direct rule table. * - * @param[in] tbl - * Table to destroy. - * @return zero on success non zero otherwise. + * @tbl: Table to destroy. + * + * Return: zero on success non zero otherwise. */ int mlx5hws_table_destroy(struct mlx5hws_table *tbl); -/* Get ID of the flow table. +/** + * mlx5hws_table_get_id() - Get ID of the flow table. + * + * @tbl:Table to get ID of. * - * @param[in] tbl - * Table to get ID of. - * @return ID of the table. + * Return: ID of the table. */ u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl); -/* Set default miss table for mlx5hws_table by using another mlx5hws_table +/** + * mlx5hws_table_set_default_miss - Set default miss table for mlx5hws_table + * by using another mlx5hws_table. * Traffic which all table matchers miss will be forwarded to miss table. * - * @param[in] tbl - * Source table - * @param[in] miss_tbl - * Target (miss) table, or NULL to remove current miss table - * @return zero on success non zero otherwise. + * @tbl: Source table + * @miss_tbl: Target (miss) table, or NULL to remove current miss table + * + * Return: zero on success non zero otherwise. */ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl, struct mlx5hws_table *miss_tbl); -/* Create new match template based on items mask, the match template - * will be used for matcher creation. +/** + * mlx5hws_match_template_create - Create a new match template based on items mask. + * The match template will be used for matcher creation. + * + * @ctx: The context in which the new template will be created. + * @match_param: Describe the mask based on PRM match parameters. + * @match_param_sz: Size of match param buffer. + * @match_criteria_enable: Bitmap for each sub-set in match_criteria buffer. * - * @param[in] ctx - * The context in which the new template will be created. - * @param[in] match_param - * Describe the mask based on PRM match parameters - * @param[in] match_param_sz - * Size of match param buffer - * @param[in] match_criteria_enable - * Bitmap for each sub-set in match_criteria buffer - * @return pointer to mlx5hws_match_template on success NULL otherwise + * Return: Pointer to mlx5hws_match_template on success, NULL otherwise. */ struct mlx5hws_match_template * mlx5hws_match_template_create(struct mlx5hws_context *ctx, @@ -327,52 +330,53 @@ mlx5hws_match_template_create(struct mlx5hws_context *ctx, u32 match_param_sz, u8 match_criteria_enable); -/* Destroy match template. +/** + * mlx5hws_match_template_destroy - Destroy a match template. * - * @param[in] mt - * Match template to destroy. - * @return zero on success non zero otherwise. + * @mt: Match template to destroy. 
+ * + * Return: Zero on success, non-zero otherwise. */ int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt); -/* Create new action template based on action_type array, the action template - * will be used for matcher creation. +/** + * mlx5hws_action_template_create - Create a new action template based on an action_type array. + * + * @action_type: An array of actions based on the order of actions which will be provided + * with rule_actions to mlx5hws_rule_create. The last action is marked + * using MLX5HWS_ACTION_TYP_LAST. * - * @param[in] action_type - * An array of actions based on the order of actions which will be provided - * with rule_actions to mlx5hws_rule_create. The last action is marked - * using MLX5HWS_ACTION_TYP_LAST. - * @return pointer to mlx5hws_action_template on success NULL otherwise + * Return: Pointer to mlx5hws_action_template on success, NULL otherwise. */ struct mlx5hws_action_template * mlx5hws_action_template_create(enum mlx5hws_action_type action_type[]); -/* Destroy action template. +/** + * mlx5hws_action_template_destroy - Destroy action template. * - * @param[in] at - * Action template to destroy. - * @return zero on success non zero otherwise. + * @at: Action template to destroy. + * + * Return: zero on success non zero otherwise. */ int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at); -/* Create a new direct rule matcher. Each matcher can contain multiple rules. - * Matchers on the table will be processed by priority. Matching fields and - * mask are described by the match template. In some cases multiple match - * templates can be used on the same matcher. - * - * @param[in] table - * The table in which the new matcher will be opened. - * @param[in] mt - * Array of match templates to be used on matcher. - * @param[in] num_of_mt - * Number of match templates in mt array. - * @param[in] at - * Array of action templates to be used on matcher. - * @param[in] num_of_at - * Number of action templates in mt array. - * @param[in] attr - * Attributes used for matcher creation. - * @return pointer to mlx5hws_matcher on success NULL otherwise. +/** + * mlx5hws_matcher_create - Create a new direct rule matcher. + * + * Each matcher can contain multiple rules. Matchers on the table will be + * processed by priority. Matching fields and mask are described by the + * match template. In some cases, multiple match templates can be used on + * the same matcher. + * + * @table: The table in which the new matcher will be opened. + * @mt: Array of match templates to be used on matcher. + * @num_of_mt: Number of match templates in mt array. + * @at: Array of action templates to be used on matcher. + * @num_of_at: Number of action templates in at array. + * @attr: Attributes used for matcher creation. + * + * Return: Pointer to mlx5hws_matcher on success, NULL otherwise. + * */ struct mlx5hws_matcher * mlx5hws_matcher_create(struct mlx5hws_table *table, @@ -382,81 +386,77 @@ mlx5hws_matcher_create(struct mlx5hws_table *table, u8 num_of_at, struct mlx5hws_matcher_attr *attr); -/* Destroy direct rule matcher. +/** + * mlx5hws_matcher_destroy - Destroy a direct rule matcher. * - * @param[in] matcher - * Matcher to destroy. - * @return zero on success non zero otherwise. + * @matcher: Matcher to destroy. + * + * Return: Zero on success, non-zero otherwise. */ int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher); -/* Attach new action template to direct rule matcher. 
+/** + * mlx5hws_matcher_attach_at - Attach a new action template to a direct rule matcher. + * + * @matcher: Matcher to attach the action template to. + * @at: Action template to be attached to the matcher. * - * @param[in] matcher - * Matcher to attach at to. - * @param[in] at - * Action template to be attached to the matcher. - * @return zero on success non zero otherwise. + * Return: Zero on success, non-zero otherwise. */ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher, struct mlx5hws_action_template *at); -/* Link two matchers and enable moving rules from src matcher to dst matcher. - * Both matchers must be in the same table type, must be created with 'resizable' - * property, and should have the same characteristics (e.g. same mt, same at). +/** + * mlx5hws_matcher_resize_set_target - Link two matchers and enable moving rules. * - * It is the user's responsibility to make sure that the dst matcher - * was allocated with the appropriate size. + * Both matchers must be in the same table type, must be created with the + * 'resizable' property, and should have the same characteristics (e.g., same + * match templates and action templates). It is the user's responsibility to + * ensure that the destination matcher is allocated with the appropriate size. * * Once the function is completed, the user is: - * - allowed to move rules from src into dst matcher - * - no longer allowed to insert rules to the src matcher + * - Allowed to move rules from the source into the destination matcher. + * - No longer allowed to insert rules into the source matcher. * - * The user is always allowed to insert rules to the dst matcher and + * The user is always allowed to insert rules into the destination matcher and * to delete rules from any matcher. * - * @param[in] src_matcher - * source matcher for moving rules from - * @param[in] dst_matcher - * destination matcher for moving rules to - * @return zero on successful move, non zero otherwise. + * @src_matcher: Source matcher for moving rules from. + * @dst_matcher: Destination matcher for moving rules to. + * + * Return: Zero on successful move, non-zero otherwise. */ int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher, struct mlx5hws_matcher *dst_matcher); -/* Enqueue moving rule operation: moving rule from src matcher to a dst matcher +/** + * mlx5hws_matcher_resize_rule_move - Enqueue moving rule operation. + * + * This function enqueues the operation of moving a rule from the source + * matcher to the destination matcher. * - * @param[in] src_matcher - * matcher that the rule belongs to - * @param[in] rule - * the rule to move - * @param[in] attr - * rule attributes - * @return zero on success, non zero otherwise. + * @src_matcher: Matcher that the rule belongs to. + * @rule: The rule to move. + * @attr: Rule attributes. + * + * Return: Zero on success, non-zero otherwise. */ int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher, struct mlx5hws_rule *rule, struct mlx5hws_rule_attr *attr); -/* Enqueue create rule operation. - * - * @param[in] matcher - * The matcher in which the new rule will be created. - * @param[in] mt_idx - * Match template index to create the match with. - * @param[in] match_param - * The match parameter PRM buffer used for the value matching. - * @param[in] rule_actions - * Rule action to be executed on match. - * @param[in] at_idx - * Action template index to apply the actions with. - * @param[in] num_of_actions - * Number of rule actions. 
- * @param[in] attr - * Rule creation attributes. - * @param[in, out] rule_handle - * A valid rule handle. The handle doesn't require any initialization. - * @return zero on successful enqueue non zero otherwise. +/** + * mlx5hws_rule_create - Enqueue create rule operation. + * + * @matcher: The matcher in which the new rule will be created. + * @mt_idx: Match template index to create the match with. + * @match_param: The match parameter PRM buffer used for value matching. + * @at_idx: Action template index to apply the actions with. + * @rule_actions: Rule actions to be executed on match. + * @attr: Rule creation attributes. + * @rule_handle: A valid rule handle. The handle doesn't require any initialization. + * + * Return: Zero on successful enqueue, non-zero otherwise. */ int mlx5hws_rule_create(struct mlx5hws_matcher *matcher, u8 mt_idx, @@ -466,114 +466,106 @@ int mlx5hws_rule_create(struct mlx5hws_matcher *matcher, struct mlx5hws_rule_attr *attr, struct mlx5hws_rule *rule_handle); -/* Enqueue destroy rule operation. +/** + * mlx5hws_rule_destroy - Enqueue destroy rule operation. + * + * @rule: The rule destruction to enqueue. + * @attr: Rule destruction attributes. * - * @param[in] rule - * The rule destruction to enqueue. - * @param[in] attr - * Rule destruction attributes. - * @return zero on successful enqueue non zero otherwise. + * Return: Zero on successful enqueue, non-zero otherwise. */ int mlx5hws_rule_destroy(struct mlx5hws_rule *rule, struct mlx5hws_rule_attr *attr); -/* Enqueue update actions on an existing rule. +/** + * mlx5hws_rule_action_update - Enqueue update actions on an existing rule. * - * @param[in, out] rule_handle - * A valid rule handle to update. - * @param[in] at_idx - * Action template index to update the actions with. - * @param[in] rule_actions - * Rule action to be executed on match. - * @param[in] attr - * Rule update attributes. - * @return zero on successful enqueue non zero otherwise. + * @rule: A valid rule handle to update. + * @at_idx: Action template index to update the actions with. + * @rule_actions: Rule actions to be executed on match. + * @attr: Rule update attributes. + * + * Return: Zero on successful enqueue, non-zero otherwise. */ int mlx5hws_rule_action_update(struct mlx5hws_rule *rule, u8 at_idx, struct mlx5hws_rule_action rule_actions[], struct mlx5hws_rule_attr *attr); -/* Get action type. +/** + * mlx5hws_action_get_type - Get action type. + * + * @action: The action to get the type of. * - * @param[in] action - * The action to get the type of. - * @return action type. + * Return: action type. */ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action); -/* Create direct rule drop action. +/** + * mlx5hws_action_create_dest_drop - Create a direct rule drop action. * - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: Pointer to mlx5hws_action on success, NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags); -/* Create direct rule default miss action. - * Defaults are RX: Drop TX: Wire. +/** + * mlx5hws_action_create_default_miss - Create a direct rule default miss action. + * Defaults are RX: Drop, TX: Wire. 
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
  *
- * @param[in] ctx
- * The context in which the new action will be created.
- * @param[in] flags
- * Action creation flags. (enum mlx5hws_action_flags)
- * @return pointer to mlx5hws_action on success NULL otherwise.
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
  */
 struct mlx5hws_action *
 mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags);
 
-/* Create direct rule goto table action.
+/**
+ * mlx5hws_action_create_dest_table - Create direct rule goto table action.
  *
- * @param[in] ctx
- * The context in which the new action will be created.
- * @param[in] tbl
- * Destination table.
- * @param[in] flags
- * Action creation flags. (enum mlx5hws_action_flags)
- * @return pointer to mlx5hws_action on success NULL otherwise.
+ * @ctx: The context in which the new action will be created.
+ * @tbl: Destination table.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
  */
 struct mlx5hws_action *
 mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
				 struct mlx5hws_table *tbl,
				 u32 flags);
 
-/* Create direct rule goto table number action.
+/**
+ * mlx5hws_action_create_dest_table_num - Create direct rule goto table number action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @tbl_num: Destination table number.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
  *
- * @param[in] ctx
- * The context in which the new action will be created.
- * @param[in] tbl_num
- * Destination table number.
- * @param[in] flags
- * Action creation flags. (enum mlx5hws_action_flags)
- * @return pointer to mlx5hws_action on success NULL otherwise.
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
  */
 struct mlx5hws_action *
 mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
-				     u32 table_num, u32 flags);
-
-/* Create direct rule range match action.
- *
- * @param[in] ctx
- * The context in which the new action will be created.
- * @param[in] field
- * Field to comapare the value.
- * @param[in] hit_ft
- * Flow table to go to on hit.
- * @param[in] miss_ft
- * Flow table to go to on miss.
- * @param[in] min
- * Minimal value of the field to be considered as hit.
- * @param[in] max
- * Maximal value of the field to be considered as hit.
- * @param[in] flags
- * Action creation flags. (enum mlx5hws_action_flags)
- * @return pointer to mlx5hws_action on success NULL otherwise.
+				     u32 tbl_num, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_match_range - Create direct rule range match action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @field: Field to compare the value.
+ * @hit_ft: Flow table to go to on hit.
+ * @miss_ft: Flow table to go to on miss.
+ * @min: Minimal value of the field to be considered as hit.
+ * @max: Maximal value of the field to be considered as hit.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
  */
 struct mlx5hws_action *
 mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
@@ -582,33 +574,29 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
					struct mlx5_flow_table *miss_ft,
					u32 min, u32 max, u32 flags);
 
-/* Create direct rule flow sampler action.
+/**
+ * mlx5hws_action_create_flow_sampler - Create direct rule flow sampler action.
* - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] sampler_id - * Flow sampler object ID. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. + * @ctx: The context in which the new action will be created. + * @sampler_id: Flow sampler object ID. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx, u32 sampler_id, u32 flags); -/* Create direct rule goto vport action. +/** + * mlx5hws_action_create_dest_vport - Create direct rule goto vport action. + * + * @ctx: The context in which the new action will be created. + * @vport_num: Destination vport number. + * @vhca_id_valid: Tells if the vhca_id parameter is valid. + * @vhca_id: VHCA ID of the destination vport. + * @flags: Action creation flags (enum mlx5hws_action_flags). * - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] vport_num - * Destination vport number. - * @param[in] vhca_id_valid - * Tells if the vhca_id parameter is valid. - * @param[in] vhca_id - * VHCA ID of the destination vport. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx, @@ -617,48 +605,42 @@ mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx, u16 vhca_id, u32 flags); -/* Create direct rule TAG action. +/** + * mlx5hws_action_create_tag - Create direct rule TAG action. * - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * -mlx5hws_action_create_tag(struct mlx5hws_context *ctx, - u32 flags); +mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags); -/* Create direct rule counter action. +/** + * mlx5hws_action_create_counter - Create direct rule counter action. + * + * @ctx: The context in which the new action will be created. + * @obj_id: Direct rule counter object ID. + * @flags: Action creation flags (enum mlx5hws_action_flags). * - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] obj_id - * Direct rule counter object ID. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_counter(struct mlx5hws_context *ctx, u32 obj_id, u32 flags); -/* Create direct rule reformat action. - * - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] reformat_type - * Type of reformat prefixed with MLX5HWS_ACTION_TYP_REFORMAT. - * @param[in] num_of_hdrs - * Number of provided headers in "hdrs" array. - * @param[in] hdrs - * Headers array containing header information. 
- * @param[in] log_bulk_size - * Number of unique values used with this reformat. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. +/** + * mlx5hws_action_create_reformat - Create direct rule reformat action. + * + * @ctx: The context in which the new action will be created. + * @reformat_type: Type of reformat prefixed with MLX5HWS_ACTION_TYP_REFORMAT. + * @num_of_hdrs: Number of provided headers in "hdrs" array. + * @hdrs: Headers array containing header information. + * @log_bulk_size: Number of unique values used with this reformat. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_reformat(struct mlx5hws_context *ctx, @@ -668,19 +650,16 @@ mlx5hws_action_create_reformat(struct mlx5hws_context *ctx, u32 log_bulk_size, u32 flags); -/* Create direct rule modify header action. +/** + * mlx5hws_action_create_modify_header - Create direct rule modify header action. * - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] num_of_patterns - * Number of provided patterns in "patterns" array. - * @param[in] patterns - * Patterns array containing pattern information. - * @param[in] log_bulk_size - * Number of unique values used with this pattern. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. + * @ctx: The context in which the new action will be created. + * @num_of_patterns: Number of provided patterns in "patterns" array. + * @patterns: Patterns array containing pattern information. + * @log_bulk_size: Number of unique values used with this pattern. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx, @@ -689,17 +668,16 @@ mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx, u32 log_bulk_size, u32 flags); -/* Create direct rule ASO flow meter action. +/** + * mlx5hws_action_create_aso_meter - Create direct rule ASO flow meter action. + * + * @ctx: The context in which the new action will be created. + * @obj_id: ASO object ID. + * @return_reg_c: Copy the ASO object value into this reg_c, + * after a packet hits a rule with this ASO object. + * @flags: Action creation flags (enum mlx5hws_action_flags). * - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] obj_id - * ASO object ID. - * @param[in] return_reg_c - * Copy the ASO object value into this reg_c, after a packet hits a rule with this ASO object. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx, @@ -707,42 +685,41 @@ mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx, u8 return_reg_c, u32 flags); -/* Create direct rule pop vlan action. - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. +/** + * mlx5hws_action_create_pop_vlan - Create direct rule pop vlan action. 
+ * + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags); -/* Create direct rule push vlan action. - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. +/** + * mlx5hws_action_create_push_vlan - Create direct rule push vlan action. + * + * @ctx: The context in which the new action will be created. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags); -/* Create a dest array action, this action can duplicate packets and forward to - * multiple destinations in the destination list. - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] num_dest - * The number of dests attributes. - * @param[in] dests - * The destination array. Each contains a destination action and can have - * additional actions. - * @param[in] ignore_flow_level - * Boolean that says whether to turn on 'ignore_flow_level' for this dest. - * @param[in] flow_source - * Source port of the traffic for this actions. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. +/** + * mlx5hws_action_create_dest_array - Create a dest array action, this action can + * duplicate packets and forward to multiple destinations in the destination list. + * + * @ctx: The context in which the new action will be created. + * @num_dest: The number of dests attributes. + * @dests: The destination array. Each contains a destination action and can + * have additional actions. + * @ignore_flow_level: Whether to turn on 'ignore_flow_level' for this dest. + * @flow_source: Source port of the traffic for this actions. + * @flags: Action creation flags (enum mlx5hws_action_flags). + * + * Return: pointer to mlx5hws_action on success NULL otherwise. */ struct mlx5hws_action * mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, @@ -752,19 +729,16 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, u32 flow_source, u32 flags); -/* Create insert header action. +/** + * mlx5hws_action_create_insert_header - Create insert header action. + * + * @ctx: The context in which the new action will be created. + * @num_of_hdrs: Number of provided headers in "hdrs" array. + * @hdrs: Headers array containing header information. + * @log_bulk_size: Number of unique values used with this insert header. + * @flags: Action creation flags. (enum mlx5hws_action_flags) * - * @param[in] ctx - * The context in which the new action will be created. - * @param[in] num_of_hdrs - * Number of provided headers in "hdrs" array. - * @param[in] hdrs - * Headers array containing header information. - * @param[in] log_bulk_size - * Number of unique values used with this insert header. - * @param[in] flags - * Action creation flags. (enum mlx5hws_action_flags) - * @return pointer to mlx5hws_action on success NULL otherwise. + * Return: pointer to mlx5hws_action on success NULL otherwise. 
 */
 struct mlx5hws_action *
 mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
@@ -773,38 +747,38 @@ mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
				    u32 log_bulk_size,
				    u32 flags);
 
-/* Create remove header action.
+/**
+ * mlx5hws_action_create_remove_header - Create remove header action.
  *
- * @param[in] ctx
- * The context in which the new action will be created.
- * @param[in] attr
- * attributes: specifies the remove header type, PRM start anchor and
- * the PRM end anchor or the PRM start anchor and remove size in bytes.
- * @param[in] flags
- * Action creation flags. (enum mlx5hws_action_flags)
- * @return pointer to mlx5hws_action on success NULL otherwise.
+ * @ctx: The context in which the new action will be created.
+ * @attr: Attributes that specify the remove header type, PRM start anchor and
+ * the PRM end anchor, or the PRM start anchor and remove size in bytes.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
  */
 struct mlx5hws_action *
 mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
				    struct mlx5hws_action_remove_header_attr *attr,
				    u32 flags);
 
-/* Create direct rule LAST action.
+/**
+ * mlx5hws_action_create_last - Create direct rule LAST action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
  *
- * @param[in] ctx
- * The context in which the new action will be created.
- * @param[in] flags
- * Action creation flags. (enum mlx5hws_action_flags)
- * @return pointer to mlx5hws_action on success NULL otherwise.
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
  */
 struct mlx5hws_action *
 mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags);
 
-/* Destroy direct rule action.
+/**
+ * mlx5hws_action_destroy - Destroy direct rule action.
  *
- * @param[in] action
- * The action to destroy.
- * @return zero on success non zero otherwise.
+ * @action: The action to destroy.
+ *
+ * Return: zero on success non zero otherwise.
  */
 int mlx5hws_action_destroy(struct mlx5hws_action *action);
 
@@ -818,42 +792,40 @@ struct mlx5hws_flow_op_result {
	void *user_data;
 };
 
-/* Poll queue for rule creation and deletions completions.
+/**
+ * mlx5hws_send_queue_poll - Poll a queue for rule creation and deletion completions.
+ *
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to poll.
+ * @res: Completion array.
+ * @res_nb: Maximum number of results to return.
  *
- * @param[in] ctx
- * The context to which the queue belong to.
- * @param[in] queue_id
- * The id of the queue to poll.
- * @param[in, out] res
- * Completion array.
- * @param[in] res_nb
- * Maximum number of results to return.
- * @return negative number on failure, the number of completions otherwise.
+ * Return: negative number on failure, the number of completions otherwise.
  */
 int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
			    u16 queue_id,
			    struct mlx5hws_flow_op_result res[],
			    u32 res_nb);
 
-/* Perform an action on the queue
+/**
+ * mlx5hws_send_queue_action - Perform an action on the queue.
  *
- * @param[in] ctx
- * The context to which the queue belong to.
- * @param[in] queue_id
- * The id of the queue to perform the action on.
- * @param[in] actions
- * Actions to perform on the queue. (enum mlx5hws_send_queue_actions)
- * @return zero on success non zero otherwise.
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to perform the action on. + * @actions: Actions to perform on the queue (enum mlx5hws_send_queue_actions) + * + * Return: zero on success non zero otherwise. */ int mlx5hws_send_queue_action(struct mlx5hws_context *ctx, u16 queue_id, u32 actions); -/* Dump HWS info +/** + * mlx5hws_debug_dump - Dump HWS info + * + * @ctx: The context which to dump the info from. * - * @param[in] ctx - * The context which to dump the info from. - * @return zero on success non zero otherwise. + * Return: zero on success non zero otherwise. */ int mlx5hws_debug_dump(struct mlx5hws_context *ctx); @@ -865,7 +837,9 @@ struct mlx5hws_match_parameters { u32 *match_buf; /* Device spec format */ }; -/* Create a new BWC direct rule matcher. +/** + * mlx5hws_bwc_matcher_create - Create a new BWC direct rule matcher. + * * This function does the following: * - creates match template based on flow items * - creates an empty action template @@ -875,15 +849,12 @@ struct mlx5hws_match_parameters { * - table->ctx must have BWC support * - complex rules are not supported * - * @param[in] table - * The table in which the new matcher will be opened - * @param[in] priority - * Priority for this BWC matcher - * @param[in] match_criteria_enable - * Bitmask that defines matching criteria - * @param[in] mask - * Match parameters - * @return pointer to mlx5hws_bwc_matcher on success or NULL otherwise. + * @table: The table in which the new matcher will be opened + * @priority: Priority for this BWC matcher + * @match_criteria_enable: Bitmask that defines matching criteria + * @mask: Match parameters + * + * Return: pointer to mlx5hws_bwc_matcher on success or NULL otherwise. */ struct mlx5hws_bwc_matcher * mlx5hws_bwc_matcher_create(struct mlx5hws_table *table, @@ -891,15 +862,18 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table, u8 match_criteria_enable, struct mlx5hws_match_parameters *mask); -/* Destroy BWC direct rule matcher. +/** + * mlx5hws_bwc_matcher_destroy - Destroy BWC direct rule matcher. * - * @param[in] bwc_matcher - * Matcher to destroy - * @return zero on success, non zero otherwise + * @bwc_matcher: Matcher to destroy + * + * Return: zero on success, non zero otherwise */ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher); -/* Create a new BWC rule. +/** + * mlx5hws_bwc_rule_create - Create a new BWC rule. + * * Unlike the usual rule creation function, this one is blocking: when the * function returns, the rule is written to its place (no need to poll). * This function does the following: @@ -916,15 +890,12 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher); * - matcher->tbl->ctx must have BWC support * - separate BWC ctx queues are used * - * @param[in] bwc_matcher - * The BWC matcher in which the new rule will be created. - * @param[in] params - * Match perameters - * @param[in] flow_source - * Flow source for this rule - * @param[in] rule_actions - * Rule action to be executed on match - * @return valid BWC rule handle on success, NULL otherwise + * @bwc_matcher: The BWC matcher in which the new rule will be created. 
+ * @params: Match parameters
+ * @flow_source: Flow source for this rule
+ * @rule_actions: Rule actions to be executed on match
+ *
+ * Return: valid BWC rule handle on success, NULL otherwise
 */
 struct mlx5hws_bwc_rule *
 mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
@@ -932,21 +903,22 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
			u32 flow_source,
			struct mlx5hws_rule_action rule_actions[]);
 
-/* Destroy BWC direct rule.
+/**
+ * mlx5hws_bwc_rule_destroy - Destroy BWC direct rule.
+ *
+ * @bwc_rule: Rule to destroy.
  *
- * @param[in] bwc_rule
- * Rule to destroy
- * @return zero on success, non zero otherwise
+ * Return: zero on success, non zero otherwise.
  */
 int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule);
 
-/* Update actions on an existing BWC rule.
+/**
+ * mlx5hws_bwc_rule_action_update - Update actions on an existing BWC rule.
+ *
+ * @bwc_rule: Rule to update
+ * @rule_actions: Rule actions to update with
  *
- * @param[in] bwc_rule
- * Rule to update
- * @param[in] rule_actions
- * Rule action to update with
- * @return zero on successful update, non zero otherwise.
+ * Return: zero on successful update, non zero otherwise.
  */
 int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
				   struct mlx5hws_rule_action rule_actions[]);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
index 1964261415aa..33d2b31e4b46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
@@ -967,7 +967,7 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
 
	ret = hws_matcher_check_and_process_at(matcher, at);
	if (ret)
-		return -ret;
+		return ret;
 
	required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
	if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
index c79ee70edf03..8a011b958b43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
@@ -751,11 +751,11 @@ int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
 
	ret = hws_rule_enqueue_precheck(rule, attr);
	if (unlikely(ret))
-		return -ret;
+		return ret;
 
	ret = hws_rule_destroy_hws(rule, attr);
 
-	return -ret;
+	return ret;
 }
 
 int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
@@ -767,7 +767,7 @@
 
	ret = hws_rule_enqueue_precheck_update(rule, attr);
	if (unlikely(ret))
-		return -ret;
+		return ret;
 
	ret = hws_rule_create_hws(rule,
				  attr,
@@ -776,5 +776,5 @@
				  at_idx,
				  rule_actions);
 
-	return -ret;
+	return ret;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
index 9dbc3e9da5ea..8c063a8d87d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
@@ -489,5 +489,5 @@ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
	return 0;
 out:
	mutex_unlock(&ctx->ctrl_lock);
-	return -ret;
+	return ret;
 }
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index a94bc9e3af96..d0f7d1f36c5e 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1449,6 +1449,7 @@ enum { MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, MLX5_CMD_STAT_BAD_RES_ERR = 0x5, MLX5_CMD_STAT_RES_BUSY = 0x6, + MLX5_CMD_STAT_NOT_READY = 0x7, MLX5_CMD_STAT_LIM_ERR = 0x8, MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, MLX5_CMD_STAT_IX_ERR = 0xa, diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 3fb428ce7d1c..b744e554f014 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -342,4 +342,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat *reformat); u32 mlx5_flow_table_id(struct mlx5_flow_table *ft); + +struct mlx5_flow_root_namespace * +mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type); #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 2045575b70d4..620a5c305123 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1856,7 +1856,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_328[0x2]; u8 relaxed_ordering_read[0x1]; u8 log_max_pd[0x5]; - u8 reserved_at_330[0x6]; + u8 reserved_at_330[0x5]; + u8 pcie_reset_using_hotreset_method[0x1]; u8 pci_sync_for_fw_update_with_driver_unload[0x1]; u8 vnic_env_cnt_steering_fail[0x1]; u8 vport_counter_local_loopback[0x1]; @@ -11189,6 +11190,11 @@ struct mlx5_ifc_mcda_reg_bits { }; enum { + MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0, + MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1, +}; + +enum { MLX5_MFRL_REG_RESET_STATE_IDLE = 0, MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1, MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2, @@ -11215,7 +11221,8 @@ struct mlx5_ifc_mfrl_reg_bits { u8 pci_sync_for_fw_update_start[0x1]; u8 pci_sync_for_fw_update_resp[0x2]; u8 rst_type_sel[0x3]; - u8 reserved_at_28[0x4]; + u8 pci_reset_req_method[0x3]; + u8 reserved_at_2b[0x1]; u8 reset_state[0x4]; u8 reset_type[0x8]; u8 reset_level[0x8]; |
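
The mlx5hws.h comments converted to kernel-doc above describe a multi-step flow: open a context, create a table, create actions and templates, instantiate a matcher, then insert rules either through the enqueue/poll path or the blocking BWC path. The sketch below shows one plausible way the BWC entry points fit together. It is illustrative only, not part of this patch: the contents of mlx5hws_context_attr and mlx5hws_table_attr, the type of the BWC matcher priority argument, the type of the bwc_rule_create match-parameters argument, the match_criteria_enable bits and the layout of struct mlx5hws_rule_action are not visible here and are assumed or left as placeholders; hws_bwc_example() is a hypothetical caller.

#include <linux/mlx5/driver.h>
#include "mlx5hws.h"	/* driver-private header converted above */

/* Illustrative sketch only. Assumes zero-initialized attr structs are
 * acceptable defaults, which this patch does not show.
 */
static int hws_bwc_example(struct mlx5_core_dev *mdev)
{
	struct mlx5hws_context_attr ctx_attr = {};	/* contents assumed */
	struct mlx5hws_table_attr tbl_attr = {};	/* contents assumed */
	struct mlx5hws_match_parameters mask = {};
	struct mlx5hws_rule_action rule_actions[2] = {};
	struct mlx5hws_bwc_matcher *bwc_matcher;
	struct mlx5hws_bwc_rule *bwc_rule;
	struct mlx5hws_context *ctx;
	struct mlx5hws_action *drop;
	struct mlx5hws_table *tbl;
	int err = -ENOMEM;

	if (!mlx5hws_is_supported(mdev))
		return -EOPNOTSUPP;

	ctx = mlx5hws_context_open(mdev, &ctx_attr);
	if (!ctx)
		return -ENOMEM;

	tbl = mlx5hws_table_create(ctx, &tbl_attr);
	if (!tbl)
		goto close_ctx;

	drop = mlx5hws_action_create_dest_drop(ctx, 0);
	if (!drop)
		goto destroy_tbl;

	/* mask.match_buf points to a PRM-format match buffer; the exact size
	 * field and the match_criteria_enable bits are outside this patch.
	 */
	bwc_matcher = mlx5hws_bwc_matcher_create(tbl, 0 /* priority */,
						 0 /* match_criteria_enable */,
						 &mask);
	if (!bwc_matcher)
		goto destroy_drop;

	/* Populating rule_actions[] (e.g. with the drop action) depends on
	 * the struct mlx5hws_rule_action layout, which this patch does not
	 * show; it is left empty here.
	 */
	bwc_rule = mlx5hws_bwc_rule_create(bwc_matcher, &mask,
					   0 /* flow_source */, rule_actions);
	if (!bwc_rule)
		goto destroy_matcher;

	/* Blocking API: the rule is already in place on return, so no
	 * mlx5hws_send_queue_poll() loop is needed on this path.
	 */
	err = 0;

	mlx5hws_bwc_rule_destroy(bwc_rule);
destroy_matcher:
	mlx5hws_bwc_matcher_destroy(bwc_matcher);
destroy_drop:
	mlx5hws_action_destroy(drop);
destroy_tbl:
	mlx5hws_table_destroy(tbl);
close_ctx:
	mlx5hws_context_close(ctx);
	return err;
}

The non-blocking path would instead enqueue work with mlx5hws_rule_create()/mlx5hws_rule_destroy() and reap completions with mlx5hws_send_queue_poll(); the BWC variant is used in this sketch because it returns only once the rule is written.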