Commit 74491de9 authored by Mark Bloch, committed by Leon Romanovsky

net/mlx5: Add multi dest support

Currently, when calling mlx5_add_flow_rule we accept
only one flow destination; this commit allows passing
multiple destinations.

This change forces us to change the returned structure to a more
flexible one. We introduce a flow handle (struct mlx5_flow_handle);
it internally holds the number of rules created and an array
where each cell points to a flow rule.

From the point of view of consumers of mlx5_add_flow_rule, this
change is only cosmetic and requires only changing the type
of the returned value they store.

From the core point of view, we now need to use a loop when
allocating and deleting rules (e.g., when given a flow handle).

Signed-off-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent a6224985
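
For readers skimming the diff below, here is a minimal sketch of the reworked steering API, inferred from the commit message and the updated call sites; the exact field names and prototypes are assumptions and are not copied verbatim from the mlx5 core headers in this patch.

/* Assumed shape of the new flow handle: per the commit message it holds the
 * number of rules created and an array where each cell points to a flow rule.
 */
struct mlx5_flow_handle {
	int num_rules;                    /* how many hardware rules were created */
	struct mlx5_flow_rule *rule[];    /* one entry per created rule */
};

/* Old: mlx5_add_flow_rule(ft, spec, action, flow_tag, dest)
 * New: takes an array of destinations plus its length and returns a handle.
 */
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    struct mlx5_flow_spec *spec,
		    u32 action, u32 flow_tag,
		    struct mlx5_flow_destination *dest,
		    int dest_num);

/* Old: mlx5_del_flow_rule(rule)
 * New: deletes every rule referenced by the handle (a loop inside the core).
 */
void mlx5_del_flow_rules(struct mlx5_flow_handle *handle);

/* mlx5_modify_rule_destination() now operates on a handle and gains an
 * old-destination argument; single-destination callers pass NULL for it,
 * as seen in the en_arfs.c hunks below.
 */
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest);

/* Typical single-destination caller after the rename (cosmetic change only):
 *
 *	handle = mlx5_add_flow_rules(ft, spec,
 *				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *				     MLX5_FS_DEFAULT_FLOW_TAG, &dest, 1);
 */
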
......@@ -1771,13 +1771,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
mutex_lock(&dev->flow_db.lock);
list_for_each_entry_safe(iter, tmp, &handler->list, list) {
mlx5_del_flow_rule(iter->rule);
mlx5_del_flow_rules(iter->rule);
put_flow_table(dev, iter->prio, true);
list_del(&iter->list);
kfree(iter);
}
mlx5_del_flow_rule(handler->rule);
mlx5_del_flow_rules(handler->rule);
put_flow_table(dev, handler->prio, true);
mutex_unlock(&dev->flow_db.lock);
......@@ -1907,10 +1907,10 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
handler->rule = mlx5_add_flow_rule(ft, spec,
handler->rule = mlx5_add_flow_rules(ft, spec,
action,
MLX5_FS_DEFAULT_FLOW_TAG,
dst);
dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
......@@ -1941,7 +1941,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de
handler_dst = create_flow_rule(dev, ft_prio,
flow_attr, dst);
if (IS_ERR(handler_dst)) {
mlx5_del_flow_rule(handler->rule);
mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--;
kfree(handler);
handler = handler_dst;
......@@ -2004,7 +2004,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
&leftovers_specs[LEFTOVERS_UC].flow_attr,
dst);
if (IS_ERR(handler_ucast)) {
mlx5_del_flow_rule(handler->rule);
mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--;
kfree(handler);
handler = handler_ucast;
......@@ -2046,7 +2046,7 @@ static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
return handler_rx;
err_tx:
mlx5_del_flow_rule(handler_rx->rule);
mlx5_del_flow_rules(handler_rx->rule);
ft_rx->refcount--;
kfree(handler_rx);
err:
......
......@@ -153,7 +153,7 @@ struct mlx5_ib_flow_handler {
struct list_head list;
struct ib_flow ibflow;
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
};
struct mlx5_ib_flow_db {
......
......@@ -520,7 +520,7 @@ struct mlx5e_vxlan_db {
struct mlx5e_l2_rule {
u8 addr[ETH_ALEN + 2];
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
};
struct mlx5e_flow_table {
......@@ -541,10 +541,10 @@ struct mlx5e_tc_table {
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID];
struct mlx5_flow_rule *untagged_rule;
struct mlx5_flow_rule *any_vlan_rule;
bool filter_disabled;
struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_vlan_rule;
bool filter_disabled;
};
struct mlx5e_l2_table {
......@@ -562,14 +562,14 @@ struct mlx5e_l2_table {
/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_rule *rules[MLX5E_NUM_TT];
struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
};
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
struct arfs_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_rule *default_rule;
struct mlx5_flow_handle *default_rule;
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
......
......@@ -56,7 +56,7 @@ struct arfs_tuple {
struct arfs_rule {
struct mlx5e_priv *priv;
struct work_struct arfs_work;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
struct hlist_node hlist;
int rxq;
/* Flow ID passed to ndo_rx_flow_steer */
......@@ -104,7 +104,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
tt = arfs_get_tt(i);
/* Modify ttc rules destination to bypass the aRFS tables*/
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
&dest);
&dest, NULL);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc destination failed\n",
......@@ -137,7 +137,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
tt = arfs_get_tt(i);
/* Modify ttc rules destination to point on the aRFS FTs */
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
&dest);
&dest, NULL);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc destination failed err=%d\n",
......@@ -151,7 +151,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
static void arfs_destroy_table(struct arfs_table *arfs_t)
{
mlx5_del_flow_rule(arfs_t->default_rule);
mlx5_del_flow_rules(arfs_t->default_rule);
mlx5e_destroy_flow_table(&arfs_t->ft);
}
......@@ -205,10 +205,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
goto out;
}
arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL;
......@@ -396,7 +396,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rule(arfs_rule->rule);
mlx5_del_flow_rules(arfs_rule->rule);
hlist_del(&arfs_rule->hlist);
kfree(arfs_rule);
}
......@@ -420,7 +420,7 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
if (rule->rule)
mlx5_del_flow_rule(rule->rule);
mlx5_del_flow_rules(rule->rule);
hlist_del(&rule->hlist);
kfree(rule);
}
......@@ -462,12 +462,12 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
return NULL;
}
static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_rule *rule = NULL;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest;
struct arfs_table *arfs_table;
struct mlx5_flow_spec *spec;
......@@ -544,9 +544,9 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
rule = mlx5_add_flow_rules(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
......@@ -559,14 +559,14 @@ out:
}
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule, u16 rxq)
struct mlx5_flow_handle *rule, u16 rxq)
{
struct mlx5_flow_destination dst;
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst.tir_num = priv->direct_tir[rxq].tirn;
err = mlx5_modify_rule_destination(rule, &dst);
err = mlx5_modify_rule_destination(rule, &dst, NULL);
if (err)
netdev_warn(priv->netdev,
"Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
......@@ -578,7 +578,7 @@ static void arfs_handle_work(struct work_struct *work)
struct arfs_rule,
arfs_work);
struct mlx5e_priv *priv = arfs_rule->priv;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
......
......@@ -160,7 +160,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
{
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_rule **rule_p;
struct mlx5_flow_handle **rule_p;
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
......@@ -187,10 +187,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
break;
}
*rule_p = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
*rule_p = mlx5_add_flow_rules(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
......@@ -229,20 +229,20 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
if (priv->fs.vlan.untagged_rule) {
mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
priv->fs.vlan.untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
if (priv->fs.vlan.any_vlan_rule) {
mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
priv->fs.vlan.any_vlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
mlx5e_vport_context_update_vlans(priv);
if (priv->fs.vlan.active_vlans_rule[vid]) {
mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
priv->fs.vlan.active_vlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
......@@ -560,7 +560,7 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
for (i = 0; i < MLX5E_NUM_TT; i++) {
if (!IS_ERR_OR_NULL(ttc->rules[i])) {
mlx5_del_flow_rule(ttc->rules[i]);
mlx5_del_flow_rules(ttc->rules[i]);
ttc->rules[i] = NULL;
}
}
......@@ -616,13 +616,14 @@ static struct {
},
};
static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest,
u16 etype,
u8 proto)
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest,
u16 etype,
u8 proto)
{
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
......@@ -643,10 +644,10 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
}
rule = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
dest);
rule = mlx5_add_flow_rules(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
......@@ -660,7 +661,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_rule **rules;
struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
int tt;
int err;
......@@ -801,7 +802,7 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai)
{
if (!IS_ERR_OR_NULL(ai->rule)) {
mlx5_del_flow_rule(ai->rule);
mlx5_del_flow_rules(ai->rule);
ai->rule = NULL;
}
}
......@@ -847,9 +848,9 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
break;
}
ai->rule = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
ai->rule = mlx5_add_flow_rules(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest, 1);
if (IS_ERR(ai->rule)) {
netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
__func__, mv_dmac);
......
......@@ -36,7 +36,7 @@
struct mlx5e_ethtool_rule {
struct list_head list;
struct ethtool_rx_flow_spec flow_spec;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
struct mlx5e_ethtool_table *eth_ft;
};
......@@ -284,13 +284,14 @@ static bool outer_header_zero(u32 *match_criteria)
size - 1);
}
static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft,
struct ethtool_rx_flow_spec *fs)
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft,
struct ethtool_rx_flow_spec *fs)
{
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
int err = 0;
u32 action;
......@@ -317,8 +318,8 @@ static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
}
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
rule = mlx5_add_flow_rule(ft, spec, action,
MLX5_FS_DEFAULT_FLOW_TAG, dst);
rule = mlx5_add_flow_rules(ft, spec, action,
MLX5_FS_DEFAULT_FLOW_TAG, dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
......@@ -335,7 +336,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *eth_rule)
{
if (eth_rule->rule)
mlx5_del_flow_rule(eth_rule->rule);
mlx5_del_flow_rules(eth_rule->rule);
list_del(&eth_rule->list);
priv->fs.ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
......@@ -475,7 +476,7 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
{
struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
int num_tuples;
int err;
......
......@@ -328,7 +328,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_eswitch_rep *rep = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_flow_rule *flow_rule;
struct mlx5_flow_handle *flow_rule;
int err;
int i;
......@@ -360,7 +360,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return 0;
err_del_flow_rule:
mlx5_del_flow_rule(rep->vport_rx_rule);
mlx5_del_flow_rules(rep->vport_rx_rule);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
......@@ -375,7 +375,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
int i;
mlx5e_tc_cleanup(priv);
mlx5_del_flow_rule(rep->vport_rx_rule);
mlx5_del_flow_rules(rep->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
......
......@@ -47,21 +47,22 @@
struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
struct mlx5_esw_flow_attr *attr;
};
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
u32 action, u32 flow_tag)
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
u32 action, u32 flow_tag)
{
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_flow_destination dest = { 0 };
struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule;
struct mlx5_flow_handle *rule;
bool table_created = false;
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
......@@ -94,9 +95,9 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
action, flow_tag,
&dest);
rule = mlx5_add_flow_rules(priv->fs.tc.t, spec,
action, flow_tag,
&dest, 1);
if (IS_ERR(rule))
goto err_add_rule;
......@@ -114,9 +115,10 @@ err_create_ft:
return rule;
}
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
......@@ -129,7 +131,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule,
struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
......@@ -140,7 +142,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
if (esw && esw->mode == SRIOV_OFFLOADS)
mlx5_eswitch_del_vlan_action(esw, attr);
mlx5_del_flow_rule(rule);
mlx5_del_flow_rules(rule);
mlx5_fc_destroy(priv->mdev, counter);
......@@ -450,7 +452,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *old = NULL;
struct mlx5_flow_handle *old = NULL;
struct mlx5_esw_flow_attr *old_attr = NULL;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
......@@ -511,7 +513,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
goto out;
err_del_rule:
mlx5_del_flow_rule(flow->rule);
mlx5_del_flow_rules(flow->rule);
err_free:
if (!old)
......
......@@ -56,7 +56,7 @@ struct esw_uc_addr {
/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
struct l2addr_node node;
struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
u32 refcnt;
};
......@@ -65,7 +65,7 @@ struct vport_addr {
struct l2addr_node node;
u8 action;
u32 vport;
struct mlx5_flow_rule *flow_rule; /* SRIOV only */
struct mlx5_flow_handle *flow_rule; /* SRIOV only */
/* A flag indicating that mac was added due to mc promiscuous vport */
bool mc_promisc;
};
......@@ -237,13 +237,13 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
}
/* E-Switch FDB */
static struct mlx5_flow_rule *
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
int match_header = (is_zero_ether_addr(mac_c) ? 0 :
MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_rule *flow_rule = NULL;
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
......@@ -286,9 +286,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
dmac_v, dmac_c, vport);
spec->match_criteria_enable = match_header;
flow_rule =
mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
......@@ -300,7 +300,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
return flow_rule;
}
static struct mlx5_flow_rule *
static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
u8 mac_c[ETH_ALEN];
......@@ -309,7 +309,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}
static struct mlx5_flow_rule *
static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
u8 mac_c[ETH_ALEN];
......@@ -322,7 +322,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}
static struct mlx5_flow_rule *
static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
u8 mac_c[ETH_ALEN];
......@@ -515,7 +515,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
del_l2_table_entry(esw->dev, esw_uc->table_index);
if (vaddr->flow_rule)
mlx5_del_flow_rule(vaddr->flow_rule);
mlx5_del_flow_rules(vaddr->flow_rule);
vaddr->flow_rule = NULL;
l2addr_hash_del(esw_uc);
......@@ -562,7 +562,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
case MLX5_ACTION_DEL:
if (!iter_vaddr)
continue;
mlx5_del_flow_rule(iter_vaddr->flow_rule);
mlx5_del_flow_rules(iter_vaddr->flow_rule);