eswitch.c 61.6 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
37
#include <linux/mlx5/fs.h>
38
39
#include "mlx5_core.h"
#include "eswitch.h"
40
#include "fs_core.h"
41

42
43
#define UPLINK_VPORT 0xFFFF

44
45
46
47
48
49
/* Pending reconciliation action for a cached vport address entry */
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

50
51
52
53
54
/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u32                    vport;
55
56
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFs */
57
58
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
59
60
61
62
63
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
64
	PROMISC_CHANGE = BIT(3),
65
66
};

67
68
/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
69
70
			    MC_ADDR_CHANGE | \
			    PROMISC_CHANGE)
71
72

static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
73
74
					u32 events_mask)
{
75
76
	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
96
97
98
	if (events_mask & PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);
99

100
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
101
102
}

103
104
105
106
/* E-Switch vport context HW commands */

/* Issue MODIFY_ESW_VPORT_CONTEXT for @vport with caller-prepared @in of
 * @inlen bytes; the opcode and vport fields are filled in here.
 */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
118
				  u16 vlan, u8 qos, u8 set_flags)
119
{
120
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
121
122
123

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
124
		return -EOPNOTSUPP;
125

126
127
128
129
	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
130
131
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);
132
133

	if (set_flags & SET_VLAN_INSERT) {
134
135
136
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);
137

138
139
140
141
142
143
144
145
146
147
148
149
150
151
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

152
/* E-Switch FDB */
Mark Bloch's avatar
Mark Bloch committed
153
static struct mlx5_flow_handle *
154
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
155
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
156
{
157
158
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
Mark Bloch's avatar
Mark Bloch committed
159
	struct mlx5_flow_handle *flow_rule = NULL;
160
	struct mlx5_flow_act flow_act = {0};
161
	struct mlx5_flow_destination dest = {};
162
	struct mlx5_flow_spec *spec;
163
164
	void *mv_misc = NULL;
	void *mc_misc = NULL;
165
166
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;
167

168
169
	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;
170

171
172
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
173
		return NULL;
174

175
	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
176
			      outer_headers.dmac_47_16);
177
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
178
179
			      outer_headers.dmac_47_16);

180
	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
181
182
183
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}
184

185
	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
186
187
188
189
		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					misc_parameters);
		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					misc_parameters);
190
191
192
193
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

194
195
196
197
198
199
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
200
	spec->match_criteria_enable = match_header;
201
	flow_act.action =  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
202
	flow_rule =
Mark Bloch's avatar
Mark Bloch committed
203
		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
204
				    &flow_act, &dest, 1);
205
	if (IS_ERR(flow_rule)) {
206
207
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
208
209
210
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}
211
212

	kvfree(spec);
213
214
215
	return flow_rule;
}

Mark Bloch's avatar
Mark Bloch committed
216
static struct mlx5_flow_handle *
217
218
219
220
221
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
222
223
224
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

Mark Bloch's avatar
Mark Bloch committed
225
static struct mlx5_flow_handle *
226
227
228
229
230
231
232
233
234
235
236
237
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

Mark Bloch's avatar
Mark Bloch committed
238
static struct mlx5_flow_handle *
239
240
241
242
243
244
245
246
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
247
248
}

249
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
250
{
251
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
252
	struct mlx5_flow_table_attr ft_attr = {};
253
	struct mlx5_core_dev *dev = esw->dev;
254
	struct mlx5_flow_namespace *root_ns;
255
	struct mlx5_flow_table *fdb;
256
257
258
259
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
260
	u8 *dmac;
261
	int err = 0;
262
263
264
265

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

266
267
268
	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
269
		return -EOPNOTSUPP;
270
	}
271

272
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
273
274
275
276
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
277
278
279

	ft_attr.max_fte = table_size;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
280
	if (IS_ERR(fdb)) {
281
282
283
284
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
285
	esw->fdb_table.fdb = fdb;
286

287
	/* Addresses group : Full match unicast/multicast addresses */
288
289
290
291
292
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
293
294
	/* Preserve 2 entries for allmulti and promisc rules*/
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
295
296
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
297
	if (IS_ERR(g)) {
298
299
300
301
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
302
	esw->fdb_table.legacy.addr_grp = g;
303
304
305
306
307
308
309
310
311

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
312
	if (IS_ERR(g)) {
313
314
315
316
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
317
	esw->fdb_table.legacy.allmulti_grp = g;
318
319
320
321
322
323
324
325
326
327
328

	/* Promiscuous group :
	 * One rule that forward all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
329
	if (IS_ERR(g)) {
330
331
332
333
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
334
	esw->fdb_table.legacy.promisc_grp = g;
335

336
out:
337
	if (err) {
338
339
340
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
			esw->fdb_table.legacy.allmulti_grp = NULL;
341
		}
342
343
344
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
			esw->fdb_table.legacy.addr_grp = NULL;
345
346
347
348
349
350
351
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
			mlx5_destroy_flow_table(esw->fdb_table.fdb);
			esw->fdb_table.fdb = NULL;
		}
	}

352
	kvfree(flow_group_in);
353
	return err;
354
355
}

356
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
357
358
359
360
{
	if (!esw->fdb_table.fdb)
		return;

361
	esw_debug(esw->dev, "Destroy FDB Table\n");
362
363
364
	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
365
366
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
367
368
369
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
370
371
372
373
374
375
376
377
378
379
380
381
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

382
383
384
385
386
387
388
389
	/* Skip mlx5_mpfs_add_mac for PFs,
	 * it is already done by the PF netdev in mlx5e_execute_l2_action
	 */
	if (!vport)
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
390
		esw_warn(esw->dev,
391
392
393
			 "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
		return err;
394
	}
395
	vaddr->mpfs = true;
396

397
fdb_add:
398
399
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
400
401
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

402
403
404
	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

405
	return 0;
406
407
}

408
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
409
{
410
411
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
412
	int err = 0;
413

414
415
416
417
418
	/* Skip mlx5_mpfs_del_mac for PFs,
	 * it is already done by the PF netdev in mlx5e_execute_l2_action
	 */
	if (!vport || !vaddr->mpfs)
		goto fdb_del;
419

420
421
422
423
424
425
	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;
426

427
fdb_del:
428
	if (vaddr->flow_rule)
Mark Bloch's avatar
Mark Bloch committed
429
		mlx5_del_flow_rules(vaddr->flow_rule);
430
431
432
433
434
	vaddr->flow_rule = NULL;

	return 0;
}

435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	u32 vport_idx = 0;

	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
		struct mlx5_vport *vport = &esw->vports[vport_idx];
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_idx)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_idx);
				continue;
			}
			iter_vaddr->vport = vport_idx;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_idx);
470
			iter_vaddr->mc_promisc = true;
471
472
473
474
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
Mark Bloch's avatar
Mark Bloch committed
475
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
476
477
478
479
480
481
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
/* Add an MC address for a vport: ref-count it in the global e-switch MC
 * table (creating the entry and its uplink forwarding rule on first
 * use), mirror it to allmulti vports, and install the vport's own FDB
 * rule.  mc_promisc-originated entries do not take a refcount.
 */
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
528

529
530
531
532
533
534
535
	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
536
537
538
			 mac, vport);
		return -EINVAL;
	}
539
540
541
542
543
544
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
Mark Bloch's avatar
Mark Bloch committed
545
		mlx5_del_flow_rules(vaddr->flow_rule);
546
547
	vaddr->flow_rule = NULL;

548
549
550
551
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
552
		return 0;
553

554
555
556
	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

557
	if (esw_mc->uplink_rule)
Mark Bloch's avatar
Mark Bloch committed
558
		mlx5_del_flow_rules(esw_mc->uplink_rule);
559
560

	l2addr_hash_del(esw_mc);
561
562
563
	return 0;
}

564
565
566
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	/* Walk the cached list and execute the action marked by the
	 * preceding esw_update_vport_addr_list() diff pass.
	 */
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

599
600
601
/* Sync vport UC/MC list from vport context: query the current firmware
 * address list, diff it against the cached hash, and mark every entry
 * MLX5_ACTION_ADD / MLX5_ACTION_DEL / MLX5_ACTION_NONE for the
 * following esw_apply_vport_addr_list() pass.
 */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	/* Assume everything is stale; entries seen in the query below are
	 * reset to NONE, leaving true removals marked DEL.
	 */
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, its now converted to be original
			 * vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
/* Sync vport UC/MC list from vport context
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	/* Mirror every global e-switch MC address into this allmulti
	 * vport's own MC list as an mc_promisc entry.
	 */
	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			/* Still present globally: cancel a pending delete */
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
	struct mlx5_vport *vport = &esw->vports[vport_num];

	/* Allmulti state unchanged (rule presence already matches the
	 * requested mode)? Then only the promisc rule may need work.
	 */
	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
				esw_fdb_set_vport_allmulti_rule(esw, vport_num);
		/* Shared uplink allmulti rule, refcounted across vports */
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								UPLINK_VPORT);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
								     vport_num);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport_num,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport_num, promisc_all, promisc_mc);

	/* Only trusted, enabled vports may enter promiscuous modes */
	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
				(promisc_all || promisc_mc));
}

794
static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
795
796
{
	struct mlx5_core_dev *dev = vport->dev;
797
	struct mlx5_eswitch *esw = dev->priv.eswitch;
798
799
800
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
801
802
803
804
805
806
807
808
809
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_UC);
	}
810

811
812
813
	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_MC);
814
815
816
817
818
819
820
821
822
	}

	if (vport->enabled_events & PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport->vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport->vport);
	}

	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
823
824
825
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_MC);
	}
826

827
	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
828
829
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
830
					     vport->enabled_events);
831
832
}

833
834
835
836
837
838
839
840
841
842
843
/* Workqueue entry point for vport context-change events: serializes the
 * actual handling under the e-switch state lock.
 */
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

844
845
static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport)
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress acl table contains 2 rules:
	 * 1)Allow traffic with vlan_tag=vst_vlan_id
	 * 2)Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

862
863
864
865
866
	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->egress.acl))
		return 0;
867
868
869
870

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

871
872
	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						    vport->vport);
873
	if (!root_ns) {
874
		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
875
		return -EOPNOTSUPP;
876
877
	}

878
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
879
	if (!flow_group_in)
880
		return -ENOMEM;
881
882

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
883
	if (IS_ERR(acl)) {
884
885
886
887
888
889
890
891
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
892
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
893
894
895
896
897
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
898
	if (IS_ERR(vlan_grp)) {
899
900
901
902
903
904
905
906
907
908
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
909
	if (IS_ERR(drop_grp)) {
910
911
912
913
914
915
916
917
918
919
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
920
	kvfree(flow_group_in);
921
922
923
924
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
925
	return err;
926
927
}

928
929
930
931
/* Delete the vport's egress ACL rules (allowed-vlan and drop), leaving
 * the ACL table and groups in place for reuse.
 */
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
		mlx5_del_flow_rules(vport->egress.allowed_vlan);

	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
		mlx5_del_flow_rules(vport->egress.drop_rule);

	vport->egress.allowed_vlan = NULL;
	vport->egress.drop_rule = NULL;
}

941
942
943
944
945
946
947
948
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

949
	esw_vport_cleanup_egress_rules(esw, vport);
950
951
952
953
954
955
956
957
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

958
959
static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 *      1 allow rule from one of the first 3 groups.
	 *      1 drop rule from the last group):
	 * 1)Allow untagged traffic with smac=original mac.
	 * 2)Allow untagged traffic.
	 * 3)Allow traffic with smac=original mac.
	 * 4)Drop all other traffic.
	 */
	int table_size = 4;
	int err = 0;

980
981
982
983
984
	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->ingress.acl))
		return 0;
985
986
987
988

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

989
990
	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						    vport->vport);
991
	if (!root_ns) {
992
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
993
		return -EOPNOTSUPP;
994
995
	}

996
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
997
	if (!flow_group_in)
998
		return -ENOMEM;
999
1000

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1001
	if (IS_ERR(acl)) {
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.acl = acl;

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1012
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1013
1014
1015
1016
1017
1018
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
1019
	if (IS_ERR(g)) {
1020
1021
1022
1023
1024
1025
1026
1027
1028
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_spoofchk_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1029
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1030
1031
1032
1033
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(acl, flow_group_in);
1034
	if (IS_ERR(g)) {
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(acl, flow_group_in);
1050
	if (IS_ERR(g)) {
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_spoofchk_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(acl, flow_group_in);
1063
	if (IS_ERR(g)) {
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.drop_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_spoofchk_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
			mlx5_destroy_flow_group(
				vport->ingress.allow_untagged_spoofchk_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.acl))
			mlx5_destroy_flow_table(vport->ingress.acl);
	}

1086
	kvfree(flow_group_in);
1087
	return err;
1088
1089
}

1090
1091
1092
1093
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
Mark Bloch's avatar
Mark Bloch committed
1094
		mlx5_del_flow_rules(vport->ingress.drop_rule);
1095
1096

	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
Mark Bloch's avatar
Mark Bloch committed
1097
		mlx5_del_flow_rules(vport->ingress.allow_rule);
1098

1099
	vport->ingress.drop_rule = NULL;
1100
	vport->ingress.allow_rule = NULL;
1101
1102
}

1103
1104
1105
1106
1107
1108
1109
1110
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

1111
	esw_vport_cleanup_ingress_rules(esw, vport);
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
	vport->ingress.allow_spoofchk_only_grp = NULL;
	vport->ingress.allow_untagged_only_grp = NULL;
	vport->ingress.allow_untagged_spoofchk_grp = NULL;
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
1127
1128
1129
	struct mlx5_fc *counter = vport->ingress.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
1130
	struct mlx5_flow_act flow_act = {0};
1131
	struct mlx5_flow_spec *spec;
1132
	int dest_num = 0;
1133
	int err = 0;
1134
	u8 *smac_v;
1135

1136
1137
1138
1139
1140
	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
		mlx5_core_warn(esw->dev,
			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
			       vport->vport);
		return -EPERM;
1141
1142
	}

1143
1144
	esw_vport_cleanup_ingress_rules(esw, vport);

1145
	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
1146
		esw_vport_disable_ingress_acl(esw, vport);
1147
		return 0;
1148
1149
	}

1150
1151
1152
1153
1154
1155
1156
	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable ingress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}
1157
1158
1159

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
1160
		  vport->vport, vport->info.vlan, vport->info.qos);
1161

1162
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1163
	if (!spec) {
1164
1165
1166
1167
		err = -ENOMEM;
		goto out;
	}

1168
	if (vport->info.vlan || vport->info.qos)
1169
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1170