/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

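/* Vport number used by this file to address the uplink (physical port)
 * in FDB rule destinations and misc source_port matches.
 */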
#define UPLINK_VPORT 0xFFFF

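/* Addr sync state, driving a mark-and-sweep: existing entries are first
 * marked MLX5_ACTION_DEL, entries still reported by the vport context are
 * re-marked MLX5_ACTION_NONE (new ones are added as MLX5_ACTION_ADD), and
 * esw_apply_vport_addr_list() then pushes the result to HW.
 */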
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u32                    vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to the MPFS L2 table */
	/* A flag indicating that the mac was added due to an mc promiscuous vport */
	bool mc_promisc;
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
	PROMISC_CHANGE = BIT(3),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE | \
			    PROMISC_CHANGE)

static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
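	/* vport 0 (the PF's own vport) is addressed implicitly; any other
	 * vport must be addressed through the other_vport field.
	 */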
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* E-Switch vport context HW commands */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

/* E-Switch FDB */
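/* Push an FDB rule steering frames whose DMAC matches mac_v under mask
 * mac_c to @vport. An rx_rule additionally matches on
 * source_port == UPLINK_VPORT, so it only catches traffic arriving from
 * the wire; the promiscuous rules rely on this.
 */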
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					misc_parameters);
		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
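	/* Match only the multicast bit of the DMAC; a single rule then
	 * catches all multicast traffic.
	 */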
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

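/* The legacy FDB holds one large group for exact DMAC matches and
 * reserves the last two flow entries for the allmulti and promisc
 * catch-all rules.
 */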
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	ft_attr.max_fte = table_size;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve the last 2 entries for the allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forward all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
			esw->fdb_table.legacy.allmulti_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
			esw->fdb_table.legacy.addr_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
			mlx5_destroy_flow_table(esw->fdb_table.fdb);
			esw->fdb_table.fdb = NULL;
		}
	}

	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for PFs,
	 * it is already done by the PF netdev in mlx5e_execute_l2_action
	 */
	if (!vport)
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for PFs,
	 * it is already done by the PF netdev in mlx5e_execute_l2_action
	 */
	if (!vport || !vaddr->mpfs)
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}

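/* Mirror an MC address add/del into the shadow mc_list of every other
 * vport that is currently in allmulti mode, so their FDB rules stay in
 * sync with the global mc_table.
 */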
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	u32 vport_idx = 0;

	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
		struct mlx5_vport *vport = &esw->vports[vport_idx];
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_idx)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_idx);
				continue;
			}
			iter_vaddr->vport = vport_idx;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_idx);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it is now converted to a
			 * regular vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

/* Sync the global mc_table into the MC list of an mc-promiscuous vport.
 * Must be called after esw_update_vport_addr_list.
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
	struct mlx5_vport *vport = &esw->vports[vport_num];

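	/* Nothing to do if the allmulti rule's existence already matches
	 * the requested mc_promisc state.
	 */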
	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
				esw_fdb_set_vport_allmulti_rule(esw, vport_num);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								UPLINK_VPORT);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
								     vport_num);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport_num,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport_num, promisc_all, promisc_mc);

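	/* Only trusted, enabled vports may enter promiscuous or allmulti
	 * mode.
	 */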
	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
				(promisc_all || promisc_mc));
}

static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_MC);
	}

	if (vport->enabled_events & PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport->vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport->vport);
	}

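	/* Apply the MC list only after the mc promisc update above, so
	 * entries added on behalf of allmulti vports get their FDB rules.
	 */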
	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress ACL table contains 2 rules:
	 * 1) Allow traffic with vlan_tag == vst_vlan_id
	 * 2) Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->egress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kvfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
	return err;
}

static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
		mlx5_del_flow_rules(vport->egress.allowed_vlan);

	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
		mlx5_del_flow_rules(vport->egress.drop_rule);

	vport->egress.allowed_vlan = NULL;
	vport->egress.drop_rule = NULL;
}

static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	/* The ingress ACL table contains 4 groups
	 * (at most 2 rules are active at the same time:
	 *      1 allow rule from one of the first 3 groups,
	 *      1 drop rule from the last group):
	 * 1) Allow untagged traffic with smac == original mac.
	 * 2) Allow untagged traffic.
	 * 3) Allow traffic with smac == original mac.
	 * 4) Drop all other traffic.
	 */
	int table_size = 4;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->ingress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.acl = acl;

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_spoofchk_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_spoofchk_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.drop_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_spoofchk_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
			mlx5_destroy_flow_group(
				vport->ingress.allow_untagged_spoofchk_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.acl))
			mlx5_destroy_flow_table(vport->ingress.acl);
	}

	kvfree(flow_group_in);
	return err;
}

static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
		mlx5_del_flow_rules(vport->ingress.drop_rule);

	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
		mlx5_del_flow_rules(vport->ingress.allow_rule);

	vport->ingress.drop_rule = NULL;
	vport->ingress.allow_rule = NULL;
}

static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
	vport->ingress.allow_spoofchk_only_grp = NULL;
	vport->ingress.allow_untagged_only_grp = NULL;
	vport->ingress.allow_untagged_spoofchk_grp = NULL;
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *smac_v;

	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
		mlx5_core_warn(esw->dev,
			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
			       vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

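	/* Neither VST vlan/qos nor spoofchk is set: no ingress ACL needed. */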
	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_vport_disable_ingress_acl(esw, vport);
		return 0;
	}

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable ingress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	if (vport->info.vlan || vport->info.qos)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);

	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;