/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

#define UPLINK_VPORT 0xFFFF

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node;
	u32                table_index;
	u32                vport;
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u32                    vport;
	struct mlx5_flow_handle *flow_rule; /* SRIOV only */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
	PROMISC_CHANGE = BIT(3),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE | \
			    PROMISC_CHANGE)

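/* Arm the NIC vport context change event: ask FW to raise an event
 * whenever one of the requested properties (UC/MC address list,
 * promiscuous mode) of this vport changes.
 */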
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* E-Switch vport context HW commands */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

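/* Configure cvlan strip/insert (VST) in the E-Switch vport context.
 * set_flags selects which behaviors to update; HW inserts the cvlan
 * only when the packet carries none (insert_if_not_exist).
 */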
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

/* HW L2 Table (MPFS) management */
static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
				  u8 *mac, u8 vlan_valid, u16 vlan)
{
	u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
	u8 *in_mac_addr;

	MLX5_SET(set_l2_table_entry_in, in, opcode,
		 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
	MLX5_SET(set_l2_table_entry_in, in, table_index, index);
	MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
	MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);

	in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
	ether_addr_copy(&in_mac_addr[2], mac);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
	u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};

	MLX5_SET(delete_l2_table_entry_in, in, opcode,
		 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
	MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

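/* The L2 (MPFS) table indices are managed with a plain bitmap:
 * find_first_zero_bit()/__set_bit() claim a free entry and
 * __clear_bit() releases it.
 */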
static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
{
	int err = 0;

	*ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
	if (*ix >= l2_table->size)
		err = -ENOSPC;
	else
		__set_bit(*ix, l2_table->bitmap);

	return err;
}

static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
{
	__clear_bit(ix, l2_table->bitmap);
}

static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
			      u8 vlan_valid, u16 vlan,
			      u32 *index)
{
	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
	int err;

	err = alloc_l2_table_index(l2_table, index);
	if (err)
		return err;

	err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
	if (err)
		free_l2_table_index(l2_table, *index);

	return err;
}

static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
{
	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;

	del_l2_table_entry_cmd(dev, index);
	free_l2_table_index(l2_table, index);
}

/* E-Switch FDB */
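/* Add a legacy-mode FDB rule steering traffic to @vport. TX rules
 * (rx_rule == false) match on destination MAC only; RX rules also
 * match source_port == UPLINK_VPORT, i.e. traffic coming from the wire.
 */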
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					misc_parameters);
		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

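/* Match only the multicast (group) bit of the DMAC, so a single rule
 * forwards all multicast frames to @vport.
 */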
static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

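/* Empty MAC match with rx_rule set: catches any frame arriving from
 * the uplink that was not claimed by the address/allmulti groups.
 */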
static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

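/* The legacy FDB is laid out as three flow groups, matched in order:
 * [0 .. size-3] exact DMAC matches, [size-2] the single allmulti rule,
 * [size-1] the single promiscuous (source_port) catch-all rule.
 */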
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	ft_attr.max_fte = table_size;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
			esw->fdb_table.legacy.allmulti_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
			esw->fdb_table.legacy.addr_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
			mlx5_destroy_flow_table(esw->fdb_table.fdb);
			esw->fdb_table.fdb = NULL;
		}
	}

	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

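/* Claim an MPFS (HW L2 table) entry for the vport UC MAC and, in SRIOV
 * legacy mode, add an FDB rule steering that DMAC to the vport; fails
 * with -EEXIST if the MAC is already owned by another vport.
 */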
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (esw_uc) {
		esw_warn(esw->dev,
			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
			 mac, vport, esw_uc->vport);
		return -EEXIST;
	}

	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
	if (!esw_uc)
		return -ENOMEM;
	esw_uc->vport = vport;

	err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
	if (err)
		goto abort;

	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
	return err;
abort:
	l2addr_hash_del(esw_uc);
	return err;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (!esw_uc || esw_uc->vport != vport) {
		esw_debug(esw->dev,
			  "MAC(%pM) doesn't belong to vport (%d)\n",
			  mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);

	del_l2_table_entry(esw->dev, esw_uc->table_index);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	l2addr_hash_del(esw_uc);
	return 0;
}

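/* Propagate an MC address ADD/DEL to every other vport that currently
 * has an allmulti rule installed; entries added this way are marked
 * mc_promisc and are not counted in the MC address refcount.
 */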
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	u32 vport_idx = 0;

	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
		struct mlx5_vport *vport = &esw->vports[vport_idx];
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_idx)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_idx);
				continue;
			}
			iter_vaddr->vport = vport_idx;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_idx);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

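/* Reconciliation model: every cached entry is first marked for DEL,
 * then entries still reported by FW are flipped back to NONE (or ADDed
 * if new), so a following esw_apply_vport_addr_list() pushes only the
 * delta to HW.
 */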
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted to be the
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

/* Sync vport UC/MC list from vport context
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

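/* The uplink allmulti rule is shared by all allmulti vports and
 * refcounted through esw->mc_promisc: installed with the first
 * allmulti vport, removed with the last.
 */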
/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
				esw_fdb_set_vport_allmulti_rule(esw, vport_num);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								UPLINK_VPORT);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
								     vport_num);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport_num,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport_num, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
				(promisc_all || promisc_mc));
}

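/* Handle a vport context change event under esw->state_lock: re-sync
 * the UC/MC lists and rx mode, apply the delta to HW, then re-arm the
 * change event so further changes keep generating events.
 */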
static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_MC);
	}

	if (vport->enabled_events & PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport->vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport->vport);
	}

	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

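/* Egress ACL: a two-entry per-vport table enforcing VST on transmit;
 * group 0 holds the allowed-vlan rule, group 1 the catch-all drop.
 */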
static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress acl table contains 2 rules:
	 * 1)Allow traffic with vlan_tag=vst_vlan_id
	 * 2)Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->egress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kvfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
	return err;
}

static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
		mlx5_del_flow_rules(vport->egress.allowed_vlan);

	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
		mlx5_del_flow_rules(vport->egress.drop_rule);

	vport->egress.allowed_vlan = NULL;
	vport->egress.drop_rule = NULL;
}

static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

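/* Ingress ACL: a four-group per-vport table implementing spoofchk and
 * VST policies; group order encodes priority, the last group holds the
 * catch-all drop rule.
 */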
static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 *      1 allow rule from one of the first 3 groups.
	 *      1 drop rule from the last group):
	 * 1) Allow untagged traffic with smac=original mac.
	 * 2) Allow untagged traffic.
	 * 3) Allow traffic with smac=original mac.
	 * 4) Drop all other traffic.
	 */
	int table_size = 4;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->ingress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.acl = acl;

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_spoofchk_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_spoofchk_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.drop_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_spoofchk_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
			mlx5_destroy_flow_group(
				vport->ingress.allow_untagged_spoofchk_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.acl))
			mlx5_destroy_flow_table(vport->ingress.acl);
	}

	kvfree(flow_group_in);
	return err;
}

static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
		mlx5_del_flow_rules(vport->ingress.drop_rule);

	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
		mlx5_del_flow_rules(vport->ingress.allow_rule);

	vport->ingress.drop_rule = NULL;
	vport->ingress.allow_rule = NULL;
}

static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
	vport->ingress.allow_spoofchk_only_grp = NULL;
	vport->ingress.allow_untagged_only_grp = NULL;
	vport->ingress.allow_untagged_spoofchk_grp = NULL;
}

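/* (Re)build the ingress allow/drop rules for a vport according to its
 * current VST (vlan/qos) and spoofchk settings; with no policy set,
 * the ingress ACL is torn down entirely.
 */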
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *smac_v;

	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
		mlx5_core_warn(esw->dev,
			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
			       vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_vport_disable_ingress_acl(esw, vport);
		return 0;
	}

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable ingress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;