eswitch.c 72.1 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
37
#include <linux/mlx5/fs.h>
38
#include "mlx5_core.h"
39
#include "lib/eq.h"
40
#include "eswitch.h"
41
#include "fs_core.h"
42
#include "ecpf.h"
43
44
45
46
47
48
49

/* Pending sync action recorded on a vport UC/MC address entry;
 * applied later by esw_apply_vport_addr_list().
 */
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

50
51
52
53
/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
54
	u16                    vport;
55
56
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFs */
57
58
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
59
60
};

61
62
63
/* Forward declarations for legacy-mode teardown helpers defined below */
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);

64
65
/* Translate an absolute vport number into its struct mlx5_vport entry.
 *
 * Returns ERR_PTR(-EPERM) when @esw is absent or the device is not a
 * vport group manager, ERR_PTR(-EINVAL) when the number maps outside
 * the vport array.
 */
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 index;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return ERR_PTR(-EPERM);

	index = mlx5_eswitch_vport_num_to_index(esw, vport_num);
	if (index > esw->total_vports - 1) {
		esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
			  vport_num, index);
		return ERR_PTR(-EINVAL);
	}

	return &esw->vports[index];
}

83
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
84
85
					u32 events_mask)
{
86
87
	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
88
89
90
91
92
93
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
94
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
95
96
97
98
99
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

100
	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
101
102
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
103
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
104
105
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
106
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
107
108
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);
109

110
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
111
112
}

113
114
115
116
/* E-Switch vport context HW commands */
/* E-Switch vport context HW commands */

/* Issue MODIFY_ESW_VPORT_CONTEXT for @vport. The caller builds the
 * esw_vport_context payload and field_select bits in @in; only the
 * common command header is filled in here.
 */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);

	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
/* Public wrapper: modify the e-switch vport context of @vport.
 * @in/@inlen: caller-built modify_esw_vport_context_in command buffer.
 */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
					  void *in, int inlen)
{
	return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
}

/* Issue QUERY_ESW_VPORT_CONTEXT for @vport into @out (@outlen bytes). */
static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
				       void *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	/* Use the query layout for all fields. The original set
	 * vport_number/other_vport through the modify_* struct name,
	 * which only worked because both layouts place those fields at
	 * identical offsets in the common command header.
	 */
	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

/* Public wrapper: query the e-switch vport context of @vport into @out */
int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
					 void *out, int outlen)
{
	return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
}

150
/* Program VST cvlan behaviour for @vport: optionally strip the customer
 * VLAN and/or insert @vlan/@qos when the packet carries no VLAN,
 * according to the SET_VLAN_STRIP / SET_VLAN_INSERT bits in @set_flags.
 * Both field_select bits are always set so clearing a flag disables the
 * corresponding behaviour.
 */
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
	bool do_strip = set_flags & SET_VLAN_STRIP;
	bool do_insert = set_flags & SET_VLAN_INSERT;

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (do_strip)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (do_insert) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

185
/* E-Switch FDB */
Mark Bloch's avatar
Mark Bloch committed
186
static struct mlx5_flow_handle *
187
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
188
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
189
{
190
191
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
Mark Bloch's avatar
Mark Bloch committed
192
	struct mlx5_flow_handle *flow_rule = NULL;
193
	struct mlx5_flow_act flow_act = {0};
194
	struct mlx5_flow_destination dest = {};
195
	struct mlx5_flow_spec *spec;
196
197
	void *mv_misc = NULL;
	void *mc_misc = NULL;
198
199
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;
200

201
202
	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;
203

204
205
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
206
		return NULL;
207

208
	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
209
			      outer_headers.dmac_47_16);
210
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
211
212
			      outer_headers.dmac_47_16);

213
	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
214
215
216
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}
217

218
	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
219
220
221
222
		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					misc_parameters);
		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					misc_parameters);
223
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
224
225
226
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

227
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
228
	dest.vport.num = vport;
229
230
231
232

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
233
	spec->match_criteria_enable = match_header;
234
	flow_act.action =  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
235
	flow_rule =
236
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
237
				    &flow_act, &dest, 1);
238
	if (IS_ERR(flow_rule)) {
239
240
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
241
242
243
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}
244
245

	kvfree(spec);
246
247
248
	return flow_rule;
}

Mark Bloch's avatar
Mark Bloch committed
249
static struct mlx5_flow_handle *
250
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
251
252
253
254
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
255
256
257
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

Mark Bloch's avatar
Mark Bloch committed
258
static struct mlx5_flow_handle *
259
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
260
261
262
263
264
265
266
267
268
269
270
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

Mark Bloch's avatar
Mark Bloch committed
271
static struct mlx5_flow_handle *
272
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
273
274
275
276
277
278
279
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
280
281
}

282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
/* Legacy-mode FDB priorities: the VEPA table sits ahead of the FDB */
enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};

/* Create the two-entry VEPA flow table used in legacy VEPA mode; the
 * steering rules themselves are installed separately.
 */
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *vepa_fdb;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* num FTE 2, num FG 2 */
	vepa_fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
						       2, 2, 0, 0);
	if (IS_ERR(vepa_fdb)) {
		int err = PTR_ERR(vepa_fdb);

		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = vepa_fdb;

	return 0;
}

313
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
314
{
315
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
316
	struct mlx5_flow_table_attr ft_attr = {};
317
	struct mlx5_core_dev *dev = esw->dev;
318
	struct mlx5_flow_namespace *root_ns;
319
	struct mlx5_flow_table *fdb;
320
321
322
323
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
324
	u8 *dmac;
325
	int err = 0;
326
327
328
329

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

330
	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
331
332
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
333
		return -EOPNOTSUPP;
334
	}
335

336
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
337
338
339
340
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
341
	ft_attr.max_fte = table_size;
342
	ft_attr.prio = LEGACY_FDB_PRIO;
343
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
344
	if (IS_ERR(fdb)) {
345
346
347
348
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
349
	esw->fdb_table.legacy.fdb = fdb;
350

351
	/* Addresses group : Full match unicast/multicast addresses */
352
353
354
355
356
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
357
358
	/* Preserve 2 entries for allmulti and promisc rules*/
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
359
360
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
361
	if (IS_ERR(g)) {
362
363
364
365
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
366
	esw->fdb_table.legacy.addr_grp = g;
367
368
369
370
371
372
373
374
375

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
376
	if (IS_ERR(g)) {
377
378
379
380
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
381
	esw->fdb_table.legacy.allmulti_grp = g;
382
383
384
385
386
387
388
389
390
391
392

	/* Promiscuous group :
	 * One rule that forward all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
393
	if (IS_ERR(g)) {
394
395
396
397
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
398
	esw->fdb_table.legacy.promisc_grp = g;
399

400
out:
401
402
	if (err)
		esw_destroy_legacy_fdb_table(esw);
403

404
	kvfree(flow_group_in);
405
	return err;
406
407
}

408
409
410
411
412
413
414
415
416
417
/* Destroy the VEPA flow table if present; safe to call when absent */
static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table *vepa_fdb = esw->fdb_table.legacy.vepa_fdb;

	esw_debug(esw->dev, "Destroy VEPA Table\n");
	if (!vepa_fdb)
		return;

	mlx5_destroy_flow_table(vepa_fdb);
	esw->fdb_table.legacy.vepa_fdb = NULL;
}

418
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
419
{
420
	esw_debug(esw->dev, "Destroy FDB Table\n");
421
	if (!esw->fdb_table.legacy.fdb)
422
423
		return;

424
425
426
427
428
429
	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
430
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
431

432
	esw->fdb_table.legacy.fdb = NULL;
433
434
435
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
436
437
}

438
439
440
441
static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	int err;

442
443
	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));

444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
	err = esw_create_legacy_vepa_table(esw);
	if (err)
		return err;

	err = esw_create_legacy_fdb_table(esw);
	if (err)
		esw_destroy_legacy_vepa_table(esw);

	return err;
}

/* Tear down everything esw_create_legacy_table() built, plus any VEPA
 * steering rules installed while in VEPA mode (rules before tables).
 */
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}

462
463
464
465
466
467
468
/* E-Switch vport UC/MC lists management */
/* Callback applying one pending vport_addr entry (add or delete) */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
469
	u16 vport = vaddr->vport;
470
471
	int err;

472
473
	/* Skip mlx5_mpfs_add_mac for eswitch_managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
474
	 */
475
	if (esw->manager_vport == vport)
476
477
478
479
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
480
		esw_warn(esw->dev,
481
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
482
483
			 mac, vport, err);
		return err;
484
	}
485
	vaddr->mpfs = true;
486

487
fdb_add:
488
	/* SRIOV is enabled: Forward UC MAC to vport */
489
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
490
491
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

492
493
494
	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

495
	return 0;
496
497
}

498
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
499
{
500
	u8 *mac = vaddr->node.addr;
501
	u16 vport = vaddr->vport;
502
	int err = 0;
503

504
505
	/* Skip mlx5_mpfs_del_mac for eswitch managerss,
	 * it is already done by its netdev in mlx5e_execute_l2_action
506
	 */
507
	if (!vaddr->mpfs || esw->manager_vport == vport)
508
		goto fdb_del;
509

510
511
512
513
514
515
	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;
516

517
fdb_del:
518
	if (vaddr->flow_rule)
Mark Bloch's avatar
Mark Bloch committed
519
		mlx5_del_flow_rules(vaddr->flow_rule);
520
521
522
523
524
	vaddr->flow_rule = NULL;

	return 0;
}

525
526
527
528
529
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
530
531
	struct mlx5_vport *vport;
	u16 i, vport_num;
532

533
	mlx5_esw_for_all_vports(esw, i, vport) {
534
535
536
537
538
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
539
		vport_num = vport->vport;
540
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
541
		    vaddr->vport == vport_num)
542
543
544
545
546
547
548
549
550
551
552
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
553
					 mac, vport_num);
554
555
				continue;
			}
556
			iter_vaddr->vport = vport_num;
557
558
559
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
560
							       vport_num);
561
			iter_vaddr->mc_promisc = true;
562
563
564
565
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
Mark Bloch's avatar
Mark Bloch committed
566
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
567
568
569
570
571
572
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

573
574
575
576
577
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
578
	u16 vport = vaddr->vport;
579

580
	if (!esw->fdb_table.legacy.fdb)
581
582
583
584
585
586
587
588
589
590
591
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
592
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
593
594
595
596

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

597
add:
598
599
600
601
602
603
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

604
605
606
607
608
609
610
611
612
613
614
615
616
617
	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
618
	u16 vport = vaddr->vport;
619

620
	if (!esw->fdb_table.legacy.fdb)
621
622
623
624
625
626
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
627
628
629
			 mac, vport);
		return -EINVAL;
	}
630
631
632
633
634
635
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
Mark Bloch's avatar
Mark Bloch committed
636
		mlx5_del_flow_rules(vaddr->flow_rule);
637
638
	vaddr->flow_rule = NULL;

639
640
641
642
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
643
		return 0;
644

645
646
647
	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

648
	if (esw_mc->uplink_rule)
Mark Bloch's avatar
Mark Bloch committed
649
		mlx5_del_flow_rules(esw_mc->uplink_rule);
650
651

	l2addr_hash_del(esw_mc);
652
653
654
	return 0;
}

655
656
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
657
				      struct mlx5_vport *vport, int list_type)
658
{
659
660
661
662
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
663
664
665
666
667
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

668
669
670
671
672
673
	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
674
	for_each_l2hash_node(node, tmp, hash, hi) {
675
		addr = container_of(node, struct vport_addr, node);
676
677
		switch (addr->action) {
		case MLX5_ACTION_ADD:
678
			vport_addr_add(esw, addr);
679
680
681
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
682
			vport_addr_del(esw, addr);
683
684
685
686
687
688
			l2addr_hash_del(addr);
			break;
		}
	}
}

689
690
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
691
				       struct mlx5_vport *vport, int list_type)
692
{
693
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
694
	u8 (*mac_list)[ETH_ALEN];
695
696
	struct l2addr_node *node;
	struct vport_addr *addr;
697
698
699
700
701
702
703
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

704
705
	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);
706
707
708
709
710

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

711
	hash = is_uc ? vport->uc_list : vport->mc_list;
712
713

	for_each_l2hash_node(node, tmp, hash, hi) {
714
		addr = container_of(node, struct vport_addr, node);
715
716
717
		addr->action = MLX5_ACTION_DEL;
	}

718
719
720
	if (!vport->enabled)
		goto out;

721
	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
722
723
					    mac_list, &size);
	if (err)
724
		goto out;
725
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
726
		  vport->vport, is_uc ? "UC" : "MC", size);
727
728

	for (i = 0; i < size; i++) {
729
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
730
731
			continue;

732
733
734
735
		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
736
737
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, its now converted to be original
			 * vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
756
757
758
			continue;
		}

759
		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
760
761
762
763
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
764
				 mac_list[i], vport->vport);
765
766
			continue;
		}
767
		addr->vport = vport->vport;
768
769
		addr->action = MLX5_ACTION_ADD;
	}
770
out:
771
772
773
	kfree(mac_list);
}

774
775
776
/* Sync vport UC/MC list from vport context
 * Must be called after esw_update_vport_addr_list
 */
777
778
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
802
				 mac, vport->vport);
803
804
			continue;
		}
805
		addr->vport = vport->vport;
806
807
808
809
810
811
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
812
813
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
814
815
				    bool promisc, bool mc_promisc)
{
816
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
817
818
819
820
821
822

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
823
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
824
825
826
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
827
								MLX5_VPORT_UPLINK);
828
829
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
Mark Bloch's avatar
Mark Bloch committed
830
		mlx5_del_flow_rules(vport->allmulti_rule);
831
832
833
834
835
836
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
Mark Bloch's avatar
Mark Bloch committed
837
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
838
839
840
841
842
843
844
845
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
846
847
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
848
	} else if (vport->promisc_rule) {
Mark Bloch's avatar
Mark Bloch committed
849
		mlx5_del_flow_rules(vport->promisc_rule);
850
851
852
853
854
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
855
856
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
857
858
859
860
861
862
863
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
864
					   vport->vport,
865
866
867
868
869
870
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
871
		  vport->vport, promisc_all, promisc_mc);
872

873
	if (!vport->info.trusted || !vport->enabled) {
874
875
876
877
878
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

879
	esw_apply_vport_rx_mode(esw, vport, promisc_all,
880
881
882
				(promisc_all || promisc_mc));
}

883
static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
884
885
{
	struct mlx5_core_dev *dev = vport->dev;
886
	struct mlx5_eswitch *esw = dev->priv.eswitch;
887
888
	u8 mac[ETH_ALEN];

889
	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
890
891
892
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

893
	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
894
895
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
896
	}
897

898
	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
899
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
900

901
	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
902
		esw_update_vport_rx_mode(esw, vport);
903
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
904
			esw_update_vport_mc_promisc(esw, vport);
905
906
	}

907
	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
908
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
909

910
	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
911
912
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
913
					     vport->enabled_events);
914
915
}

916
917
918
919
920
921
922
923
924
925
926
/* Workqueue handler for vport context change events: takes the eswitch
 * state lock and delegates to esw_vport_change_handle_locked().
 */
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

927
928
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport)
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress acl table contains 2 rules:
	 * 1)Allow traffic with vlan_tag=vst_vlan_id
	 * 2)Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

945
946
947
948
949
	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->egress.acl))
		return 0;
950
951
952
953

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

954
	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
955
			mlx5_eswitch_vport_num_to_index(esw, vport->vport));
956
	if (!root_ns) {
957
		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
958
		return -EOPNOTSUPP;
959
960
	}

961
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
962
	if (!flow_group_in)
963
		return -ENOMEM;
964
965

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
966
	if (IS_ERR(acl)) {
967
968
969
970
971
972
973
974
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
975
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
976
977
978
979
980
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
981
	if (IS_ERR(vlan_grp)) {
982
983
984
985
986
987
988
989
990
991
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
992
	if (IS_ERR(drop_grp)) {
993
994
995
996
997
998
999
1000
1001
1002
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
1003
	kvfree(flow_group_in);
1004
1005
1006
1007
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
1008
	return err;
1009
1010
}

1011
1012
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
1013
1014
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
Mark Bloch's avatar
Mark Bloch committed
1015
		mlx5_del_flow_rules(vport->egress.allowed_vlan);
1016
1017

	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
Mark Bloch's avatar
Mark Bloch committed
1018
		mlx5_del_flow_rules(vport->egress.drop_rule);
1019
1020
1021
1022
1023

	vport->egress.allowed_vlan = NULL;
	vport->egress.drop_rule = NULL;
}

1024
1025
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
1026
1027
1028
1029
1030
1031
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

1032
	esw_vport_cleanup_egress_rules(esw, vport);
Mohamad Haj Yahia's avatar