// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				  u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
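
/* For reference, BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) above
 * expands to roughly the following pair of accessors:
 *
 *   static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
 *   {
 *           return readl_relaxed(priv->base + SYS_PORT_UMAC_OFFSET + off);
 *   }
 *   static inline void umac_writel(struct bcm_sysport_priv *priv,
 *                                  u32 val, u32 off)
 *   {
 *           writel_relaxed(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
 *   }
 */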

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}
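
/* Example: on a full SYSTEMPORT, tdma_control_bit(priv, n) is simply BIT(n);
 * on SYSTEMPORT Lite the TDMA_CONTROL bits from ACB_ALGO upwards are shifted
 * by one position, so a bit position n >= ACB_ALGO maps to BIT(n + 1) instead.
 */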

/* L2-interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
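
/* The two instantiations above generate intrl2_0_mask_clear()/
 * intrl2_0_mask_set() and intrl2_1_mask_clear()/intrl2_1_mask_set().  A call
 * such as intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE) both unmasks the
 * interrupt in hardware (INTRL2_CPU_MASK_CLEAR) and clears the corresponding
 * bit in the priv->irq0_mask software copy, so hot paths can consult the
 * cached mask instead of reading CPU_MASK_STATUS.
 */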

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the 64-bit check explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
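
/* Example: with CONFIG_PHYS_ADDR_T_64BIT and addr = 0x123456789, the upper
 * word 0x1 (masked with DESC_ADDR_HI_MASK) lands in DESC_ADDR_HI_STATUS_LEN
 * and the lower word 0x23456789 in DESC_ADDR_LO; on a 32-bit platform only
 * the DESC_ADDR_LO write is emitted.
 */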

/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
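
/* The MIB readback in bcm_sysport_update_mib_counters() below relies on this
 * ordering: it accumulates each entry's stat_sizeof into a running byte
 * offset and reads the UniMAC counters at UMAC_MIB_START + offset (plus
 * UMAC_MIB_STAT_OFFSET for the TX and RUNT groups), which only works if the
 * declaration order above matches the hardware register layout.
 */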

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite, since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per-TX-queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}
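
/* Worked example of the conversion above: with a DMA tick of roughly 8.192 us
 * (125 MHz reference clock divided by 1024), requesting usecs = 100 programs
 * DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks, i.e. an effective timeout of
 * about 106.5 us, while the packet threshold is written as-is.
 */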

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			    RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125MHz, the DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us, and our maximum
	 * value has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
	    ec->use_adaptive_tx_coalesce)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}
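
/* Note on adaptive RX coalescing: when it is first enabled here, the initial
 * usecs/pkts pair comes from net_dim_get_def_rx_moderation() for the current
 * DIM mode; afterwards the DIM algorithm is expected to re-tune the RDMA
 * coalescing parameters at runtime based on the packets/bytes counts that
 * bcm_sysport_desc_rx() records in priv->dim.
 */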

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
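
	/* The masked subtraction handles index wrap-around, e.g. assuming a
	 * 16-bit index mask, p_index = 0x0005 and rx_c_index = 0xfffa yield
	 * to_process = (0x0005 - 0xfffa) & 0xffff = 11 packets.
	 */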

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);


		/* We do not have a backing SKB, so we do not have a corresponding
		 * DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* The hardware prepends 2 bytes of padding before the Ethernet
		 * header, plus we have the Receive Status Block; strip all of
		 * this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
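
		/* The buffer layout up to this point was:
		 *   [struct bcm_rsb][2 bytes of pad][Ethernet frame ...]
		 * so after the skb_pull() above, skb->data points at the
		 * Ethernet header and 'len' covers the frame only (the FCS,
		 * if forwarded by UniMAC, is trimmed just below).
		 */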
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}
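
/* Per the NAPI contract, returning 0 after napi_complete() above tells the
 * core that TX reclaim is finished and hands the ring back to interrupt-driven
 * operation (its MBDONE interrupt is unmasked again); returning the full
 * budget keeps the ring in polled mode with the interrupt still masked.
 */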

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;