// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

8
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
#include <linux/clk.h>
10
#include <linux/clk-provider.h>
11
#include <linux/crc32.h>
12
13
14
15
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
16
#include <linux/circ_buf.h>
17
18
#include <linux/slab.h>
#include <linux/init.h>
19
#include <linux/io.h>
20
#include <linux/gpio.h>
21
#include <linux/gpio/consumer.h>
22
#include <linux/interrupt.h>
23
24
25
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
26
#include <linux/platform_data/macb.h>
27
#include <linux/platform_device.h>
frederic RODO's avatar
frederic RODO committed
28
#include <linux/phy.h>
29
#include <linux/of.h>
30
#include <linux/of_device.h>
31
#include <linux/of_gpio.h>
32
#include <linux/of_mdio.h>
33
#include <linux/of_net.h>
Rafal Ozieblo's avatar
Rafal Ozieblo committed
34
35
36
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
37
#include <linux/iopoll.h>
38
#include <linux/pm_runtime.h>
39
40
#include "macb.h"

41
42
43
44
45
46
47
/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;	/* mapped management register base */
	unsigned long rate;	/* cached clock rate in Hz */
	struct clk_hw hw;	/* common clock framework handle */
};

48
49
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64  /* bytes */
50

51
#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
52
53
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
54
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
55
				 * (bp)->rx_ring_size)
56

57
#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
58
59
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
60
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
61
				 * (bp)->tx_ring_size)
62

63
/* level of occupied TX descriptors under which we wake up TX process */
64
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
65

66
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
67
68
69
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
70
71
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))
72

Rafal Ozieblo's avatar
Rafal Ozieblo committed
73
74
75
76
/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
77

78
#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
79
#define MACB_NETIF_LSO		NETIF_F_TSO
80

81
82
83
#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

84
/* Graceful stop timeouts in us. We should allow up to
85
86
87
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
88

89
90
#define MACB_PM_TIMEOUT  100 /* ms */

91
92
#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

93
/* DMA buffer descriptor might be different size
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
 * depends on hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
119
120
121
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
143
144
145
146
#endif
	return sizeof(struct macb_dma_desc);
}

147
/* Translate a logical ring index into the hardware descriptor index,
 * accounting for the extra slots occupied by 64-bit and/or PTP
 * descriptor extensions.
 */
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		/* one extension block per descriptor */
		desc_idx *= 2;
		break;
	case HW_DMA_CAP_64B_PTP:
		/* two extension blocks per descriptor */
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* The 64-bit address extension words sit directly behind the base
 * descriptor in memory.
 */
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	void *ext = (void *)desc + sizeof(struct macb_dma_desc);

	return (struct macb_dma_desc_64 *)ext;
}
#endif

173
/* Ring buffer accessors */
174
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
175
{
176
	return index & (bp->tx_ring_size - 1);
177
178
}

179
180
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
181
{
182
183
184
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
185
186
}

187
188
static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
189
{
190
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
191
192
}

193
static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
194
195
196
{
	dma_addr_t offset;

197
	offset = macb_tx_ring_wrap(queue->bp, index) *
198
			macb_dma_desc_get_size(queue->bp);
199

200
	return queue->tx_ring_dma + offset;
201
202
}

203
static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
204
{
205
	return index & (bp->rx_ring_size - 1);
206
207
}

208
static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
209
{
210
211
212
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
213
214
}

215
static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
216
{
217
218
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
219
220
}

221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
/* I/O accessors */

/* Register read without byte swapping, for CPUs whose endianness matches
 * the controller's (see hw_is_native_io()).
 */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

/* Register write without byte swapping (native-endian counterpart). */
static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

/* Default (little-endian, relaxed) register read. */
static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

/* Default (little-endian, relaxed) register write. */
static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

242
/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 probe = MACB_BIT(LLB);
	bool native;

	/* If the probe value written via the non-swapping accessor reads
	 * back intact, no byte swapping is required.
	 */
	__raw_writel(probe, addr + MACB_NCR);
	native = (__raw_readl(addr + MACB_NCR) == probe);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return native;
}

/* A module ID of 0x2 or higher in MACB_MID identifies a GEM core. */
static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id = native_io ? __raw_readl(addr + MACB_MID)
			   : readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

271
static void macb_set_hwaddr(struct macb *bp)
272
273
274
275
276
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
277
	macb_or_gem_writel(bp, SA1B, bottom);
278
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
279
	macb_or_gem_writel(bp, SA1T, top);
280
281
282
283
284
285
286
287

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
288
289
}

290
static void macb_get_hwaddr(struct macb *bp)
291
292
293
294
{
	u32 bottom;
	u16 top;
	u8 addr[6];
295
296
	int i;

297
	/* Check all 4 address register for valid address */
298
299
300
301
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

302
303
304
305
306
307
		addr[0] = bottom & 0xff;
		addr[1] = (bottom >> 8) & 0xff;
		addr[2] = (bottom >> 16) & 0xff;
		addr[3] = (bottom >> 24) & 0xff;
		addr[4] = top & 0xff;
		addr[5] = (top >> 8) & 0xff;
308
309
310
311
312

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
313
	}
314

315
	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
316
	eth_hw_addr_random(bp->dev);
317
318
}

319
320
321
322
323
324
325
326
static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}

frederic RODO's avatar
frederic RODO committed
327
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
328
{
frederic RODO's avatar
frederic RODO committed
329
	struct macb *bp = bus->priv;
330
	int status;
331

332
333
334
335
336
337
338
	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;
339
340
341

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
frederic RODO's avatar
frederic RODO committed
342
343
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
344
345
			      | MACB_BF(CODE, MACB_MAN_CODE)));

346
347
348
	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;
349

350
	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
351

352
353
354
355
356
mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
357
358
}

frederic RODO's avatar
frederic RODO committed
359
360
/* Write one PHY register over the MDIO management interface.
 *
 * Returns 0 on success, or a negative errno.
 */
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		/* pm_runtime_get_sync() increments the usage count even on
		 * failure; drop it here so the refcount stays balanced.
		 */
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}
390

391
392
393
394
395
396
397
398
399
400
/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk		Pointer to the clock to change
 * @rate	New frequency in Hz
 * @dev		Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

401
402
403
	if (!clk)
		return;

404
405
406
407
408
409
410
411
412
413
414
	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
Soren Brinkmann's avatar
Soren Brinkmann committed
415
		return;
416
417
418
419
420
421
422
423
424
425
426
427
428
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
429
			    rate);
430
431
432
433
434

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

frederic RODO's avatar
frederic RODO committed
435
static void macb_handle_link_change(struct net_device *dev)
436
{
frederic RODO's avatar
frederic RODO committed
437
	struct macb *bp = netdev_priv(dev);
438
	struct phy_device *phydev = dev->phydev;
frederic RODO's avatar
frederic RODO committed
439
440
	unsigned long flags;
	int status_change = 0;
441

frederic RODO's avatar
frederic RODO committed
442
443
444
445
446
447
448
449
450
	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
451
452
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);
frederic RODO's avatar
frederic RODO committed
453
454
455

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
Atsushi Nemoto's avatar
Atsushi Nemoto committed
456
			if (phydev->speed == SPEED_100)
frederic RODO's avatar
frederic RODO committed
457
				reg |= MACB_BIT(SPD);
458
459
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
460
				reg |= GEM_BIT(GBE);
frederic RODO's avatar
frederic RODO committed
461

462
			macb_or_gem_writel(bp, NCFGR, reg);
frederic RODO's avatar
frederic RODO committed
463
464
465
466
467

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
468
469
	}

frederic RODO's avatar
frederic RODO committed
470
	if (phydev->link != bp->link) {
471
		if (!phydev->link) {
frederic RODO's avatar
frederic RODO committed
472
473
474
475
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;
476

frederic RODO's avatar
frederic RODO committed
477
478
		status_change = 1;
	}
479

frederic RODO's avatar
frederic RODO committed
480
481
482
	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
483
		if (phydev->link) {
484
485
486
487
488
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

489
			netif_carrier_on(dev);
490
491
492
493
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
494
495
		} else {
			netif_carrier_off(dev);
496
			netdev_info(dev, "link down\n");
497
		}
frederic RODO's avatar
frederic RODO committed
498
	}
499
500
}

frederic RODO's avatar
frederic RODO committed
501
502
/* based on au1000_eth. c*/
static int macb_mii_probe(struct net_device *dev)
503
{
frederic RODO's avatar
frederic RODO committed
504
	struct macb *bp = netdev_priv(dev);
505
	struct phy_device *phydev;
506
	struct device_node *np;
507
	int ret, i;
508
509
510
511
512
513
514
515

	np = bp->pdev->dev.of_node;
	ret = 0;

	if (np) {
		if (of_phy_is_fixed_link(np)) {
			bp->phy_node = of_node_get(np);
		} else {
516
517
518
519
			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
			/* fallback to standard phy registration if no
			 * phy-handle was found nor any phy found during
			 * dt phy registration
520
			 */
521
			if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
522
523
524
525
526
527
528
529
530
531
532
533
534
535
				for (i = 0; i < PHY_MAX_ADDR; i++) {
					phydev = mdiobus_scan(bp->mii_bus, i);
					if (IS_ERR(phydev) &&
					    PTR_ERR(phydev) != -ENODEV) {
						ret = PTR_ERR(phydev);
						break;
					}
				}

				if (ret)
					return -ENODEV;
			}
		}
	}
frederic RODO's avatar
frederic RODO committed
536

537
538
539
540
541
542
543
544
545
546
547
548
	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}
frederic RODO's avatar
frederic RODO committed
549

550
551
552
553
554
555
556
		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
frederic RODO's avatar
frederic RODO committed
557
558
559
	}

	/* mask with MAC supported features */
560
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
561
		phy_set_max_speed(phydev, SPEED_1000);
562
	else
563
		phy_set_max_speed(phydev, SPEED_100);
frederic RODO's avatar
frederic RODO committed
564

565
	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
566
567
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
frederic RODO's avatar
frederic RODO committed
568
569
570
571
572
573

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
574
575
}

576
static int macb_mii_init(struct macb *bp)
577
{
578
	struct device_node *np;
579
	int err = -ENXIO;
580

581
	/* Enable management port */
frederic RODO's avatar
frederic RODO committed
582
	macb_writel(bp, NCR, MACB_BIT(MPE));
583

584
	bp->mii_bus = mdiobus_alloc();
585
	if (!bp->mii_bus) {
586
587
588
589
590
591
592
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
593
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
594
		 bp->pdev->name, bp->pdev->id);
595
	bp->mii_bus->priv = bp;
596
	bp->mii_bus->parent = &bp->pdev->dev;
597

598
	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
599

600
	np = bp->pdev->dev.of_node;
601
602
603
604
605
606
607
608
609
610
611
	if (np && of_phy_is_fixed_link(np)) {
		if (of_phy_register_fixed_link(np) < 0) {
			dev_err(&bp->pdev->dev,
				"broken fixed-link specification %pOF\n", np);
			goto err_out_free_mdiobus;
		}

		err = mdiobus_register(bp->mii_bus);
	} else {
		err = of_mdiobus_register(bp->mii_bus, np);
	}
612

613
	if (err)
614
		goto err_out_free_fixed_link;
615

616
617
	err = macb_mii_probe(bp->dev);
	if (err)
frederic RODO's avatar
frederic RODO committed
618
		goto err_out_unregister_bus;
619

frederic RODO's avatar
frederic RODO committed
620
	return 0;
621

frederic RODO's avatar
frederic RODO committed
622
err_out_unregister_bus:
623
	mdiobus_unregister(bp->mii_bus);
624
err_out_free_fixed_link:
625
626
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
627
628
err_out_free_mdiobus:
	of_node_put(bp->phy_node);
629
	mdiobus_free(bp->mii_bus);
frederic RODO's avatar
frederic RODO committed
630
631
err_out:
	return err;
632
633
634
635
}

static void macb_update_stats(struct macb *bp)
{
636
637
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
638
	int offset = MACB_PFR;
639
640
641

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

642
	for (; p < end; p++, offset += 4)
643
		*p += bp->macb_reg_readl(bp, offset);
644
645
}

646
static int macb_halt_tx(struct macb *bp)
647
{
648
649
	unsigned long	halt_time, timeout;
	u32		status;
650

651
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
652

653
654
655
656
657
658
	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;
659

660
		udelay(250);
661
	} while (time_before(halt_time, timeout));
662

663
664
	return -ETIMEDOUT;
}
665

666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
/* Release the DMA mapping and, for the frame's last buffer, the skb held
 * by one TX bookkeeping slot.
 */
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		/* Page fragments and linear data were mapped differently */
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

684
/* Store a buffer bus address into @desc, splitting it across the base
 * descriptor and the 64-bit extension when the hardware uses one.
 */
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		struct macb_dma_desc_64 *desc_64 = macb_64b_desc(bp, desc);

		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

/* Reassemble the buffer bus address stored in @desc, masking out the
 * control bits kept in the low word.
 */
static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		struct macb_dma_desc_64 *desc_64 = macb_64b_desc(bp, desc);

		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}

717
718
static void macb_tx_error_task(struct work_struct *work)
{
719
720
721
	struct macb_queue	*queue = container_of(work, struct macb_queue,
						      tx_error_task);
	struct macb		*bp = queue->bp;
722
	struct macb_tx_skb	*tx_skb;
723
	struct macb_dma_desc	*desc;
724
725
	struct sk_buff		*skb;
	unsigned int		tail;
726
727
728
729
730
	unsigned long		flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);
731

732
733
734
735
736
737
738
	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);
739

740
	/* Make sure nobody is trying to queue up new packets */
741
	netif_tx_stop_all_queues(bp->dev);
742

743
	/* Stop transmission now
744
	 * (in case we have just queued new packets)
745
	 * macb/gem must be halted to write TBQP register
746
747
748
749
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");
750

751
	/* Treat frames in TX queue including the ones that caused the error.
752
753
	 * Free transmit buffers in upper layer.
	 */
754
755
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32	ctrl;
756

757
		desc = macb_tx_desc(queue, tail);
758
		ctrl = desc->ctrl;
759
		tx_skb = macb_tx_skb(queue, tail);
760
		skb = tx_skb->skb;
761

762
		if (ctrl & MACB_BIT(TX_USED)) {
763
764
765
766
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
767
				tx_skb = macb_tx_skb(queue, tail);
768
769
770
771
772
773
774
775
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
776
777
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
778
				bp->dev->stats.tx_packets++;
779
				queue->stats.tx_packets++;
780
				bp->dev->stats.tx_bytes += skb->len;
781
				queue->stats.tx_bytes += skb->len;
782
			}
783
		} else {
784
785
786
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
787
788
789
790
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");
791

792
793
794
			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

795
		macb_tx_unmap(bp, tx_skb);
796
797
	}

798
799
	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
800
	macb_set_addr(bp, desc, 0);
801
802
	desc->ctrl = MACB_BIT(TX_USED);

803
804
805
806
	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
807
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
808
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
809
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
810
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
811
#endif
812
	/* Make TX ring reflect state of hardware */
813
814
	queue->tx_head = 0;
	queue->tx_tail = 0;
815
816
817

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
818
819
820
821
822
823
824
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
825
826
}

827
static void macb_tx_interrupt(struct macb_queue *queue)
828
829
830
831
{
	unsigned int tail;
	unsigned int head;
	u32 status;
832
833
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;
834
835
836
837

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

838
	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
839
		queue_writel(queue, ISR, MACB_BIT(TCOMP));
840

841
	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
842
		    (unsigned long)status);
843

844
845
	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
846
847
848
849
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;
850

851
		desc = macb_tx_desc(queue, tail);
852

853
		/* Make hw descriptor updates visible to CPU */
854
		rmb();
855

856
		ctrl = desc->ctrl;
857

858
859
860
		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
861
		if (!(ctrl & MACB_BIT(TX_USED)))
862
863
			break;

864
865
		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
866
			tx_skb = macb_tx_skb(queue, tail);
867
868
869
870
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
871
872
873
				if (unlikely(skb_shinfo(skb)->tx_flags &
					     SKBTX_HW_TSTAMP) &&
				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
874
875
876
877
878
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
879
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
880
881
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
882
				bp->dev->stats.tx_packets++;
883
				queue->stats.tx_packets++;
884
				bp->dev->stats.tx_bytes += skb->len;
885
				queue->stats.tx_bytes += skb->len;
886
			}
887

888
889
890
891
892
893
894
895
896
897
			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
898
899
	}

900
901
902
	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
903
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
904
		netif_wake_subqueue(bp->dev, queue_index);
905
906
}

907
static void gem_rx_refill(struct macb_queue *queue)
908
909
910
911
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;
912
	struct macb *bp = queue->bp;
913
	struct macb_dma_desc *desc;
914

915
916
917
	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
918
919
920
921

		/* Make hw descriptor updates visible to CPU */
		rmb();

922
923
		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);
924

925
		if (!queue->rx_skbuff[entry]) {
926
927
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
928
			if (unlikely(!skb)) {
929
930
931
932
933
934
935
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
936
937
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
938
939
940
941
942
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

943
			queue->rx_skbuff[entry] = skb;
944

945
			if (entry == bp->rx_ring_size - 1)
946
				paddr |= MACB_BIT(RX_WRAP);
947
			desc->ctrl = 0;
948
949
950
951
952
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);
953
954
955

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
956
		} else {
957
			desc->ctrl = 0;
958
959
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
960
961
962
963
964
965
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

966
967
	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
			queue, queue->rx_prepared_head, queue->rx_tail);
968
969
970
}

/* Mark DMA descriptors from begin up to and not including end as unused */
971
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
972
973
974
975
976
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
977
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
978

979
980
981
982
983
984
		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

985
	/* When this happens, the hardware stats registers for
986
987
988
989
990
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}

Antoine Tenart's avatar
Antoine Tenart committed
991
992
static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
		  int budget)
993
{
994
	struct macb *bp = queue->bp;
995
996
997
998
999
1000
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;