// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (5*HZ)

const char gfar_driver_version[] = "2.0";

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

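/* Initialize one Rx buffer descriptor: point it at the given DMA buffer
 * and mark it empty, setting the WRAP flag on the last BD of the ring.
 * The write barrier ensures the buffer pointer lands before the
 * descriptor is handed back to hardware.
 */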
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

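/* Program the TBASEn/RBASEn registers with each queue's BD ring DMA
 * address; consecutive base registers are two u32s apart, hence the
 * pointer increment of 2.
 */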
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

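/* Load each Rx queue's ring size and lossless flow control threshold
 * into its RQPRMn register.
 */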
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

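/* Decide whether received frames will carry a Rx frame control block
 * (FCB), which is needed whenever any Rx hw offload is in use.
 */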
static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

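/* Build and program RCTRL from the currently enabled Rx features */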
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

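/* Build and program TCTRL: Tx checksum offload, priority vs. weighted
 * round-robin queue scheduling, and VLAN tag insertion.
 */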
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en) {
		tctrl |= TCTRL_TXSCHED_PRIO;
	} else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

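/* Program the interrupt coalescing registers for the queues selected by
 * tx_mask/rx_mask; non-MQ controllers only have the single txic/rxic
 * register pair.
 */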
static void gfar_configure_coalescing(struct gfar_private *priv,
			       unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (likely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

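/* Aggregate the software per-queue counters into the netdev stats */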
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index determine which gaddr register to use, and the 5
 * other bits indicate which bit (assuming an IBM numbering
 * scheme, which for PowerPC (tm) is usually the case) in the
 * register holds the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8)  |  addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}

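/* ndo_set_mac_address callback: update dev_addr and mirror it into the
 * first MAC station address register pair.
 */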
static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	eth_mac_addr(dev, p);

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

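/* Ack any pending events and mask all interrupts on every group */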
static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

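/* Allocate and initialize the per-queue control structures (the BD
 * rings themselves are allocated separately).
 */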
static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

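/* Map one queue group's registers, parse its interrupts and Rx/Tx queue
 * bit maps from the device tree, and bind the selected queues to it.
 */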
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

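/* Count the available "queue-group" children of the controller node */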
static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (of_node_name_eq(child, "queue-group"))
			num++;

	return num;
}

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

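/* Allocate the net_device and parse the controller's device tree node:
 * operating mode, queue counts, per-group registers and interrupts, MAC
 * address, stashing and capability flags, and the PHY connection.
 *
 * Illustrative (not authoritative) etsec2 node with two queue groups;
 * the register offsets and interrupt specifiers below are placeholders:
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		queue-group@24000 {
 *			reg = <0x24000 0x1000>;
 *			interrupts = <29 2 30 2 34 2>;
 *		};
 *		queue-group@25000 {
 *			reg = <0x25000 0x1000>;
 *			interrupts = <35 2 36 2 40 2>;
 *		};
 *	};
 */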
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (!of_node_name_eq(child, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER |
				     FSL_GIANFAR_DEV_HAS_RX_FILER;

	/* Use PHY connection type from the DT node if one is specified there.
	 * rgmii-id really needs to be specified. Other types can be
	 * detected by hardware
	 */
	err = of_get_phy_mode(np);
	if (err >= 0)
		priv->interface = err;
	else
		priv->interface = gfar_get_interface(dev);

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

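/* Write the four filer entries that cluster one protocol class (e.g.
 * RQFPR_IPV4 | RQFPR_TCP), filling the table downwards from rqfar;
 * returns the next free (lower) index.
 */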
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

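/* Populate the Rx filer with a default rule set: a match-all default
 * rule, one cluster per IPv4/IPv6 x TCP/UDP class, and no-match entries
 * for the remaining slots.
 */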
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
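/* Errata detection for e300-based (83xx) parts, keyed off PVR/SVR */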
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

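/* Record which hardware errata apply to this SoC revision so that the
 * corresponding workarounds can be applied at runtime.
 */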
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

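/* Set up the multicast hash register map: 16 registers (a 512-bit
 * table) when extended hash is supported, 8 registers (256 bits)
 * otherwise.
 */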
static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;