// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)

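/* Retry an skb that was parked on skb_bad_txq: dequeue it only once its
 * transmit queue is no longer frozen or stopped, otherwise report
 * SKB_XOFF_MAGIC so the caller backs off.
 */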
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = SKB_XOFF_MAGIC;
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

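/* Fast path: only fall back to __skb_dequeue_bad_txq() and its locking
 * when the bad_txq list is seen non-empty.
 */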
static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

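/* Park an skb whose transmit queue is frozen or stopped; it stays
 * accounted in qlen/backlog until __skb_dequeue_bad_txq() retries it.
 */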
static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;
	}

	if (lock)
		spin_unlock(lock);
}

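/* Requeue a (possibly chained) skb at the head of the qdisc via gso_skb,
 * keep it accounted in qlen/backlog, and reschedule the qdisc.
 */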
static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		/* it's still part of the queue */
		if (qdisc_is_percpu_stats(q)) {
			qdisc_qstats_cpu_requeues_inc(q);
			qdisc_qstats_cpu_backlog_inc(q, skb);
			qdisc_qstats_cpu_qlen_inc(q);
		} else {
			q->qstats.requeues++;
			qdisc_qstats_backlog_inc(q, skb);
			q->q.qlen++;
		}

		skb = next;
	}
	if (lock)
		spin_unlock(lock);
	__netif_schedule(q);
}

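/* Chain additional packets behind @skb while the BQL byte budget of @txq
 * allows it; a GSO packet counts as a single packet.
 */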
static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb_mark_not_on_list(skb);
}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb_mark_not_on_list(skb);
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be null if another cpu pulls gso_skb off in between
		 * empty check and lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skb in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq))
		return skb;

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb)) {
		if (skb == SKB_XOFF_MAGIC)
			return NULL;
		goto bulk;
	}
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning running seqcount bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *				false  - hardware queue frozen backoff
 *				true   - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	return true;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * running seqcount guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 *  netif_tx_lock serializes accesses to device driver.
 *
 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 *  if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 *
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
	spinlock_t *root_lock = NULL;
	struct netdev_queue *txq;
	struct net_device *dev;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return false;

	if (!(q->flags & TCQ_F_NOLOCK))
		root_lock = qdisc_lock(q);

	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

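/* Transmit packets via qdisc_restart() until the queue is empty or the
 * dev_tx_weight quota is spent (or the CPU is needed elsewhere), in which
 * case the remainder is deferred via __netif_schedule().
 */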
void __qdisc_run(struct Qdisc *q)
{
	int quota = dev_tx_weight;
	int packets;

	while (qdisc_restart(q, &packets)) {
		/*
		 * Ordered by possible occurrence: Postpone processing if
		 * 1. we've exceeded packet quota
		 * 2. another process needs the CPU;
		 */
		quota -= packets;
		if (quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}
}

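/* Return the most recent trans_start across all tx queues of @dev,
 * resolving VLAN and macvlan devices to their underlying real device.
 */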
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	else if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);
	res = netdev_get_tx_queue(dev, 0)->trans_start;
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

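/* Per-device watchdog timer: if any stopped tx queue has not made progress
 * within watchdog_timeo jiffies, warn and invoke the driver's
 * ndo_tx_timeout() handler, and re-arm the timer while the device stays up.
 */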
static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				trace_net_dev_xmit_timeout(dev, i);
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

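/* Arm the transmit watchdog timer if the driver implements ndo_tx_timeout,
 * defaulting watchdog_timeo to 5 seconds when unset.
 */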
void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_up_count);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_down_count);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.running	=	SEQCNT_ZERO(noop_qdisc.running),
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
	.gso_skb = {
		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
	},
	.skb_bad_txq = {
		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
	},
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here. */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{
	return &priv->q[band];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err)) {
		if (qdisc_is_percpu_stats(qdisc))
			return qdisc_drop_cpu(skb, qdisc, to_free);
		else
			return qdisc_drop(skb, qdisc, to_free);
	}

	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		if (__skb_array_empty(q))
			continue;

		skb = __skb_array_consume(q);
	}
	if (likely(skb)) {
		qdisc_update_stats_at_dequeue(qdisc, skb);
	} else {
		qdisc->empty = true;
	}

	return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		skb = __skb_array_peek(q);
	}

	return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int i, band;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
		struct skb_array *q = band2list(priv, band);
		struct sk_buff *skb;

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;

		while ((skb = __skb_array_consume(q)) != NULL)
			kfree_skb(skb);
	}

	if (qdisc_is_percpu_stats(qdisc)) {
		for_each_possible_cpu(i) {
			struct gnet_stats_queue *q;

			q = per_cpu_ptr(qdisc->cpu_qstats, i);
			q->backlog = 0;
			q->qlen = 0;
		}
	}
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int prio;

	/* guard against zero length rings */
	if (!qlen)
		return -EINVAL;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);
		int err;

		err = skb_array_init(q, qlen, GFP_KERNEL);
		if (err)
			return -ENOMEM;
	}

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;
		/* Destroy ring but no need to kfree_skb because a call to
		 * pfifo_fast_reset() has already done that work.
		 */
		ptr_ring_cleanup(&q->ring, NULL);
	}
}

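/* Resize all three band rings when the device tx_queue_len changes. */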
static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	struct skb_array *bands[PFIFO_FAST_BANDS];
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		bands[prio] = q;
	}

	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
					 GFP_KERNEL);
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.destroy	=	pfifo_fast_destroy,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
	.owner		=	THIS_MODULE,
	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);

static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;

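/* Allocate and initialise a Qdisc instance for @dev_queue: size it for the
 * ops' private data, set up its queues, stats and locks, and take a
 * reference on the underlying device.
 */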
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non aligned memory, ask more and do alignment ourself */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	qdisc_skb_head_init(&sch->q);
	spin_lock_init(&sch->q.lock);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
	spin_lock_init(&sch->seqlock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	seqcount_init(&sch->running);
	lockdep_set_class(&sch->running,
			  dev->qdisc_running_key ?: &qdisc_running_key);

	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	sch->empty = true;
	dev_hold(dev);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout1:
	kfree(p);
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner)) {
		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
		return NULL;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		module_put(ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL, extack) == 0)
		return sch;

	qdisc_put(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	if (ops->reset)
		ops->reset(qdisc);

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

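/* Release per-cpu statistics and the (possibly padded) Qdisc allocation. */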
void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree((char *) qdisc - qdisc->padded);
}

static void qdisc_free_cb(struct rcu_head *head)
{
	struct Qdisc *q = container_of(head, struct Qdisc, rcu);

	qdisc_free(q);
}

static void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops  *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

#ifdef CONFIG_NET_SCHED