/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

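/* A vif is schedulable when the device is administratively up and the
 * carrier is on, i.e. the frontend is connected.
 */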
int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

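/* TX event channel handler: the frontend has queued transmit requests,
 * so schedule NAPI to process them in xenvif_poll().
 */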
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
		napi_schedule(&vif->napi);

	return IRQ_HANDLED;
}

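/* NAPI poll handler: process up to @budget transmit requests from the
 * frontend and complete NAPI once the ring is drained.
 */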
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif *vif = container_of(napi, struct xenvif, napi);
	int work_done;

	work_done = xenvif_tx_action(vif, budget);

	if (work_done < budget) {
		int more_to_do = 0;
		unsigned long flags;

		/* It is necessary to disable IRQs before calling
		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
		 * lose an event from the frontend.
		 *
		 * Consider:
		 *   RING_HAS_UNCONSUMED_REQUESTS
		 *   <frontend generates event to trigger napi_schedule>
		 *   __napi_complete
		 *
		 * This handler is still in scheduled state, so the
		 * event has no effect at all. After __napi_complete
		 * this handler is descheduled and cannot get
		 * scheduled again. We lose the event in this case and
		 * the ring will be completely stalled.
		 */

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
		if (!(more_to_do &&
		      xenvif_tx_pending_slots_available(vif)))
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	return work_done;
}

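/* RX event channel handler: kick the kthread that ships packets to the
 * frontend's receive ring.
 */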
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	xenvif_kick_thread(vif);

	return IRQ_HANDLED;
}

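/* Combined handler used when the frontend does not support split event
 * channels: a single event channel serves both TX and RX.
 */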
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

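/* Timer callback armed in xenvif_start_xmit(): if the queue is still
 * stopped when the drain timeout expires, purge the internal RX queue
 * and restart the netdev queue to avoid stalling forever.
 */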
static void xenvif_wake_queue(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;

	if (netif_queue_stopped(vif->dev)) {
		netdev_err(vif->dev, "draining TX queue\n");
		vif->rx_queue_purge = true;
		xenvif_kick_thread(vif);
		netif_wake_queue(vif->dev);
	}
}

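/* Transmit path (backend TX is the frontend's RX): queue the skb for
 * the guest-RX kthread, stopping the queue first if the shared ring is
 * unlikely to have enough free slots.
 */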
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	int min_slots_needed;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if vif is not ready */
	if (vif->task == NULL ||
	    vif->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	/* At best we'll need one slot for the header and one for each
	 * frag.
	 */
	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

	/* If the skb is GSO then we'll also need an extra slot for the
	 * metadata.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
	    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		min_slots_needed++;

	/* If the skb can't possibly fit in the remaining slots
	 * then turn off the queue to give the ring a chance to
	 * drain.
	 */
	if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
		vif->wake_queue.function = xenvif_wake_queue;
		vif->wake_queue.data = (unsigned long)vif;
		xenvif_stop_queue(vif);
		mod_timer(&vif->wake_queue,
			jiffies + rx_drain_timeout_jiffies);
	}

	skb_queue_tail(&vif->rx_queue, skb);
	xenvif_kick_thread(vif);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	return &vif->dev->stats;
}

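/* Enable NAPI and the event channel IRQs when the interface is brought
 * up; xenvif_down() reverses this and also stops the credit timer.
 */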
static void xenvif_up(struct xenvif *vif)
{
	napi_enable(&vif->napi);
	enable_irq(vif->tx_irq);
	if (vif->tx_irq != vif->rx_irq)
		enable_irq(vif->rx_irq);
	xenvif_check_rx_xenvif(vif);
}

static void xenvif_down(struct xenvif *vif)
{
	napi_disable(&vif->napi);
	disable_irq(vif->tx_irq);
	if (vif->tx_irq != vif->rx_irq)
		disable_irq(vif->rx_irq);
	del_timer_sync(&vif->credit_timeout);
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		xenvif_up(vif);
	netif_start_queue(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (netif_carrier_ok(dev))
		xenvif_down(vif);
	netif_stop_queue(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

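/* Mask out features the frontend has not negotiated: SG, TSO/TSO6 and
 * the checksum offloads.
 */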
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets that were
	 * never freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS value.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *vif = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
		data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

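/* Allocate and register the netdev for a new vif. The interface starts
 * with the carrier off; it is turned on once the frontend connects, in
 * xenvif_connect().
 */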
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};
	int i;

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
				     MAX_GRANT_COPY_OPS);
	if (vif->grant_copy_op == NULL) {
		pr_warn("Could not allocate grant copy space for %s\n", name);
		free_netdev(dev);
		return ERR_PTR(-ENOMEM);
	}

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;

	vif->credit_bytes = vif->remaining_credit = ~0UL;
	vif->credit_usec  = 0UL;
	init_timer(&vif->credit_timeout);
	vif->credit_window_start = get_jiffies_64();

	init_timer(&vif->wake_queue);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	skb_queue_head_init(&vif->rx_queue);
	skb_queue_head_init(&vif->tx_queue);

	vif->pending_cons = 0;
	vif->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; i++)
		vif->pending_ring[i] = i;
	spin_lock_init(&vif->callback_lock);
	spin_lock_init(&vif->response_lock);
	/* If ballooning is disabled, this will consume real memory, so you
	 * had better enable it. The long-term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = alloc_xenballooned_pages(MAX_PENDING_REQS,
				       vif->mmap_pages,
				       false);
	if (err) {
		netdev_err(dev, "Could not reserve mmap_pages\n");
		vfree(vif->grant_copy_op);
		free_netdev(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < MAX_PENDING_REQS; i++) {
		vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}
	init_timer(&vif->dealloc_delay);

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	memset(dev->dev_addr, 0xFF, ETH_ALEN);
	dev->dev_addr[0] &= ~0x01;

	netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

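/* Map the shared rings, bind the event channel(s) and start the
 * guest-RX and dealloc kthreads; on success the carrier is turned on.
 */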
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	BUG_ON(vif->tx_irq);
	BUG_ON(vif->task);
	BUG_ON(vif->dealloc_task);

	err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&vif->wq);
	init_waitqueue_head(&vif->dealloc_wq);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			vif->domid, tx_evtchn, xenvif_interrupt, 0,
			vif->dev->name, vif);
		if (err < 0)
			goto err_unmap;
		vif->tx_irq = vif->rx_irq = err;
		disable_irq(vif->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
			 "%s-tx", vif->dev->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			vif->tx_irq_name, vif);
		if (err < 0)
			goto err_unmap;
		vif->tx_irq = err;
		disable_irq(vif->tx_irq);

		snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
			 "%s-rx", vif->dev->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			vif->rx_irq_name, vif);
		if (err < 0)
			goto err_tx_unbind;
		vif->rx_irq = err;
		disable_irq(vif->rx_irq);
	}

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)vif, "%s-guest-rx", vif->dev->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}

	vif->task = task;

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)vif, "%s-dealloc", vif->dev->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}

	vif->dealloc_task = task;

	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	netif_carrier_on(vif->dev);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();

	wake_up_process(vif->task);
	wake_up_process(vif->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(vif->rx_irq, vif);
	vif->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(vif->tx_irq, vif);
	vif->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_rings(vif);
err:
	module_put(THIS_MODULE);
	return err;
}

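/* Take the carrier down and stop the datapath; packets already queued
 * for the frontend are discarded.
 */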
void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	netif_carrier_off(dev); /* discard queued packets */
	if (netif_running(dev))
		xenvif_down(vif);
	rtnl_unlock();
}

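/* Tear down the connection to the frontend: stop the kthreads, unbind
 * the event channel(s) and unmap the shared rings. The vif itself is
 * released later, in xenvif_free().
 */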
void xenvif_disconnect(struct xenvif *vif)
{
	if (netif_carrier_ok(vif->dev))
		xenvif_carrier_off(vif);

	if (vif->task) {
		del_timer_sync(&vif->wake_queue);
		kthread_stop(vif->task);
		vif->task = NULL;
	}

	if (vif->dealloc_task) {
		del_timer_sync(&vif->dealloc_delay);
		kthread_stop(vif->dealloc_task);
		vif->dealloc_task = NULL;
	}

	if (vif->tx_irq) {
		if (vif->tx_irq == vif->rx_irq)
			unbind_from_irqhandler(vif->tx_irq, vif);
		else {
			unbind_from_irqhandler(vif->tx_irq, vif);
			unbind_from_irqhandler(vif->rx_irq, vif);
		}
		vif->tx_irq = 0;
	}

	xenvif_unmap_frontend_rings(vif);
}

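/* Final teardown: wait until every grant mapping has been returned,
 * release the ballooned pages and unregister the netdev.
 */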
void xenvif_free(struct xenvif *vif)
{
	int i, unmap_timeout = 0;
	/* Here we want to avoid timeout messages if an skb can be legitimately
	 * stuck somewhere else. Realistically this could be another vif's
	 * internal or QDisc queue. That other vif also has this
	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
	 * internal queue. After that, the QDisc queue can put in worst case
	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other vif's
	 * internal queue, so we need several rounds of such timeouts until we
	 * can be sure that no other vif should have skbs from us. We are
	 * not sending more skbs, so newly stuck packets are not interesting
	 * for us here.
	 */
	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));

	for (i = 0; i < MAX_PENDING_REQS; ++i) {
		if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
			unmap_timeout++;
			schedule_timeout(msecs_to_jiffies(1000));
			if (unmap_timeout > worst_case_skb_lifetime &&
			    net_ratelimit())
				netdev_err(vif->dev,
					   "Page still granted! Index: %x\n",
					   i);
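			/* Restart the scan from the beginning and keep
			 * waiting until every handle has been returned.
			 */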
			i = -1;
		}
	}

	free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);

	netif_napi_del(&vif->napi);

	unregister_netdev(vif->dev);

	vfree(vif->grant_copy_op);
	free_netdev(vif->dev);

	module_put(THIS_MODULE);
}