Commit dae2e9f4 authored by Pradeep A. Dalvi, committed by David S. Miller

netdev: ethernet dev_alloc_skb to netdev_alloc_skb

Replaced the deprecated dev_alloc_skb() with netdev_alloc_skb() in drivers/net/ethernet.
  - Removed the now-redundant skb->dev = dev assignments after netdev_alloc_skb().

Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c4062dfc
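
All of the hunks below make the same substitution; where a driver also set skb->dev by hand after allocating, that line is dropped, because netdev_alloc_skb() associates the skb with the device itself. A minimal sketch of the before/after pattern (the helper name example_rx_alloc and the rx_buf_sz parameter are illustrative, not taken from any driver in this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical RX-buffer allocation helper, for illustration only. */
static struct sk_buff *example_rx_alloc(struct net_device *dev,
					unsigned int rx_buf_sz)
{
	struct sk_buff *skb;

	/* Old pattern:
	 *	skb = dev_alloc_skb(rx_buf_sz + 2);
	 *	...
	 *	skb->dev = dev;		-- manual association, now redundant
	 */

	/* New pattern: netdev_alloc_skb() sets skb->dev internally. */
	skb = netdev_alloc_skb(dev, rx_buf_sz + 2);
	if (skb == NULL)
		return NULL;		/* caller drops the packet */

	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;
}

The extra 2 bytes and the skb_reserve() offset reproduce the alignment idiom seen throughout the hunks: reserving NET_IP_ALIGN (normally 2) bytes ahead of the 14-byte Ethernet header leaves the IP header on an aligned boundary.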
@@ -150,7 +150,7 @@ static void netx_eth_receive(struct net_device *ndev)
seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT;
len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT;
-	skb = dev_alloc_skb(len);
+	skb = netdev_alloc_skb(ndev, len);
if (unlikely(skb == NULL)) {
printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
ndev->name);
@@ -735,7 +735,7 @@ static void netdev_rx(struct net_device *dev)
if (status & RXDS_RXGD) {
data = ether->rdesc->recv_buf[ether->cur_rx];
-	skb = dev_alloc_skb(length+2);
+	skb = netdev_alloc_skb(dev, length + 2);
if (!skb) {
dev_err(&pdev->dev, "get skb buffer error\n");
ether->stats.rx_dropped++;
@@ -1815,7 +1815,7 @@ static int nv_alloc_rx(struct net_device *dev)
less_rx = np->last_rx.orig;
while (np->put_rx.orig != less_rx) {
-	struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+	struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (skb) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
@@ -1850,7 +1850,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
less_rx = np->last_rx.ex;
while (np->put_rx.ex != less_rx) {
-	struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+	struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (skb) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
@@ -4993,9 +4993,9 @@ static int nv_loopback_test(struct net_device *dev)
/* setup packet for tx */
pkt_len = ETH_DATA_LEN;
-	tx_skb = dev_alloc_skb(pkt_len);
+	tx_skb = netdev_alloc_skb(dev, pkt_len);
if (!tx_skb) {
-	netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
+	netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
ret = 0;
goto out;
}
@@ -1188,11 +1188,10 @@ static void hamachi_init_ring(struct net_device *dev)
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
-	struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+	struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
hmp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
-	skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1488,7 +1487,7 @@ static int hamachi_rx(struct net_device *dev)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
-	(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+	(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
#ifdef RX_CHECKSUM
printk(KERN_ERR "%s: rx_copybreak non-zero "
"not good with RX_CHECKSUM\n", dev->name);
@@ -1591,12 +1590,11 @@ static int hamachi_rx(struct net_device *dev)
entry = hmp->dirty_rx % RX_RING_SIZE;
desc = &(hmp->rx_ring[entry]);
if (hmp->rx_skbuff[entry] == NULL) {
-	struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+	struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
hmp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
-	skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -743,11 +743,10 @@ static int yellowfin_init_ring(struct net_device *dev)
}
for (i = 0; i < RX_RING_SIZE; i++) {
-	struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+	struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
yp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
-	skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -1133,7 +1132,7 @@ static int yellowfin_rx(struct net_device *dev)
PCI_DMA_FROMDEVICE);
yp->rx_skbuff[entry] = NULL;
} else {
-	skb = dev_alloc_skb(pkt_len + 2);
+	skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header */
@@ -1156,11 +1155,10 @@ static int yellowfin_rx(struct net_device *dev)
for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
entry = yp->dirty_rx % RX_RING_SIZE;
if (yp->rx_skbuff[entry] == NULL) {
-	struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+	struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
if (skb == NULL)
break; /* Better luck next round. */
yp->rx_skbuff[entry] = skb;
-	skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
@@ -643,7 +643,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
/* Entry in use? */
WARN_ON(*buff);
-	skb = dev_alloc_skb(mac->bufsz);
+	skb = netdev_alloc_skb(dev, mac->bufsz);
skb_reserve(skb, LOCAL_SKB_ALIGN);
if (unlikely(!skb))
@@ -1487,7 +1487,7 @@ netxen_alloc_rx_skb(struct netxen_adapter *adapter,
dma_addr_t dma;
struct pci_dev *pdev = adapter->pdev;
-	buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+	buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
if (!buffer->skb)
return 1;
@@ -719,7 +719,7 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
int i, loop, cnt = 0;
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
-	skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE);
+	skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
@@ -1440,7 +1440,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
dma_addr_t dma;
struct pci_dev *pdev = adapter->pdev;
-	skb = dev_alloc_skb(rds_ring->skb_size);
+	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
if (!skb) {
adapter->stats.skb_alloc_failure++;
return -ENOMEM;
@@ -552,7 +552,7 @@ static void ni5010_rx(struct net_device *dev)
}
/* Malloc up new buffer. */
-	skb = dev_alloc_skb(i_pkt_size + 3);
+	skb = netdev_alloc_skb(dev, i_pkt_size + 3);
if (skb == NULL) {
printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
@@ -783,7 +783,7 @@ static void net_rx(struct net_device *dev)
int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
struct sk_buff *skb;
-	skb = dev_alloc_skb(pkt_len + 2);
+	skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
dev->name);
@@ -653,13 +653,12 @@ static void sh_eth_ring_format(struct net_device *ndev)
for (i = 0; i < RX_RING_SIZE; i++) {
/* skb */
mdp->rx_skbuff[i] = NULL;
-	skb = dev_alloc_skb(mdp->rx_buf_sz);
+	skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
DMA_FROM_DEVICE);
-	skb->dev = ndev; /* Mark as being used by this device. */
sh_eth_set_receive_align(skb);
/* RX descriptor */
@@ -953,13 +952,12 @@ static int sh_eth_rx(struct net_device *ndev)
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
if (mdp->rx_skbuff[entry] == NULL) {
-	skb = dev_alloc_skb(mdp->rx_buf_sz);
+	skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
DMA_FROM_DEVICE);
-	skb->dev = ndev;
sh_eth_set_receive_align(skb);
skb_checksum_none_assert(skb);
@@ -643,7 +643,7 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
if (next_ptr <= this_ptr)
length += RX_END - RX_START;
-	skb = dev_alloc_skb(length + 2);
+	skb = netdev_alloc_skb(dev, length + 2);
if (skb) {
unsigned char *buf;
@@ -548,7 +548,7 @@ static void seeq8005_rx(struct net_device *dev)
struct sk_buff *skb;
unsigned char *buf;
-	skb = dev_alloc_skb(pkt_len);
+	skb = netdev_alloc_skb(dev, pkt_len);
if (skb == NULL) {
printk("%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
@@ -1166,7 +1166,7 @@ sis900_init_rx_ring(struct net_device *net_dev)
for (i = 0; i < NUM_RX_DESC; i++) {
struct sk_buff *skb;
-	if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+	if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
/* not enough memory for skbuff, this makes a "hole"
on the buffer ring, it is not clear how the
hardware will react to this kind of degenerated
@@ -1769,7 +1769,7 @@ static int sis900_rx(struct net_device *net_dev)
/* refill the Rx buffer, what if there is not enough
* memory for new socket buffer ?? */
-	if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+	if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
/*
* Not enough memory to refill the buffer
* so we need to recycle the old one so
@@ -1827,7 +1827,7 @@ refill_rx_ring:
entry = sis_priv->dirty_rx % NUM_RX_DESC;
if (sis_priv->rx_skbuff[entry] == NULL) {
-	if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+	if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
/* not enough memory for skbuff, this makes a
* "hole" on the buffer ring, it is not clear
* how the hardware will react to this kind
@@ -934,7 +934,7 @@ static void epic_init_ring(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
-	struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
+	struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
ep->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1199,7 +1199,7 @@ static int epic_rx(struct net_device *dev, int budget)
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
-	(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+	(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(ep->pci_dev,
ep->rx_ring[entry].bufaddr,
@@ -1232,7 +1232,7 @@ static int epic_rx(struct net_device *dev, int budget)
entry = ep->dirty_rx % RX_RING_SIZE;
if (ep->rx_skbuff[entry] == NULL) {
struct sk_buff *skb;
-	skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
+	skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
if (skb == NULL)
break;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
@@ -401,7 +401,7 @@ static inline void smc911x_rcv(struct net_device *dev)
} else {
/* Receive a valid packet */
/* Alloc a buffer with extra room for DMA alignment */
-	skb=dev_alloc_skb(pkt_len+32);
+	skb = netdev_alloc_skb(dev, pkt_len+32);
if (unlikely(skb == NULL)) {
PRINTK( "%s: Low memory, rcvd packet dropped.\n",
dev->name);
@@ -1222,7 +1222,7 @@ static void smc_rcv(struct net_device *dev)
if ( status & RS_MULTICAST )
dev->stats.multicast++;
-	skb = dev_alloc_skb( packet_length + 5);
+	skb = netdev_alloc_skb(dev, packet_length + 5);
if ( skb == NULL ) {
printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
@@ -1500,7 +1500,7 @@ static void smc_rx(struct net_device *dev)
struct sk_buff *skb;
/* Note: packet_length adds 5 or 6 extra bytes here! */
-	skb = dev_alloc_skb(packet_length+2);
+	skb = netdev_alloc_skb(dev, packet_length+2);
if (skb == NULL) {
pr_debug("%s: Low memory, packet dropped.\n", dev->name);
@@ -463,7 +463,7 @@ static inline void smc_rcv(struct net_device *dev)
* multiple of 4 bytes on 32 bit buses.
* Hence packet_len - 6 + 2 + 2 + 2.
*/
-	skb = dev_alloc_skb(packet_len);
+	skb = netdev_alloc_skb(dev, packet_len);
if (unlikely(skb == NULL)) {
printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
dev->name);