Commit 8779772c authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/net-next



Eric W. Biederman says:

====================
Using dev_kfree/consume_skb_any for functions called in multiple contexts

These changes are the result of walking through the network drivers
that support netpoll and verifying that the code paths netpoll can
cause to be called in hard irq context use an appropriate flavor of
kfree_skb: either dev_kfree_skb_any or dev_consume_skb_any.
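
For reference, the *_any flavors check the calling context at run time
and defer the actual free to the NET_TX softirq when freeing directly
would be unsafe. A simplified sketch (not the verbatim source) of the
dispatch in net/core/dev.c:

	/* Free directly when it is safe; otherwise queue the skb on the
	 * per-cpu completion queue for the NET_TX softirq to reap. */
	void __dev_kfree_skb_any(struct sk_buff *skb,
				 enum skb_free_reason reason)
	{
		if (in_irq() || irqs_disabled())
			__dev_kfree_skb_irq(skb, reason);
		else
			dev_kfree_skb(skb);
	}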

Since my last pass at this I have become aware of the small difference
between dev_kfree_skb_any and dev_consume_skb_any:
net/core/drop_monitor.c reports dev_kfree_skb_any as a packet drop
while staying quiet about dev_consume_skb_any, with the weird twist
that dev_kfree_skb is unintuitively defined as consume_skb.
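
Abridged from include/linux/netdevice.h, the two wrappers differ only
in the free reason they record, which is what drop_monitor keys on:

	#define dev_kfree_skb(a)	consume_skb(a)	/* the unintuitive twist */

	static inline void dev_kfree_skb_any(struct sk_buff *skb)
	{
		__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);	/* reported as a drop */
	}

	static inline void dev_consume_skb_any(struct sk_buff *skb)
	{
		__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);	/* not reported */
	}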

As netpoll now calls the napi poll function with budget == 0, the
pieces of a driver's napi poll function that do not run when
budget == 0 have been ignored.
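
As an illustration (a hypothetical driver; the foo_* names are
invented), the usual poll shape leaves only the TX-completion half
reachable from netpoll's budget == 0 call:

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_adapter *adap =
			container_of(napi, struct foo_adapter, napi);
		int work_done = 0;

		foo_clean_tx(adap);	/* runs even when budget == 0 */

		if (budget)		/* netpoll never enters this branch */
			work_done = foo_clean_rx(adap, budget);

		if (work_done < budget)
			napi_complete(napi);
		return work_done;
	}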

The most interesting change is to the atl1c driver, which tried
unsuccessfully to tell one of its functions which context it was
called in so that it could call dev_kfree_skb_irq or dev_kfree_skb as
appropriate. I have simply removed the extra parameter and called
dev_consume_skb_any instead.

At 54 separate changes I will post each change as a separate patch
(so they can be reviewed), but for general sanity's sake I have
gathered them all into a git branch for easy access.
====================
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 35d499ee fb7c03df
@@ -240,7 +240,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -749,7 +749,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
-dev_kfree_skb (skb);
+dev_consume_skb_any (skb);
/* Clear the Tx status stack. */
{
@@ -2086,7 +2086,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* ... and the packet rounded to a doubleword. */
skb_tx_timestamp(skb);
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
-dev_kfree_skb (skb);
+dev_consume_skb_any (skb);
if (ioread16(ioaddr + TxFree) > 1536) {
netif_start_queue (dev); /* AKPM: redundant? */
} else {
@@ -404,7 +404,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
spin_unlock(&ei_local->page_lock);
enable_irq_lockdep_irqrestore(dev->irq, &flags);
skb_tx_timestamp(skb);
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
dev->stats.tx_bytes += send_length;
return NETDEV_TX_OK;
@@ -1087,7 +1087,7 @@ static inline void _tx_reclaim_skb(void)
tx_list_head->desc_a.config &= ~DMAEN;
tx_list_head->status.status_word = 0;
if (tx_list_head->skb) {
-dev_kfree_skb(tx_list_head->skb);
+dev_consume_skb_any(tx_list_head->skb);
tx_list_head->skb = NULL;
}
tx_list_head = tx_list_head->next;
@@ -476,7 +476,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -578,7 +578,7 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
outs++;
/* Kick the lance: transmit now */
WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
spin_lock_irqsave(&lp->devlock, flags);
if (TX_BUFFS_AVAIL)
@@ -472,7 +472,7 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
netif_stop_queue(dev);
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2448,7 +2448,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_dma_addr[entry] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
goto drop_packet;
}
@@ -1097,7 +1097,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
drop:
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -832,7 +832,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
}
static inline void atl1c_clean_buffer(struct pci_dev *pdev,
-struct atl1c_buffer *buffer_info, int in_irq)
+struct atl1c_buffer *buffer_info)
{
u16 pci_driection;
if (buffer_info->flags & ATL1C_BUFFER_FREE)
@@ -850,12 +850,8 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
pci_unmap_page(pdev, buffer_info->dma,
buffer_info->length, pci_driection);
}
-if (buffer_info->skb) {
-	if (in_irq)
-		dev_kfree_skb_irq(buffer_info->skb);
-	else
-		dev_kfree_skb(buffer_info->skb);
-}
+if (buffer_info->skb)
+	dev_consume_skb_any(buffer_info->skb);
buffer_info->dma = 0;
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
@@ -875,7 +871,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
ring_count = tpd_ring->count;
for (index = 0; index < ring_count; index++) {
buffer_info = &tpd_ring->buffer_info[index];
-atl1c_clean_buffer(pdev, buffer_info, 0);
+atl1c_clean_buffer(pdev, buffer_info);
}
/* Zero out Tx-buffers */
@@ -899,7 +895,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
for (j = 0; j < rfd_ring->count; j++) {
buffer_info = &rfd_ring->buffer_info[j];
-atl1c_clean_buffer(pdev, buffer_info, 0);
+atl1c_clean_buffer(pdev, buffer_info);
}
/* zero out the descriptor ring */
memset(rfd_ring->desc, 0, rfd_ring->size);
@@ -1562,7 +1558,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
-atl1c_clean_buffer(pdev, buffer_info, 1);
+atl1c_clean_buffer(pdev, buffer_info);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -2085,7 +2081,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
while (index != tpd_ring->next_to_use) {
tpd = ATL1C_TPD_DESC(tpd_ring, index);
buffer_info = &tpd_ring->buffer_info[index];
-atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+atl1c_clean_buffer(adpt->pdev, buffer_info);
memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
if (++index == tpd_ring->count)
index = 0;
@@ -2258,7 +2254,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
} else {
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
@@ -2946,17 +2946,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Sanity checks for the skb */
if (unlikely(skb->len <= ETH_HLEN)) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
return NETDEV_TX_OK;
}
if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
if (unlikely(len == 0)) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
@@ -2968,7 +2968,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* and the netif_tx_stop_all_queues() call.
*/
if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
@@ -2981,7 +2981,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
return NETDEV_TX_OK;
}
@@ -3021,7 +3021,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Program the opcode, flags, frame_len, num_vectors in WI */
if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
txqent->hdr.wi.reserved = 0;
@@ -3047,7 +3047,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
tcb->producer_index);
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
return NETDEV_TX_OK;
}
@@ -3076,7 +3076,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(len != skb->len)) {
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
return NETDEV_TX_OK;
}
@@ -1045,7 +1045,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-kfree_skb(skb);
+dev_kfree_skb_any(skb);
goto unlock;
}
@@ -897,7 +897,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
/* Check tx error on the last segment */
if (desc_get_tx_ls(p)) {
desc_get_tx_status(priv, p);
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
}
priv->tx_skbuff[entry] = NULL;
@@ -1105,7 +1105,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, paddr)) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
priv->tx_skbuff[entry] = skb;
@@ -1169,7 +1169,7 @@ dma_err:
desc = first;
dma_unmap_single(priv->device, desc_get_buf_addr(desc),
desc_get_buf_len(desc), DMA_TO_DEVICE);
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -298,7 +298,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
if (d->eop) {
-kfree_skb(d->skb);
+dev_consume_skb_any(d->skb);
d->skb = NULL;
}
}
@@ -1188,7 +1188,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
V_WR_TID(q->token));
wr_gen2(d, gen);
-kfree_skb(skb);
+dev_consume_skb_any(skb);
return;
}
@@ -1233,7 +1233,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -383,7 +383,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
if (d->skb) { /* an SGL is present */
if (unmap)
unmap_sgl(dev, d->skb, d->sgl, q);
-kfree_skb(d->skb);
+dev_consume_skb_any(d->skb);
d->skb = NULL;
}
++d;
@@ -1009,7 +1009,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
-out_free: dev_kfree_skb(skb);
+out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1104,7 +1104,7 @@ out_free: dev_kfree_skb(skb);
if (immediate) {
inline_tx_skb(skb, &q->q, cpl + 1);
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
} else {
int last_desc;
@@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
if (sdesc->skb) {
if (need_unmap)
unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
-kfree_skb(sdesc->skb);
+dev_consume_skb_any(sdesc->skb);
sdesc->skb = NULL;
}
@@ -1275,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* need it any longer.
*/
inline_tx_skb(skb, &txq->q, cpl + 1);
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
} else {
/*
* Write the skb's Scatter/Gather list into the TX Packet CPL
@@ -1354,7 +1354,7 @@ out_free:
* An error of some sort happened. Free the TX skb and tell the
* OS that we've "dealt" with the packet ...
*/
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1174,7 +1174,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
spin_unlock_irqrestore(&lp->lock, flags);
dev->stats.tx_bytes += skb->len;
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
/* We DO NOT call netif_wake_queue() here.
* We also DO NOT call netif_start_queue().
@@ -521,7 +521,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
unsigned int txq_map;
if (skb->len <= 0) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -536,7 +536,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (skb_shinfo(skb)->gso_size == 0 &&
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1033,7 +1033,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
-dev_kfree_skb(skb);
+dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}