Commit 52bd2d62 authored by Eric Dumazet, committed by David S. Miller

net: better skb->sender_cpu and skb->napi_id cohabitation



skb->sender_cpu and skb->napi_id share common storage,
and we have had various bugs caused by this.

We had to call skb_sender_cpu_clear() in some places so that
a stale skb->napi_id would not fool netdev_pick_tx().

As suggested by Alexei, we can split the value space so that
these errors cannot happen.

With 0 reserved as the common (not initialized) value, let's
reserve the [1 .. NR_CPUS] range for valid sender_cpu values,
and [NR_CPUS+1 .. ~0U] for valid napi_id values.

This will allow proper busy polling support over tunnels.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d37b4c0a
include/linux/skbuff.h
@@ -1082,9 +1082,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 static inline void skb_sender_cpu_clear(struct sk_buff *skb)
 {
-#ifdef CONFIG_XPS
-	skb->sender_cpu = 0;
-#endif
 }
 
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
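For context (not part of this diff): skb->sender_cpu and skb->napi_id overlay each other in struct sk_buff as an anonymous union, roughly as below (paraphrased from the include/linux/skbuff.h of this era; the exact config guards may differ):

	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};

With the value spaces now disjoint, skb_sender_cpu_clear() no longer needs to zero the field: a leftover napi_id is recognized and overwritten by netdev_pick_tx() itself, so the helper is left as an empty stub for its remaining callers.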
net/core/dev.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(dev_base_lock);
 /* protects napi_hash addition/deletion and napi_gen_id */
 static DEFINE_SPINLOCK(napi_hash_lock);
 
-static unsigned int napi_gen_id;
+static unsigned int napi_gen_id = NR_CPUS;
 static DEFINE_HASHTABLE(napi_hash, 8);
 
 static seqcount_t devnet_rename_seq;
@@ -3021,7 +3021,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 	int queue_index = 0;
 
 #ifdef CONFIG_XPS
-	if (skb->sender_cpu == 0)
+	u32 sender_cpu = skb->sender_cpu - 1;
+
+	if (sender_cpu >= (u32)NR_CPUS)
 		skb->sender_cpu = raw_smp_processor_id() + 1;
 #endif
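The rewritten test relies on unsigned wraparound: subtracting 1 maps a valid sender_cpu in [1 .. NR_CPUS] onto [0 .. NR_CPUS-1], while the uninitialized value 0 wraps to ~0U and any napi_id (>= NR_CPUS+1) stays >= NR_CPUS, so a single compare catches both cases that need a refresh. A minimal userspace sketch of the idea (hypothetical code; NR_CPUS is stood in by a constant):

	#include <assert.h>

	#define NR_CPUS 64U	/* stand-in for the kernel constant */

	/* Mirrors the netdev_pick_tx() test above: true when the stored
	 * value is not a valid sender_cpu and must be refreshed.
	 */
	static int needs_refresh(unsigned int stored)
	{
		return (stored - 1) >= NR_CPUS;	/* 0 - 1 wraps to ~0U */
	}

	int main(void)
	{
		assert(needs_refresh(0));		/* never initialized */
		assert(!needs_refresh(1));		/* CPU 0 */
		assert(!needs_refresh(NR_CPUS));	/* CPU NR_CPUS - 1 */
		assert(needs_refresh(NR_CPUS + 1));	/* smallest napi_id */
		return 0;
	}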
@@ -4676,25 +4678,22 @@ EXPORT_SYMBOL_GPL(napi_by_id);
 
 void napi_hash_add(struct napi_struct *napi)
 {
-	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
+	if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+		return;
 
-		spin_lock(&napi_hash_lock);
+	spin_lock(&napi_hash_lock);
 
-		/* 0 is not a valid id, we also skip an id that is taken
-		 * we expect both events to be extremely rare
-		 */
-		napi->napi_id = 0;
-		while (!napi->napi_id) {
-			napi->napi_id = ++napi_gen_id;
-			if (napi_by_id(napi->napi_id))
-				napi->napi_id = 0;
-		}
+	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+	do {
+		if (unlikely(++napi_gen_id < NR_CPUS + 1))
+			napi_gen_id = NR_CPUS + 1;
+	} while (napi_by_id(napi_gen_id));
+	napi->napi_id = napi_gen_id;
 
-		hlist_add_head_rcu(&napi->napi_hash_node,
-			    &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+	hlist_add_head_rcu(&napi->napi_hash_node,
+			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
 
-		spin_unlock(&napi_hash_lock);
-	}
+	spin_unlock(&napi_hash_lock);
 }
 EXPORT_SYMBOL_GPL(napi_hash_add);
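Together with the napi_gen_id = NR_CPUS initializer above, the pre-increment in the new loop hands out NR_CPUS+1 as the very first id, and the unlikely() branch keeps the generator inside [NR_CPUS+1 .. ~0U] when the 32-bit counter eventually wraps. A minimal userspace model of the allocator (hypothetical code; the napi_by_id() collision check is omitted):

	#include <assert.h>

	#define NR_CPUS 64U	/* stand-in for the kernel constant */

	static unsigned int gen_id = NR_CPUS;	/* mirrors the new initializer */

	static unsigned int next_napi_id(void)
	{
		if (++gen_id < NR_CPUS + 1)	/* wraparound lands here too */
			gen_id = NR_CPUS + 1;
		return gen_id;
	}

	int main(void)
	{
		assert(next_napi_id() == NR_CPUS + 1);	/* first id skips the cpu range */
		gen_id = ~0U;				/* force 32-bit wraparound */
		assert(next_napi_id() == NR_CPUS + 1);	/* wrap stays out of the cpu range */
		return 0;
	}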