From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 15 Jan 2016 16:33:34 +0100
Subject: net/core: protect users of napi_alloc_cache against reentrance
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.7-rt5.tar.xz

On -RT the code running in BH cannot be moved to another CPU, so
CPU-local variables remain local. However, the code can be preempted,
and another task may then enter BH on the same CPU and access the same
napi_alloc_cache variable.

This patch ensures that each user of napi_alloc_cache takes a local lock.
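
The conversion follows the locallock pattern: get_locked_var() takes the
per-CPU lock and returns this CPU's instance in one step, and
put_locked_var() drops the lock again. A minimal sketch of the pattern as
it is applied here (example_frag_alloc() is an illustrative name, not a
function added by this patch):

	static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
	static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);

	static void *example_frag_alloc(unsigned int fragsz, gfp_t gfp_mask)
	{
		struct napi_alloc_cache *nc;
		void *data;

		/*
		 * Lock and per-CPU lookup in one step: a task that
		 * preempts us and enters BH on this CPU now blocks on
		 * the lock instead of seeing a half-updated cache.
		 */
		nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
		data = page_frag_alloc(&nc->page, fragsz, gfp_mask);
		put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);

		return data;
	}

Anything needed from *nc must be read before the lock is dropped, which
is why __napi_alloc_skb() below snapshots nc->page.pfmemalloc into a
local variable. On !RT kernels get_locked_var()/put_locked_var() are
expected to fall back to get_cpu_var()/put_cpu_var(), so the change
should cost nothing there.
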
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 net/core/skbuff.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -332,6 +332,7 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -361,9 +362,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
+	void *data;
 
-	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	data = page_frag_alloc(&nc->page, fragsz, gfp_mask);
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	return data;
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -457,9 +462,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 				 gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 	struct sk_buff *skb;
 	void *data;
+	bool pfmemalloc;
 
 	len += NET_SKB_PAD + NET_IP_ALIGN;
 
@@ -477,7 +483,10 @@ struct sk_buff *__napi_alloc_skb(struct
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	data = page_frag_alloc(&nc->page, len, gfp_mask);
+	pfmemalloc = nc->page.pfmemalloc;
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	if (unlikely(!data))
 		return NULL;
 
@@ -488,7 +497,7 @@ struct sk_buff *__napi_alloc_skb(struct
 	}
 
 	/* use OR instead of assignment to avoid clearing of bits in mask */
-	if (nc->page.pfmemalloc)
+	if (pfmemalloc)
 		skb->pfmemalloc = 1;
 	skb->head_frag = 1;
 
@@ -720,23 +729,26 @@ void __consume_stateless_skb(struct sk_b
 
 void __kfree_skb_flush(void)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* flush skb_cache if containing objects */
 	if (nc->skb_count) {
 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
 	/* drop skb->head and call any destructors for packet */
 	skb_release_all(skb);
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* record skb to CPU local list */
 	nc->skb_cache[nc->skb_count++] = skb;
 
@@ -751,6 +763,7 @@ static inline void _kfree_skb_defer(stru
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 void __kfree_skb_defer(struct sk_buff *skb)
 {