author		David S. Miller <davem@davemloft.net>	2019-02-17 15:48:43 -0800
committer	David S. Miller <davem@davemloft.net>	2019-02-17 15:48:43 -0800
commit		254a1a2b2c2eb4332e9f8d73f7cd35b97abc6493 (patch)
tree		1ff1c631dd480f88ed716d02d858bba7530726d6
parent		e09c6a4ec1bb9ed73b4157b69c261e408d875b0f (diff)
parent		3bed3cc4156eedf652b4df72bdb35d4f1a2a739d (diff)
Merge branch 'netdev-page_frag_alloc-fixes'
Alexander Duyck says:

====================
Address recent issues found in netdev page_frag_alloc usage

This patch set addresses a couple of issues that I had pointed out to Jann Horn in response to a recent patch submission.

The first issue is that I wanted to avoid the need to read/modify/write the size value in order to generate the value for pagecnt_bias. Instead we can just use a fixed constant, which reduces the need for memory read operations and the overall number of instructions needed to update the pagecnt_bias values.

The other, and more important, issue is that we were apparently letting tun access the napi_alloc_cache indirectly through netdev_alloc_frag, and as a result letting it create unaligned accesses via unaligned allocations. To prevent this I have added a call to SKB_DATA_ALIGN for the fragsz field so that the offset into the napi_alloc_cache stays SMP_CACHE_BYTES aligned.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
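For reference, SKB_DATA_ALIGN() simply rounds a length up to the next SMP_CACHE_BYTES boundary, which is why aligning fragsz before it reaches the per-CPU frag cache keeps every returned offset cache-line aligned. Below is a minimal userspace sketch of that rounding, assuming a 64-byte cache line; the stand-in macros are illustrative, the real SMP_CACHE_BYTES is per-architecture.

#include <stdio.h>

/* Stand-ins for the kernel's SMP_CACHE_BYTES and SKB_DATA_ALIGN(); 64 bytes
 * is a typical L1 cache line size, but the real value is per-architecture. */
#define SMP_CACHE_BYTES 64
#define SKB_DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))

int main(void)
{
	unsigned int sizes[] = { 1, 64, 65, 123, 1500 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("fragsz %4u -> aligned %4u\n",
		       sizes[i], SKB_DATA_ALIGN(sizes[i]));
	return 0;
}

With these stand-ins, requests of 1, 64, 65, 123 and 1500 bytes round up to 64, 64, 128, 128 and 1536 bytes respectively.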
-rw-r--r--	mm/page_alloc.c		8
-rw-r--r--	net/core/skbuff.c	4
2 files changed, 8 insertions, 4 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 46285d28e43b52..7f79b78bc82945 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4675,11 +4675,11 @@ refill:
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
-		page_ref_add(page, size);
+		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
 		/* reset page count bias and offset to start of new frag */
 		nc->pfmemalloc = page_is_pfmemalloc(page);
-		nc->pagecnt_bias = size + 1;
+		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
 		nc->offset = size;
 	}
@@ -4695,10 +4695,10 @@ refill:
 		size = nc->size;
 #endif
 		/* OK, page count is 0, we can safely set it */
-		set_page_count(page, size + 1);
+		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
 		/* reset page count bias and offset to start of new frag */
-		nc->pagecnt_bias = size + 1;
+		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
 		offset = size - fragsz;
 	}
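The mm/page_alloc.c hunks above swap the size-derived values for a fixed constant: the page refcount is front-loaded by PAGE_FRAG_CACHE_MAX_SIZE and pagecnt_bias is set to the same value plus one, so the two stay in step no matter what size the backing page actually turned out to be, and the size no longer has to be re-read to recompute the bias. The following is a toy userspace model of that accounting, not the kernel code; the names in it are hypothetical and the page refcount is reduced to a plain int.

#include <stdio.h>

/* Toy model of the page_frag_cache accounting; PAGE_FRAG_CACHE_MAX_SIZE
 * stands in for the kernel constant (32KB on typical configurations). */
#define PAGE_FRAG_CACHE_MAX_SIZE (32 * 1024)

struct frag_cache {
	char *va;			/* base of the cached page */
	unsigned int size;		/* size of the backing page */
	unsigned int offset;		/* allocations walk down from here */
	unsigned int pagecnt_bias;	/* references pre-paid to ourselves */
	int page_refcount;		/* stand-in for the struct page refcount */
};

static void cache_refill(struct frag_cache *nc, char *page, unsigned int size)
{
	nc->va = page;
	nc->size = size;
	/* Front-load the refcount with a fixed constant rather than "size",
	 * and mirror the same constant (plus our own reference) in the bias. */
	nc->page_refcount = PAGE_FRAG_CACHE_MAX_SIZE + 1;
	nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
	nc->offset = size;
}

static void *frag_alloc(struct frag_cache *nc, unsigned int fragsz)
{
	if (nc->offset < fragsz)
		return NULL;		/* real code refills or recycles here */
	nc->offset -= fragsz;
	nc->pagecnt_bias--;		/* hand out one pre-paid reference */
	return nc->va + nc->offset;
}

static int page_unused(struct frag_cache *nc)
{
	/* Mirrors page_ref_sub_and_test(page, nc->pagecnt_bias): if only our
	 * pre-paid references remain, the page can be reused in place. */
	nc->page_refcount -= nc->pagecnt_bias;
	return nc->page_refcount == 0;
}

int main(void)
{
	static char page[PAGE_FRAG_CACHE_MAX_SIZE];
	struct frag_cache nc;

	cache_refill(&nc, page, sizeof(page));
	printf("frag at offset %ld\n", (long)((char *)frag_alloc(&nc, 704) - page));
	nc.page_refcount--;	/* consumer freeing the frag (put_page stand-in) */
	printf("page reusable: %d\n", page_unused(&nc));
	return 0;
}

When the cache runs dry, subtracting the remaining bias from the refcount tells the allocator whether anyone outside the cache still holds the page; if not, the page can be recycled in place rather than freed and reallocated.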
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 26d8484849126e..2415d9cb9b89fe 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  */
 void *netdev_alloc_frag(unsigned int fragsz)
 {
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
 	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 void *napi_alloc_frag(unsigned int fragsz)
 {
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
 	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(napi_alloc_frag);
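To see what the net/core/skbuff.c change buys in practice: napi_alloc_frag() and netdev_alloc_frag() carve fragments out of a shared per-CPU page by walking an offset downwards, so a single odd-sized request (as tun could issue through netdev_alloc_frag) would leave every later caller's offset misaligned. Below is a small demo of the offset sequence with and without the rounding, reusing the illustrative 64-byte stand-ins from above and an arbitrary 700-byte request size.

#include <stdio.h>

#define SMP_CACHE_BYTES 64	/* illustrative; the real value is per-arch */
#define SKB_DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))
#define CACHE_SIZE (32 * 1024)	/* stands in for the frag cache page size */

/* Walk the frag-cache offset down from the top of the page for a request
 * size that is not a multiple of the cache line, with and without the
 * SKB_DATA_ALIGN() rounding added by this patch. */
static void walk(unsigned int fragsz, int align)
{
	unsigned int offset = CACHE_SIZE;
	int i;

	printf("%s fragsz=%u:", align ? "aligned  " : "unaligned", fragsz);
	for (i = 0; i < 4; i++) {
		offset -= align ? SKB_DATA_ALIGN(fragsz) : fragsz;
		printf(" %u%s", offset, offset % SMP_CACHE_BYTES ? "(!)" : "");
	}
	printf("\n");
}

int main(void)
{
	walk(700, 0);	/* pre-patch: later offsets land off cache lines */
	walk(700, 1);	/* post-patch: every offset stays 64-byte aligned */
	return 0;
}

With these numbers the unaligned walk yields offsets 32068, 31368, 30668, 29968 (none on a cache-line boundary), while the aligned walk stays on multiples of 64: 32064, 31360, 30656, 29952.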