author    David Windsor <dave@nullcore.net>        2017-06-10 22:50:33 -0400
committer Kees Cook <keescook@chromium.org>        2018-01-15 12:07:57 -0800
commit    de046449045a329bae5c9256e55b58a685a22532 (patch)
tree      7aa94c2a5f9506ad2ac0391f64211658ac3f92bb /fs/cifs/cifsfs.c
parent    e9a0561b7c8ef964078fa340fc1983f1f6d30544 (diff)
cifs: Define usercopy region in cifs_request slab cache
CIFS request buffers, stored in the cifs_request slab cache, need to be
copied to/from userspace.

cache object allocation:
    fs/cifs/cifsfs.c:
        cifs_init_request_bufs():
            ...
            cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
                                                      cifs_req_cachep);

    fs/cifs/misc.c:
        cifs_buf_get():
            ...
            ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
            ...
            return ret_buf;

In support of usercopy hardening, this patch defines a region in the
cifs_request slab cache in which userspace copy operations are allowed.
This region is known as the slab cache's usercopy region. Slab caches
can now check that each dynamically sized copy operation involving
cache-managed memory falls entirely within the slab's usercopy region.

This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY
whitelisting code in the last public patch of grsecurity/PaX based on
my understanding of the code. Changes or omissions from the original
code are mine and don't reflect the original grsecurity/PaX code.

Signed-off-by: David Windsor <dave@nullcore.net>
[kees: adjust commit log, provide usage trace]
Cc: Steve French <sfrench@samba.org>
Cc: linux-cifs@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
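Editor's note: relative to kmem_cache_create(), the _usercopy variant takes
two extra arguments, useroffset and usersize, which bound the whitelisted
region within each object. In this patch both cifs caches whitelist the
entire object (offset 0, size equal to the object size), since the whole
request buffer is copied to/from userspace. The sketch below shows the more
general partial-whitelist pattern; struct foo_req and foo_init_caches() are
hypothetical names for illustration only, and the exact parameter types of
kmem_cache_create_usercopy() vary slightly across kernel versions:

    /*
     * Hypothetical example (not part of this patch): a cache whose objects
     * mix kernel-private state with a user-visible payload.  Only the
     * payload is whitelisted, so a copy touching the list_head is rejected.
     */
    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/stddef.h>

    struct foo_req {
            struct list_head list;  /* kernel-internal, never copied to userspace */
            char payload[512];      /* the only part exposed to usercopy */
    };

    static struct kmem_cache *foo_req_cachep;

    static int foo_init_caches(void)
    {
            foo_req_cachep = kmem_cache_create_usercopy("foo_request",
                            sizeof(struct foo_req), 0, SLAB_HWCACHE_ALIGN,
                            offsetof(struct foo_req, payload),      /* useroffset */
                            sizeof(((struct foo_req *)0)->payload), /* usersize */
                            NULL);
            return foo_req_cachep ? 0 : -ENOMEM;
    }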
Diffstat (limited to 'fs/cifs/cifsfs.c')
-rw-r--r--  fs/cifs/cifsfs.c | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 31b7565b16175..29f4b0290fbd0 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1231,9 +1231,11 @@ cifs_init_request_bufs(void)
 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
 		 CIFSMaxBufSize, CIFSMaxBufSize);
 */
-	cifs_req_cachep = kmem_cache_create("cifs_request",
+	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
 					    CIFSMaxBufSize + max_hdr_size, 0,
-					    SLAB_HWCACHE_ALIGN, NULL);
+					    SLAB_HWCACHE_ALIGN, 0,
+					    CIFSMaxBufSize + max_hdr_size,
+					    NULL);
 	if (cifs_req_cachep == NULL)
 		return -ENOMEM;
 
@@ -1259,9 +1261,9 @@ cifs_init_request_bufs(void)
 	   more SMBs to use small buffer alloc and is still much more
 	   efficient to alloc 1 per page off the slab compared to 17K (5page)
 	   alloc of large cifs buffers even when page debugging is on */
-	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
+	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
-			NULL);
+			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
 	if (cifs_sm_req_cachep == NULL) {
 		mempool_destroy(cifs_req_poolp);
 		kmem_cache_destroy(cifs_req_cachep);
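
Editor's note: with CONFIG_HARDENED_USERCOPY enabled, copy_to_user() and
copy_from_user() on slab-backed memory pass through a heap-object check that
rejects any copy straying outside the whitelisted region. The following is a
simplified model of that bounds check, loosely based on __check_heap_object()
in mm/slub.c; the real code reports violations via usercopy_abort() rather
than BUG(), and field names differ between kernel versions:

    /*
     * Simplified model of the hardened-usercopy slab check; not the real
     * implementation.  's' is the cache the object belongs to, 'offset' is
     * where the copy starts within the object, 'n' is the copy length.
     */
    static void check_slab_usercopy(const struct kmem_cache *s,
                                    unsigned long offset, unsigned long n)
    {
            /* The copy must fall entirely inside [useroffset, useroffset + usersize). */
            if (offset < s->useroffset ||
                offset - s->useroffset + n > s->usersize)
                    BUG();  /* the real check calls usercopy_abort() with details */
    }

For the cifs caches whitelisted in this patch, useroffset is 0 and usersize
spans the whole buffer, so any copy that stays within the object passes.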