Commit 617aebe6 authored by Linus Torvalds

Merge tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull hardened usercopy whitelisting from Kees Cook:
 "Currently, hardened usercopy performs dynamic bounds checking on slab
  cache objects. This is good, but still leaves a lot of kernel memory
  available to be copied to/from userspace in the face of bugs.

  To further restrict what memory is available for copying, this creates
  a way to whitelist specific areas of a given slab cache object for
  copying to/from userspace, allowing much finer granularity of access
  control.

  Slab caches that are never exposed to userspace can declare no
  whitelist for their objects, thereby keeping them unavailable to
  userspace via dynamic copy operations. (Note, an implicit form of
  whitelisting is the use of constant sizes in usercopy operations and
  get_user()/put_user(); these bypass all hardened usercopy checks since
  these sizes cannot change at runtime.)

  This new check is WARN-by-default, so any mistakes can be found over
  the next several releases without breaking anyone's system.

  The series has roughly the following sections:
   - remove %p and improve reporting with offset
   - prepare infrastructure and whitelist kmalloc
   - update VFS subsystem with whitelists
   - update SCSI subsystem with whitelists
   - update network subsystem with whitelists
   - update process memory with whitelists
   - update per-architecture thread_struct with whitelists
   - update KVM with whitelists and fix ioctl bug
   - mark all other allocations as not whitelisted
   - update lkdtm for more sensible test coverage"
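
For readers unfamiliar with the new API: the whitelist is declared at cache-creation
time via kmem_cache_create_usercopy(), which takes a usercopy offset and size in
addition to the usual kmem_cache_create() arguments, as the diffs below show. Here
is a minimal sketch; struct example_obj, its fields, and "example_cache" are
illustrative names only and are not part of this series:

/*
 * Illustrative sketch only: struct example_obj and "example_cache" are
 * hypothetical. The call mirrors the kmem_cache_create_usercopy() argument
 * order visible in the diffs below: name, size, align, flags, useroffset,
 * usersize, ctor.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

struct example_obj {
	spinlock_t lock;	/* never exposed to userspace */
	char name[32];		/* the only region copied to/from userspace */
	void *private_data;	/* never exposed to userspace */
};

static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
	/* Whitelist only the 'name' field of each object. */
	example_cache = kmem_cache_create_usercopy("example_cache",
				sizeof(struct example_obj), 0,
				SLAB_HWCACHE_ALIGN,
				offsetof(struct example_obj, name),
				sizeof_field(struct example_obj, name),
				NULL);
	return example_cache ? 0 : -ENOMEM;
}

With such a cache, copies that stay inside example_obj.name pass the hardened
usercopy check, while a copy that overlaps lock or private_data is flagged
(WARN by default, as noted above).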

* tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (38 commits)
  lkdtm: Update usercopy tests for whitelisting
  usercopy: Restrict non-usercopy caches to size 0
  kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl
  kvm: whitelist struct kvm_vcpu_arch
  arm: Implement thread_struct whitelist for hardened usercopy
  arm64: Implement thread_struct whitelist for hardened usercopy
  x86: Implement thread_struct whitelist for hardened usercopy
  fork: Provide usercopy whitelisting for task_struct
  fork: Define usercopy region in thread_stack slab caches
  fork: Define usercopy region in mm_struct slab caches
  net: Restrict unwhitelisted proto caches to size 0
  sctp: Copy struct sctp_sock.autoclose to userspace using put_user()
  sctp: Define usercopy region in SCTP proto slab cache
  caif: Define usercopy region in caif proto slab cache
  ip: Define usercopy region in IP proto slab cache
  net: Define usercopy region in struct proto slab cache
  scsi: Define usercopy region in scsi_sense_cache slab cache
  cifs: Define usercopy region in cifs_request slab cache
  vxfs: Define usercopy region in vxfs_inode slab cache
  ufs: Define usercopy region in ufs_inode_cache slab cache
  ...
parents 0771ad44 e47e3118
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -245,6 +245,17 @@ config ARCH_TASK_STRUCT_ON_STACK
 config ARCH_TASK_STRUCT_ALLOCATOR
 	bool
 
+config HAVE_ARCH_THREAD_STRUCT_WHITELIST
+	bool
+	depends on !ARCH_TASK_STRUCT_ALLOCATOR
+	help
+	  An architecture should select this to provide hardened usercopy
+	  knowledge about what region of the thread_struct should be
+	  whitelisted for copying to userspace. Normally this is only the
+	  FPU registers. Specifically, arch_thread_struct_whitelist()
+	  should be implemented. Without this, the entire thread_struct
+	  field in task_struct will be left whitelisted.
+
 # Select if arch has its private alloc_thread_stack() function
 config ARCH_THREAD_STACK_ALLOCATOR
 	bool
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -51,6 +51,7 @@ config ARM
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARM_SMCCC if CPU_V7
 	select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -45,6 +45,16 @@ struct thread_struct {
 	struct debug_info	debug;
 };
 
+/*
+ * Everything usercopied to/from thread_struct is statically-sized, so
+ * no hardened usercopy whitelist is needed.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = *size = 0;
+}
+
 #define INIT_THREAD  { }
 
 #define start_thread(regs,pc,sp)		\
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -91,6 +91,7 @@ config ARM64
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ARCH_VMAP_STACK
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -113,6 +113,16 @@ struct thread_struct {
 	struct debug_info	debug;		/* debugging */
 };
 
+/*
+ * Everything usercopied to/from thread_struct is statically-sized, so
+ * no hardened usercopy whitelist is needed.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = *size = 0;
+}
+
 #ifdef CONFIG_COMPAT
 #define task_user_tls(t)						\
 ({									\
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -116,6 +116,7 @@ config X86
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
 	select HAVE_ARCH_COMPAT_MMAP_BASES	if MMU && COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -507,6 +507,14 @@ struct thread_struct {
 	 */
 };
 
+/* Whitelist the FPU state from the task_struct for hardened usercopy. */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+						unsigned long *size)
+{
+	*offset = offsetof(struct thread_struct, fpu.state);
+	*size = fpu_kernel_xstate_size;
+}
+
 /*
  * Thread-synchronous status.
  *
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4237,13 +4237,14 @@ set_identity_unlock:
 		mutex_unlock(&kvm->lock);
 		break;
 	case KVM_XEN_HVM_CONFIG: {
+		struct kvm_xen_hvm_config xhc;
 		r = -EFAULT;
-		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
-				   sizeof(struct kvm_xen_hvm_config)))
+		if (copy_from_user(&xhc, argp, sizeof(xhc)))
 			goto out;
 		r = -EINVAL;
-		if (kvm->arch.xen_hvm_config.flags)
+		if (xhc.flags)
 			goto out;
+		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
 		r = 0;
 		break;
 	}
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -76,8 +76,8 @@ void __init lkdtm_usercopy_init(void);
 void __exit lkdtm_usercopy_exit(void);
 void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
 void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
 void lkdtm_USERCOPY_STACK_FRAME_TO(void);
 void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
 void lkdtm_USERCOPY_STACK_BEYOND(void);
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -177,8 +177,8 @@ static const struct crashtype crashtypes[] = {
 	CRASHTYPE(ATOMIC_TIMING),
 	CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
 	CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
-	CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
-	CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
+	CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
+	CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
 	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
 	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
 	CRASHTYPE(USERCOPY_STACK_BEYOND),
--- a/drivers/misc/lkdtm_usercopy.c
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -20,7 +20,7 @@
  */
 static volatile size_t unconst = 0;
 static volatile size_t cache_size = 1024;
-static struct kmem_cache *bad_cache;
+static struct kmem_cache *whitelist_cache;
 
 static const unsigned char test_text[] = "This is a test.\n";
 
@@ -115,10 +115,16 @@ free_user:
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This checks for whole-object size validation with hardened usercopy,
+ * with or without usercopy whitelisting.
+ */
 static void do_usercopy_heap_size(bool to_user)
 {
 	unsigned long user_addr;
 	unsigned char *one, *two;
+	void __user *test_user_addr;
+	void *test_kern_addr;
 	size_t size = unconst + 1024;
 
 	one = kmalloc(size, GFP_KERNEL);
@@ -139,27 +145,30 @@ static void do_usercopy_heap_size(bool to_user)
 	memset(one, 'A', size);
 	memset(two, 'B', size);
 
+	test_user_addr = (void __user *)(user_addr + 16);
+	test_kern_addr = one + 16;
+
 	if (to_user) {
 		pr_info("attempting good copy_to_user of correct size\n");
-		if (copy_to_user((void __user *)user_addr, one, size)) {
+		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
 			pr_warn("copy_to_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
 		pr_info("attempting bad copy_to_user of too large size\n");
-		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
+		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
 			pr_warn("copy_to_user failed, but lacked Oops\n");
 			goto free_user;
 		}
 	} else {
 		pr_info("attempting good copy_from_user of correct size\n");
-		if (copy_from_user(one, (void __user *)user_addr, size)) {
+		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
 			pr_warn("copy_from_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
 		pr_info("attempting bad copy_from_user of too large size\n");
-		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
+		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
 			pr_warn("copy_from_user failed, but lacked Oops\n");
 			goto free_user;
 		}
@@ -172,77 +181,79 @@ free_kernel:
 	kfree(two);
 }
 
-static void do_usercopy_heap_flag(bool to_user)
+/*
+ * This checks for the specific whitelist window within an object. If this
+ * test passes, then do_usercopy_heap_size() tests will pass too.
+ */
+static void do_usercopy_heap_whitelist(bool to_user)
 {
-	unsigned long user_addr;
-	unsigned char *good_buf = NULL;
-	unsigned char *bad_buf = NULL;
+	unsigned long user_alloc;
+	unsigned char *buf = NULL;
+	unsigned char __user *user_addr;
+	size_t offset, size;
 
 	/* Make sure cache was prepared. */
-	if (!bad_cache) {
+	if (!whitelist_cache) {
 		pr_warn("Failed to allocate kernel cache\n");
 		return;
 	}
 
 	/*
-	 * Allocate one buffer from each cache (kmalloc will have the
-	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
+	 * Allocate a buffer with a whitelisted window in the buffer.
 	 */
-	good_buf = kmalloc(cache_size, GFP_KERNEL);
-	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
-	if (!good_buf || !bad_buf) {
-		pr_warn("Failed to allocate buffers from caches\n");
+	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
+	if (!buf) {
+		pr_warn("Failed to allocate buffer from whitelist cache\n");
 		goto free_alloc;
 	}
 
 	/* Allocate user memory we'll poke at. */
-	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
 			    PROT_READ | PROT_WRITE | PROT_EXEC,
 			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
-	if (user_addr >= TASK_SIZE) {
+	if (user_alloc >= TASK_SIZE) {
 		pr_warn("Failed to allocate user memory\n");
 		goto free_alloc;
	}
+	user_addr = (void __user *)user_alloc;
 
-	memset(good_buf, 'A', cache_size);
-	memset(bad_buf, 'B', cache_size);
+	memset(buf, 'B', cache_size);
+
+	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
+	offset = (cache_size / 4) + unconst;
+	size = (cache_size / 16) + unconst;
 
 	if (to_user) {
-		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
-		if (copy_to_user((void __user *)user_addr, good_buf,
-				 cache_size)) {
+		pr_info("attempting good copy_to_user inside whitelist\n");
+		if (copy_to_user(user_addr, buf + offset, size)) {
 			pr_warn("copy_to_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
-		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
-		if (copy_to_user((void __user *)user_addr, bad_buf,
-				 cache_size)) {
+		pr_info("attempting bad copy_to_user outside whitelist\n");
+		if (copy_to_user(user_addr, buf + offset - 1, size)) {
 			pr_warn("copy_to_user failed, but lacked Oops\n");
 			goto free_user;
 		}
 	} else {
-		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
-		if (copy_from_user(good_buf, (void __user *)user_addr,
-				   cache_size)) {
+		pr_info("attempting good copy_from_user inside whitelist\n");
+		if (copy_from_user(buf + offset, user_addr, size)) {
 			pr_warn("copy_from_user failed unexpectedly?!\n");
 			goto free_user;
 		}
 
-		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
-		if (copy_from_user(bad_buf, (void __user *)user_addr,
-				   cache_size)) {
+		pr_info("attempting bad copy_from_user outside whitelist\n");
+		if (copy_from_user(buf + offset - 1, user_addr, size)) {
 			pr_warn("copy_from_user failed, but lacked Oops\n");
 			goto free_user;
 		}
 	}
 
 free_user:
-	vm_munmap(user_addr, PAGE_SIZE);
+	vm_munmap(user_alloc, PAGE_SIZE);
 free_alloc:
-	if (bad_buf)
-		kmem_cache_free(bad_cache, bad_buf);
-	kfree(good_buf);
+	if (buf)
+		kmem_cache_free(whitelist_cache, buf);
 }
 
 /* Callable tests. */
@@ -256,14 +267,14 @@ void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
 	do_usercopy_heap_size(false);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
 {
-	do_usercopy_heap_flag(true);
+	do_usercopy_heap_whitelist(true);
 }
 
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
 {
-	do_usercopy_heap_flag(false);
+	do_usercopy_heap_whitelist(false);
 }
 
 void lkdtm_USERCOPY_STACK_FRAME_TO(void)
@@ -314,11 +325,15 @@ free_user:
 void __init lkdtm_usercopy_init(void)
 {
 	/* Prepare cache that lacks SLAB_USERCOPY flag. */
-	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
-				      0, NULL);
+	whitelist_cache =
+		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+					   0, 0,
+					   cache_size / 4,
+					   cache_size / 16,
+					   NULL);
 }
 
 void __exit lkdtm_usercopy_exit(void)
 {
-	kmem_cache_destroy(bad_cache);
+	kmem_cache_destroy(whitelist_cache);
 }
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -79,14 +79,15 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
 	if (shost->unchecked_isa_dma) {
 		scsi_sense_isadma_cache =
 			kmem_cache_create("scsi_sense_cache(DMA)",
 				SCSI_SENSE_BUFFERSIZE, 0,
 				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
 		if (!scsi_sense_isadma_cache)
 			ret = -ENOMEM;
 	} else {
 		scsi_sense_cache =
-			kmem_cache_create("scsi_sense_cache",
-			SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+			kmem_cache_create_usercopy("scsi_sense_cache",
+				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
+				0, SCSI_SENSE_BUFFERSIZE, NULL);
 		if (!scsi_sense_cache)
 			ret = -ENOMEM;
 	}
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -444,11 +444,15 @@ unacquire_none:
 static int __init
 befs_init_inodecache(void)
 {
-	befs_inode_cachep = kmem_cache_create("befs_inode_cache",
-					      sizeof (struct befs_inode_info),
-					      0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
-					      init_once);
+	befs_inode_cachep = kmem_cache_create_usercopy("befs_inode_cache",
+				sizeof(struct befs_inode_info), 0,
+				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
+					SLAB_ACCOUNT),
+				offsetof(struct befs_inode_info,
+					i_data.symlink),
+				sizeof_field(struct befs_inode_info,
+					i_data.symlink),
+				init_once);
 	if (befs_inode_cachep == NULL)
 		return -ENOMEM;
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1239,9 +1239,11 @@ cifs_init_request_bufs(void)
 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
 		 CIFSMaxBufSize, CIFSMaxBufSize);
 */
-	cifs_req_cachep = kmem_cache_create("cifs_request",
-					    CIFSMaxBufSize + max_hdr_size, 0,
-					    SLAB_HWCACHE_ALIGN, NULL);
+	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
+					    CIFSMaxBufSize + max_hdr_size, 0,
+					    SLAB_HWCACHE_ALIGN, 0,
+					    CIFSMaxBufSize + max_hdr_size,
+					    NULL);
 	if (cifs_req_cachep == NULL)
 		return -ENOMEM;
@@ -1267,9 +1269,9 @@ cifs_init_request_bufs(void)
 	   more SMBs to use small buffer alloc and is still much more
 	   efficient to alloc 1 per page off the slab compared to 17K (5page)
 	   alloc of large cifs buffers even when page debugging is on */
-	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
+	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
-			NULL);
+			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
 	if (cifs_sm_req_cachep == NULL) {
 		mempool_destroy(cifs_req_poolp);
 		kmem_cache_destroy(cifs_req_cachep);
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3602,8 +3602,9 @@ static void __init dcache_init(void)
 	 * but it is probably not worth it because of the cache nature
 	 * of the dcache.
 	 */
-	dentry_cache = KMEM_CACHE(dentry,
-		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
+	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
+		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
+		d_iname);
 
 	/* Hash may have been set up in dcache_init_early */
 	if (!hashdist)
@@ -3641,8 +3642,8 @@ void __init vfs_caches_init_early(void)
 void __init vfs_caches_init(void)
 {
-	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
 
 	dcache_init();
 	inode_init();
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -193,10 +193,13 @@ static void exofs_init_once(void *foo)
  */
 static int init_inodecache(void)
 {
-	exofs_inode_cachep = kmem_cache_create("exofs_inode_cache",
+	exofs_inode_cachep = kmem_cache_create_usercopy("exofs_inode_cache",
 			sizeof(struct exofs_i_info), 0,
 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
-			SLAB_ACCOUNT, exofs_init_once);