Commit cbafe18c authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:

 - almost all of the rest of -mm

 - various other subsystems

Subsystems affected by this patch series:
  memcg, misc, core-kernel, lib, checkpatch, reiserfs, fat, fork,
  cpumask, kexec, uaccess, kconfig, kgdb, bug, ipc, lzo, kasan, madvise,
  cleanups, pagemap

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (77 commits)
  arch/sparc/include/asm/pgtable_64.h: fix build
  mm: treewide: clarify pgtable_page_{ctor,dtor}() naming
  ntfs: remove (un)?likely() from IS_ERR() conditions
  IB/hfi1: remove unlikely() from IS_ERR*() condition
  xfs: remove unlikely() from WARN_ON() condition
  wimax/i2400m: remove unlikely() from WARN*() condition
  fs: remove unlikely() from WARN_ON() condition
  xen/events: remove unlikely() from WARN() condition
  checkpatch: check for nested (un)?likely() calls
  hexagon: drop empty and unused free_initrd_mem
  mm: factor out common parts between MADV_COLD and MADV_PAGEOUT
  mm: introduce MADV_PAGEOUT
  mm: change PAGEREF_RECLAIM_CLEAN with PAGE_REFRECLAIM
  mm: introduce MADV_COLD
  mm: untag user pointers in mmap/munmap/mremap/brk
  vfio/type1: untag user pointers in vaddr_get_pfn
  tee/shm: untag user pointers in tee_shm_register
  media/v4l2-core: untag user pointers in videobuf_dma_contig_user_get
  drm/radeon: untag user pointers in radeon_gem_userptr_ioctl
  drm/amdgpu: untag user pointers
  ...
parents f41def39 a22fea94
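
Two of the merged patches ("mm: introduce MADV_COLD" and "mm: introduce MADV_PAGEOUT") add new madvise(2) hints, whose uapi values appear in the arch header hunks below. The following is a minimal user-space sketch of how the hints can be exercised; it is not part of this merge, and the fallback #defines are only for C libraries whose <sys/mman.h> does not yet export the new constants (the values 20/21 match the definitions added in this series):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLD
#define MADV_COLD	20	/* deactivate these pages */
#endif
#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT	21	/* reclaim these pages */
#endif

int main(void)
{
	size_t len = 16 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	memset(buf, 0xaa, len);		/* fault the pages in */

	/* Hint: these pages are cold; deactivate them. */
	if (madvise(buf, len, MADV_COLD))
		perror("madvise(MADV_COLD)");

	/* Stronger hint: reclaim (page out) the range right away. */
	if (madvise(buf, len, MADV_PAGEOUT))
		perror("madvise(MADV_PAGEOUT)");

	munmap(buf, len);
	return 0;
}
```

On kernels without these patches, madvise() simply fails with EINVAL for the unknown advice values.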
......@@ -42,6 +42,9 @@ String Manipulation
.. kernel-doc:: lib/string.c
:export:
.. kernel-doc:: include/linux/string.h
:internal:
+.. kernel-doc:: mm/util.c
+   :functions: kstrdup kstrdup_const kstrndup kmemdup kmemdup_nul memdup_user
+               vmemdup_user strndup_user memdup_user_nul
......
......@@ -54,9 +54,9 @@ Hugetlb-specific helpers:
Support of split page table lock by an architecture
===================================================
-There's no need in special enabling of PTE split page table lock:
-everything required is done by pgtable_page_ctor() and pgtable_page_dtor(),
-which must be called on PTE table allocation / freeing.
+There's no need in special enabling of PTE split page table lock: everything
+required is done by pgtable_pte_page_ctor() and pgtable_pte_page_dtor(), which
+must be called on PTE table allocation / freeing.
Make sure the architecture doesn't use slab allocator for page table
allocation: slab uses page->slab_cache for its pages.
......@@ -74,7 +74,7 @@ paths: i.e X86_PAE preallocate few PMDs on pgd_alloc().
With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
-NOTE: pgtable_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must
+NOTE: pgtable_pte_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must
be handled properly.
page->ptl
......@@ -94,7 +94,7 @@ trick:
split lock with enabled DEBUG_SPINLOCK or DEBUG_LOCK_ALLOC, but costs
one more cache line for indirect access;
-The spinlock_t allocated in pgtable_page_ctor() for PTE table and in
+The spinlock_t allocated in pgtable_pte_page_ctor() for PTE table and in
pgtable_pmd_page_ctor() for PMD table.
Please, never access page->ptl directly -- use appropriate helper.
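
The documentation change above keeps the requirement that the PTE-page constructor can fail and must be handled. For orientation, this is the pattern the renamed helpers expect, loosely modelled on the arch hunks below; it is a simplified sketch assuming the common struct-page-based pgtable_t, not code from any one architecture:

```c
#include <linux/mm.h>

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {	/* may fail: handle it */
		__free_page(page);
		return NULL;
	}
	return page;
}

static inline void pte_free(struct mm_struct *mm, pgtable_t page)
{
	pgtable_pte_page_dtor(page);	/* undo the ctor before freeing */
	__free_page(page);
}
```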
......@@ -68,6 +68,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
/* compatibility flags */
#define MAP_FILE 0
......
......@@ -108,7 +108,7 @@ pte_alloc_one(struct mm_struct *mm)
return 0;
memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
page = virt_to_page(pte_pg);
-if (!pgtable_page_ctor(page)) {
+if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return 0;
}
......@@ -123,7 +123,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
-pgtable_page_dtor(virt_to_page(ptep));
+pgtable_pte_page_dtor(virt_to_page(ptep));
free_pages((unsigned long)ptep, __get_order_pte());
}
......
......@@ -44,7 +44,7 @@ static inline void __tlb_remove_table(void *_table)
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
-pgtable_page_dtor(pte);
+pgtable_pte_page_dtor(pte);
#ifndef CONFIG_ARM_LPAE
/*
......
......@@ -731,7 +731,7 @@ static void *__init late_alloc(unsigned long sz)
{
void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));
-if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+if (!ptr || !pgtable_pte_page_ctor(virt_to_page(ptr)))
BUG();
return ptr;
}
......
......@@ -44,7 +44,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
-pgtable_page_dtor(pte);
+pgtable_pte_page_dtor(pte);
tlb_remove_table(tlb, pte);
}
......
......@@ -384,7 +384,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
* folded, and if so pgtable_pmd_page_ctor() becomes nop.
*/
if (shift == PAGE_SHIFT)
-BUG_ON(!pgtable_page_ctor(phys_to_page(pa)));
+BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
else if (shift == PMD_SHIFT)
BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));
......
......@@ -71,7 +71,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#define __pte_free_tlb(tlb, pte, address) \
do { \
-pgtable_page_dtor(pte); \
+pgtable_pte_page_dtor(pte); \
tlb_remove_page(tlb, pte); \
} while (0)
......
......@@ -94,7 +94,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
#define __pte_free_tlb(tlb, pte, addr) \
do { \
-pgtable_page_dtor((pte)); \
+pgtable_pte_page_dtor((pte)); \
tlb_remove_page((tlb), (pte)); \
} while (0)
......
......@@ -71,19 +71,6 @@ void __init mem_init(void)
init_mm.context.ptbase = __pa(init_mm.pgd);
}
-/*
- * free_initrd_mem - frees... initrd memory.
- * @start - start of init memory
- * @end - end of init memory
- *
- * Apparently has to be passed the address of the initrd memory.
- *
- * Wrapped by #ifdef CONFIG_BLKDEV_INITRD
- */
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-}
void sync_icache_dcache(pte_t pte)
{
unsigned long addr;
......
......@@ -41,7 +41,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
-pgtable_page_dtor(page);
+pgtable_pte_page_dtor(page);
__free_page(page);
}
......@@ -54,7 +54,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
if (!page)
return NULL;
-if (!pgtable_page_ctor(page)) {
+if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
......@@ -73,7 +73,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
static inline void pte_free(struct mm_struct *mm, struct page *page)
{
-pgtable_page_dtor(page);
+pgtable_pte_page_dtor(page);
__free_page(page);
}
......
......@@ -36,7 +36,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if(!page)
return NULL;
-if (!pgtable_page_ctor(page)) {
+if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
......@@ -51,7 +51,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
static inline void pte_free(struct mm_struct *mm, pgtable_t page)
{
-pgtable_page_dtor(page);
+pgtable_pte_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
......@@ -60,7 +60,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
-pgtable_page_dtor(page);
+pgtable_pte_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
......
......@@ -21,7 +21,7 @@ extern const char bad_pmd_string[];
#define __pte_free_tlb(tlb,pte,addr) \
do { \
-pgtable_page_dtor(pte); \
+pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
......
......@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define __pte_free_tlb(tlb,pte,address) \
do { \
-pgtable_page_dtor(pte); \
+pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
......
......@@ -95,6 +95,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
/* compatibility flags */
#define MAP_FILE 0
......
......@@ -41,7 +41,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define __pte_free_tlb(tlb, pte, addr) \
do { \
-pgtable_page_dtor(pte); \
+pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
......
......@@ -75,7 +75,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
if (!pte)
return NULL;
clear_page(page_address(pte));
-if (!pgtable_page_ctor(pte)) {
+if (!pgtable_pte_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
......@@ -89,13 +89,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
-pgtable_page_dtor(pte);
+pgtable_pte_page_dtor(pte);
__free_page(pte);
}
#define __pte_free_tlb(tlb, pte, addr) \
do { \
-pgtable_page_dtor(pte); \
+pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
......
......@@ -48,6 +48,9 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
......
......@@ -25,7 +25,7 @@ void pte_frag_destroy(void *pte_frag)
count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
/* We allow PTE_FRAG_NR fragments from a PTE page */
if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
-pgtable_page_dtor(page);
+pgtable_pte_page_dtor(page);
__free_page(page);
}
}
......@@ -61,7 +61,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
if (!page)
return NULL;
-if (!pgtable_page_ctor(page)) {
+if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
......@@ -113,7 +113,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
if (atomic_dec_and_test(&page->pt_frag_refcount)) {
if (!kernel)
-pgtable_page_dtor(page);
+pgtable_pte_page_dtor(page);
__free_page(page);
}
}