Commit b5d903c2 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:

 - MM remainders

 - various misc things

 - kcov updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (27 commits)
  lib/test_printf.c: call wait_for_random_bytes() before plain %p tests
  hexagon: drop the unused variable zero_page_mask
  hexagon: fix printk format warning in setup.c
  mm: fix oom_kill event handling
  treewide: use PHYS_ADDR_MAX to avoid type casting ULLONG_MAX
  mm: use octal not symbolic permissions
  ipc: use new return type vm_fault_t
  sysvipc/sem: mitigate semnum index against spectre v1
  fault-injection: reorder config entries
  arm: port KCOV to arm
  sched/core / kcov: avoid kcov_area during task switch
  kcov: prefault the kcov_area
  kcov: ensure irq code sees a valid area
  kernel/relay.c: change return type to vm_fault_t
  exofs: avoid VLA in structures
  coredump: fix spam with zero VMA process
  fat: use fat_fs_error() instead of BUG_ON() in __fat_get_block()
  proc: skip branch in /proc/*/* lookup
  mremap: remove LATENCY_LIMIT from mremap to reduce the number of TLB shootdowns
  mm/memblock: add missing include <linux/bootmem.h>
  ...
parents 7a932516 ee410f15
......@@ -8,9 +8,10 @@ config ARM
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
......
......@@ -25,6 +25,9 @@ endif
GCOV_PROFILE := n
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
#
# Architecture dependencies
#
......
......@@ -23,3 +23,11 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
# KVM code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
# cause crashes. Just disable it.
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
......@@ -30,6 +30,9 @@ CFLAGS_vgettimeofday.o = -O2
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
# Force dependency
$(obj)/vdso.o : $(obj)/vdso.so
......
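The Makefile hunks above exclude the decompressor, KVM hyp, and vDSO objects from the coverage instrumentation that the "arm: port KCOV to arm" patch turns on for the rest of the kernel. For context, the coverage those __sanitizer_cov_trace_pc() callbacks collect is read back from user space through debugfs; below is a minimal sketch, loosely following Documentation/dev-tools/kcov.rst (the ioctl values mirror uapi/linux/kcov.h as of this series and should be checked against the headers actually built against):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
    #define KCOV_ENABLE     _IO('c', 100)
    #define KCOV_DISABLE    _IO('c', 101)
    #define KCOV_TRACE_PC   0
    #define COVER_SIZE      (64 << 10)      /* entries, not bytes */

    int main(void)
    {
            int fd = open("/sys/kernel/debug/kcov", O_RDWR);
            if (fd < 0)
                    return 1;
            /* Size the trace buffer (in unsigned long entries) and map it. */
            if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                    return 1;
            unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (cover == MAP_FAILED)
                    return 1;
            /* Trace program counters for this task only. */
            if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
                    return 1;
            __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

            read(-1, NULL, 0);              /* the syscall under test */

            unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
            for (unsigned long i = 0; i < n; i++)
                    printf("0x%lx\n", cover[i + 1]);

            ioctl(fd, KCOV_DISABLE, 0);
            munmap(cover, COVER_SIZE * sizeof(unsigned long));
            close(fd);
            return 0;
    }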
......@@ -310,7 +310,7 @@ static void __init arm64_memory_present(void)
}
#endif
static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;
static phys_addr_t memory_limit = PHYS_ADDR_MAX;
/*
* Limit the memory size that was specified via FDT.
......@@ -401,7 +401,7 @@ void __init arm64_memblock_init(void)
* high up in memory, add back the kernel region that must be accessible
* via the linear mapping.
*/
if (memory_limit != (phys_addr_t)ULLONG_MAX) {
if (memory_limit != PHYS_ADDR_MAX) {
memblock_mem_limit_remove_map(memory_limit);
memblock_add(__pa_symbol(_text), (u64)(_end - _text));
}
......@@ -666,7 +666,7 @@ __setup("keepinitrd", keepinitrd_setup);
*/
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
if (memory_limit != (phys_addr_t)ULLONG_MAX) {
if (memory_limit != PHYS_ADDR_MAX) {
pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
} else {
pr_emerg("Memory Limit: none\n");
......
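PHYS_ADDR_MAX is introduced by the "treewide: use PHYS_ADDR_MAX to avoid type casting ULLONG_MAX" patch in this series as the all-ones value of phys_addr_t, so the comparisons above keep their old meaning while dropping the cast. A stand-alone illustration follows (the 32-bit typedef is only a stand-in for the example; in the kernel the macro lives in linux/types.h):

    #include <limits.h>
    #include <stdio.h>

    typedef unsigned int phys_addr_t;        /* stand-in for a 32-bit phys_addr_t */
    #define PHYS_ADDR_MAX (~(phys_addr_t)0)  /* definition added by this series */

    int main(void)
    {
            /* The cast truncates ULLONG_MAX to the width of phys_addr_t, so both
             * expressions are 0xffffffff here; PHYS_ADDR_MAX says so directly. */
            printf("%d\n", PHYS_ADDR_MAX == (phys_addr_t)ULLONG_MAX);   /* prints 1 */
            return 0;
    }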
......@@ -30,7 +30,6 @@
/* A handy thing to have if one has the RAM. Declared in head.S */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
/*
* The PTE model described here is that of the Hexagon Virtual Machine,
......
......@@ -66,7 +66,7 @@ void __init setup_arch(char **cmdline_p)
*/
__vmsetvec(_K_VM_event_vector);
printk(KERN_INFO "PHYS_OFFSET=0x%08x\n", PHYS_OFFSET);
printk(KERN_INFO "PHYS_OFFSET=0x%08lx\n", PHYS_OFFSET);
/*
* Simulator has a few differences from the hardware.
......
......@@ -39,9 +39,6 @@ unsigned long __phys_offset; /* physical kernel offset >> 12 */
/* Set as variable to limit PMD copies */
int max_kernel_seg = 0x303;
/* think this should be (page_size-1) the way it's used...*/
unsigned long zero_page_mask;
/* indicate pfn's of high memory */
unsigned long highstart_pfn, highend_pfn;
......
......@@ -93,7 +93,7 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
* If the region reaches the top of the physical address space, adjust
* the size slightly so that (start + size) doesn't overflow
*/
if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
if (start + size - 1 == PHYS_ADDR_MAX)
--size;
/* Sanity check */
......@@ -376,7 +376,7 @@ static void __init bootmem_init(void)
unsigned long reserved_end;
unsigned long mapstart = ~0UL;
unsigned long bootmap_size;
phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
phys_addr_t ramstart = PHYS_ADDR_MAX;
bool bootmap_valid = false;
int i;
......
......@@ -215,7 +215,7 @@ void __init mem_topology_setup(void)
/* Place all memblock_regions in the same node and merge contiguous
* memblock_regions
*/
memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
void __init initmem_init(void)
......
......@@ -1620,7 +1620,7 @@ static void __init bootmem_init_nonnuma(void)
(top_of_ram - total_ram) >> 20);
init_node_masks_nonnuma();
memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
allocate_node_data(0);
node_set_online(0);
}
......
......@@ -706,7 +706,9 @@ void __init init_mem_mapping(void)
*/
int devmem_is_allowed(unsigned long pagenr)
{
if (page_is_ram(pagenr)) {
if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
!= REGION_DISJOINT) {
/*
* For disallowed memory regions in the low 1MB range,
* request that the page be shown as all zeros.
......
......@@ -692,7 +692,7 @@ void __init initmem_init(void)
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
sparse_memory_present_with_active_regions(0);
#ifdef CONFIG_FLATMEM
......
......@@ -742,7 +742,7 @@ kernel_physical_mapping_init(unsigned long paddr_start,
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif
......
......@@ -193,7 +193,7 @@ static __init void reserve_regions(void)
* uses its own memory map instead.
*/
memblock_dump_all();
memblock_remove(0, (phys_addr_t)ULLONG_MAX);
memblock_remove(0, PHYS_ADDR_MAX);
for_each_efi_memory_desc(md) {
paddr = md->phys_addr;
......
......@@ -686,7 +686,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
struct elf32_hdr *ehdr;
phys_addr_t mpss_reloc;
phys_addr_t boot_addr;
phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
bool relocate = false;
char seg_name[10];
......
......@@ -50,7 +50,7 @@ ssize_t qcom_mdt_get_size(const struct firmware *fw)
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
const struct elf32_hdr *ehdr;
phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
int i;
......@@ -97,7 +97,7 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const struct elf32_hdr *ehdr;
const struct firmware *seg_fw;
phys_addr_t mem_reloc;
phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
size_t fw_name_len;
ssize_t offset;
......
......@@ -1621,8 +1621,8 @@ static int fill_files_note(struct memelfnote *note)
if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
return -EINVAL;
size = round_up(size, PAGE_SIZE);
data = vmalloc(size);
if (!data)
data = kvmalloc(size, GFP_KERNEL);
if (ZERO_OR_NULL_PTR(data))
return -ENOMEM;
start_end_ofs = data + 2;
......@@ -1639,7 +1639,7 @@ static int fill_files_note(struct memelfnote *note)
filename = file_path(file, name_curpos, remaining);
if (IS_ERR(filename)) {
if (PTR_ERR(filename) == -ENAMETOOLONG) {
vfree(data);
kvfree(data);
size = size * 5 / 4;
goto alloc;
}
......@@ -1932,7 +1932,7 @@ static void free_note_info(struct elf_note_info *info)
kfree(t);
}
kfree(info->psinfo.data);
vfree(info->files.data);
kvfree(info->files.data);
}
#else
......@@ -2148,7 +2148,7 @@ static void free_note_info(struct elf_note_info *info)
/* Free data possibly allocated by fill_files_note(): */
if (info->notes_files)
vfree(info->notes_files->data);
kvfree(info->notes_files->data);
kfree(info->prstatus);
kfree(info->psinfo);
......@@ -2294,8 +2294,9 @@ static int elf_core_dump(struct coredump_params *cprm)
if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
goto end_coredump;
vma_filesz = vmalloc(array_size(sizeof(*vma_filesz), (segs - 1)));
if (!vma_filesz)
vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
GFP_KERNEL);
if (ZERO_OR_NULL_PTR(vma_filesz))
goto end_coredump;
for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
......@@ -2402,7 +2403,7 @@ end_coredump:
cleanup:
free_note_info(&info);
kfree(shdr4extnum);
vfree(vma_filesz);
kvfree(vma_filesz);
kfree(phdr4note);
kfree(elf);
out:
......
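The switch from vmalloc()/vfree() to kvmalloc()/kvfree() above is paired with ZERO_OR_NULL_PTR() checks: per the "coredump: fix spam with zero VMA process" patch, a process with no VMAs can end up requesting a zero-byte allocation here, and a kmalloc-backed kvmalloc() then returns the special non-NULL ZERO_SIZE_PTR token, which a plain NULL test would treat as success. A stand-alone illustration of the check (the two macros mirror include/linux/slab.h):

    #include <stdio.h>

    #define ZERO_SIZE_PTR ((void *)16)
    #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR)

    int main(void)
    {
            void *p = ZERO_SIZE_PTR;  /* what a zero-byte kmalloc()/kvmalloc() returns */

            printf("!p               -> %d\n", !p);                          /* 0: missed */
            printf("ZERO_OR_NULL_PTR -> %d\n", (int)ZERO_OR_NULL_PTR(p));    /* 1: caught */
            return 0;
    }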
......@@ -146,68 +146,82 @@ int _ore_get_io_state(struct ore_layout *layout,
struct ore_io_state **pios)
{
struct ore_io_state *ios;
struct page **pages;
struct osd_sg_entry *sgilist;
size_t size_ios, size_extra, size_total;
void *ios_extra;
/*
* The desired layout looks like this, with the extra_allocation
* items pointed at from fields within ios or per_dev:
struct __alloc_all_io_state {
struct ore_io_state ios;
struct ore_per_dev_state per_dev[numdevs];
union {
struct osd_sg_entry sglist[sgs_per_dev * numdevs];
struct page *pages[num_par_pages];
};
} *_aios;
if (likely(sizeof(*_aios) <= PAGE_SIZE)) {
_aios = kzalloc(sizeof(*_aios), GFP_KERNEL);
if (unlikely(!_aios)) {
ORE_DBGMSG("Failed kzalloc bytes=%zd\n",
sizeof(*_aios));
} extra_allocation;
} whole_allocation;
*/
/* This should never happen, so abort early if it ever does. */
if (sgs_per_dev && num_par_pages) {
ORE_DBGMSG("Tried to use both pages and sglist\n");
*pios = NULL;
return -EINVAL;
}
if (numdevs > (INT_MAX - sizeof(*ios)) /
sizeof(struct ore_per_dev_state))
return -ENOMEM;
size_ios = sizeof(*ios) + sizeof(struct ore_per_dev_state) * numdevs;
if (sgs_per_dev * numdevs > INT_MAX / sizeof(struct osd_sg_entry))
return -ENOMEM;
if (num_par_pages > INT_MAX / sizeof(struct page *))
return -ENOMEM;
size_extra = max(sizeof(struct osd_sg_entry) * (sgs_per_dev * numdevs),
sizeof(struct page *) * num_par_pages);
size_total = size_ios + size_extra;
if (likely(size_total <= PAGE_SIZE)) {
ios = kzalloc(size_total, GFP_KERNEL);
if (unlikely(!ios)) {
ORE_DBGMSG("Failed kzalloc bytes=%zd\n", size_total);
*pios = NULL;
return -ENOMEM;
}
pages = num_par_pages ? _aios->pages : NULL;
sgilist = sgs_per_dev ? _aios->sglist : NULL;
ios = &_aios->ios;
ios_extra = (char *)ios + size_ios;
} else {
struct __alloc_small_io_state {
struct ore_io_state ios;
struct ore_per_dev_state per_dev[numdevs];
} *_aio_small;
union __extra_part {
struct osd_sg_entry sglist[sgs_per_dev * numdevs];
struct page *pages[num_par_pages];
} *extra_part;
_aio_small = kzalloc(sizeof(*_aio_small), GFP_KERNEL);
if (unlikely(!_aio_small)) {
ios = kzalloc(size_ios, GFP_KERNEL);
if (unlikely(!ios)) {
ORE_DBGMSG("Failed alloc first part bytes=%zd\n",
sizeof(*_aio_small));
size_ios);
*pios = NULL;
return -ENOMEM;
}
extra_part = kzalloc(sizeof(*extra_part), GFP_KERNEL);
if (unlikely(!extra_part)) {
ios_extra = kzalloc(size_extra, GFP_KERNEL);
if (unlikely(!ios_extra)) {
ORE_DBGMSG("Failed alloc second part bytes=%zd\n",
sizeof(*extra_part));
kfree(_aio_small);
size_extra);
kfree(ios);
*pios = NULL;
return -ENOMEM;
}
pages = num_par_pages ? extra_part->pages : NULL;
sgilist = sgs_per_dev ? extra_part->sglist : NULL;
/* In this case the per_dev[0].sgilist holds the pointer to
* be freed
*/
ios = &_aio_small->ios;
ios->extra_part_alloc = true;
}
if (pages) {
ios->parity_pages = pages;
if (num_par_pages) {
ios->parity_pages = ios_extra;
ios->max_par_pages = num_par_pages;
}
if (sgilist) {
if (sgs_per_dev) {
struct osd_sg_entry *sgilist = ios_extra;
unsigned d;
for (d = 0; d < numdevs; ++d) {
......
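The rewrite above removes the variable-length-array struct definitions ("exofs: avoid VLA in structures") in favour of explicit size arithmetic: one kzalloc() covers the fixed ore_io_state, the per-device array, and whichever of the sglist or parity-pages arrays is needed, with the extra part addressed at a byte offset (or allocated separately when the total would exceed PAGE_SIZE). A stand-alone sketch of that carving pattern, with made-up names and sizes:

    #include <stdio.h>
    #include <stdlib.h>

    struct hdr {                              /* fixed part, like ore_io_state */
            int numdevs;
    };

    int main(void)
    {
            size_t numdevs = 4, per_dev_sz = 32, extra_sz = 128;
            size_t size_hdr   = sizeof(struct hdr) + per_dev_sz * numdevs;
            size_t size_total = size_hdr + extra_sz;

            char *buf = calloc(1, size_total);   /* single zeroed allocation */
            if (!buf)
                    return 1;

            struct hdr *h = (struct hdr *)buf;   /* fixed part at the front        */
            void *extra   = buf + size_hdr;      /* variable part carved by offset */

            h->numdevs = (int)numdevs;
            printf("header %p, extra %p, %zu bytes total\n", (void *)h, extra, size_total);
            free(buf);                           /* one free releases everything */
            return 0;
    }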
......@@ -71,6 +71,11 @@ static int _sp2d_alloc(unsigned pages_in_unit, unsigned group_width,
{
struct __stripe_pages_2d *sp2d;
unsigned data_devs = group_width - parity;
/*
* Desired allocation layout is, though when larger than PAGE_SIZE,
* each struct __alloc_1p_arrays is separately allocated:
struct _alloc_all_bytes {
struct __alloc_stripe_pages_2d {
struct __stripe_pages_2d sp2d;
......@@ -82,55 +87,85 @@ static int _sp2d_alloc(unsigned pages_in_unit, unsigned group_width,
char page_is_read[data_devs];
} __a1pa[pages_in_unit];
} *_aab;
struct __alloc_1p_arrays *__a1pa;
struct __alloc_1p_arrays *__a1pa_end;
const unsigned sizeof__a1pa = sizeof(_aab->__a1pa[0]);
*/
char *__a1pa;
char *__a1pa_end;
const size_t sizeof_stripe_pages_2d =
sizeof(struct __stripe_pages_2d) +
sizeof(struct __1_page_stripe) * pages_in_unit;
const size_t sizeof__a1pa =
ALIGN(sizeof(struct page *) * (2 * group_width) + data_devs,
sizeof(void *));
const size_t sizeof__a1pa_arrays = sizeof__a1pa * pages_in_unit;
const size_t alloc_total = sizeof_stripe_pages_2d +
sizeof__a1pa_arrays;
unsigned num_a1pa, alloc_size, i;
/* FIXME: check these numbers in ore_verify_layout */
BUG_ON(sizeof(_aab->__asp2d) > PAGE_SIZE);
BUG_ON(sizeof_stripe_pages_2d > PAGE_SIZE);
BUG_ON(sizeof__a1pa > PAGE_SIZE);
if (sizeof(*_aab) > PAGE_SIZE) {
num_a1pa = (PAGE_SIZE - sizeof(_aab->__asp2d)) / sizeof__a1pa;
alloc_size = sizeof(_aab->__asp2d) + sizeof__a1pa * num_a1pa;
/*
* If alloc_total would be larger than PAGE_SIZE, only allocate
* as many a1pa items as would fill the rest of the page, instead
* of the full pages_in_unit count.
*/
if (alloc_total > PAGE_SIZE) {
num_a1pa = (PAGE_SIZE - sizeof_stripe_pages_2d) / sizeof__a1pa;
alloc_size = sizeof_stripe_pages_2d + sizeof__a1pa * num_a1pa;
} else {
num_a1pa = pages_in_unit;
alloc_size = sizeof(*_aab);
alloc_size = alloc_total;
}
_aab = kzalloc(alloc_size, GFP_KERNEL);
if (unlikely(!_aab)) {
*psp2d = sp2d = kzalloc(alloc_size, GFP_KERNEL);
if (unlikely(!sp2d)) {
ORE_DBGMSG("!! Failed to alloc sp2d size=%d\n", alloc_size);
return -ENOMEM;
}
/* From here Just call _sp2d_free */
sp2d = &_aab->__asp2d.sp2d;
*psp2d = sp2d; /* From here Just call _sp2d_free */
__a1pa = _aab->__a1pa;
__a1pa_end = __a1pa + num_a1pa;
/* Find start of a1pa area. */
__a1pa = (char *)sp2d + sizeof_stripe_pages_2d;
/* Find end of the _allocated_ a1pa area. */
__a1pa_end = __a1pa + alloc_size;
/* Allocate additionally needed a1pa items in PAGE_SIZE chunks. */
for (i = 0; i < pages_in_unit; ++i) {
struct __1_page_stripe *stripe = &sp2d->_1p_stripes[i];
if (unlikely(__a1pa >= __a1pa_end)) {
num_a1pa = min_t(unsigned, PAGE_SIZE / sizeof__a1pa,
pages_in_unit - i);
alloc_size = sizeof__a1pa * num_a1pa;
__a1pa = kcalloc(num_a1pa, sizeof__a1pa, GFP_KERNEL);
__a1pa = kzalloc(alloc_size, GFP_KERNEL);
if (unlikely(!__a1pa)) {
ORE_DBGMSG("!! Failed to _alloc_1p_arrays=%d\n",
num_a1pa);
return -ENOMEM;
}
__a1pa_end = __a1pa + num_a1pa;
__a1pa_end = __a1pa + alloc_size;
/* First *pages is marked for kfree of the buffer */
sp2d->_1p_stripes[i].alloc = true;
stripe->alloc = true;
}
sp2d->_1p_stripes[i].pages = __a1pa->pages;
sp2d->_1p_stripes[i].scribble = __a1pa->scribble ;
sp2d->_1p_stripes[i].page_is_read = __a1pa->page_is_read;
++__a1pa;
/*
* Attach all _lp_stripes pointers to the allocation for
* it which was either part of the original PAGE_SIZE
* allocation or the subsequent allocation in this loop.
*/
stripe->pages = (void *)__a1pa;
stripe->scribble = stripe->pages + group_width;
stripe->page_is_read = (char *)stripe->scribble + group_width;
__a1pa += sizeof__a1pa;
}
sp2d->parity = parity;
......
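_sp2d_alloc() gets the same VLA-removal treatment: each per-stripe record is now sized by hand as two group_width-long arrays of page pointers followed by data_devs flag bytes, rounded up to pointer alignment, and the pages/scribble/page_is_read members are carved out of it by offset. A plain-C sketch of one record's layout (sizes are illustrative; ALIGN_UP stands in for the kernel's ALIGN()):

    #include <stdio.h>
    #include <stdlib.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int main(void)
    {
            unsigned group_width = 6, parity = 2;
            unsigned data_devs = group_width - parity;
            size_t rec = ALIGN_UP(sizeof(void *) * (2 * group_width) + data_devs,
                                  sizeof(void *));

            char *a1pa = calloc(1, rec);
            if (!a1pa)
                    return 1;

            void **pages        = (void **)a1pa;             /* [group_width] page ptrs */
            void **scribble     = pages + group_width;       /* [group_width] page ptrs */
            char  *page_is_read = (char *)(scribble + group_width); /* [data_devs] flags */

            printf("record size %zu, flags at offset %td\n", rec, page_is_read - a1pa);
            free(a1pa);
            return 0;
    }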