Commit 41e94a85 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Dan Williams
Browse files

add devm_memremap_pages

This behaves like devm_memremap except that it ensures we have page
structures available that can back the region.
Signed-off-by: Christoph Hellwig <>
[djbw: catch attempts to remap RAM, drop flags]
Signed-off-by: Dan Williams <>
parent 033fbae9
......@@ -20,10 +20,13 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>
struct device;
struct resource;
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
......@@ -84,6 +87,23 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
void *__devm_memremap_pages(struct device *dev, struct resource *res);
void *devm_memremap_pages(struct device *dev, struct resource *res);
static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
* Fail attempts to call devm_memremap_pages() without
* ZONE_DEVICE support enabled, this requires callers to fall
* back to plain devm_memremap() based on config
return ERR_PTR(-ENXIO);
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
......@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
......@@ -135,3 +136,55 @@ void devm_memunmap(struct device *dev, void *addr)
/* devres payload: remembers the remapped region so release can undo it */
struct page_map {
	struct resource res;
};

/*
 * devres release callback paired with devm_memremap_pages(): runs when
 * the owning device is unbound and tears down the arch mapping.
 */
static void devm_memremap_pages_release(struct device *dev, void *res)
{
	struct page_map *page_map = res;

	/* pages are dead and unused, undo the arch mapping */
	arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
}
void *devm_memremap_pages(struct device *dev, struct resource *res)
int is_ram = region_intersects(res->start, resource_size(res),
"System RAM");
struct page_map *page_map;
int error, nid;
if (is_ram == REGION_MIXED) {
WARN_ONCE(1, "%s attempted on mixed region %pr\n",
__func__, res);
return ERR_PTR(-ENXIO);
if (is_ram == REGION_INTERSECTS)
return __va(res->start);
page_map = devres_alloc(devm_memremap_pages_release,
sizeof(*page_map), GFP_KERNEL);
if (!page_map)
return ERR_PTR(-ENOMEM);
memcpy(&page_map->res, res, sizeof(*res));
nid = dev_to_node(dev);
if (nid < 0)
nid = 0;
error = arch_add_memory(nid, res->start, resource_size(res), true);
if (error) {
return ERR_PTR(error);
devres_add(dev, page_map);
return __va(res->start);
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment