/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
/*
 * Weak default for architectures that do not provide ioremap_cache():
 * fall back to the architecture's plain ioremap() mapping.
 */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

D
Dan Williams 已提交
30 31
static void *try_ram_remap(resource_size_t offset, size_t size)
{
32
	unsigned long pfn = PHYS_PFN(offset);
D
Dan Williams 已提交
33 34

	/* In the simple case just return the existing linear address */
35
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
D
Dan Williams 已提交
36 37 38 39
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}

D
Dan Williams 已提交
40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81
/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for "System RAM" on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Morever, if MEMREMAP_WB is specified and the requested remap region is RAM
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map "System RAM" with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size, "System RAM");
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisifed
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in "System RAM"
		 */
		if (is_ram == REGION_INTERSECTS)
D
Dan Williams 已提交
82 83
			addr = try_ram_remap(offset, size);
		if (!addr)
D
Dan Williams 已提交
84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * "System RAM"
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
EXPORT_SYMBOL(memremap);

/*
 * memunmap - undo a memremap()
 *
 * Direct-map pointers handed out by try_ram_remap() need no teardown;
 * only vmalloc-space mappings created via ioremap_cache()/ioremap_wt()
 * are unmapped.
 */
void memunmap(void *addr)
{
	if (!is_vmalloc_addr(addr))
		return;
	iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
/* devres destructor: tear down the mapping recorded by devm_memremap() */
static void devm_memremap_release(struct device *dev, void *res)
{
	void *addr = *(void **)res;

	memunmap(addr);
}

/* devres match: true when the stored mapping equals @match_data */
static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	void **ptr = res;

	return *ptr == match_data;
}

/*
 * devm_memremap - device-managed memremap()
 * @dev: device owning the mapping's lifetime
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: MEMREMAP_* flags, as for memremap()
 *
 * The mapping is released automatically on driver detach.  Returns the
 * mapped address, or an ERR_PTR() on failure.
 */
void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (!addr) {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	/* record the mapping so devm_memremap_release() can find it */
	*ptr = addr;
	devres_add(dev, ptr);
	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
D
Dan Williams 已提交
150 151
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
C
Christoph Hellwig 已提交
152 153
}
EXPORT_SYMBOL(devm_memunmap);
C
Christoph Hellwig 已提交
154

D
Dan Williams 已提交
155
pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
D
Dan Williams 已提交
156 157 158 159 160
{
	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
EXPORT_SYMBOL(phys_to_pfn_t);

#ifdef CONFIG_ZONE_DEVICE
/* serializes insertions into / removals from pgmap_radix */
static DEFINE_MUTEX(pgmap_lock);
/* keyed by physical-address section number; values are struct page_map * */
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

/*
 * Bookkeeping for one devm_memremap_pages() registration; released via
 * devres when the hosting device goes away.
 */
struct page_map {
	/* copy of the caller's "host memory" range */
	struct resource res;
	/* caller-owned liveness reference, see devm_memremap_pages() */
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	/* private copy of the caller's optional memmap-allocation descriptor */
	struct vmem_altmap altmap;
};

174 175 176 177 178 179 180 181 182 183 184 185
void get_zone_device_page(struct page *page)
{
	percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

/* drop the reference taken by get_zone_device_page() */
void put_zone_device_page(struct page *page)
{
	struct dev_pagemap *pgmap = page->pgmap;

	put_dev_pagemap(pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);

D
Dan Williams 已提交
186 187
static void pgmap_radix_release(struct resource *res)
{
188 189 190 191 192
	resource_size_t key, align_start, align_size, align_end;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	align_end = align_start + align_size - 1;
D
Dan Williams 已提交
193 194 195 196 197 198 199

	mutex_lock(&pgmap_lock);
	for (key = res->start; key <= res->end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}

200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222
static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

D
Dan Williams 已提交
223
static void devm_memremap_pages_release(struct device *dev, void *data)
C
Christoph Hellwig 已提交
224
{
D
Dan Williams 已提交
225 226 227
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
228
	struct dev_pagemap *pgmap = &page_map->pgmap;
D
Dan Williams 已提交
229

230 231 232 233 234
	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

C
Christoph Hellwig 已提交
235
	/* pages are dead and unused, undo the arch mapping */
D
Dan Williams 已提交
236 237 238
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	arch_remove_memory(align_start, align_size);
239
	pgmap_radix_release(res);
240 241
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
D
Dan Williams 已提交
242 243 244 245 246 247 248 249 250 251 252
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* the radix is keyed by physical-address section number */
	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	if (!page_map)
		return NULL;
	return &page_map->pgmap;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key, align_start, align_size, align_end;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	/* expand to section boundaries, the granularity of the hotplug ops */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size, "System RAM");

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	/* already RAM: the linear map suffices, nothing to hotplug */
	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (altmap && !IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) {
		dev_err(dev, "%s: altmap requires CONFIG_SPARSEMEM_VMEMMAP=y\n",
				__func__);
		return ERR_PTR(-ENXIO);
	}

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	/* own a private copy of the range so its lifetime matches ours */
	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		/* private copy; callers may reuse their descriptor */
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	/* publish one radix entry per section, failing on any collision */
	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	/* create the struct-page backing for the device range */
	error = arch_add_memory(nid, align_start, align_size, true);
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list.  Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
	}

	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);

/*
 * vmem_altmap_offset - pfns at the base of the range without a usable
 * memmap entry (the reserved area plus the pool the memmap is carved from).
 */
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

/* decrease the altmap's count of allocated pfns by @nr_pfns */
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array.  With
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use in the
	 * arch_{add|remove}_memory() paths when setting up and tearing
	 * down the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	if (!pgmap)
		return NULL;
	return pgmap->altmap;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#endif /* CONFIG_ZONE_DEVICE */