/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

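/*
 * An arch can override this to refuse reusing the direct map when the
 * requested attributes cannot be provided by the linear mapping, e.g.
 * when MEMREMAP_ENC/MEMREMAP_DEC ask for a different encryption state.
 */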
#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
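
/*
 * Example usage (an illustrative sketch, not an in-tree user; the
 * physical address is hypothetical):
 *
 *	void *addr = memremap(phys_addr, SZ_4K, MEMREMAP_WB);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	memunmap(addr);
 *
 * Note that a MEMREMAP_WB request covering System RAM resolves to the
 * direct map, which is why memunmap() only calls iounmap() for
 * addresses in the vmalloc range.
 */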

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);
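
/*
 * Example usage (hypothetical probe path; "pdev" and "res" are
 * illustrative): the mapping is released automatically when the device
 * is unbound, so no explicit memunmap() is needed on exit paths.
 *
 *	void *base;
 *
 *	base = devm_memremap(&pdev->dev, res->start,
 *			     resource_size(res), MEMREMAP_WB);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */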

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

static void pgmap_radix_release(struct resource *res)
{
	resource_size_t key, align_start, align_size, align_end;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	align_end = align_start + align_size - 1;

	mutex_lock(&pgmap_lock);
	for (key = align_start; key <= align_end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}

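/*
 * The first usable pfn lies past any pages a vmem_altmap has reserved
 * at the start of the range to back the memmap itself; see
 * vmem_altmap_offset().
 */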
static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;
	unsigned long pfn;

	for_each_device_pfn(pfn, page_map)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event). The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release().
 *    Waiting for all references to be dropped, and calling percpu_ref_exit(),
 *    must occur after devm_memremap_pages_release().
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key, align_start, align_size, align_end;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list.  Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(ref);
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
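
/*
 * Example usage (a condensed sketch modeled on a pmem-style driver;
 * "pmem", its fields, and the release callback are hypothetical):
 *
 *	error = percpu_ref_init(&pmem->ref, pmem_release, 0, GFP_KERNEL);
 *	if (error)
 *		return error;
 *
 *	pmem->virt_addr = devm_memremap_pages(dev, res, &pmem->ref, altmap);
 *	if (IS_ERR(pmem->virt_addr))
 *		return PTR_ERR(pmem->virt_addr);
 *
 * Each pfn in the range then has a struct page with ->pgmap pointing
 * back at this mapping, and each page holds a reference on @ref until
 * devm_memremap_pages_release() puts it.
 */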

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array.  In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use in
	 * arch_{add|remove}_memory() for setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */