/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	struct page *page = pfn_to_page(offset >> PAGE_SHIFT);

	/* In the simple case just return the existing linear address */
	if (!PageHighMem(page))
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for "System RAM" on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map "System RAM" with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size, "System RAM");
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in "System RAM"
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * "System RAM"
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
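
/*
 * Example (an illustrative sketch only, not an in-tree user; "foo_start"
 * and "foo_size" are hypothetical):
 *
 *	void *addr = memremap(foo_start, foo_size, MEMREMAP_WB);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	memunmap(addr);
 *
 * When the range is "System RAM" the MEMREMAP_WB case resolves to the
 * direct map, and memunmap() is then a nop for that address since it
 * is not a vmalloc/ioremap address.
 */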

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
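
/*
 * Example (hypothetical sketch; "pdev" and "res" stand in for some
 * driver's platform device and memory resource):
 *
 *	void *addr;
 *
 *	addr = devm_memremap(&pdev->dev, res->start,
 *			resource_size(res), MEMREMAP_WB);
 *	if (IS_ERR_OR_NULL(addr))
 *		return addr ? PTR_ERR(addr) : -ENOMEM;
 *
 * The mapping is torn down automatically on driver detach;
 * devm_memunmap() is only needed to release it early.
 */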

pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
{
	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
EXPORT_SYMBOL(phys_to_pfn_t);
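
/*
 * Example (sketch): a fault handler for device memory would typically
 * build the pfn_t with flags from <linux/pfn_t.h>, e.g.:
 *
 *	pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);
 *
 *	vm_insert_mixed(vma, vmf->virtual_address, pfn);
 *
 * where "phys", "vma", and "vmf" are assumed to come from the
 * surrounding fault path.
 */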

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

void get_zone_device_page(struct page *page)
{
	percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

void put_zone_device_page(struct page *page)
{
	put_dev_pagemap(page->pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);

static void pgmap_radix_release(struct resource *res)
{
	resource_size_t key;

	mutex_lock(&pgmap_lock);
	for (key = res->start; key <= res->end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	pgmap_radix_release(res);

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	arch_remove_memory(align_start, align_size);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	int is_ram = region_intersects(res->start, resource_size(res),
			"System RAM");
	resource_size_t key, align_start, align_size;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	unsigned long pfn;
	int error, nid;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (altmap && !IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) {
		dev_err(dev, "%s: altmap requires CONFIG_SPARSEMEM_VMEMMAP=y\n",
				__func__);
		return ERR_PTR(-ENXIO);
	}

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	for (key = res->start; key <= res->end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	error = arch_add_memory(nid, align_start, align_size, true);
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/* ZONE_DEVICE pages must never appear on a slab lru */
		list_force_poison(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
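
/*
 * Example (an abbreviated, hypothetical sketch of the calling
 * convention; "foo" is a stand-in driver context):
 *
 *	percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
 *	addr = devm_memremap_pages(dev, res, &foo->ref, NULL);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * The ref is live after percpu_ref_init(), satisfying note 1/ above;
 * the driver must kill it before the devres release fires.  Passing a
 * non-NULL altmap instead allocates the memmap from @res itself (see
 * the vmem_altmap helpers below).
 */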

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
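
/*
 * Layout implied by the accounting above (hypothetical numbers): with
 * base_pfn = 0x100000, reserve = 2, and free = 1024, the first 2 pfns
 * are reserved, the next 1024 may be handed out by the arch code to
 * store the memmap itself, and pfn_to_page() only becomes valid at
 * base_pfn + vmem_altmap_offset(), i.e. base_pfn + 1026.
 */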

#ifdef CONFIG_SPARSEMEM_VMEMMAP
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array.  In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use by
	 * arch_{add|remove}_memory() when setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#endif /* CONFIG_ZONE_DEVICE */