/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/pagemap.h>

#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
49
	struct sg_page_iter sg_iter;
50
	struct page *page;
51

52
	if (umem->nmap > 0)
53
		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
54
				DMA_BIDIRECTIONAL);
J
Jens Axboe 已提交
55

56 57
	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
		page = sg_page_iter_page(&sg_iter);
58
		if (!PageDirty(page) && umem->writable && dirty)
59 60
			set_page_dirty_lock(page);
		put_page(page);
61
	}
62 63

	sg_free_table(&umem->sg_head);
64 65
}

/* ib_umem_add_sg_table - Add N contiguous pages to scatter table
 *
 * sg: current scatterlist entry
 * page_list: array of npage struct page pointers
 * npages: number of pages in page_list
 * max_seg_sz: maximum segment size in bytes
 * nents: [out] number of entries in the scatterlist
 *
 * Return new end of scatterlist
 */
static struct scatterlist *ib_umem_add_sg_table(struct scatterlist *sg,
						struct page **page_list,
						unsigned long npages,
						unsigned int max_seg_sz,
						int *nents)
{
	unsigned long first_pfn;
	unsigned long i = 0;
	bool update_cur_sg = false;
	/* 'first' means the table is still empty: 'sg' is the initial,
	 * not-yet-populated entry (sg_page() is NULL until sg_set_page()).
	 */
	bool first = !sg_page(sg);

	/* Check if new page_list is contiguous with end of previous page_list.
	 * sg->length here is a multiple of PAGE_SIZE and sg->offset is 0.
	 */
	if (!first && (page_to_pfn(sg_page(sg)) + (sg->length >> PAGE_SHIFT) ==
		       page_to_pfn(page_list[0])))
		update_cur_sg = true;

	while (i != npages) {
		unsigned long len;
		struct page *first_page = page_list[i];

		first_pfn = page_to_pfn(first_page);

		/* Compute the number of contiguous pages we have starting
		 * at i
		 */
		for (len = 0; i != npages &&
			      first_pfn + len == page_to_pfn(page_list[i]);
		     len++)
			i++;

		/* Squash N contiguous pages from page_list into current sge,
		 * but only if the merged length still fits max_seg_sz.
		 * Only the first run of this call can merge into the previous
		 * call's last sge, hence update_cur_sg is cleared afterwards.
		 */
		if (update_cur_sg &&
		    ((max_seg_sz - sg->length) >= (len << PAGE_SHIFT))) {
			sg_set_page(sg, sg_page(sg),
				    sg->length + (len << PAGE_SHIFT), 0);
			update_cur_sg = false;
			continue;
		}

		/* Squash N contiguous pages into next sge or first sge */
		if (!first)
			sg = sg_next(sg);

		(*nents)++;
		sg_set_page(sg, first_page, len << PAGE_SHIFT, 0);
		first = false;
	}

	return sg;
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
131 132
 *
 * If access flags indicate ODP memory, avoid pinning. Instead, stores
133
 * the mm for future page fault handling in conjunction with MMU notifiers.
134
 *
135
 * @udata: userspace context to pin memory for
136 137 138
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
139
 * @dmasync: flush in-flight DMA when the memory region is written
140
 */
141
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
142
			    size_t size, int access, int dmasync)
143
{
144
	struct ib_ucontext *context;
145
	struct ib_umem *umem;
146
	struct page **page_list;
147
	struct vm_area_struct **vma_list;
148
	unsigned long lock_limit;
149
	unsigned long new_pinned;
150
	unsigned long cur_base;
151
	struct mm_struct *mm;
152
	unsigned long npages;
153
	int ret;
154
	int i;
155
	unsigned long dma_attrs = 0;
156
	struct scatterlist *sg;
157
	unsigned int gup_flags = FOLL_WRITE;
158

159 160 161 162 163 164 165
	if (!udata)
		return ERR_PTR(-EIO);

	context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
			  ->context;
	if (!context)
		return ERR_PTR(-EIO);
166

167
	if (dmasync)
168
		dma_attrs |= DMA_ATTR_WRITE_BARRIER;
169

170 171 172 173
	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
174 175
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
176 177
		return ERR_PTR(-EINVAL);

178
	if (!can_do_mlock())
179
		return ERR_PTR(-EPERM);
180

181 182 183 184
	if (access & IB_ACCESS_ON_DEMAND) {
		umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
		if (!umem)
			return ERR_PTR(-ENOMEM);
185
		umem->is_odp = 1;
186 187 188 189 190
	} else {
		umem = kzalloc(sizeof(*umem), GFP_KERNEL);
		if (!umem)
			return ERR_PTR(-ENOMEM);
	}
191

192 193 194 195
	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
196
	umem->writable   = ib_access_writable(access);
197 198
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);
199

200
	if (access & IB_ACCESS_ON_DEMAND) {
201 202 203 204 205
		if (WARN_ON_ONCE(!context->invalidate_range)) {
			ret = -EINVAL;
			goto umem_kfree;
		}

206
		ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
207 208
		if (ret)
			goto umem_kfree;
209 210 211
		return umem;
	}

212 213 214
	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

215 216
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
217
		ret = -ENOMEM;
218
		goto umem_kfree;
219
	}
220

221 222 223 224 225 226 227 228
	/*
	 * if we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

229
	npages = ib_umem_num_pages(umem);
D
Doug Ledford 已提交
230 231 232 233
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}
234

J
Jiri Slaby 已提交
235
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
236

237
	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
238
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
239
		atomic64_sub(npages, &mm->pinned_vm);
240
		ret = -ENOMEM;
241
		goto out;
242 243
	}

244
	cur_base = addr & PAGE_MASK;
245

246 247
	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
248
		goto vma;
249

250 251 252
	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

253
	sg = umem->sg_head.sgl;
254

255
	while (npages) {
256
		down_read(&mm->mmap_sem);
257
		ret = get_user_pages_longterm(cur_base,
258
				     min_t(unsigned long, npages,
259
					   PAGE_SIZE / sizeof (struct page *)),
260
				     gup_flags, page_list, vma_list);
261
		if (ret < 0) {
262
			up_read(&mm->mmap_sem);
263
			goto umem_release;
264
		}
265 266 267 268

		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

269 270 271 272
		sg = ib_umem_add_sg_table(sg, page_list, ret,
			dma_get_max_seg_size(context->device->dma_device),
			&umem->sg_nents);

273 274 275
		/* Continue to hold the mmap_sem as vma_list access
		 * needs to be protected.
		 */
276
		for (i = 0; i < ret && umem->hugetlb; i++) {
277 278
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;
279 280
		}

281
		up_read(&mm->mmap_sem);
282 283
	}

284 285
	sg_mark_end(sg);

286 287
	umem->nmap = ib_dma_map_sg_attrs(context->device,
				  umem->sg_head.sgl,
288
				  umem->sg_nents,
289
				  DMA_BIDIRECTIONAL,
290
				  dma_attrs);
291

292
	if (!umem->nmap) {
293
		ret = -ENOMEM;
294
		goto umem_release;
295 296 297
	}

	ret = 0;
298
	goto out;
299

300 301 302
umem_release:
	__ib_umem_release(context->device, umem, 0);
vma:
303
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
304
out:
305 306
	if (vma_list)
		free_page((unsigned long) vma_list);
307
	free_page((unsigned long) page_list);
308
umem_kfree:
309 310
	if (ret) {
		mmdrop(umem->owning_mm);
311
		kfree(umem);
312
	}
313
	return ret ? ERR_PTR(ret) : umem;
314
}
315
EXPORT_SYMBOL(ib_umem_get);
/* __ib_umem_release_tail - drop the mm reference taken at ib_umem_get()
 * time and free the umem structure itself, freeing the larger ODP
 * container when the umem was allocated as one.
 */
static void __ib_umem_release_tail(struct ib_umem *umem)
{
	mmdrop(umem->owning_mm);
	if (umem->is_odp)
		kfree(to_ib_umem_odp(umem));
	else
		kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
331
{
332
	if (umem->is_odp) {
333
		ib_umem_odp_release(to_ib_umem_odp(umem));
334
		__ib_umem_release_tail(umem);
335 336 337
		return;
	}

338
	__ib_umem_release(umem->context->device, umem, 1);
339

340
	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
341
	__ib_umem_release_tail(umem);
342 343 344 345 346 347 348
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int i;
	int n;
349
	struct scatterlist *sg;
350

351
	if (umem->is_odp)
352 353
		return ib_umem_num_pages(umem);

354
	n = 0;
355
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
356
		n += sg_dma_len(sg) >> umem->page_shift;
357

358
	return n;
359
}
360
EXPORT_SYMBOL(ib_umem_page_count);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	/* Two-part check avoids overflow of offset + length */
	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	/* ib_umem_offset() accounts for the start address not being
	 * page aligned within the first sg entry.
	 */
	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		/* short copy: scatterlist smaller than requested range */
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);