/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
49
	struct sg_page_iter sg_iter;
50
	struct page *page;
51

52
	if (umem->nmap > 0)
53
		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
54
				DMA_BIDIRECTIONAL);
J
Jens Axboe 已提交
55

56 57
	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
		page = sg_page_iter_page(&sg_iter);
58
		if (!PageDirty(page) && umem->writable && dirty)
59 60
			set_page_dirty_lock(page);
		put_page(page);
61
	}
62 63

	sg_free_table(&umem->sg_head);
64 65
}

/* ib_umem_add_sg_table - Add N contiguous pages to scatter table
 *
 * sg: current (last) scatterlist entry; pages are appended after/into it
 * page_list: array of npage struct page pointers
 * npages: number of pages in page_list
 * max_seg_sz: maximum segment size in bytes; no sge is grown past this
 * nents: [out] number of entries in the scatterlist (incremented per new sge)
 *
 * Runs of physically contiguous pages are squashed into as few scatterlist
 * entries as possible, possibly extending the entry that @sg already
 * describes when the new pages continue it.
 *
 * Return new end of scatterlist
 */
static struct scatterlist *ib_umem_add_sg_table(struct scatterlist *sg,
						struct page **page_list,
						unsigned long npages,
						unsigned int max_seg_sz,
						int *nents)
{
	unsigned long first_pfn;
	unsigned long i = 0;
	bool update_cur_sg = false;
	/* An sge with no page set means the table is still empty. */
	bool first = !sg_page(sg);

	/* Check if new page_list is contiguous with end of previous page_list.
	 * sg->length here is a multiple of PAGE_SIZE and sg->offset is 0.
	 */
	if (!first && (page_to_pfn(sg_page(sg)) + (sg->length >> PAGE_SHIFT) ==
		       page_to_pfn(page_list[0])))
		update_cur_sg = true;

	while (i != npages) {
		unsigned long len;
		struct page *first_page = page_list[i];

		first_pfn = page_to_pfn(first_page);

		/* Compute the number of contiguous pages we have starting
		 * at i
		 */
		for (len = 0; i != npages &&
			      first_pfn + len == page_to_pfn(page_list[i]) &&
			      len < (max_seg_sz >> PAGE_SHIFT);
		     len++)
			i++;

		/* Squash N contiguous pages from page_list into current sge */
		if (update_cur_sg) {
			/* Merge only if the grown sge stays within max_seg_sz;
			 * otherwise fall through and start a new sge below.
			 */
			if ((max_seg_sz - sg->length) >= (len << PAGE_SHIFT)) {
				sg_set_page(sg, sg_page(sg),
					    sg->length + (len << PAGE_SHIFT),
					    0);
				update_cur_sg = false;
				continue;
			}
			/* Merging is only ever attempted for the first run. */
			update_cur_sg = false;
		}

		/* Squash N contiguous pages into next sge or first sge */
		if (!first)
			sg = sg_next(sg);

		(*nents)++;
		sg_set_page(sg, first_page, len << PAGE_SHIFT, 0);
		first = false;
	}

	return sg;
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * If access flags indicate ODP memory, avoid pinning. Instead, stores
 * the mm for future page fault handling in conjunction with MMU notifiers.
 *
 * @udata: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 *
 * Returns the new umem on success, or an ERR_PTR on failure.
 */
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_ucontext *context;
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	struct mm_struct *mm;
	unsigned long npages;
	int ret;
	int i;
	unsigned long dma_attrs = 0;
	struct scatterlist *sg;
	unsigned int gup_flags = FOLL_WRITE;

	if (!udata)
		return ERR_PTR(-EIO);

	/* Recover the ucontext from the udata embedded in the attr bundle. */
	context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
			  ->context;
	if (!context)
		return ERR_PTR(-EIO);

	if (dmasync)
		dma_attrs |= DMA_ATTR_WRITE_BARRIER;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	/* ODP umems are allocated as the larger ib_umem_odp container. */
	if (access & IB_ACCESS_ON_DEMAND) {
		umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
		if (!umem)
			return ERR_PTR(-ENOMEM);
		umem->is_odp = 1;
	} else {
		umem = kzalloc(sizeof(*umem), GFP_KERNEL);
		if (!umem)
			return ERR_PTR(-ENOMEM);
	}

	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = ib_access_writable(access);
	/* Hold a grab reference on the mm for the life of the umem. */
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	if (access & IB_ACCESS_ON_DEMAND) {
		/* ODP requires the driver to support invalidation callbacks. */
		if (WARN_ON_ONCE(!context->invalidate_range)) {
			ret = -EINVAL;
			goto umem_kfree;
		}

		ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
		if (ret)
			goto umem_kfree;
		/* ODP does not pin pages; we are done. */
		return umem;
	}

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

	/* One page's worth of page pointers per get_user_pages batch. */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	/*
	 * if we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	/* Charge the pages against RLIMIT_MEMLOCK before pinning. */
	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto vma;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	sg = umem->sg_head.sgl;

	/* Pin the pages a batch at a time and coalesce them into the sgl. */
	while (npages) {
		down_read(&mm->mmap_sem);
		ret = get_user_pages_longterm(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     gup_flags, page_list, vma_list);
		if (ret < 0) {
			up_read(&mm->mmap_sem);
			goto umem_release;
		}

		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		sg = ib_umem_add_sg_table(sg, page_list, ret,
			dma_get_max_seg_size(context->device->dma_device),
			&umem->sg_nents);

		/* Continue to hold the mmap_sem as vma_list access
		 * needs to be protected.
		 */
		for (i = 0; i < ret && umem->hugetlb; i++) {
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;
		}

		up_read(&mm->mmap_sem);
	}

	sg_mark_end(sg);

	umem->nmap = ib_dma_map_sg_attrs(context->device,
				  umem->sg_head.sgl,
				  umem->sg_nents,
				  DMA_BIDIRECTIONAL,
				  dma_attrs);

	if (!umem->nmap) {
		ret = -ENOMEM;
		goto umem_release;
	}

	ret = 0;
	goto out;

	/* Error unwind: each label undoes one stage of setup above. */
umem_release:
	__ib_umem_release(context->device, umem, 0);
vma:
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);

/*
 * __ib_umem_release_tail - free the umem object and drop its mm reference
 * @umem: region being torn down
 *
 * ODP umems were allocated as the larger ib_umem_odp container, so free
 * via the containing structure in that case.
 */
static void __ib_umem_release_tail(struct ib_umem *umem)
{
	/* Read owning_mm before the umem memory is freed. */
	struct mm_struct *mm = umem->owning_mm;

	if (umem->is_odp)
		kfree(to_ib_umem_odp(umem));
	else
		kfree(umem);

	/* Pairs with the mmgrab() in ib_umem_get(). */
	mmdrop(mm);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
335
{
336
	if (umem->is_odp) {
337
		ib_umem_odp_release(to_ib_umem_odp(umem));
338
		__ib_umem_release_tail(umem);
339 340 341
		return;
	}

342
	__ib_umem_release(umem->context->device, umem, 1);
343

344
	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
345
	__ib_umem_release_tail(umem);
346 347 348 349 350 351 352
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int i;
	int n;
353
	struct scatterlist *sg;
354

355
	if (umem->is_odp)
356 357
		return ib_umem_num_pages(umem);

358
	n = 0;
359
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
360
		n += sg_dma_len(sg) >> umem->page_shift;
361

362
	return n;
363
}
364
EXPORT_SYMBOL(ib_umem_page_count);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int copied;

	/* Reject ranges that fall outside the umem (overflow-safe check). */
	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	copied = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst,
				    length, offset + ib_umem_offset(umem));
	if (copied < 0)
		return copied;

	/* A short copy means the scatterlist ended early. */
	return copied == length ? 0 : -EINVAL;
}
EXPORT_SYMBOL(ib_umem_copy_from);