/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"


static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
				umem->npages,
				DMA_BIDIRECTIONAL);

	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {

		page = sg_page(sg);
		if (umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * If access flags indicate ODP memory, avoid pinning. Instead, store
 * the mm for future page fault handling in conjunction with MMU notifiers.
 *
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int i;
	unsigned long dma_attrs = 0;
	struct scatterlist *sg, *sg_list_start;
	int need_release = 0;
	unsigned int gup_flags = FOLL_WRITE;

	if (dmasync)
		dma_attrs |= DMA_ATTR_WRITE_BARRIER;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kzalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->address   = addr;
	umem->page_size = PAGE_SIZE;
	umem->pid       = get_task_pid(current, PIDTYPE_PID);
	/*
	 * We ask for writable memory if any of the following
	 * access flags are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	umem->writable  = !!(access &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));

	if (access & IB_ACCESS_ON_DEMAND) {
		put_pid(umem->pid);
		ret = ib_umem_odp_get(context, umem);
		if (ret) {
			kfree(umem);
			return ERR_PTR(ret);
		}
		return umem;
	}

	umem->odp_data = NULL;

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		put_pid(umem->pid);
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * if we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = ib_umem_num_pages(umem);

	down_write(&current->mm->mmap_sem);

	locked     = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto out;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	need_release = 1;
	sg_list_start = umem->sg_head.sgl;

	while (npages) {
		ret = get_user_pages(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     gup_flags, page_list, vma_list);

		if (ret < 0)
			goto out;

		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		for_each_sg(sg_list_start, sg, ret, i) {
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;

			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}

		/* preparing for next loop */
		sg_list_start = sg;
	}

	umem->nmap = ib_dma_map_sg_attrs(context->device,
				  umem->sg_head.sgl,
				  umem->npages,
				  DMA_BIDIRECTIONAL,
				  dma_attrs);

	if (umem->nmap <= 0) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;

out:
	if (ret < 0) {
		if (need_release)
			__ib_umem_release(context->device, umem, 0);
		put_pid(umem->pid);
		kfree(umem);
	} else
		current->mm->pinned_vm = locked;

	up_write(&current->mm->mmap_sem);
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
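
/*
 * Usage sketch (illustrative only; "ctx", "user_addr" and "user_len" are
 * placeholders for the caller's ucontext and the verbs request): a driver
 * registering a user memory region would pin it roughly like this:
 *
 *	struct ib_umem *umem;
 *
 *	umem = ib_umem_get(ctx, user_addr, user_len,
 *			   IB_ACCESS_LOCAL_WRITE, 0);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *
 * and later unpin it with ib_umem_release(umem) when the region is
 * deregistered.
 */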

static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->pinned_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	struct task_struct *task;
	unsigned long diff;

	if (umem->odp_data) {
		ib_umem_odp_release(umem);
		return;
	}

	__ib_umem_release(umem->context->device, umem, 1);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	diff = ib_umem_num_pages(umem);

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			queue_work(ib_wq, &umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->pinned_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int shift;
	int i;
	int n;
	struct scatterlist *sg;

	if (umem->odp_data)
		return ib_umem_num_pages(umem);

	shift = ilog2(umem->page_size);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
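
/*
 * Illustrative sketch (assumes "umem" is a non-ODP umem returned by
 * ib_umem_get()): drivers typically size their hardware translation
 * tables from ib_umem_page_count() and then walk the DMA-mapped
 * scatterlist to fill them:
 *
 *	struct scatterlist *sg;
 *	int i, n = ib_umem_page_count(umem);
 *
 *	// allocate n translation entries, then:
 *	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
 *		// program sg_dma_address(sg) / sg_dma_len(sg) into the HW
 */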

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
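
/*
 * Usage sketch (illustrative; "hdr" and its size are placeholders):
 * copying the first bytes of a pinned region into a kernel buffer:
 *
 *	char hdr[64];
 *	int err;
 *
 *	err = ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 */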