/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

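/*
 * Unmap the region's DMA scatterlist, unpin each page (marking it dirty
 * first when the umem is writable and @dirty is set), then free the
 * scatterlist table.
 */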
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
				umem->nmap,
				DMA_BIDIRECTIONAL);

	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
		page = sg_page(sg);
		if (umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * If access flags indicate ODP memory, avoid pinning.  Instead, store
 * the mm for future page fault handling in conjunction with MMU notifiers.
 *
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int i;
	DEFINE_DMA_ATTRS(attrs);
	struct scatterlist *sg, *sg_list_start;
	int need_release = 0;

	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	if (!size)
		return ERR_PTR(-EINVAL);

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kzalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->address   = addr;
	umem->page_size = PAGE_SIZE;
	umem->pid       = get_task_pid(current, PIDTYPE_PID);
	/*
	 * We ask for writable memory if any of the following
	 * access flags are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	umem->writable  = !!(access &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));

	if (access & IB_ACCESS_ON_DEMAND) {
		ret = ib_umem_odp_get(context, umem);
		if (ret) {
			kfree(umem);
			return ERR_PTR(ret);
		}
		return umem;
	}

	umem->odp_data = NULL;

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If we can't allocate the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory.
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = ib_umem_num_pages(umem);

	down_write(&current->mm->mmap_sem);

	locked     = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (npages == 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto out;

	need_release = 1;
	sg_list_start = umem->sg_head.sgl;

	while (npages) {
		ret = get_user_pages(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     1, !umem->writable, page_list, vma_list);

		if (ret < 0)
			goto out;

		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		for_each_sg(sg_list_start, sg, ret, i) {
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;

			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}

		/* start the next chunk of pages at the first unused sg entry */
		sg_list_start = sg;
	}

	umem->nmap = ib_dma_map_sg_attrs(context->device,
				  umem->sg_head.sgl,
				  umem->npages,
				  DMA_BIDIRECTIONAL,
				  &attrs);

	if (umem->nmap <= 0) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;

out:
	if (ret < 0) {
		if (need_release)
			__ib_umem_release(context->device, umem, 0);
		put_pid(umem->pid);
		kfree(umem);
	} else
		current->mm->pinned_vm = locked;

	up_write(&current->mm->mmap_sem);
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
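
/*
 * Usage sketch (editor's illustration, not part of this file): a typical
 * verbs driver pins the user buffer when registering a memory region and
 * unpins it on deregistration.  "example_mr", "example_reg_user_mr" and
 * "example_dereg_mr" are hypothetical names; ib_umem_get() and
 * ib_umem_release() are the functions defined in this file.
 */
struct example_mr {
	struct ib_umem *umem;
};

static struct example_mr *example_reg_user_mr(struct ib_ucontext *context,
					      unsigned long start,
					      size_t length, int access_flags)
{
	struct example_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL);

	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Pin and DMA map [start, start + length); no dmasync needed. */
	mr->umem = ib_umem_get(context, start, length, access_flags, 0);
	if (IS_ERR(mr->umem)) {
		struct ib_umem *umem = mr->umem;

		kfree(mr);
		return ERR_CAST(umem);
	}

	return mr;
}

static void example_dereg_mr(struct example_mr *mr)
{
	ib_umem_release(mr->umem);	/* unpins pages, fixes up pinned_vm */
	kfree(mr);
}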

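/* Deferred pinned_vm accounting for ib_umem_release(); runs from ib_wq. */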
static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->pinned_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	struct task_struct *task;
	unsigned long diff;

	if (umem->odp_data) {
		ib_umem_odp_release(umem);
		return;
	}

	__ib_umem_release(umem->context->device, umem, 1);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	diff = ib_umem_num_pages(umem);

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the pinned_vm accounting to the ib_wq workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			queue_work(ib_wq, &umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->pinned_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int shift;
	int i;
	int n;
	struct scatterlist *sg;

	if (umem->odp_data)
		return ib_umem_num_pages(umem);

	shift = ilog2(umem->page_size);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);

/**
 * ib_umem_copy_from - copy from the given ib_umem's pages to the given buffer
 * @dst: destination buffer
 * @umem: the umem to copy from
 * @offset: offset to start copying from
 * @length: buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
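
/*
 * Usage sketch (editor's illustration, not part of this file): copy the
 * first bytes of a pinned region into a kernel buffer, e.g. to inspect a
 * user-supplied header.  "example_read_header" is a hypothetical name.
 */
static int example_read_header(struct ib_umem *umem, void *hdr, size_t hdr_len)
{
	/*
	 * ib_umem_copy_from() re-checks the bounds and returns -EINVAL if
	 * the requested range falls outside the umem.
	 */
	return ib_umem_copy_from(hdr, umem, 0, hdr_len);
}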