/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
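/*
 * Memory management helpers for the iSER initiator: bounce-buffer handling
 * for RDMA-unaligned scatterlists, DMA mapping of task data, and building
 * and registering page vectors for FMR-based memory registration.
 */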
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */

/**
 * iser_start_rdma_unaligned_sg - allocate a contiguous bounce buffer for a
 * scatterlist that is not RDMA-aligned; for a write, copy the scatterlist
 * data into it, then DMA-map the single-entry copy.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					enum iser_data_dir cmd_dir)
{
	int dma_nents;
	struct ib_device *dev;
	char *mem = NULL;
	struct iser_data_buf *data = &iser_task->data[cmd_dir];
	unsigned long  cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_ATOMIC,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_ATOMIC);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size,(int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		struct scatterlist *sgl = (struct scatterlist *)data->buf;
		struct scatterlist *sg;
		int i;
		char *p, *from;

		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg));
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from);
			p += sg->length;
		}
	}

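	/* wrap the bounce buffer in a single-entry scatterlist so it can be
	 * DMA-mapped and used in place of the original unaligned SG */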
	sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
	iser_task->data_copy[cmd_dir].buf  =
		&iser_task->data_copy[cmd_dir].sg_single;
	iser_task->data_copy[cmd_dir].size = 1;

	iser_task->data_copy[cmd_dir].copy_buf  = mem;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	dma_nents = ib_dma_map_sg(dev,
				  &iser_task->data_copy[cmd_dir].sg_single,
				  1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
	return 0;
}

/**
 * iser_finalize_rdma_unaligned_sg - unmap the bounce buffer, copy it back
 * into the task's scatterlist for a read, and free it.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     enum iser_data_dir         cmd_dir)
{
	struct ib_device *dev;
	struct iser_data_buf *mem_copy;
	unsigned long  cmd_data_len;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;
	mem_copy = &iser_task->data_copy[cmd_dir];

	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem	= mem_copy->copy_buf;

		sgl	= (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
		sg_size = iser_task->data[ISER_DIR_IN].size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg));
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to);
			p += sg->length;
		}
	}

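	/* release the bounce buffer through the allocator that was used to
	 * obtain it: the page allocator above ISER_KMALLOC_THRESHOLD,
	 * kmalloc/kfree otherwise */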
	cmd_data_len = iser_task->data[cmd_dir].data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)mem_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(mem_copy->copy_buf);

	mem_copy->copy_buf = NULL;
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the case where
 * several fragments of the same page are present in the SG as consecutive
 * elements. A single-entry SG is handled as well.
 */

static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct iser_page_vec *page_vec,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page  = 0;
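	/* walk the mapped SG, merging entries into contiguous chunks and
	 * emitting one 4K-aligned page address per page of each chunk */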
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			page_vec->pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	page_vec->data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
	return cur_page;
}


/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				      struct ib_device *ibdev)
{
	struct scatterlist *sgl, *sg, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = (struct scatterlist *)data->buf;
	start_addr  = ib_sg_dma_address(ibdev, sgl);

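	/* count the leading entries that can be registered as one region:
	 * each boundary between consecutive entries must either be contiguous
	 * (one entry ends exactly where the next starts) or fall on a 4K
	 * boundary; stop at the first violation */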
	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr    = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr  = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i+1;
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);

	page_vec->length = page_vec_len;

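	/* sanity check: the produced page list must cover the entire data
	 * size, otherwise the SG was not really aligned as assumed */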
	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *data,
			    enum iser_data_dir iser_dir,
			    enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
{
	struct ib_device *dev;
	struct iser_data_buf *data;

	dev = iser_task->iser_conn->ib_conn->device->ib_device;

	if (iser_task->dir[ISER_DIR_IN]) {
		data = &iser_task->data[ISER_DIR_IN];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		data = &iser_task->data[ISER_DIR_OUT];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
	}
}

/**
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
		      enum   iser_data_dir        cmd_dir)
{
	struct iscsi_conn    *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_conn     *ib_conn = iser_task->iser_conn->ib_conn;
	struct iser_device   *device = ib_conn->device;
	struct ib_device     *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_task->rdma_regd[cmd_dir];

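	/* if the SG cannot be registered as-is (alignment violation, or more
	 * than one mapped entry with no FMR pool), fall back to an aligned
	 * bounce buffer */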
	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents ||
	    (!ib_conn->fmr_pool && mem->dma_nents > 1)) {
		iscsi_conn->fmr_unalign_cnt++;
		iser_dbg("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
			 aligned_len, mem->size);

		if (iser_debug_level > 0)
			iser_data_buf_dump(mem, ibdev);

		/* unmap the command data before accessing it */
		iser_dma_unmap_task_data(iser_task);

		/* allocate copy buf, if we are writing, copy the */
		/* unaligned scatterlist, dma map the copy        */
		if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
			return -ENOMEM;
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_fmr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X  "
			 "va: 0x%08lX sz: %ld]\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
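		/* build a page vector describing the SG and register it
		 * through the connection's FMR pool */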
		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
				 ib_conn->page_vec->offset);
			for (i=0 ; i<ib_conn->page_vec->length ; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long) ib_conn->page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}