/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 * Allows one to know how many entries are in sg, taking chaining
 * into account as well.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 * Determines the number of entries in sg that are required to meet
 * the supplied length, taking into account chaining as well
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
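
/*
 * Example usage (illustrative sketch; 'buf' and 'len' are assumed to be
 * provided by the caller): describe a single kmalloc'ed buffer with a
 * one-entry scatterlist, e.g. before handing it to dma_map_sg() or a
 * crypto API:
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	sg_init_one(&sg, buf, len);
 *	(use &sg with nents == 1, then kfree(buf) once the I/O is done)
 */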

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL to use for the first allocation, if preallocated
 *		(may be %NULL, in which case @alloc_fn is used throughout)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function allocates a @table that is @nents entries long. The
 *   allocator is defined to return scatterlist chunks of maximum size
 *   @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
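
/*
 * Example usage (illustrative sketch; 'pages' and 'npages' are assumed to
 * be supplied by the caller): allocate a table, point each entry at one
 * page and release it again.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&table);
 */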

/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			         an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node up to the
 *    maximum size specified in @max_segment. A user may provide an offset at
 *    the start and a size of valid data in a buffer specified by the page
 *    array. The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at the start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
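
/*
 * Example usage (illustrative sketch; 'pages', 'n_pages' and 'size' are
 * assumed to describe a pinned buffer, e.g. one obtained via
 * get_user_pages()):
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0, size,
 *					GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */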

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);
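
/*
 * Example usage (illustrative sketch; the 1 MiB length is arbitrary and
 * SZ_1M comes from <linux/sizes.h>): allocate backing pages for a transfer
 * and free them again.
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = sgl_alloc(SZ_1M, GFP_KERNEL, &nents);
 *	if (!sgl)
 *		return -ENOMEM;
 *	...
 *	sgl_free(sgl);
 */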

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
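
/*
 * Example usage (illustrative sketch; 'sgt' is assumed to be a populated
 * struct sg_table): walk every page backing the list with the page
 * iterator via the for_each_sg_page() helper from <linux/scatterlist.h>.
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0) {
 *		struct page *page = sg_page_iter_page(&piter);
 *		...
 *	}
 */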

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or if it has not been advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
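
/*
 * Example usage (illustrative sketch; 'sgl', 'nents' and process_chunk()
 * are assumptions of the sketch): the usual start/next/stop pattern for
 * reading data out of an sg list from atomic context.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter))
 *		process_chunk(miter.addr, miter.length);
 *	sg_miter_stop(&miter);
 */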

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to or from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
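
/*
 * Example usage (illustrative sketch; 'sgl', 'nents' and 'len' are assumed
 * to describe the same amount of data): stage an sg list's payload into a
 * contiguous bounce buffer and copy it back afterwards.
 *
 *	void *bounce = kmalloc(len, GFP_KERNEL);
 *
 *	if (!bounce)
 *		return -ENOMEM;
 *	sg_copy_to_buffer(sgl, nents, bounce, len);
 *	... operate on the linear copy ...
 *	sg_copy_from_buffer(sgl, nents, bounce, len);
 *	kfree(bounce);
 */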

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		       size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);