/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
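
/*
 * Example (illustrative sketch, not part of the original file): walking a
 * possibly chained list by hand with sg_next().  Most callers should use
 * the for_each_sg() helper instead, which is built on top of sg_next().
 *
 *	struct scatterlist *sg;
 *
 *	for (sg = sgl; sg; sg = sg_next(sg))
 *		pr_debug("entry of %u bytes\n", sg->length);
 */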

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 * Returns the total number of entries in @sg, taking chaining into
 * account as well
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 * Determines the number of entries in @sg that are required to meet
 * the supplied length, taking chaining into account as well
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
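
/*
 * Example (illustrative sketch): mapping only as many entries as are needed
 * to cover the first "len" bytes of a longer list.  "dev", "sgl" and "len"
 * are assumed to be provided by the caller.
 *
 *	int nents = sg_nents_for_len(sgl, len);
 *
 *	if (nents < 0)
 *		return nents;
 *	if (!dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE))
 *		return -EIO;
 */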

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
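
/*
 * Example (illustrative sketch): describing a single kmalloc'ed buffer with
 * a one-entry scatterlist before mapping it for DMA.  "dev", "buf" and
 * "len" are assumed to come from the caller.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (!dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE))
 *		return -EIO;
 */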

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
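
/*
 * Example (illustrative sketch): allocating a table, filling its entries
 * from an array of pages and releasing it again.  "pages" and "npages" are
 * assumed to be set up by the caller.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *	int ret;
 *
 *	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&table);
 */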

/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			         an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node up to the
 *    maximum size specified in @max_segment. A user may provide an offset at
 *    the start and a size of valid data in a buffer specified by the page
 *    array. The returned sg table is released by sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at the start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
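
/*
 * Example (illustrative sketch): building a table directly from a pinned
 * page array so that physically contiguous pages get merged into single
 * entries.  "sgt", "pages" and "n_pages" are assumed to be provided by the
 * caller.
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
 *					(unsigned long)n_pages << PAGE_SHIFT,
 *					GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */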

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
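
/*
 * Example (illustrative sketch): allocating backing pages together with the
 * scatterlist that describes them, then freeing both.  "size" is assumed to
 * be supplied by the caller.
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = sgl_alloc(size, GFP_KERNEL, &nents);
 *	if (!sgl)
 *		return -ENOMEM;
 *	...
 *	sgl_free(sgl);
 */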

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid that a
 *   page would get freed twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
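
/*
 * Example (illustrative sketch): the page iterator is normally driven
 * through the for_each_sg_page() helper rather than by calling
 * __sg_page_iter_next() directly.
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgl, &piter, nents, 0) {
 *		struct page *page = sg_page_iter_page(&piter);
 *
 *		...
 *	}
 */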

static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to plus the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not yet advanced.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
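
/*
 * Example (illustrative sketch): the usual start/next/stop pattern for the
 * mapping iterator, consuming an sg list one mapped chunk at a time.
 * "consume()" is a hypothetical caller-provided helper.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter))
 *		consume(miter.addr, miter.length);
 *	sg_miter_stop(&miter);
 */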

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
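
/*
 * Example (illustrative sketch): extracting "len" payload bytes from an sg
 * list while skipping a "hdr_len" byte header.  All variables are assumed
 * to be set up by the caller.
 *
 *	size_t copied;
 *
 *	copied = sg_pcopy_to_buffer(sgl, nents, buf, len, hdr_len);
 *	if (copied < len)
 *		return -EIO;
 */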

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		       size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);