/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

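/*
 * Editor's illustration (not in the upstream file): sg_next() underlies
 * the for_each_sg() iterator, which is the idiomatic way to walk a
 * possibly-chained list. A minimal sketch; the example_ name is
 * hypothetical.
 */
static unsigned int __maybe_unused example_sg_total_len(struct scatterlist *sgl,
							unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i, total = 0;

	/* for_each_sg() advances with sg_next(), so chains are handled */
	for_each_sg(sgl, sg, nents, i)
		total += sg->length;

	return total;
}
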
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 * Returns the number of entries in @sg, taking chaining into account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;

	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 * Determines the number of entries in @sg that are required to meet
 * the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);

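/*
 * Editor's illustration (not in the upstream file): sizing a request with
 * sg_nents_for_len() and propagating its -EINVAL when the list is too
 * short. A minimal sketch; the example_ name is hypothetical.
 */
static int __maybe_unused example_nents_for_io(struct scatterlist *sgl, u64 len)
{
	int nents = sg_nents_for_len(sgl, len);

	if (nents < 0)
		return nents;	/* list holds fewer than len bytes */

	/* program at most 'nents' entries into the hardware here */
	return 0;
}
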
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);

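/*
 * Editor's illustration (not in the upstream file): the common pattern of
 * wrapping a lowmem (e.g. kmalloc'ed) buffer in a one-entry list for an
 * sg-only consumer. sg_set_buf() relies on virt_to_page(), so vmalloc or
 * highmem buffers must not be used. The example_ name is hypothetical.
 */
static void __maybe_unused example_wrap_buffer(struct scatterlist *sg,
					       void *buf, unsigned int len)
{
	sg_init_one(sg, buf, len);
	/* hand 'sg' with nents == 1 to e.g. dma_map_sg() */
}
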
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);

		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL chunk preallocated by the caller (may be NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);

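/*
 * Editor's illustration (not in the upstream file): the usual life cycle
 * of an sg table - sg_alloc_table() allocates the entries, sg_set_page()
 * points them at data, and sg_free_table() releases the table (the pages
 * themselves stay owned by the caller). A sketch; the example_ name is
 * hypothetical.
 */
static int __maybe_unused example_table_lifecycle(struct sg_table *sgt,
		struct page **pages, unsigned int n_pages)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(sgt, n_pages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... map and use the table ... */

	sg_free_table(sgt);
	return 0;
}
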
/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			         an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node up to the
 *    maximum size specified in @max_segment. A user may provide an offset at a
 *    start and a size of valid data in a buffer specified by the page array.
 *    The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at a start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

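/*
 * Editor's illustration (not in the upstream file): building a table
 * straight from a pinned page array. Physically contiguous pages collapse
 * into single entries, so the resulting table is often much shorter than
 * n_pages. The example_ name is hypothetical.
 */
static int __maybe_unused example_table_from_pinned(struct sg_table *sgt,
		struct page **pages, unsigned int n_pages,
		unsigned int offset, unsigned long nbytes)
{
	int ret;

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, offset,
					nbytes, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... dma_map_sg() and perform I/O ... */

	sg_free_table(sgt);
	return 0;
}
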
#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);

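/*
 * Editor's illustration (not in the upstream file): unlike
 * sg_alloc_table(), the sgl_* helpers allocate and free the backing pages
 * as well, so sgl_alloc() pairs with sgl_free(). A sketch; the example_
 * name is hypothetical.
 */
static int __maybe_unused example_sgl_roundtrip(unsigned long long nbytes)
{
	unsigned int nents;
	struct scatterlist *sgl;

	sgl = sgl_alloc(nbytes, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... fill the pages behind the 'nents' entries ... */

	sgl_free(sgl);
	return 0;
}
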
/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high enough value.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

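/*
 * Editor's illustration (not in the upstream file): the page iterator is
 * normally driven via for_each_sg_page(), which wraps
 * __sg_page_iter_start() and __sg_page_iter_next(). The example_ name is
 * hypothetical.
 */
static unsigned int __maybe_unused example_count_pages(struct scatterlist *sgl,
						       unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int npages = 0;

	for_each_sg_page(sgl, &piter, nents, 0)
		npages++;	/* sg_page_iter_page(&piter) is this page */

	return npages;
}
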
static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				     (pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

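/*
 * Editor's illustration (not in the upstream file): a complete mapping
 * iteration. With SG_MITER_ATOMIC the window between sg_miter_next() and
 * sg_miter_stop() runs with preemption disabled and must not sleep. The
 * example_ name is hypothetical.
 */
static u32 __maybe_unused example_sum_sg_bytes(struct scatterlist *sgl,
					       unsigned int nents)
{
	struct sg_mapping_iter miter;
	size_t i;
	u32 sum = 0;

	sg_miter_start(&miter, sgl, nents,
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		for (i = 0; i < miter.length; i++)
			sum += ((const u8 *)miter.addr)[i];
		miter.consumed = miter.length;	/* used the whole mapping */
	}
	sg_miter_stop(&miter);

	return sum;
}
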
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

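/*
 * Editor's illustration (not in the upstream file): round-tripping a
 * small header through an sg list. Both helpers return the number of
 * bytes actually copied, which is short when the list holds fewer than
 * buflen bytes. The example_ name is hypothetical.
 */
static int __maybe_unused example_copy_roundtrip(struct scatterlist *sgl,
						 unsigned int nents)
{
	u8 hdr[16] = { 0xaa };	/* first byte 0xaa, rest zero */
	u8 check[16];

	if (sg_copy_from_buffer(sgl, nents, hdr, sizeof(hdr)) != sizeof(hdr))
		return -EINVAL;
	if (sg_copy_to_buffer(sgl, nents, check, sizeof(check)) !=
	    sizeof(check))
		return -EINVAL;

	return memcmp(hdr, check, sizeof(hdr)) ? -EIO : 0;
}
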
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);

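/*
 * Editor's illustration (not in the upstream file): combining the pcopy
 * and zero helpers to patch a field at a byte offset inside an
 * sg-described message and clear the bytes that follow it. The example_
 * name is hypothetical.
 */
static int __maybe_unused example_patch_and_clear(struct scatterlist *sgl,
		unsigned int nents, off_t field_off, const void *field,
		size_t field_len, size_t tail_len)
{
	if (sg_pcopy_from_buffer(sgl, nents, field, field_len,
				 field_off) != field_len)
		return -EINVAL;

	if (sg_zero_buffer(sgl, nents, tail_len,
			   field_off + field_len) != tail_len)
		return -EINVAL;

	return 0;
}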