/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic scatter and gather helpers.
 *
 * Copyright (c) 2002 James Morris <[email protected]>
 * Copyright (c) 2002 Adam J. Richter <[email protected]>
 * Copyright (c) 2004 Jean-Luc Cooke <[email protected]>
 * Copyright (c) 2007 Herbert Xu <[email protected]>
 */

#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H

#include <crypto/algapi.h>

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (sg)
		sg_chain(head, num, sg);
	else
		sg_mark_end(head);
}

static inline void scatterwalk_start(struct scatter_walk *walk,
				     struct scatterlist *sg)
{
	walk->sg = sg;
	walk->offset = sg->offset;
}

/*
 * This is equivalent to scatterwalk_start(walk, sg) followed by
 * scatterwalk_skip(walk, pos).
 */
static inline void scatterwalk_start_at_pos(struct scatter_walk *walk,
					    struct scatterlist *sg,
					    unsigned int pos)
{
	while (pos > sg->length) {
		pos -= sg->length;
		sg = sg_next(sg);
	}
	walk->sg = sg;
	walk->offset = sg->offset + pos;
}

static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
					     unsigned int nbytes)
{
	unsigned int len_this_sg;
	unsigned int limit;

	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
	len_this_sg = walk->sg->offset + walk->sg->length - walk->offset;

	/*
	 * HIGHMEM case: the page may have to be mapped into memory. To avoid
	 * the complexity of having to map multiple pages at once per sg entry,
	 * clamp the returned length to not cross a page boundary.
	 *
	 * !HIGHMEM case: no mapping is needed; all pages of the sg entry are
	 * already mapped contiguously in the kernel's direct map. For improved
	 * performance, allow the walker to return data segments that cross a
	 * page boundary. Do still cap the length to PAGE_SIZE, since some
	 * users rely on that to avoid disabling preemption for too long when
	 * using SIMD. It's also needed for when skcipher_walk uses a bounce
	 * page due to the data not being aligned to the algorithm's alignmask.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		limit = PAGE_SIZE - offset_in_page(walk->offset);
	else
		limit = PAGE_SIZE;

	return min3(nbytes, len_this_sg, limit);
}

/*
 * Create a scatterlist that represents the remaining data in a walk. Uses
 * chaining to reference the original scatterlist, so this uses at most two
 * entries in @sg_out regardless of the number of entries in the original list.
 * Assumes that sg_init_table() was already done.
 */
static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
					  struct scatterlist sg_out[2])
{
	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
	sg_set_page(sg_out, sg_page(walk->sg),
		    walk->sg->offset + walk->sg->length - walk->offset,
		    walk->offset);
	scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2);
}
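/*
 * Illustrative sketch (not part of this header's API): one way a caller
 * could use scatterwalk_get_sglist() to build a two-entry scatterlist
 * describing everything after the first 'skip' bytes of 'sg', e.g. to hand
 * the tail of a request to another scatterlist-based API. The names 'sg',
 * 'skip', and 'remainder' are hypothetical caller variables:
 *
 *	struct scatter_walk walk;
 *	struct scatterlist remainder[2];
 *
 *	sg_init_table(remainder, 2);
 *	scatterwalk_start_at_pos(&walk, sg, skip);
 *	scatterwalk_get_sglist(&walk, remainder);
 *	(now 'remainder' covers the data of 'sg' from offset 'skip' onward,
 *	 chaining back into the original list rather than copying it)
 */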
static inline void scatterwalk_map(struct scatter_walk *walk)
{
	struct page *base_page = sg_page(walk->sg);
	unsigned int offset = walk->offset;
	void *addr;

	if (IS_ENABLED(CONFIG_HIGHMEM)) {
		struct page *page;

		page = base_page + (offset >> PAGE_SHIFT);
		offset = offset_in_page(offset);
		addr = kmap_local_page(page) + offset;
	} else {
		/*
		 * When !HIGHMEM we allow the walker to return segments that
		 * span a page boundary; see scatterwalk_clamp(). To make it
		 * clear that in this case we're working in the linear buffer of
		 * the whole sg entry in the kernel's direct map rather than
		 * within the mapped buffer of a single page, compute the
		 * address as an offset from the page_address() of the first
		 * page of the sg entry. Either way the result is the address
		 * in the direct map, but this makes it clearer what is really
		 * going on.
		 */
		addr = page_address(base_page) + offset;
	}

	walk->__addr = addr;
}

/**
 * scatterwalk_next() - Get the next data buffer in a scatterlist walk
 * @walk: the scatter_walk
 * @total: the total number of bytes remaining, > 0
 *
 * A virtual address for the next segment of data from the scatterlist will
 * be placed into @walk->addr. The caller must call scatterwalk_done_src()
 * or scatterwalk_done_dst() when it is done using this virtual address.
 *
 * Returns: the next number of bytes available, <= @total
 */
static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
					    unsigned int total)
{
	unsigned int nbytes = scatterwalk_clamp(walk, total);

	scatterwalk_map(walk);
	return nbytes;
}

static inline void scatterwalk_unmap(struct scatter_walk *walk)
{
	if (IS_ENABLED(CONFIG_HIGHMEM))
		kunmap_local(walk->__addr);
}

static inline void scatterwalk_advance(struct scatter_walk *walk,
				       unsigned int nbytes)
{
	walk->offset += nbytes;
}

/**
 * scatterwalk_done_src() - Finish one step of a walk of source scatterlist
 * @walk: the scatter_walk
 * @nbytes: the number of bytes processed this step, less than or equal to the
 *	    number of bytes that scatterwalk_next() returned.
 *
 * Use this if the mapped address was not written to, i.e. it is source data.
 */
static inline void scatterwalk_done_src(struct scatter_walk *walk,
					unsigned int nbytes)
{
	scatterwalk_unmap(walk);
	scatterwalk_advance(walk, nbytes);
}

/*
 * Flush the dcache of any pages that overlap the region
 * [offset, offset + nbytes) relative to base_page.
 *
 * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
 * that all relevant code (including the call to sg_page() in the caller, if
 * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
 */
static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
						    unsigned int offset,
						    unsigned int nbytes)
{
	unsigned int num_pages;

	base_page += offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * This is an overflow-safe version of
	 * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
	 */
	num_pages = nbytes / PAGE_SIZE;
	num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);

	for (unsigned int i = 0; i < num_pages; i++)
		flush_dcache_page(base_page + i);
}
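/*
 * Illustrative sketch (not part of this header's API): a typical walk of a
 * source scatterlist, copying 'nbytes' bytes from 'sg' into the linear
 * buffer 'buf' one mapped segment at a time. This roughly mirrors what
 * memcpy_from_scatterwalk() does; the names 'buf' (a u8 pointer), 'sg', and
 * 'nbytes' are hypothetical caller variables:
 *
 *	struct scatter_walk walk;
 *
 *	scatterwalk_start(&walk, sg);
 *	while (nbytes) {
 *		unsigned int n = scatterwalk_next(&walk, nbytes);
 *
 *		memcpy(buf, walk.addr, n);
 *		scatterwalk_done_src(&walk, n);
 *		buf += n;
 *		nbytes -= n;
 *	}
 *
 * A destination walk looks the same, except that it writes through
 * walk.addr and finishes each step with scatterwalk_done_dst() instead, so
 * that any needed dcache flushing is handled.
 */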
/**
 * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
 * @walk: the scatter_walk
 * @nbytes: the number of bytes processed this step, less than or equal to the
 *	    number of bytes that scatterwalk_next() returned.
 *
 * Use this if the mapped address may have been written to, i.e. it is
 * destination data.
 */
static inline void scatterwalk_done_dst(struct scatter_walk *walk,
					unsigned int nbytes)
{
	scatterwalk_unmap(walk);
	if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
		__scatterwalk_flush_dcache_pages(sg_page(walk->sg),
						 walk->offset, nbytes);
	scatterwalk_advance(walk, nbytes);
}

void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes);

void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
			     unsigned int nbytes);

void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
			   unsigned int nbytes);

void memcpy_from_sglist(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes);

void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
		      const void *buf, unsigned int nbytes);

void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
		   unsigned int nbytes);

/* In new code, please use memcpy_{from,to}_sglist() directly instead. */
static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
					    unsigned int start,
					    unsigned int nbytes, int out)
{
	if (out)
		memcpy_to_sglist(sg, start, buf, nbytes);
	else
		memcpy_from_sglist(buf, sg, start, nbytes);
}

struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
				     struct scatterlist *src,
				     unsigned int len);

#endif /* _CRYPTO_SCATTERWALK_H */