/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic scatter and gather helpers.
 *
 * Copyright (c) 2002 James Morris <[email protected]>
 * Copyright (c) 2002 Adam J. Richter <[email protected]>
 * Copyright (c) 2004 Jean-Luc Cooke <[email protected]>
 * Copyright (c) 2007 Herbert Xu <[email protected]>
 */

#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

struct scatter_walk {
	/* Must be the first member, see struct skcipher_walk. */
	union {
		void *const addr;

		/* Private API field, do not touch. */
		union crypto_no_such_thing *__addr;
	};
	struct scatterlist *sg;
	unsigned int offset;
};

struct skcipher_walk {
	union {
		/* Virtual address of the source. */
		struct {
			struct {
				const void *const addr;
			} virt;
		} src;

		/* Private field for the API, do not use. */
		struct scatter_walk in;
	};

	union {
		/* Virtual address of the destination. */
		struct {
			struct {
				void *const addr;
			} virt;
		} dst;

		/* Private field for the API, do not use. */
		struct scatter_walk out;
	};

	unsigned int nbytes;
	unsigned int total;

	u8 *page;
	u8 *buffer;
	u8 *oiv;
	void *iv;

	unsigned int ivsize;

	int flags;
	unsigned int blocksize;
	unsigned int stride;
	unsigned int alignmask;
};

static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (sg)
		sg_chain(head, num, sg);
	else
		sg_mark_end(head);
}

static inline void scatterwalk_start(struct scatter_walk *walk,
				     struct scatterlist *sg)
{
	walk->sg = sg;
	walk->offset = sg->offset;
}

/*
 * This is equivalent to scatterwalk_start(walk, sg) followed by
 * scatterwalk_skip(walk, pos).
 */
static inline void scatterwalk_start_at_pos(struct scatter_walk *walk,
					    struct scatterlist *sg,
					    unsigned int pos)
{
	while (pos > sg->length) {
		pos -= sg->length;
		sg = sg_next(sg);
	}
	walk->sg = sg;
	walk->offset = sg->offset + pos;
}

static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
					     unsigned int nbytes)
{
	unsigned int len_this_sg;
	unsigned int limit;

	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
	len_this_sg = walk->sg->offset + walk->sg->length - walk->offset;

	/*
	 * HIGHMEM case: the page may have to be mapped into memory.  To avoid
	 * the complexity of having to map multiple pages at once per sg entry,
	 * clamp the returned length to not cross a page boundary.
	 *
	 * !HIGHMEM case: no mapping is needed; all pages of the sg entry are
	 * already mapped contiguously in the kernel's direct map.  For
	 * improved performance, allow the walker to return data segments that
	 * cross a page boundary.  Do still cap the length to PAGE_SIZE, since
	 * some users rely on that to avoid disabling preemption for too long
	 * when using SIMD.  It's also needed for when skcipher_walk uses a
	 * bounce page due to the data not being aligned to the algorithm's
	 * alignmask.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		limit = PAGE_SIZE - offset_in_page(walk->offset);
	else
		limit = PAGE_SIZE;

	return min3(nbytes, len_this_sg, limit);
}
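/*
 * Worked example of the clamping above (illustrative only, assuming
 * PAGE_SIZE == 4096 and an sg entry with plenty of bytes remaining): if
 * walk->offset == 4000 and the caller asks for 200 bytes, the HIGHMEM path
 * limits this step to 4096 - offset_in_page(4000) == 96 bytes, so the
 * subsequent scatterwalk_map() never has to map more than one page.  The
 * !HIGHMEM path would return the full 200 bytes, capped only by the bytes
 * left in the sg entry and by PAGE_SIZE.
 */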
/*
 * Create a scatterlist that represents the remaining data in a walk.  Uses
 * chaining to reference the original scatterlist, so this uses at most two
 * entries in @sg_out regardless of the number of entries in the original list.
 * Assumes that sg_init_table() was already done.
 */
static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
					  struct scatterlist sg_out[2])
{
	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
	sg_set_page(sg_out, sg_page(walk->sg),
		    walk->sg->offset + walk->sg->length - walk->offset,
		    walk->offset);
	scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2);
}

static inline void scatterwalk_map(struct scatter_walk *walk)
{
	struct page *base_page = sg_page(walk->sg);
	unsigned int offset = walk->offset;
	void *addr;

	if (IS_ENABLED(CONFIG_HIGHMEM)) {
		struct page *page;

		page = nth_page(base_page, offset >> PAGE_SHIFT);
		offset = offset_in_page(offset);
		addr = kmap_local_page(page) + offset;
	} else {
		/*
		 * When !HIGHMEM we allow the walker to return segments that
		 * span a page boundary; see scatterwalk_clamp().  To make it
		 * clear that in this case we're working in the linear buffer
		 * of the whole sg entry in the kernel's direct map rather than
		 * within the mapped buffer of a single page, compute the
		 * address as an offset from the page_address() of the first
		 * page of the sg entry.  Either way the result is the address
		 * in the direct map, but this makes it clearer what is really
		 * going on.
		 */
		addr = page_address(base_page) + offset;
	}

	walk->__addr = addr;
}

/**
 * scatterwalk_next() - Get the next data buffer in a scatterlist walk
 * @walk: the scatter_walk
 * @total: the total number of bytes remaining, > 0
 *
 * A virtual address for the next segment of data from the scatterlist will
 * be placed into @walk->addr.  The caller must call scatterwalk_done_src()
 * or scatterwalk_done_dst() when it is done using this virtual address.
 *
 * Returns: the next number of bytes available, <= @total
 */
static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
					    unsigned int total)
{
	unsigned int nbytes = scatterwalk_clamp(walk, total);

	scatterwalk_map(walk);
	return nbytes;
}

static inline void scatterwalk_unmap(struct scatter_walk *walk)
{
	if (IS_ENABLED(CONFIG_HIGHMEM))
		kunmap_local(walk->__addr);
}

static inline void scatterwalk_advance(struct scatter_walk *walk,
				       unsigned int nbytes)
{
	walk->offset += nbytes;
}

/**
 * scatterwalk_done_src() - Finish one step of a walk of source scatterlist
 * @walk: the scatter_walk
 * @nbytes: the number of bytes processed this step, less than or equal to the
 *	    number of bytes that scatterwalk_next() returned.
 *
 * Use this if the mapped address was not written to, i.e. it is source data.
 */
static inline void scatterwalk_done_src(struct scatter_walk *walk,
					unsigned int nbytes)
{
	scatterwalk_unmap(walk);
	scatterwalk_advance(walk, nbytes);
}
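/*
 * Typical read-only usage of the above (a minimal sketch, not part of this
 * header's API; src_sg and total are the caller's scatterlist and byte count
 * with total > 0, and process() stands in for caller-specific work on each
 * mapped segment):
 *
 *	struct scatter_walk walk;
 *	unsigned int remaining = total;
 *
 *	scatterwalk_start(&walk, src_sg);
 *	do {
 *		unsigned int n = scatterwalk_next(&walk, remaining);
 *
 *		process(walk.addr, n);
 *		scatterwalk_done_src(&walk, n);
 *		remaining -= n;
 *	} while (remaining);
 *
 * A walk of destination data looks the same except that the caller writes
 * through walk.addr and finishes each step with scatterwalk_done_dst().
 */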
/**
 * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
 * @walk: the scatter_walk
 * @nbytes: the number of bytes processed this step, less than or equal to the
 *	    number of bytes that scatterwalk_next() returned.
 *
 * Use this if the mapped address may have been written to, i.e. it is
 * destination data.
 */
static inline void scatterwalk_done_dst(struct scatter_walk *walk,
					unsigned int nbytes)
{
	scatterwalk_unmap(walk);
	/*
	 * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
	 * relying on flush_dcache_page() being a no-op when not implemented,
	 * since otherwise the BUG_ON in sg_page() does not get optimized out.
	 * This also avoids having to consider whether the loop would get
	 * reliably optimized out or not.
	 */
	if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
		struct page *base_page;
		unsigned int offset;
		int start, end, i;

		base_page = sg_page(walk->sg);
		offset = walk->offset;
		start = offset >> PAGE_SHIFT;
		end = start + (nbytes >> PAGE_SHIFT);
		end += (offset_in_page(offset) + offset_in_page(nbytes) +
			PAGE_SIZE - 1) >> PAGE_SHIFT;
		for (i = start; i < end; i++)
			flush_dcache_page(nth_page(base_page, i));
	}
	scatterwalk_advance(walk, nbytes);
}

void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes);

void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
			     unsigned int nbytes);

void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
			   unsigned int nbytes);

void memcpy_from_sglist(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes);

void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
		      const void *buf, unsigned int nbytes);

void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
		   unsigned int nbytes);

/* In new code, please use memcpy_{from,to}_sglist() directly instead. */
static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
					    unsigned int start,
					    unsigned int nbytes, int out)
{
	if (out)
		memcpy_to_sglist(sg, start, buf, nbytes);
	else
		memcpy_from_sglist(buf, sg, start, nbytes);
}

struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
				     struct scatterlist *src,
				     unsigned int len);

int skcipher_walk_first(struct skcipher_walk *walk, bool atomic);
int skcipher_walk_done(struct skcipher_walk *walk, int res);

static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
	skcipher_walk_done(walk, -ECANCELED);
}

#endif /* _CRYPTO_SCATTERWALK_H */