author		Linus Torvalds <torvalds@linux-foundation.org>	2025-05-26 13:47:28 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-05-26 13:47:28 -0700
commit		14418ddcc2c2055743ac7ee53d5ac2cf8a8660a7 (patch)
tree		4edb44583a986dce31f537ff3d55032118b2db3e /crypto/scatterwalk.c
parent		15d90a5e5524532b7456a24f4626cf28c1629c4c (diff)
parent		2297554f01df6d3d4e98a3915c183ce3e491740a (diff)
Merge tag 'v6.16-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Fix memcpy_sglist to handle partially overlapping SG lists
   - Use memcpy_sglist to replace null skcipher
   - Rename CRYPTO_TESTS to CRYPTO_BENCHMARK
   - Flip CRYPTO_MANAGER_DISABLE_TEST into CRYPTO_SELFTESTS
   - Hide CRYPTO_MANAGER
   - Add delayed freeing of driver crypto_alg structures

  Compression:
   - Allocate large buffers on first use instead of initialisation in scomp
   - Drop destination linearisation buffer in scomp
   - Move scomp stream allocation into acomp
   - Add acomp scatter-gather walker
   - Remove request chaining
   - Add optional async request allocation

  Hashing:
   - Remove request chaining
   - Add optional async request allocation
   - Move partial block handling into API
   - Add ahash support to hmac
   - Fix shash documentation to disallow usage in hard IRQs

  Algorithms:
   - Remove unnecessary SIMD fallback code on x86 and arm/arm64
   - Drop avx10_256 xts(aes)/ctr(aes) on x86
   - Improve avx-512 optimisations for xts(aes)
   - Move chacha arch implementations into lib/crypto
   - Move poly1305 into lib/crypto and drop unused Crypto API algorithm
   - Disable powerpc/poly1305 as it has no SIMD fallback
   - Move sha256 arch implementations into lib/crypto
   - Convert deflate to acomp
   - Set block size correctly in cbcmac

  Drivers:
   - Do not use sg_dma_len before mapping in sun8i-ss
   - Fix warm-reboot failure by making shutdown do more work in qat
   - Add locking in zynqmp-sha
   - Remove cavium/zip
   - Add support for PCI device 0x17D8 to ccp
   - Add qat_6xxx support in qat
   - Add support for RK3576 in rockchip-rng
   - Add support for i.MX8QM in caam

  Others:
   - Fix irq_fpu_usable/kernel_fpu_begin inconsistency during CPU bring-up
   - Add new SEV/SNP platform shutdown API in ccp"

* tag 'v6.16-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (382 commits)
  x86/fpu: Fix irq_fpu_usable() to return false during CPU onlining
  crypto: qat - add missing header inclusion
  crypto: api - Redo lookup on EEXIST
  Revert "crypto: testmgr - Add hash export format testing"
  crypto: marvell/cesa - Do not chain submitted requests
  crypto: powerpc/poly1305 - add depends on BROKEN for now
  Revert "crypto: powerpc/poly1305 - Add SIMD fallback"
  crypto: ccp - Add missing tee info reg for teev2
  crypto: ccp - Add missing bootloader info reg for pspv5
  crypto: sun8i-ce - move fallback ahash_request to the end of the struct
  crypto: octeontx2 - Use dynamic allocated memory region for lmtst
  crypto: octeontx2 - Initialize cptlfs device info once
  crypto: xts - Only add ecb if it is not already there
  crypto: lrw - Only add ecb if it is not already there
  crypto: testmgr - Add hash export format testing
  crypto: testmgr - Use ahash for generic tfm
  crypto: hmac - Add ahash support
  crypto: testmgr - Ignore EEXIST on shash allocation
  crypto: algapi - Add driver template support to crypto_inst_setname
  crypto: shash - Set reqsize in shash_alg
  ...
Diffstat (limited to 'crypto/scatterwalk.c')
-rw-r--r--	crypto/scatterwalk.c	274
1 file changed, 259 insertions(+), 15 deletions(-)
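
For context before the diff: the rewritten memcpy_sglist() below drives the
copy through the skcipher walk machinery, which lets steps that fully alias
the same memory be detected and skipped. A minimal sketch of a caller,
assuming two single-entry scatterlists over kernel buffers (the helper name
and buffers are hypothetical; only memcpy_sglist() itself comes from this
patch):

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>

static void copy_sglist_example(void *src_buf, void *dst_buf)
{
	struct scatterlist src, dst;

	/* Single-entry sg lists over two 64-byte kernel buffers. */
	sg_init_one(&src, src_buf, 64);
	sg_init_one(&dst, dst_buf, 64);

	/* Copy 64 bytes; steps that alias the same address are skipped. */
	memcpy_sglist(&dst, &src, 64);
}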
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 8225801488d5c1..1d010e2a1b1a2e 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -10,10 +10,25 @@
*/
#include <crypto/scatterwalk.h>
+#include <linux/crypto.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
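+/* Internal flags describing how the current walk step is mapped. */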
+enum {
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
+};
+
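+/* Use a sleeping allocation only when this walk is allowed to sleep. */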
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+ return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes)
{
@@ -89,27 +104,23 @@ EXPORT_SYMBOL_GPL(memcpy_to_sglist);
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct scatter_walk swalk;
- struct scatter_walk dwalk;
+ struct skcipher_walk walk = {};
if (unlikely(nbytes == 0)) /* in case sg == NULL */
return;
- scatterwalk_start(&swalk, src);
- scatterwalk_start(&dwalk, dst);
+ walk.total = nbytes;
+
+ scatterwalk_start(&walk.in, src);
+ scatterwalk_start(&walk.out, dst);
+ skcipher_walk_first(&walk, true);
do {
- unsigned int slen, dlen;
- unsigned int len;
-
- slen = scatterwalk_next(&swalk, nbytes);
- dlen = scatterwalk_next(&dwalk, nbytes);
- len = min(slen, dlen);
- memcpy(dwalk.addr, swalk.addr, len);
- scatterwalk_done_dst(&dwalk, len);
- scatterwalk_done_src(&swalk, len);
- nbytes -= len;
- } while (nbytes);
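+ /* Fully-overlapping steps map to one address; skip the copy. */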
+ if (walk.src.virt.addr != walk.dst.virt.addr)
+ memcpy(walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes);
+ skcipher_walk_done(&walk, 0);
+ } while (walk.nbytes);
}
EXPORT_SYMBOL_GPL(memcpy_sglist);
@@ -135,3 +146,236 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
return dst;
}
EXPORT_SYMBOL_GPL(scatterwalk_ffwd);
+
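+/*
+ * Slow path: neither sg list can supply bsize contiguous bytes at the
+ * current position, so bounce the block through an aligned temporary
+ * buffer.
+ */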
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned n;
+ void *buffer;
+
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
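+ /*
+ * kzalloc() already returns memory aligned to
+ * crypto_tfm_ctx_alignment(), so pad only for any stricter
+ * alignment that alignmask may demand beyond that.
+ */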
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
+ }
+
+ buffer = PTR_ALIGN(buffer, alignmask + 1);
+ memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+ walk->out.__addr = buffer;
+ walk->in.__addr = walk->out.addr;
+
+ walk->nbytes = bsize;
+ walk->flags |= SKCIPHER_WALK_SLOW;
+
+ return 0;
+}
+
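+/*
+ * Copy path: src and/or dst are misaligned, so stage this step in the
+ * walk's bounce page and let the cipher work in place on aligned data.
+ */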
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+ void *tmp = walk->page;
+
+ scatterwalk_map(&walk->in);
+ memcpy(tmp, walk->in.addr, walk->nbytes);
+ scatterwalk_unmap(&walk->in);
+ /*
+ * walk->in is advanced later when the number of bytes actually
+ * processed (which might be less than walk->nbytes) is known.
+ */
+
+ walk->in.__addr = tmp;
+ walk->out.__addr = tmp;
+ return 0;
+}
+
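+/*
+ * Fast path: map dst directly, and alias src to it when both start at
+ * the same page and offset; otherwise map src separately.
+ */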
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+ unsigned long diff;
+
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
+
+ scatterwalk_map(&walk->out);
+ walk->in.__addr = walk->out.__addr;
+
+ if (diff) {
+ walk->flags |= SKCIPHER_WALK_DIFF;
+ scatterwalk_map(&walk->in);
+ }
+
+ return 0;
+}
+
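+/* Set up the next step, choosing the fast, copy, or slow strategy. */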
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+ unsigned int bsize;
+ unsigned int n;
+
+ n = walk->total;
+ bsize = min(walk->stride, max(n, walk->blocksize));
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+ if (unlikely(walk->total < walk->blocksize))
+ return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+ return skcipher_next_slow(walk, bsize);
+ }
+ walk->nbytes = n;
+
+ if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+ if (!walk->page) {
+ gfp_t gfp = skcipher_walk_gfp(walk);
+
+ walk->page = (void *)__get_free_page(gfp);
+ if (!walk->page)
+ goto slow_path;
+ }
+ walk->flags |= SKCIPHER_WALK_COPY;
+ return skcipher_next_copy(walk);
+ }
+
+ return skcipher_next_fast(walk);
+}
+
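+/* The caller's IV is misaligned; copy it into a suitably aligned buffer. */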
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned ivsize = walk->ivsize;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
+ unsigned size;
+ u8 *iv;
+
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+ if (!walk->buffer)
+ return -ENOMEM;
+
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
+
+ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+}
+
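+/*
+ * Begin a walk over walk->total bytes. The caller must have initialized
+ * walk->in, walk->out, walk->total, and the per-algorithm fields
+ * (alignmask, blocksize, stride, ivsize, iv), as memcpy_sglist() does
+ * above.
+ */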
+int skcipher_walk_first(struct skcipher_walk *walk, bool atomic)
+{
+ if (WARN_ON_ONCE(in_hardirq()))
+ return -EDEADLK;
+
+ walk->flags = atomic ? 0 : SKCIPHER_WALK_SLEEP;
+
+ walk->buffer = NULL;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = skcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+
+ walk->page = NULL;
+
+ return skcipher_walk_next(walk);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_first);
+
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ * returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
+{
+ unsigned int n = walk->nbytes; /* num bytes processed this step */
+ unsigned int total = 0; /* new total remaining */
+
+ if (!n)
+ goto finish;
+
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
+ }
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
+ scatterwalk_advance(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+ scatterwalk_done_src(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_COPY) {
+ scatterwalk_advance(&walk->in, n);
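+ /* Write the processed bytes from the bounce page back to dst. */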
+ scatterwalk_map(&walk->out);
+ memcpy(walk->out.addr, walk->page, n);
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
+ /*
+ * Didn't process all bytes. Either the algorithm is
+ * broken, or this was the last step and it turned out
+ * the message wasn't evenly divisible into blocks but
+ * the algorithm requires it.
+ */
+ res = -EINVAL;
+ total = 0;
+ } else
+ memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+ goto dst_done;
+ }
+
+ scatterwalk_done_dst(&walk->out, n);
+dst_done:
+
+ if (res > 0)
+ res = 0;
+
+ walk->total = total;
+ walk->nbytes = 0;
+
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
+ return skcipher_walk_next(walk);
+ }
+
+finish:
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+ goto out;
+
+ if (walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
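
To illustrate the contract documented for skcipher_walk_done() above: a
cipher implementation drives the walk in a loop, transforming walk.nbytes
bytes per step and passing the count of bytes it did *not* process back to
skcipher_walk_done(). A minimal sketch, assuming skcipher_walk_virt() as
the entry point (as in crypto/skcipher.c); the toy XOR transform and the
function name are hypothetical, not part of this patch:

#include <crypto/internal/skcipher.h>

static int toy_xor_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int n = walk.nbytes;

		while (n--)
			*dst++ = *src++ ^ 0xaa;	/* stand-in for a real cipher */

		/* All walk.nbytes bytes were consumed in this step. */
		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}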