| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-03 11:28:38 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-03 11:28:38 -0800 |
| commit | a619fe35ab41fded440d3762d4fbad84ff86a4d4 (patch) | |
| tree | 2cc42a8bb9ec80f7850059c5cab383233c804957 /crypto/skcipher.c | |
| parent | c8321831480d80af01ce001bd6626fc130fd13b1 (diff) | |
| parent | 48bc9da3c97c15f1ea24934bcb3b736acd30163d (diff) | |
| download | net-a619fe35ab41fded440d3762d4fbad84ff86a4d4.tar.gz | |
Merge tag 'v6.19-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Rewrite memcpy_sglist from scratch
- Add on-stack AEAD request allocation
- Fix partial block processing in ahash
Algorithms:
- Remove ansi_cprng
- Remove tcrypt tests for poly1305
- Fix EINPROGRESS processing in authenc
- Fix double-free in zstd
Drivers:
- Use drbg ctr helper when reseeding xilinx-trng
- Add support for PCI device 0x115A to ccp
- Add support for paes in caam
- Add support for aes-xts in dthev2
Others:
- Use likely in rhashtable lookup
- Fix lockdep false-positive in padata by removing a helper"
* tag 'v6.19-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (71 commits)
crypto: zstd - fix double-free in per-CPU stream cleanup
crypto: ahash - Zero positive err value in ahash_update_finish
crypto: ahash - Fix crypto_ahash_import with partial block data
crypto: lib/mpi - use min() instead of min_t()
crypto: ccp - use min() instead of min_t()
hwrng: core - use min3() instead of nested min_t()
crypto: aesni - ctr_crypt() use min() instead of min_t()
crypto: drbg - Delete unused ctx from struct sdesc
crypto: testmgr - Add missing DES weak and semi-weak key tests
Revert "crypto: scatterwalk - Move skcipher walk and use it for memcpy_sglist"
crypto: scatterwalk - Fix memcpy_sglist() to always succeed
crypto: iaa - Request to add Kanchana P Sridhar to Maintainers.
crypto: tcrypt - Remove unused poly1305 support
crypto: ansi_cprng - Remove unused ansi_cprng algorithm
crypto: asymmetric_keys - fix uninitialized pointers with free attribute
KEYS: Avoid -Wflex-array-member-not-at-end warning
crypto: ccree - Correctly handle return of sg_nents_for_len
crypto: starfive - Correctly handle return of sg_nents_for_len
crypto: iaa - Fix incorrect return value in save_iaa_wq()
crypto: zstd - Remove unnecessary size_t cast
...
Diffstat (limited to 'crypto/skcipher.c')
| -rw-r--r-- | crypto/skcipher.c | 261 |
1 file changed, 255 insertions(+), 6 deletions(-)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 8fa5d9686d0850..14a820cb06c788 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -17,6 +17,7 @@
 #include <linux/cryptouser.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -27,14 +28,258 @@
 #define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
 
+enum {
+	SKCIPHER_WALK_SLOW = 1 << 0,
+	SKCIPHER_WALK_COPY = 1 << 1,
+	SKCIPHER_WALK_DIFF = 1 << 2,
+	SKCIPHER_WALK_SLEEP = 1 << 3,
+};
+
 static const struct crypto_type crypto_skcipher_type;
 
+static int skcipher_walk_next(struct skcipher_walk *walk);
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
 static inline struct skcipher_alg *__crypto_skcipher_alg(
 	struct crypto_alg *alg)
 {
 	return container_of(alg, struct skcipher_alg, base);
 }
 
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ *	 or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ *	   returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
+{
+	unsigned int n = walk->nbytes; /* num bytes processed this step */
+	unsigned int total = 0; /* new total remaining */
+
+	if (!n)
+		goto finish;
+
+	if (likely(res >= 0)) {
+		n -= res; /* subtract num bytes *not* processed */
+		total = walk->total - n;
+	}
+
+	if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
+				    SKCIPHER_WALK_COPY |
+				    SKCIPHER_WALK_DIFF)))) {
+		scatterwalk_advance(&walk->in, n);
+	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
+		scatterwalk_done_src(&walk->in, n);
+	} else if (walk->flags & SKCIPHER_WALK_COPY) {
+		scatterwalk_advance(&walk->in, n);
+		scatterwalk_map(&walk->out);
+		memcpy(walk->out.addr, walk->page, n);
+	} else { /* SKCIPHER_WALK_SLOW */
+		if (res > 0) {
+			/*
+			 * Didn't process all bytes. Either the algorithm is
+			 * broken, or this was the last step and it turned out
+			 * the message wasn't evenly divisible into blocks but
+			 * the algorithm requires it.
+			 */
+			res = -EINVAL;
+			total = 0;
+		} else
+			memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+		goto dst_done;
+	}
+
+	scatterwalk_done_dst(&walk->out, n);
+dst_done:
+
+	if (res > 0)
+		res = 0;
+
+	walk->total = total;
+	walk->nbytes = 0;
+
+	if (total) {
+		if (walk->flags & SKCIPHER_WALK_SLEEP)
+			cond_resched();
+		walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+				 SKCIPHER_WALK_DIFF);
+		return skcipher_walk_next(walk);
+	}
+
+finish:
+	/* Short-circuit for the common/fast path. */
+	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+		goto out;
+
+	if (walk->iv != walk->oiv)
+		memcpy(walk->oiv, walk->iv, walk->ivsize);
+	if (walk->buffer != walk->page)
+		kfree(walk->buffer);
+	if (walk->page)
+		free_page((unsigned long)walk->page);
+
+out:
+	return res;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
+
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+	unsigned alignmask = walk->alignmask;
+	unsigned n;
+	void *buffer;
+
+	if (!walk->buffer)
+		walk->buffer = walk->page;
+	buffer = walk->buffer;
+	if (!buffer) {
+		/* Min size for a buffer of bsize bytes aligned to alignmask */
+		n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+		buffer = kzalloc(n, skcipher_walk_gfp(walk));
+		if (!buffer)
+			return skcipher_walk_done(walk, -ENOMEM);
+		walk->buffer = buffer;
+	}
+
+	buffer = PTR_ALIGN(buffer, alignmask + 1);
+	memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+	walk->out.__addr = buffer;
+	walk->in.__addr = walk->out.addr;
+
+	walk->nbytes = bsize;
+	walk->flags |= SKCIPHER_WALK_SLOW;
+
+	return 0;
+}
+
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+	void *tmp = walk->page;
+
+	scatterwalk_map(&walk->in);
+	memcpy(tmp, walk->in.addr, walk->nbytes);
+	scatterwalk_unmap(&walk->in);
+	/*
+	 * walk->in is advanced later when the number of bytes actually
+	 * processed (which might be less than walk->nbytes) is known.
+	 */
+
+	walk->in.__addr = tmp;
+	walk->out.__addr = tmp;
+	return 0;
+}
+
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+	unsigned long diff;
+
+	diff = offset_in_page(walk->in.offset) -
+	       offset_in_page(walk->out.offset);
+	diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+		(u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
+
+	scatterwalk_map(&walk->out);
+	walk->in.__addr = walk->out.__addr;
+
+	if (diff) {
+		walk->flags |= SKCIPHER_WALK_DIFF;
+		scatterwalk_map(&walk->in);
+	}
+
+	return 0;
+}
+
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+	unsigned int bsize;
+	unsigned int n;
+
+	n = walk->total;
+	bsize = min(walk->stride, max(n, walk->blocksize));
+	n = scatterwalk_clamp(&walk->in, n);
+	n = scatterwalk_clamp(&walk->out, n);
+
+	if (unlikely(n < bsize)) {
+		if (unlikely(walk->total < walk->blocksize))
+			return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+		return skcipher_next_slow(walk, bsize);
+	}
+	walk->nbytes = n;
+
+	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+		if (!walk->page) {
+			gfp_t gfp = skcipher_walk_gfp(walk);
+
+			walk->page = (void *)__get_free_page(gfp);
+			if (!walk->page)
+				goto slow_path;
+		}
+		walk->flags |= SKCIPHER_WALK_COPY;
+		return skcipher_next_copy(walk);
+	}
+
+	return skcipher_next_fast(walk);
+}
+
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+	unsigned alignmask = walk->alignmask;
+	unsigned ivsize = walk->ivsize;
+	unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
+	unsigned size;
+	u8 *iv;
+
+	/* Min size for a buffer of stride + ivsize, aligned to alignmask */
+	size = aligned_stride + ivsize +
+	       (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+	if (!walk->buffer)
+		return -ENOMEM;
+
+	iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
+
+	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+	return 0;
+}
+
+static int skcipher_walk_first(struct skcipher_walk *walk)
+{
+	if (WARN_ON_ONCE(in_hardirq()))
+		return -EDEADLK;
+
+	walk->buffer = NULL;
+	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+		int err = skcipher_copy_iv(walk);
+
+		if (err)
+			return err;
+	}
+
+	walk->page = NULL;
+
+	return skcipher_walk_next(walk);
+}
+
 int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
 		       struct skcipher_request *__restrict req, bool atomic)
 {
@@ -49,8 +294,10 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
 	walk->nbytes = 0;
 	walk->iv = req->iv;
 	walk->oiv = req->iv;
-	if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
-		atomic = true;
+	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+		walk->flags = SKCIPHER_WALK_SLEEP;
+	else
+		walk->flags = 0;
 
 	if (unlikely(!walk->total))
 		return 0;
@@ -67,7 +314,7 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
 	else
 		walk->stride = alg->walksize;
 
-	return skcipher_walk_first(walk, atomic);
+	return skcipher_walk_first(walk);
 }
 EXPORT_SYMBOL_GPL(skcipher_walk_virt);
 
@@ -80,8 +327,10 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
 	walk->nbytes = 0;
 	walk->iv = req->iv;
 	walk->oiv = req->iv;
-	if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
-		atomic = true;
+	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+		walk->flags = SKCIPHER_WALK_SLEEP;
+	else
+		walk->flags = 0;
 
 	if (unlikely(!walk->total))
 		return 0;
@@ -94,7 +343,7 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
 	walk->ivsize = crypto_aead_ivsize(tfm);
 	walk->alignmask = crypto_aead_alignmask(tfm);
 
-	return skcipher_walk_first(walk, atomic);
+	return skcipher_walk_first(walk);
 }
 
 int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
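
For readers less familiar with this API, the kernel-doc for skcipher_walk_done() above is easiest to follow next to a typical caller. The sketch below shows, roughly, how a synchronous skcipher implementation drives the walk; it is illustrative only, not code from this merge, and my_cipher_blocks() is a hypothetical stand-in for a real block-cipher routine.

#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>

/* Hypothetical block-cipher core used for illustration; not a kernel API. */
void my_cipher_blocks(void *dst, const void *src, unsigned int nbytes);

static int my_ecb_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	/*
	 * Map the first chunk. atomic=false lets the walk sleep and use
	 * GFP_KERNEL allocations when the request's CRYPTO_TFM_REQ_MAY_SLEEP
	 * flag is set (this is what sets SKCIPHER_WALK_SLEEP above).
	 */
	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		/* Process only whole blocks from this step. */
		unsigned int n = nbytes - (nbytes % bsize);

		my_cipher_blocks(walk.dst.virt.addr, walk.src.virt.addr, n);

		/*
		 * Report the bytes *not* processed; skcipher_walk_done()
		 * either advances to the next chunk or cleans up and
		 * returns the final status.
		 */
		err = skcipher_walk_done(&walk, nbytes - n);
	}
	return err;
}

Note that a positive leftover is only fatal on a bounce-buffered step: in the SKCIPHER_WALK_SLOW branch above it becomes -EINVAL, whereas on the fast path the unprocessed bytes simply remain in walk->total for the next step.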
