-rw-r--r-- Documentation/crypto/userspace-if.rst | 7
-rw-r--r-- Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml | 3
-rw-r--r-- Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/crypto/qcom,prng.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/crypto/qcom-qce.yaml | 1
-rw-r--r-- Documentation/devicetree/bindings/rng/microchip,pic32-rng.txt | 17
-rw-r--r-- Documentation/devicetree/bindings/rng/microchip,pic32-rng.yaml | 40
-rw-r--r-- Documentation/security/keys/trusted-encrypted.rst | 88
-rw-r--r-- MAINTAINERS | 2
-rw-r--r-- arch/arm/configs/axm55xx_defconfig | 1
-rw-r--r-- arch/arm/configs/clps711x_defconfig | 1
-rw-r--r-- arch/arm/configs/dove_defconfig | 1
-rw-r--r-- arch/arm/configs/ep93xx_defconfig | 1
-rw-r--r-- arch/arm/configs/jornada720_defconfig | 1
-rw-r--r-- arch/arm/configs/keystone_defconfig | 1
-rw-r--r-- arch/arm/configs/lpc32xx_defconfig | 1
-rw-r--r-- arch/arm/configs/mmp2_defconfig | 1
-rw-r--r-- arch/arm/configs/mv78xx0_defconfig | 1
-rw-r--r-- arch/arm/configs/omap1_defconfig | 1
-rw-r--r-- arch/arm/configs/orion5x_defconfig | 1
-rw-r--r-- arch/arm/configs/pxa168_defconfig | 1
-rw-r--r-- arch/arm/configs/pxa3xx_defconfig | 1
-rw-r--r-- arch/arm/configs/pxa910_defconfig | 1
-rw-r--r-- arch/arm/configs/spitz_defconfig | 1
-rw-r--r-- arch/arm64/configs/defconfig | 1
-rw-r--r-- arch/hexagon/configs/comet_defconfig | 1
-rw-r--r-- arch/m68k/configs/amcore_defconfig | 1
-rw-r--r-- arch/m68k/configs/amiga_defconfig | 1
-rw-r--r-- arch/m68k/configs/apollo_defconfig | 1
-rw-r--r-- arch/m68k/configs/atari_defconfig | 1
-rw-r--r-- arch/m68k/configs/bvme6000_defconfig | 1
-rw-r--r-- arch/m68k/configs/hp300_defconfig | 1
-rw-r--r-- arch/m68k/configs/mac_defconfig | 1
-rw-r--r-- arch/m68k/configs/multi_defconfig | 1
-rw-r--r-- arch/m68k/configs/mvme147_defconfig | 1
-rw-r--r-- arch/m68k/configs/mvme16x_defconfig | 1
-rw-r--r-- arch/m68k/configs/q40_defconfig | 1
-rw-r--r-- arch/m68k/configs/stmark2_defconfig | 1
-rw-r--r-- arch/m68k/configs/sun3_defconfig | 1
-rw-r--r-- arch/m68k/configs/sun3x_defconfig | 1
-rw-r--r-- arch/mips/configs/decstation_64_defconfig | 1
-rw-r--r-- arch/mips/configs/decstation_defconfig | 1
-rw-r--r-- arch/mips/configs/decstation_r4k_defconfig | 1
-rw-r--r-- arch/s390/configs/debug_defconfig | 1
-rw-r--r-- arch/s390/configs/defconfig | 1
-rw-r--r-- arch/sh/configs/ap325rxa_defconfig | 1
-rw-r--r-- arch/sh/configs/apsh4a3a_defconfig | 1
-rw-r--r-- arch/sh/configs/apsh4ad0a_defconfig | 1
-rw-r--r-- arch/sh/configs/dreamcast_defconfig | 1
-rw-r--r-- arch/sh/configs/ecovec24_defconfig | 1
-rw-r--r-- arch/sh/configs/edosk7760_defconfig | 1
-rw-r--r-- arch/sh/configs/espt_defconfig | 1
-rw-r--r-- arch/sh/configs/hp6xx_defconfig | 1
-rw-r--r-- arch/sh/configs/landisk_defconfig | 1
-rw-r--r-- arch/sh/configs/lboxre2_defconfig | 1
-rw-r--r-- arch/sh/configs/migor_defconfig | 1
-rw-r--r-- arch/sh/configs/r7780mp_defconfig | 1
-rw-r--r-- arch/sh/configs/r7785rp_defconfig | 1
-rw-r--r-- arch/sh/configs/rts7751r2d1_defconfig | 1
-rw-r--r-- arch/sh/configs/rts7751r2dplus_defconfig | 1
-rw-r--r-- arch/sh/configs/sdk7780_defconfig | 1
-rw-r--r-- arch/sh/configs/sdk7786_defconfig | 1
-rw-r--r-- arch/sh/configs/se7206_defconfig | 1
-rw-r--r-- arch/sh/configs/se7343_defconfig | 1
-rw-r--r-- arch/sh/configs/se7705_defconfig | 1
-rw-r--r-- arch/sh/configs/se7712_defconfig | 1
-rw-r--r-- arch/sh/configs/se7721_defconfig | 1
-rw-r--r-- arch/sh/configs/se7722_defconfig | 1
-rw-r--r-- arch/sh/configs/se7724_defconfig | 1
-rw-r--r-- arch/sh/configs/se7750_defconfig | 1
-rw-r--r-- arch/sh/configs/se7751_defconfig | 1
-rw-r--r-- arch/sh/configs/se7780_defconfig | 1
-rw-r--r-- arch/sh/configs/sh03_defconfig | 1
-rw-r--r-- arch/sh/configs/sh2007_defconfig | 1
-rw-r--r-- arch/sh/configs/sh7710voipgw_defconfig | 1
-rw-r--r-- arch/sh/configs/sh7757lcr_defconfig | 1
-rw-r--r-- arch/sh/configs/sh7763rdp_defconfig | 1
-rw-r--r-- arch/sh/configs/sh7785lcr_32bit_defconfig | 1
-rw-r--r-- arch/sh/configs/sh7785lcr_defconfig | 1
-rw-r--r-- arch/sh/configs/shmin_defconfig | 1
-rw-r--r-- arch/sh/configs/shx3_defconfig | 1
-rw-r--r-- arch/sh/configs/titan_defconfig | 1
-rw-r--r-- arch/sh/configs/ul2_defconfig | 1
-rw-r--r-- arch/sh/configs/urquell_defconfig | 1
-rw-r--r-- arch/sparc/configs/sparc32_defconfig | 1
-rw-r--r-- arch/sparc/configs/sparc64_defconfig | 1
-rw-r--r-- arch/x86/crypto/aesni-intel_glue.c | 3
-rw-r--r-- arch/xtensa/configs/audio_kc705_defconfig | 1
-rw-r--r-- arch/xtensa/configs/generic_kc705_defconfig | 1
-rw-r--r-- arch/xtensa/configs/iss_defconfig | 1
-rw-r--r-- arch/xtensa/configs/nommu_kc705_defconfig | 1
-rw-r--r-- arch/xtensa/configs/smp_lx200_defconfig | 1
-rw-r--r-- arch/xtensa/configs/virt_defconfig | 1
-rw-r--r-- arch/xtensa/configs/xip_kc705_defconfig | 1
-rw-r--r-- crypto/Kconfig | 21
-rw-r--r-- crypto/Makefile | 3
-rw-r--r-- crypto/aead.c | 20
-rw-r--r-- crypto/af_alg.c | 5
-rw-r--r-- crypto/ahash.c | 18
-rw-r--r-- crypto/algif_hash.c | 3
-rw-r--r-- crypto/algif_rng.c | 3
-rw-r--r-- crypto/ansi_cprng.c | 474
-rw-r--r-- crypto/asymmetric_keys/asymmetric_type.c | 14
-rw-r--r-- crypto/asymmetric_keys/restrict.c | 7
-rw-r--r-- crypto/asymmetric_keys/x509_cert_parser.c | 2
-rw-r--r-- crypto/asymmetric_keys/x509_public_key.c | 2
-rw-r--r-- crypto/authenc.c | 75
-rw-r--r-- crypto/deflate.c | 3
-rw-r--r-- crypto/df_sp80090a.c | 232
-rw-r--r-- crypto/drbg.c | 266
-rw-r--r-- crypto/fips.c | 5
-rw-r--r-- crypto/scatterwalk.c | 345
-rw-r--r-- crypto/skcipher.c | 261
-rw-r--r-- crypto/tcrypt.c | 8
-rw-r--r-- crypto/tcrypt.h | 18
-rw-r--r-- crypto/testmgr.c | 97
-rw-r--r-- crypto/testmgr.h | 226
-rw-r--r-- crypto/zstd.c | 17
-rw-r--r-- drivers/char/hw_random/bcm2835-rng.c | 11
-rw-r--r-- drivers/char/hw_random/core.c | 11
-rw-r--r-- drivers/crypto/Kconfig | 1
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 2
-rw-r--r-- drivers/crypto/atmel-i2c.c | 2
-rw-r--r-- drivers/crypto/axis/artpec6_crypto.c | 9
-rw-r--r-- drivers/crypto/caam/blob_gen.c | 86
-rw-r--r-- drivers/crypto/caam/caamalg.c | 128
-rw-r--r-- drivers/crypto/caam/caamalg_desc.c | 87
-rw-r--r-- drivers/crypto/caam/caamalg_desc.h | 13
-rw-r--r-- drivers/crypto/caam/caamrng.c | 4
-rw-r--r-- drivers/crypto/caam/desc.h | 9
-rw-r--r-- drivers/crypto/caam/desc_constr.h | 8
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_mbx.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 2
-rw-r--r-- drivers/crypto/ccp/sp-dev.h | 2
-rw-r--r-- drivers/crypto/ccp/sp-pci.c | 19
-rw-r--r-- drivers/crypto/ccp/sp-platform.c | 17
-rw-r--r-- drivers/crypto/ccree/cc_buffer_mgr.c | 6
-rw-r--r-- drivers/crypto/hifn_795x.c | 7
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 55
-rw-r--r-- drivers/crypto/hisilicon/sgl.c | 5
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_main.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_aer.c | 4
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_isr.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_sriov.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_vf_isr.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 18
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.c | 7
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 5
-rw-r--r-- drivers/crypto/qce/core.c | 3
-rw-r--r-- drivers/crypto/qce/dma.c | 6
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 3
-rw-r--r-- drivers/crypto/starfive/jh7110-hash.c | 6
-rw-r--r-- drivers/crypto/ti/Kconfig | 1
-rw-r--r-- drivers/crypto/ti/dthev2-aes.c | 137
-rw-r--r-- drivers/crypto/ti/dthev2-common.h | 10
-rw-r--r-- drivers/crypto/xilinx/xilinx-trng.c | 39
-rw-r--r-- include/crypto/aead.h | 87
-rw-r--r-- include/crypto/algapi.h | 12
-rw-r--r-- include/crypto/df_sp80090a.h | 28
-rw-r--r-- include/crypto/drbg.h | 25
-rw-r--r-- include/crypto/internal/drbg.h | 54
-rw-r--r-- include/crypto/internal/skcipher.h | 48
-rw-r--r-- include/crypto/rng.h | 11
-rw-r--r-- include/crypto/scatterwalk.h | 117
-rw-r--r-- include/keys/asymmetric-type.h | 2
-rw-r--r-- include/linux/rhashtable.h | 70
-rw-r--r-- include/soc/fsl/caam-blob.h | 26
-rw-r--r-- kernel/padata.c | 12
-rw-r--r-- lib/crypto/mpi/mpicoder.c | 2
-rw-r--r-- security/keys/trusted-keys/trusted_caam.c | 108
170 files changed, 2028 insertions, 1682 deletions
diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst
index f80f243e227e62..8158b363cd98f5 100644
--- a/Documentation/crypto/userspace-if.rst
+++ b/Documentation/crypto/userspace-if.rst
@@ -302,10 +302,9 @@ follows:
Depending on the RNG type, the RNG must be seeded. The seed is provided
-using the setsockopt interface to set the key. For example, the
-ansi_cprng requires a seed. The DRBGs do not require a seed, but may be
-seeded. The seed is also known as a *Personalization String* in NIST SP 800-90A
-standard.
+using the setsockopt interface to set the key. The SP800-90A DRBGs do
+not require a seed, but may be seeded. The seed is also known as a
+*Personalization String* in the NIST SP 800-90A standard.
Using the read()/recvmsg() system calls, random numbers can be obtained.
The kernel generates at most 128 bytes in one call. If user space
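
For reference, a minimal user-space sketch of the seeding and read flow
described above, using the AF_ALG socket interface (error handling omitted;
"drbg_nopr_sha256" is just one example DRBG instance name, and the all-zero
seed is a placeholder)::

    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "rng",
                    .salg_name   = "drbg_nopr_sha256",
            };
            unsigned char seed[32] = { 0 }; /* personalization string */
            unsigned char buf[64];
            int tfmfd, opfd;

            tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
            bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

            /* Seeding is optional for the DRBGs and is done via the key. */
            setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, seed, sizeof(seed));

            opfd = accept(tfmfd, NULL, 0);
            read(opfd, buf, sizeof(buf));   /* at most 128 bytes per call */

            close(opfd);
            close(tfmfd);
            return 0;
    }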
diff --git a/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml b/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml
index 32bf3a1c3b420f..5fb70847105915 100644
--- a/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml
+++ b/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml
@@ -21,6 +21,9 @@ properties:
dma-coherent: true
+ iommus:
+ maxItems: 4
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
index 08fe6a707a3714..c3408dcf5d2057 100644
--- a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
@@ -13,6 +13,7 @@ properties:
compatible:
items:
- enum:
+ - qcom,kaanapali-inline-crypto-engine
- qcom,qcs8300-inline-crypto-engine
- qcom,sa8775p-inline-crypto-engine
- qcom,sc7180-inline-crypto-engine
diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
index ed7e16bd11d33c..597441d94cf1ee 100644
--- a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
@@ -20,6 +20,7 @@ properties:
- qcom,ipq5332-trng
- qcom,ipq5424-trng
- qcom,ipq9574-trng
+ - qcom,kaanapali-trng
- qcom,qcs615-trng
- qcom,qcs8300-trng
- qcom,sa8255p-trng
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
index e009cb712fb8a2..79d5be2548bc52 100644
--- a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
@@ -45,6 +45,7 @@ properties:
- items:
- enum:
+ - qcom,kaanapali-qce
- qcom,qcs615-qce
- qcom,qcs8300-qce
- qcom,sa8775p-qce
diff --git a/Documentation/devicetree/bindings/rng/microchip,pic32-rng.txt b/Documentation/devicetree/bindings/rng/microchip,pic32-rng.txt
deleted file mode 100644
index c6d1003befb772..00000000000000
--- a/Documentation/devicetree/bindings/rng/microchip,pic32-rng.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* Microchip PIC32 Random Number Generator
-
-The PIC32 RNG provides a pseudo random number generator which can be seeded by
-another true random number generator.
-
-Required properties:
-- compatible : should be "microchip,pic32mzda-rng"
-- reg : Specifies base physical address and size of the registers.
-- clocks: clock phandle.
-
-Example:
-
- rng: rng@1f8e6000 {
- compatible = "microchip,pic32mzda-rng";
- reg = <0x1f8e6000 0x1000>;
- clocks = <&PBCLK5>;
- };
diff --git a/Documentation/devicetree/bindings/rng/microchip,pic32-rng.yaml b/Documentation/devicetree/bindings/rng/microchip,pic32-rng.yaml
new file mode 100644
index 00000000000000..1f6f6fb81ddc53
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/microchip,pic32-rng.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/microchip,pic32-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip PIC32 Random Number Generator
+
+description: |
+ The PIC32 RNG provides a pseudo random number generator which can be seeded
+ by another true random number generator.
+
+maintainers:
+ - Joshua Henderson <joshua.henderson@microchip.com>
+
+properties:
+ compatible:
+ enum:
+ - microchip,pic32mzda-rng
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ rng: rng@1f8e6000 {
+ compatible = "microchip,pic32mzda-rng";
+ reg = <0x1f8e6000 0x1000>;
+ clocks = <&PBCLK5>;
+ };
diff --git a/Documentation/security/keys/trusted-encrypted.rst b/Documentation/security/keys/trusted-encrypted.rst
index f4d7e162d5e475..eae6a36b1c9a11 100644
--- a/Documentation/security/keys/trusted-encrypted.rst
+++ b/Documentation/security/keys/trusted-encrypted.rst
@@ -10,6 +10,37 @@ of a Trust Source for greater security, while Encrypted Keys can be used on any
system. All user level blobs, are displayed and loaded in hex ASCII for
convenience, and are integrity verified.
+Trusted Keys as Protected Keys
+==============================
+A Trusted Key kept in the kernel key-ring exists in two forms:
+
+- Key-blob, the encrypted key-data, which is created so that it can be
+  stored, loaded and seen by userspace.
+- Key-data, the plain-text key in system memory, which is used by
+  kernel space only.
+
+Although the key-data is never exposed to user space in plain text, it is
+present in plain text in system memory while the kernel uses it. The kernel
+presents a comparatively small attack surface, but a compromised kernel or a
+side-channel attack that can read system memory may still leak the key.
+
+To protect the key inside the kernel as well, the concept of "protected keys"
+is introduced as an added layer of protection. The key-data of a protected
+key is encrypted with a Key-Encryption-Key (KEK) and is only decrypted inside
+the trust source boundary; the plain-text key is never available in system
+memory outside that boundary. Consequently, any crypto operation that uses
+the protected key can only be performed by the trust source that generated
+the key blob.
+
+Hence, even if a protected key blob is leaked or compromised, it is of no use
+to an attacker.
+
+For trusted keys used as protected keys, the trust source has the capability
+of generating:
+
+- Key-Blob, to be loaded, stored and seen by user-space.
Trust Source
============
@@ -252,7 +283,7 @@ in bytes. Trusted Keys can be 32 - 128 bytes (256 - 1024 bits).
Trusted Keys usage: CAAM
------------------------
-Usage::
+Trusted Keys Usage::
keyctl add trusted name "new keylen" ring
keyctl add trusted name "load hex_blob" ring
@@ -262,6 +293,21 @@ Usage::
CAAM-specific format. The key length for new keys is always in bytes.
Trusted Keys can be 32 - 128 bytes (256 - 1024 bits).
+Trusted Keys as Protected Keys Usage::
+
+ keyctl add trusted name "new keylen pk [options]" ring
+ keyctl add trusted name "load hex_blob [options]" ring
+ keyctl print keyid
+
+ where 'pk' directs the trust source to generate a protected key.
+
+ options:
+   key_enc_algo = the key encryption algorithm; for CAAM, the supported
+                  algorithms are CCM (1) and ECB (2).
+
+"keyctl print" returns an ASCII hex copy of the sealed key, which is in a
+CAAM-specific format. The key length for new keys is always in bytes.
+Trusted Keys can be 32 - 128 bytes (256 - 1024 bits).
+
Trusted Keys usage: DCP
-----------------------
@@ -343,6 +389,46 @@ Load a trusted key from the saved blob::
f1f8fff03ad0acb083725535636addb08d73dedb9832da198081e5deae84bfaf0409c22b
e4a8aea2b607ec96931e6f4d4fe563ba
+Create and save a trusted key as a protected key named "kmk" of length 32 bytes.
+
+::
+
+ $ keyctl add trusted kmk "new 32 pk key_enc_algo=1" @u
+ 440502848
+
+ $ keyctl show
+ Session Keyring
+ -3 --alswrv 500 500 keyring: _ses
+ 97833714 --alswrv 500 -1 \_ keyring: _uid.500
+ 440502848 --alswrv 500 500 \_ trusted: kmk
+
+ $ keyctl print 440502848
+ 0101000000000000000001005d01b7e3f4a6be5709930f3b70a743cbb42e0cc95e18e915
+ 3f60da455bbf1144ad12e4f92b452f966929f6105fd29ca28e4d4d5a031d068478bacb0b
+ 27351119f822911b0a11ba3d3498ba6a32e50dac7f32894dd890eb9ad578e4e292c83722
+ a52e56a097e6a68b3f56f7a52ece0cdccba1eb62cad7d817f6dc58898b3ac15f36026fec
+ d568bd4a706cb60bb37be6d8f1240661199d640b66fb0fe3b079f97f450b9ef9c22c6d5d
+ dd379f0facd1cd020281dfa3c70ba21a3fa6fc2471dc6d13ecf8298b946f65345faa5ef0
+ f1f8fff03ad0acb083725535636addb08d73dedb9832da198081e5deae84bfaf0409c22b
+ e4a8aea2b607ec96931e6f4d4fe563ba
+
+ $ keyctl pipe 440502848 > kmk.blob
+
+Load a trusted key from the saved blob::
+
+ $ keyctl add trusted kmk "load `cat kmk.blob` key_enc_algo=1" @u
+ 268728824
+
+ $ keyctl print 268728824
+ 0101000000000000000001005d01b7e3f4a6be5709930f3b70a743cbb42e0cc95e18e915
+ 3f60da455bbf1144ad12e4f92b452f966929f6105fd29ca28e4d4d5a031d068478bacb0b
+ 27351119f822911b0a11ba3d3498ba6a32e50dac7f32894dd890eb9ad578e4e292c83722
+ a52e56a097e6a68b3f56f7a52ece0cdccba1eb62cad7d817f6dc58898b3ac15f36026fec
+ d568bd4a706cb60bb37be6d8f1240661199d640b66fb0fe3b079f97f450b9ef9c22c6d5d
+ dd379f0facd1cd020281dfa3c70ba21a3fa6fc2471dc6d13ecf8298b946f65345faa5ef0
+ f1f8fff03ad0acb083725535636addb08d73dedb9832da198081e5deae84bfaf0409c22b
+ e4a8aea2b607ec96931e6f4d4fe563ba
+
Reseal (TPM specific) a trusted key under new PCR values::
$ keyctl update 268728824 "update pcrinfo=`cat pcr.blob`"
diff --git a/MAINTAINERS b/MAINTAINERS
index 472dc58de40d07..ff0106c3f1495d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6613,7 +6613,6 @@ CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
M: Neil Horman <nhorman@tuxdriver.com>
L: linux-crypto@vger.kernel.org
S: Maintained
-F: crypto/ansi_cprng.c
F: crypto/rng.c
CS3308 MEDIA DRIVER
@@ -12573,6 +12572,7 @@ F: drivers/dma/ioat*
INTEL IAA CRYPTO DRIVER
M: Kristen Accardi <kristen.c.accardi@intel.com>
M: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+M: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst
diff --git a/arch/arm/configs/axm55xx_defconfig b/arch/arm/configs/axm55xx_defconfig
index 242a61208a0f80..22b189090e15f4 100644
--- a/arch/arm/configs/axm55xx_defconfig
+++ b/arch/arm/configs/axm55xx_defconfig
@@ -232,4 +232,3 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_DEBUG_USER=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_SHA256=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/clps711x_defconfig b/arch/arm/configs/clps711x_defconfig
index 6fa3477e6b026d..f66d502ce2efcc 100644
--- a/arch/arm/configs/clps711x_defconfig
+++ b/arch/arm/configs/clps711x_defconfig
@@ -75,5 +75,4 @@ CONFIG_MINIX_FS=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index bb6c4748bfc80a..e98c35df675e65 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -126,7 +126,6 @@ CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index 7f3756d8b086d2..9f3c7324d1cf48 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -119,4 +119,3 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/jornada720_defconfig b/arch/arm/configs/jornada720_defconfig
index e6ec768f42e2cd..d57285cfefb29d 100644
--- a/arch/arm/configs/jornada720_defconfig
+++ b/arch/arm/configs/jornada720_defconfig
@@ -92,4 +92,3 @@ CONFIG_NLS_UTF8=m
CONFIG_DEBUG_KERNEL=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index c1291ca290b23c..b0cadd8781526e 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -228,7 +228,6 @@ CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XCBC=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_USER_API_SKCIPHER=y
CONFIG_DMA_CMA=y
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 9afccd76446b6b..2bddb0924a8c02 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -177,7 +177,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_HW is not set
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index f67e9cda73e24f..a9a212abfd69ad 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -78,4 +78,3 @@ CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
CONFIG_DEBUG_MMP_UART3=y
CONFIG_EARLY_PRINTK=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index 55f4ab67a30681..d3a26efe766c4c 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -121,4 +121,3 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_SCHEDSTATS=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 24c54bf1e2433e..dee820474f444e 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -220,7 +220,6 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index c28426250ec3fc..002c9145026bc2 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -145,4 +145,3 @@ CONFIG_LATENCYTOP=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/pxa168_defconfig b/arch/arm/configs/pxa168_defconfig
index 4748c7d33cb8a9..8cbca84fe33a3c 100644
--- a/arch/arm/configs/pxa168_defconfig
+++ b/arch/arm/configs/pxa168_defconfig
@@ -48,4 +48,3 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/pxa3xx_defconfig b/arch/arm/configs/pxa3xx_defconfig
index 381356faf38251..07d422f0ff348a 100644
--- a/arch/arm/configs/pxa3xx_defconfig
+++ b/arch/arm/configs/pxa3xx_defconfig
@@ -106,5 +106,4 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/pxa910_defconfig b/arch/arm/configs/pxa910_defconfig
index 49b59c600ae1c2..71ed0d73f8a94d 100644
--- a/arch/arm/configs/pxa910_defconfig
+++ b/arch/arm/configs/pxa910_defconfig
@@ -59,4 +59,3 @@ CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
CONFIG_DEBUG_MMP_UART2=y
CONFIG_EARLY_PRINTK=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index 395df2f9dc8ee0..c130af6d44d48e 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -228,7 +228,6 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 997fa7cd9de501..94d372e3b9bdae 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1784,7 +1784,6 @@ CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig
index 22d7f8ac58a346..83b7626550c8fe 100644
--- a/arch/hexagon/configs/comet_defconfig
+++ b/arch/hexagon/configs/comet_defconfig
@@ -69,7 +69,6 @@ CONFIG_INET=y
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_FRAME_WARN=0
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig
index 60767811e34a18..88832e9cd7cbca 100644
--- a/arch/m68k/configs/amcore_defconfig
+++ b/arch/m68k/configs/amcore_defconfig
@@ -86,5 +86,4 @@ CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_CRYPTO_ECHAINIV is not set
-CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index e717099b77cc62..bfc1ee7c8158d6 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -590,7 +590,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 6b0bab75203515..d9d1f3c4c70d78 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -547,7 +547,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index bb59e3f06ed568..523205adccc89f 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -567,7 +567,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index ed85517ff01c6a..7b0a4ef0b010f2 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -539,7 +539,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 0217a698285618..089c5c394c625f 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -549,7 +549,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index a9ba431a940816..5f2484c36733de 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -566,7 +566,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index f0d0d120e14477..74f0a1f6d8717f 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -653,7 +653,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index baf0fc7a66d945..4bee18c820e4cf 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -539,7 +539,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 9cb732f0c6ce16..322c17e55c9aed 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -540,7 +540,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 6b3295e4c5aef4..82f9baab8feaa8 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -556,7 +556,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
index f3268fed02fc1a..f9ecb1dcc060d8 100644
--- a/arch/m68k/configs/stmark2_defconfig
+++ b/arch/m68k/configs/stmark2_defconfig
@@ -84,7 +84,6 @@ CONFIG_FSCACHE=y
CONFIG_CRAMFS=y
CONFIG_SQUASHFS=y
CONFIG_ROMFS_FS=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_HW is not set
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 1f3a2fad6d2f66..f94ad226cb5b5a 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -537,7 +537,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index d8ef36564d506a..a5ecfc505ab2df 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -537,7 +537,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
CONFIG_CRYPTO_USER_API_HASH=m
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
index 52a63dd7aac7ed..dad98c57529284 100644
--- a/arch/mips/configs/decstation_64_defconfig
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -200,7 +200,6 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index 59fb7ee5eeb0b2..4e1b51a4ad9000 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -195,7 +195,6 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index 8be1cb433e95a2..4e550dffc23df0 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -195,7 +195,6 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 1df484ed632969..0713914b25b469 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -805,7 +805,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index df89105dd52020..c064e0cacc9897 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -789,7 +789,6 @@ CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ZSTD=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_JITTERENTROPY_OSR=1
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig
index 48b2e97114f979..b62be0d067f0b4 100644
--- a/arch/sh/configs/ap325rxa_defconfig
+++ b/arch/sh/configs/ap325rxa_defconfig
@@ -97,4 +97,3 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig
index 85db9ce42d1a9c..4ecd1ab398fe33 100644
--- a/arch/sh/configs/apsh4a3a_defconfig
+++ b/arch/sh/configs/apsh4a3a_defconfig
@@ -86,5 +86,4 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
# CONFIG_FTRACE is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index e8b3b720578b00..8c6de64f65c8b3 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -116,4 +116,3 @@ CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_VM=y
CONFIG_DWARF_UNWINDER=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig
index 0c9f2030bb7c9a..4573d5d64989b8 100644
--- a/arch/sh/configs/dreamcast_defconfig
+++ b/arch/sh/configs/dreamcast_defconfig
@@ -66,6 +66,5 @@ CONFIG_LOGO=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index fcca7cc5a75a5a..458115d83184cc 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -126,4 +126,3 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_DEBUG_FS=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig
index 98f4611ba553e3..abeae220606a3a 100644
--- a/arch/sh/configs/edosk7760_defconfig
+++ b/arch/sh/configs/edosk7760_defconfig
@@ -110,4 +110,3 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig
index e5d102cbff894e..3968fe75f6934d 100644
--- a/arch/sh/configs/espt_defconfig
+++ b/arch/sh/configs/espt_defconfig
@@ -108,4 +108,3 @@ CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig
index 3582af15ad8609..04a9fcb4342a09 100644
--- a/arch/sh/configs/hp6xx_defconfig
+++ b/arch/sh/configs/hp6xx_defconfig
@@ -54,5 +54,4 @@ CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index 22177aa8f961da..4c39a23e6e0dda 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -109,4 +109,3 @@ CONFIG_SMB_FS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_SH_STANDARD_BIOS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig
index ff992301622ba1..9f53b514a8e7e9 100644
--- a/arch/sh/configs/lboxre2_defconfig
+++ b/arch/sh/configs/lboxre2_defconfig
@@ -56,4 +56,3 @@ CONFIG_TMPFS=y
CONFIG_ROMFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_SH_STANDARD_BIOS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index 31dbd8888aaa97..7cdaa909ffd619 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -87,5 +87,4 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_DEBUG_FS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index 58b792dacfece8..af954f75444b14 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -103,4 +103,3 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index 7edf18451158d0..a66dd6d74cf12f 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -101,4 +101,3 @@ CONFIG_4KSTACKS=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index 69568cc4039608..0c54ab2b06e6fe 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -86,4 +86,3 @@ CONFIG_TMPFS=y
CONFIG_MINIX_FS=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_DEBUG_FS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index ecb4bdb5bb58fa..3173b616b2cba6 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -91,4 +91,3 @@ CONFIG_TMPFS=y
CONFIG_MINIX_FS=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_DEBUG_FS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
index 311817161afb80..57ac7af157b857 100644
--- a/arch/sh/configs/sdk7780_defconfig
+++ b/arch/sh/configs/sdk7780_defconfig
@@ -134,4 +134,3 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_SH_STANDARD_BIOS=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 2433aa5f44a861..d6fa3a422e24d5 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -211,4 +211,3 @@ CONFIG_FUNCTION_TRACER=y
CONFIG_DMA_API_DEBUG=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DWARF_UNWINDER=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 64f9308ee5865d..d67158f69c52ab 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -99,5 +99,4 @@ CONFIG_FRAME_POINTER=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig
index b0baa5771c26a8..2d4d1f974f14f8 100644
--- a/arch/sh/configs/se7343_defconfig
+++ b/arch/sh/configs/se7343_defconfig
@@ -91,4 +91,3 @@ CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFSD=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig
index 1752ddc2694a43..ad55a4d9d57dce 100644
--- a/arch/sh/configs/se7705_defconfig
+++ b/arch/sh/configs/se7705_defconfig
@@ -51,4 +51,3 @@ CONFIG_PROC_KCORE=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index 1078c286a610cd..dee1d88f6a7d75 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -94,4 +94,3 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_FRAME_POINTER=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index edb9e0d2dce5b7..a4551cadaf0ba1 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -120,4 +120,3 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_FRAME_POINTER=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig
index 33daa0a17a3265..85b8eb013b79fe 100644
--- a/arch/sh/configs/se7722_defconfig
+++ b/arch/sh/configs/se7722_defconfig
@@ -53,4 +53,3 @@ CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_SH_STANDARD_BIOS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index d572655f842d63..9e3a54936f76f1 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -126,4 +126,3 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig
index a1e25d7de8a608..83ae513e546290 100644
--- a/arch/sh/configs/se7750_defconfig
+++ b/arch/sh/configs/se7750_defconfig
@@ -52,4 +52,3 @@ CONFIG_ROOT_NFS=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig
index 8b5fe4ec16bc28..8f5ddab3c10677 100644
--- a/arch/sh/configs/se7751_defconfig
+++ b/arch/sh/configs/se7751_defconfig
@@ -42,4 +42,3 @@ CONFIG_EXT2_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/se7780_defconfig b/arch/sh/configs/se7780_defconfig
index 21303304eda28b..12463b76612062 100644
--- a/arch/sh/configs/se7780_defconfig
+++ b/arch/sh/configs/se7780_defconfig
@@ -102,4 +102,3 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_DEBUG_FS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 3d194d81c92be7..9036431646f805 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -118,6 +118,5 @@ CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig
index 889daa5d2faab4..e32d2ce72699fe 100644
--- a/arch/sh/configs/sh2007_defconfig
+++ b/arch/sh/configs/sh2007_defconfig
@@ -191,5 +191,4 @@ CONFIG_CRYPTO_TEA=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig
index 5b151bb2bc43b7..c8625ba5be79ac 100644
--- a/arch/sh/configs/sh7710voipgw_defconfig
+++ b/arch/sh/configs/sh7710voipgw_defconfig
@@ -51,4 +51,3 @@ CONFIG_THERMAL=y
# CONFIG_DNOTIFY is not set
CONFIG_JFFS2_FS=y
CONFIG_DEBUG_FS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig
index 25e9d22779b3dc..93066ae429ecce 100644
--- a/arch/sh/configs/sh7757lcr_defconfig
+++ b/arch/sh/configs/sh7757lcr_defconfig
@@ -81,4 +81,3 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
# CONFIG_FTRACE is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig
index e7b72ff377a80f..5123eed9bbe0cc 100644
--- a/arch/sh/configs/sh7763rdp_defconfig
+++ b/arch/sh/configs/sh7763rdp_defconfig
@@ -110,4 +110,3 @@ CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig
index 17d2471d8e515f..eb63aa61b0465c 100644
--- a/arch/sh/configs/sh7785lcr_32bit_defconfig
+++ b/arch/sh/configs/sh7785lcr_32bit_defconfig
@@ -144,5 +144,4 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_LATENCYTOP=y
# CONFIG_FTRACE is not set
CONFIG_CRYPTO_HMAC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig
index 34c8fe755addac..2fcf50d8c820cb 100644
--- a/arch/sh/configs/sh7785lcr_defconfig
+++ b/arch/sh/configs/sh7785lcr_defconfig
@@ -112,5 +112,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_HMAC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig
index bfeb004f130ecb..11ac2e66ec7e53 100644
--- a/arch/sh/configs/shmin_defconfig
+++ b/arch/sh/configs/shmin_defconfig
@@ -49,4 +49,3 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_SH_STANDARD_BIOS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index 52e7a42d66c724..0cb69a0b92cbc9 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -97,4 +97,3 @@ CONFIG_DEBUG_VM=y
CONFIG_FRAME_POINTER=y
CONFIG_SH_STANDARD_BIOS=y
CONFIG_DEBUG_STACK_USAGE=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 2c474645ec3619..896e980d04e149 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -261,4 +261,3 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/ul2_defconfig b/arch/sh/configs/ul2_defconfig
index b0c2ba4783530e..0d1c858754dbbc 100644
--- a/arch/sh/configs/ul2_defconfig
+++ b/arch/sh/configs/ul2_defconfig
@@ -80,4 +80,3 @@ CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_CRYPTO_MICHAEL_MIC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig
index e6d807f52253e9..f51ff6b1ec38f4 100644
--- a/arch/sh/configs/urquell_defconfig
+++ b/arch/sh/configs/urquell_defconfig
@@ -142,5 +142,4 @@ CONFIG_FRAME_POINTER=y
# CONFIG_FTRACE is not set
# CONFIG_DUMP_CODE is not set
CONFIG_CRYPTO_HMAC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index f6341b063b01be..e021ecfb5a7717 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -92,5 +92,4 @@ CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 127940aafc3951..9f3f41246ae6da 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -227,7 +227,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_VCC=m
CONFIG_PATA_CMD64X=y
CONFIG_IP_PNP=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index bb6e2c47ffc612..48405e02d6e466 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -693,8 +693,7 @@ ctr_crypt(struct skcipher_request *req,
* operation into two at the point where the overflow
* will occur. After the first part, add the carry bit.
*/
- p1_nbytes = min_t(unsigned int, nbytes,
- (nblocks - ctr64) * AES_BLOCK_SIZE);
+ p1_nbytes = min(nbytes, (nblocks - ctr64) * AES_BLOCK_SIZE);
(*ctr64_func)(key, walk.src.virt.addr,
walk.dst.virt.addr, p1_nbytes, le_ctr);
le_ctr[0] = 0;
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index dc942bbac69ff8..4655d6bd7ee079 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -133,4 +133,3 @@ CONFIG_STACKTRACE=y
CONFIG_RCU_TRACE=y
# CONFIG_FTRACE is not set
# CONFIG_S32C1I_SELFTEST is not set
-CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 3ee7e1c5655637..b3de2bd74d545e 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -121,4 +121,3 @@ CONFIG_RCU_TRACE=y
# CONFIG_FTRACE is not set
CONFIG_LD_NO_RELAX=y
# CONFIG_S32C1I_SELFTEST is not set
-CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 32ce8fb068f066..324266824faeff 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -28,4 +28,3 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
# CONFIG_FRAME_POINTER is not set
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index c6e96f0aa700c2..47dd80930e9066 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -122,4 +122,3 @@ CONFIG_RCU_TRACE=y
# CONFIG_FTRACE is not set
# CONFIG_LD_NO_RELAX is not set
# CONFIG_CRYPTO_ECHAINIV is not set
-CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 373d42b9e510b2..c8c92c5b24d5de 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -125,4 +125,3 @@ CONFIG_RCU_TRACE=y
# CONFIG_FTRACE is not set
CONFIG_LD_NO_RELAX=y
# CONFIG_S32C1I_SELFTEST is not set
-CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/virt_defconfig b/arch/xtensa/configs/virt_defconfig
index 72628d31e87a7c..982dd67ed1747e 100644
--- a/arch/xtensa/configs/virt_defconfig
+++ b/arch/xtensa/configs/virt_defconfig
@@ -92,7 +92,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig
index 5d6013ea70fc75..3e9486222a66c3 100644
--- a/arch/xtensa/configs/xip_kc705_defconfig
+++ b/arch/xtensa/configs/xip_kc705_defconfig
@@ -98,7 +98,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/crypto/Kconfig b/crypto/Kconfig
index bf8b8a60a0c0e4..2e5b195b1b063e 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -25,7 +25,7 @@ menu "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
- depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && CRYPTO_SELFTESTS
+ depends on CRYPTO_DRBG && CRYPTO_SELFTESTS
depends on (MODULE_SIG || !MODULES)
help
This option enables the fips boot option which is
@@ -1161,17 +1161,6 @@ endmenu
menu "Random number generation"
-config CRYPTO_ANSI_CPRNG
- tristate "ANSI PRNG (Pseudo Random Number Generator)"
- select CRYPTO_AES
- select CRYPTO_RNG
- help
- Pseudo RNG (random number generator) (ANSI X9.31 Appendix A.2.4)
-
- This uses the AES cipher algorithm.
-
- Note that this option must be enabled if CRYPTO_FIPS is selected
-
menuconfig CRYPTO_DRBG_MENU
tristate "NIST SP800-90A DRBG (Deterministic Random Bit Generator)"
help
@@ -1197,8 +1186,7 @@ config CRYPTO_DRBG_HASH
config CRYPTO_DRBG_CTR
bool "CTR_DRBG"
- select CRYPTO_AES
- select CRYPTO_CTR
+ select CRYPTO_DF80090A
help
CTR_DRBG variant as defined in NIST SP800-90A.
@@ -1334,6 +1322,11 @@ config CRYPTO_KDF800108_CTR
select CRYPTO_HMAC
select CRYPTO_SHA256
+config CRYPTO_DF80090A
+ tristate
+ select CRYPTO_AES
+ select CRYPTO_CTR
+
endmenu
menu "Userspace interface"
diff --git a/crypto/Makefile b/crypto/Makefile
index 093c56a45d3f1b..16a35649dd9126 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -162,7 +162,6 @@ obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
obj-$(CONFIG_CRYPTO_XXHASH) += xxhash_generic.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
-obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy_rng.o
CFLAGS_jitterentropy.o = -O0
@@ -207,4 +206,6 @@ obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
#
obj-$(CONFIG_CRYPTO_KDF800108_CTR) += kdf_sp800108.o
+obj-$(CONFIG_CRYPTO_DF80090A) += df_sp80090a.o
+
obj-$(CONFIG_CRYPTO_KRB5) += krb5/
diff --git a/crypto/aead.c b/crypto/aead.c
index 5d14b775036eeb..08d44c5e5c3366 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -120,6 +120,7 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
struct aead_alg *alg = crypto_aead_alg(aead);
crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY);
+ crypto_aead_set_reqsize(aead, crypto_tfm_alg_reqsize(tfm));
aead->authsize = alg->maxauthsize;
@@ -204,6 +205,25 @@ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alloc_aead);
+struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask)
+{
+ struct crypto_aead *tfm;
+
+ /* Only sync algorithms are allowed. */
+ mask |= CRYPTO_ALG_ASYNC;
+ type &= ~(CRYPTO_ALG_ASYNC);
+
+ tfm = crypto_alloc_tfm(alg_name, &crypto_aead_type, type, mask);
+
+ if (!IS_ERR(tfm) && WARN_ON(crypto_aead_reqsize(tfm) > MAX_SYNC_AEAD_REQSIZE)) {
+ crypto_free_aead(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return (struct crypto_sync_aead *)tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_sync_aead);
+
int crypto_has_aead(const char *alg_name, u32 type, u32 mask)
{
return crypto_type_has_alg(alg_name, &crypto_aead_type, type, mask);
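
As context for the new allocator above, a hedged kernel-side sketch of a
caller; crypto_alloc_sync_aead() is taken from this patch, while
crypto_free_sync_aead() is assumed to be the matching free helper added in
include/crypto/aead.h (not reproduced in this diff)::

    /* Sketch only: demonstrates allocation and the sync-only guarantee. */
    static int example_sync_aead_alloc(void)
    {
            struct crypto_sync_aead *tfm;

            tfm = crypto_alloc_sync_aead("gcm(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /*
             * The allocator masks out CRYPTO_ALG_ASYNC and checks that the
             * request size fits MAX_SYNC_AEAD_REQSIZE, so requests for this
             * tfm can safely be placed on the stack.
             */

            crypto_free_sync_aead(tfm);     /* assumed counterpart helper */
            return 0;
    }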
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ca6fdcc6c54aca..6c271e55f44d94 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1212,15 +1212,14 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
if (unlikely(!areq))
return ERR_PTR(-ENOMEM);
+ memset(areq, 0, areqlen);
+
ctx->inflight = true;
areq->areqlen = areqlen;
areq->sk = sk;
areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
- areq->last_rsgl = NULL;
INIT_LIST_HEAD(&areq->rsgl_list);
- areq->tsgl = NULL;
- areq->tsgl_entries = 0;
return areq;
}
diff --git a/crypto/ahash.c b/crypto/ahash.c
index dfb4f5476428fc..66492ae75fcfb6 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -423,7 +423,11 @@ static int ahash_update_finish(struct ahash_request *req, int err)
req->nbytes += nonzero - blen;
- blen = err < 0 ? 0 : err + nonzero;
+ blen = 0;
+ if (err >= 0) {
+ blen = err + nonzero;
+ err = 0;
+ }
if (ahash_request_isvirt(req))
memcpy(buf, req->svirt + req->nbytes - blen, blen);
else
@@ -661,6 +665,12 @@ int crypto_ahash_import_core(struct ahash_request *req, const void *in)
in);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ buf[reqsize - 1] = 0;
+ }
return crypto_ahash_alg(tfm)->import_core(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import_core);
@@ -674,10 +684,14 @@ int crypto_ahash_import(struct ahash_request *req, const void *in)
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
if (crypto_ahash_block_only(tfm)) {
+ unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ unsigned int ss = crypto_ahash_statesize(tfm);
u8 *buf = ahash_request_ctx(req);
- buf[reqsize - 1] = 0;
+ memcpy(buf + reqsize - plen, in + ss - plen, plen);
+ if (buf[reqsize - 1] >= plen)
+ return -EOVERFLOW;
}
return crypto_ahash_alg(tfm)->import(req, in);
}
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index e3f1a4852737b0..4d3dfc60a16a6d 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -416,9 +416,8 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
- ctx->result = NULL;
+ memset(ctx, 0, len);
ctx->len = len;
- ctx->more = false;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 10c41adac3b1f3..1a86e40c8372e5 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -248,9 +248,8 @@ static int rng_accept_parent(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
+ memset(ctx, 0, len);
ctx->len = len;
- ctx->addtl = NULL;
- ctx->addtl_len = 0;
/*
* No seeding done at that point -- if multiple accepts are
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
deleted file mode 100644
index 153523ce607646..00000000000000
--- a/crypto/ansi_cprng.c
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PRNG: Pseudo Random Number Generator
- * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
- * AES 128 cipher
- *
- * (C) Neil Horman <nhorman@tuxdriver.com>
- */
-
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/rng.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-
-#define DEFAULT_PRNG_KEY "0123456789abcdef"
-#define DEFAULT_PRNG_KSZ 16
-#define DEFAULT_BLK_SZ 16
-#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
-
-/*
- * Flags for the prng_context flags field
- */
-
-#define PRNG_FIXED_SIZE 0x1
-#define PRNG_NEED_RESET 0x2
-
-/*
- * Note: DT is our counter value
- * I is our intermediate value
- * V is our seed vector
- * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
- * for implementation details
- */
-
-
-struct prng_context {
- spinlock_t prng_lock;
- unsigned char rand_data[DEFAULT_BLK_SZ];
- unsigned char last_rand_data[DEFAULT_BLK_SZ];
- unsigned char DT[DEFAULT_BLK_SZ];
- unsigned char I[DEFAULT_BLK_SZ];
- unsigned char V[DEFAULT_BLK_SZ];
- u32 rand_data_valid;
- struct crypto_cipher *tfm;
- u32 flags;
-};
-
-static int dbg;
-
-static void hexdump(char *note, unsigned char *buf, unsigned int len)
-{
- if (dbg) {
- printk(KERN_CRIT "%s", note);
- print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
- 16, 1,
- buf, len, false);
- }
-}
-
-#define dbgprint(format, args...) do {\
-if (dbg)\
- printk(format, ##args);\
-} while (0)
-
-static void xor_vectors(unsigned char *in1, unsigned char *in2,
- unsigned char *out, unsigned int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- out[i] = in1[i] ^ in2[i];
-
-}
-/*
- * Returns DEFAULT_BLK_SZ bytes of random data per call
- * returns 0 if generation succeeded, <0 if something went wrong
- */
-static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
-{
- int i;
- unsigned char tmp[DEFAULT_BLK_SZ];
- unsigned char *output = NULL;
-
-
- dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",
- ctx);
-
- hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
- hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
- hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
-
- /*
- * This algorithm is a 3 stage state machine
- */
- for (i = 0; i < 3; i++) {
-
- switch (i) {
- case 0:
- /*
- * Start by encrypting the counter value
- * This gives us an intermediate value I
- */
- memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
- output = ctx->I;
- hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
- break;
- case 1:
-
- /*
- * Next xor I with our secret vector V
- * encrypt that result to obtain our
- * pseudo random data which we output
- */
- xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
- hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
- output = ctx->rand_data;
- break;
- case 2:
- /*
- * First check that we didn't produce the same
- * random data that we did last time around through this
- */
- if (!memcmp(ctx->rand_data, ctx->last_rand_data,
- DEFAULT_BLK_SZ)) {
- if (cont_test) {
- panic("cprng %p Failed repetition check!\n",
- ctx);
- }
-
- printk(KERN_ERR
- "ctx %p Failed repetition check!\n",
- ctx);
-
- ctx->flags |= PRNG_NEED_RESET;
- return -EINVAL;
- }
- memcpy(ctx->last_rand_data, ctx->rand_data,
- DEFAULT_BLK_SZ);
-
- /*
- * Lastly xor the random data with I
- * and encrypt that to obtain a new secret vector V
- */
- xor_vectors(ctx->rand_data, ctx->I, tmp,
- DEFAULT_BLK_SZ);
- output = ctx->V;
- hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
- break;
- }
-
-
- /* do the encryption */
- crypto_cipher_encrypt_one(ctx->tfm, output, tmp);
-
- }
-
- /*
- * Now update our DT value
- */
- for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) {
- ctx->DT[i] += 1;
- if (ctx->DT[i] != 0)
- break;
- }
-
- dbgprint("Returning new block for context %p\n", ctx);
- ctx->rand_data_valid = 0;
-
- hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
- hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
- hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
- hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
-
- return 0;
-}
-
-/* Our exported functions */
-static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
- int do_cont_test)
-{
- unsigned char *ptr = buf;
- unsigned int byte_count = (unsigned int)nbytes;
- int err;
-
-
- spin_lock_bh(&ctx->prng_lock);
-
- err = -EINVAL;
- if (ctx->flags & PRNG_NEED_RESET)
- goto done;
-
- /*
- * If the FIXED_SIZE flag is on, only return whole blocks of
- * pseudo random data
- */
- err = -EINVAL;
- if (ctx->flags & PRNG_FIXED_SIZE) {
- if (nbytes < DEFAULT_BLK_SZ)
- goto done;
- byte_count = DEFAULT_BLK_SZ;
- }
-
- /*
- * Return 0 in case of success as mandated by the kernel
- * crypto API interface definition.
- */
- err = 0;
-
- dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
- byte_count, ctx);
-
-
-remainder:
- if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
- memset(buf, 0, nbytes);
- err = -EINVAL;
- goto done;
- }
- }
-
- /*
- * Copy any data less than an entire block
- */
- if (byte_count < DEFAULT_BLK_SZ) {
-empty_rbuf:
- while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
- *ptr = ctx->rand_data[ctx->rand_data_valid];
- ptr++;
- byte_count--;
- ctx->rand_data_valid++;
- if (byte_count == 0)
- goto done;
- }
- }
-
- /*
- * Now copy whole blocks
- */
- for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
- if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
- memset(buf, 0, nbytes);
- err = -EINVAL;
- goto done;
- }
- }
- if (ctx->rand_data_valid > 0)
- goto empty_rbuf;
- memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
- ctx->rand_data_valid += DEFAULT_BLK_SZ;
- ptr += DEFAULT_BLK_SZ;
- }
-
- /*
- * Now go back and get any remaining partial block
- */
- if (byte_count)
- goto remainder;
-
-done:
- spin_unlock_bh(&ctx->prng_lock);
- dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
- err, ctx);
- return err;
-}
-
-static void free_prng_context(struct prng_context *ctx)
-{
- crypto_free_cipher(ctx->tfm);
-}
-
-static int reset_prng_context(struct prng_context *ctx,
- const unsigned char *key, size_t klen,
- const unsigned char *V, const unsigned char *DT)
-{
- int ret;
- const unsigned char *prng_key;
-
- spin_lock_bh(&ctx->prng_lock);
- ctx->flags |= PRNG_NEED_RESET;
-
- prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
-
- if (!key)
- klen = DEFAULT_PRNG_KSZ;
-
- if (V)
- memcpy(ctx->V, V, DEFAULT_BLK_SZ);
- else
- memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ);
-
- if (DT)
- memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
- else
- memset(ctx->DT, 0, DEFAULT_BLK_SZ);
-
- memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
- memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
-
- ctx->rand_data_valid = DEFAULT_BLK_SZ;
-
- ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
- if (ret) {
- dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
- crypto_cipher_get_flags(ctx->tfm));
- goto out;
- }
-
- ret = 0;
- ctx->flags &= ~PRNG_NEED_RESET;
-out:
- spin_unlock_bh(&ctx->prng_lock);
- return ret;
-}
-
-static int cprng_init(struct crypto_tfm *tfm)
-{
- struct prng_context *ctx = crypto_tfm_ctx(tfm);
-
- spin_lock_init(&ctx->prng_lock);
- ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tfm)) {
- dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
- ctx);
- return PTR_ERR(ctx->tfm);
- }
-
- if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
- return -EINVAL;
-
- /*
- * after allocation, we should always force the user to reset
- * so they don't inadvertently use the insecure default values
- * without specifying them intentially
- */
- ctx->flags |= PRNG_NEED_RESET;
- return 0;
-}
-
-static void cprng_exit(struct crypto_tfm *tfm)
-{
- free_prng_context(crypto_tfm_ctx(tfm));
-}
-
-static int cprng_get_random(struct crypto_rng *tfm,
- const u8 *src, unsigned int slen,
- u8 *rdata, unsigned int dlen)
-{
- struct prng_context *prng = crypto_rng_ctx(tfm);
-
- return get_prng_bytes(rdata, dlen, prng, 0);
-}
-
-/*
- * This is the cprng_registered reset method the seed value is
- * interpreted as the tuple { V KEY DT}
- * V and KEY are required during reset, and DT is optional, detected
- * as being present by testing the length of the seed
- */
-static int cprng_reset(struct crypto_rng *tfm,
- const u8 *seed, unsigned int slen)
-{
- struct prng_context *prng = crypto_rng_ctx(tfm);
- const u8 *key = seed + DEFAULT_BLK_SZ;
- const u8 *dt = NULL;
-
- if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
- return -EINVAL;
-
- if (slen >= (2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ))
- dt = key + DEFAULT_PRNG_KSZ;
-
- reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, dt);
-
- if (prng->flags & PRNG_NEED_RESET)
- return -EINVAL;
- return 0;
-}
-
-#ifdef CONFIG_CRYPTO_FIPS
-static int fips_cprng_get_random(struct crypto_rng *tfm,
- const u8 *src, unsigned int slen,
- u8 *rdata, unsigned int dlen)
-{
- struct prng_context *prng = crypto_rng_ctx(tfm);
-
- return get_prng_bytes(rdata, dlen, prng, 1);
-}
-
-static int fips_cprng_reset(struct crypto_rng *tfm,
- const u8 *seed, unsigned int slen)
-{
- u8 rdata[DEFAULT_BLK_SZ];
- const u8 *key = seed + DEFAULT_BLK_SZ;
- int rc;
-
- struct prng_context *prng = crypto_rng_ctx(tfm);
-
- if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
- return -EINVAL;
-
- /* fips strictly requires seed != key */
- if (!memcmp(seed, key, DEFAULT_PRNG_KSZ))
- return -EINVAL;
-
- rc = cprng_reset(tfm, seed, slen);
-
- if (!rc)
- goto out;
-
- /* this primes our continuity test */
- rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
- prng->rand_data_valid = DEFAULT_BLK_SZ;
-
-out:
- return rc;
-}
-#endif
-
-static struct rng_alg rng_algs[] = { {
- .generate = cprng_get_random,
- .seed = cprng_reset,
- .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
- .base = {
- .cra_name = "stdrng",
- .cra_driver_name = "ansi_cprng",
- .cra_priority = 100,
- .cra_ctxsize = sizeof(struct prng_context),
- .cra_module = THIS_MODULE,
- .cra_init = cprng_init,
- .cra_exit = cprng_exit,
- }
-#ifdef CONFIG_CRYPTO_FIPS
-}, {
- .generate = fips_cprng_get_random,
- .seed = fips_cprng_reset,
- .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
- .base = {
- .cra_name = "fips(ansi_cprng)",
- .cra_driver_name = "fips_ansi_cprng",
- .cra_priority = 300,
- .cra_ctxsize = sizeof(struct prng_context),
- .cra_module = THIS_MODULE,
- .cra_init = cprng_init,
- .cra_exit = cprng_exit,
- }
-#endif
-} };
-
-/* Module initalization */
-static int __init prng_mod_init(void)
-{
- return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
-}
-
-static void __exit prng_mod_fini(void)
-{
- crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
-}
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
-MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
-module_param(dbg, int, 0);
-MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-module_init(prng_mod_init);
-module_exit(prng_mod_fini);
-MODULE_ALIAS_CRYPTO("stdrng");
-MODULE_ALIAS_CRYPTO("ansi_cprng");
-MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index ba2d9d1ea235a7..348966ea2175c9 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -11,6 +11,7 @@
#include <crypto/public_key.h>
#include <linux/seq_file.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <keys/system_keyring.h>
@@ -141,12 +142,17 @@ struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
size_t len_2)
{
struct asymmetric_key_id *kid;
-
- kid = kmalloc(sizeof(struct asymmetric_key_id) + len_1 + len_2,
- GFP_KERNEL);
+ size_t kid_sz;
+ size_t len;
+
+ if (check_add_overflow(len_1, len_2, &len))
+ return ERR_PTR(-EOVERFLOW);
+ if (check_add_overflow(sizeof(struct asymmetric_key_id), len, &kid_sz))
+ return ERR_PTR(-EOVERFLOW);
+ kid = kmalloc(kid_sz, GFP_KERNEL);
if (!kid)
return ERR_PTR(-ENOMEM);
- kid->len = len_1 + len_2;
+ kid->len = len;
memcpy(kid->data, val_1, len_1);
memcpy(kid->data + len_1, val_2, len_2);
return kid;
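check_add_overflow() comes from <linux/overflow.h>; a minimal sketch of its contract, with values chosen only for illustration:

size_t a = SIZE_MAX - 1, b = 4, sum;

if (check_add_overflow(a, b, &sum))     /* true here: a + b wraps */
        return ERR_PTR(-EOVERFLOW);
/* otherwise sum holds the exact result */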
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index afcd4d101ac550..86292965f4930c 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -17,9 +17,12 @@ static struct asymmetric_key_id *ca_keyid;
#ifndef MODULE
static struct {
- struct asymmetric_key_id id;
- unsigned char data[10];
+ /* Must be last as it ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct asymmetric_key_id, id, data,
+ unsigned char data[10];
+ );
} cakey;
+static_assert(offsetof(typeof(cakey), id.data) == offsetof(typeof(cakey), data));
static int __init ca_keys_setup(char *str)
{
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 8df3fa60a44f80..b37cae914987b6 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL_GPL(x509_free_certificate);
*/
struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
{
- struct x509_certificate *cert __free(x509_free_certificate);
+ struct x509_certificate *cert __free(x509_free_certificate) = NULL;
struct x509_parse_context *ctx __free(kfree) = NULL;
struct asymmetric_key_id *kid;
long ret;
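The added "= NULL" matters because __free() (from <linux/cleanup.h>) runs its cleanup on every scope exit, including early returns taken before the pointer is assigned. A minimal sketch of the pattern; mainline already provides a DEFINE_FREE() for kfree in slab.h, it is repeated here only to show the macro's shape, and example() is hypothetical.

DEFINE_FREE(kfree, void *, if (_T) kfree(_T))

static int example(bool bail)
{
        char *buf __free(kfree) = NULL; /* must start out NULL */

        if (bail)
                return -EINVAL;         /* cleanup sees NULL, not garbage */

        buf = kmalloc(16, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        return 0;                       /* buf freed automatically */
}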
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 8409d7d36cb4f3..12e3341e806b8d 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -148,7 +148,7 @@ not_self_signed:
*/
static int x509_key_preparse(struct key_preparsed_payload *prep)
{
- struct x509_certificate *cert __free(x509_free_certificate);
+ struct x509_certificate *cert __free(x509_free_certificate) = NULL;
struct asymmetric_key_ids *kids __free(kfree) = NULL;
char *p, *desc __free(kfree) = NULL;
const char *q;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a723769c87779d..ac679ce2cb9533 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -37,7 +37,7 @@ struct authenc_request_ctx {
static void authenc_request_complete(struct aead_request *req, int err)
{
- if (err != -EINPROGRESS)
+ if (err != -EINPROGRESS && err != -EBUSY)
aead_request_complete(req, err);
}
@@ -107,27 +107,42 @@ out:
return err;
}
-static void authenc_geniv_ahash_done(void *data, int err)
+static void authenc_geniv_ahash_finish(struct aead_request *req)
{
- struct aead_request *req = data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
- if (err)
- goto out;
-
scatterwalk_map_and_copy(ahreq->result, req->dst,
req->assoclen + req->cryptlen,
crypto_aead_authsize(authenc), 1);
+}
-out:
+static void authenc_geniv_ahash_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ if (!err)
+ authenc_geniv_ahash_finish(req);
aead_request_complete(req, err);
}
-static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
+/*
+ * Used when the ahash request was invoked in the async callback context
+ * of the previous skcipher request. Eat any EINPROGRESS notifications.
+ */
+static void authenc_geniv_ahash_done2(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ if (!err)
+ authenc_geniv_ahash_finish(req);
+ authenc_request_complete(req, err);
+}
+
+static int crypto_authenc_genicv(struct aead_request *req, unsigned int mask)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
@@ -136,6 +151,7 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
struct crypto_ahash *auth = ctx->auth;
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ unsigned int flags = aead_request_flags(req) & ~mask;
u8 *hash = areq_ctx->tail;
int err;
@@ -143,7 +159,8 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
ahash_request_set_crypt(ahreq, req->dst, hash,
req->assoclen + req->cryptlen);
ahash_request_set_callback(ahreq, flags,
- authenc_geniv_ahash_done, req);
+ mask ? authenc_geniv_ahash_done2 :
+ authenc_geniv_ahash_done, req);
err = crypto_ahash_digest(ahreq);
if (err)
@@ -159,12 +176,11 @@ static void crypto_authenc_encrypt_done(void *data, int err)
{
struct aead_request *areq = data;
- if (err)
- goto out;
-
- err = crypto_authenc_genicv(areq, 0);
-
-out:
+ if (err) {
+ aead_request_complete(areq, err);
+ return;
+ }
+ err = crypto_authenc_genicv(areq, CRYPTO_TFM_REQ_MAY_SLEEP);
authenc_request_complete(areq, err);
}
@@ -199,11 +215,18 @@ static int crypto_authenc_encrypt(struct aead_request *req)
if (err)
return err;
- return crypto_authenc_genicv(req, aead_request_flags(req));
+ return crypto_authenc_genicv(req, 0);
+}
+
+static void authenc_decrypt_tail_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ authenc_request_complete(req, err);
}
static int crypto_authenc_decrypt_tail(struct aead_request *req,
- unsigned int flags)
+ unsigned int mask)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
@@ -214,6 +237,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ictx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc);
+ unsigned int flags = aead_request_flags(req) & ~mask;
u8 *ihash = ahreq->result + authsize;
struct scatterlist *src, *dst;
@@ -230,7 +254,9 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
skcipher_request_set_tfm(skreq, ctx->enc);
skcipher_request_set_callback(skreq, flags,
- req->base.complete, req->base.data);
+ mask ? authenc_decrypt_tail_done :
+ req->base.complete,
+ mask ? req : req->base.data);
skcipher_request_set_crypt(skreq, src, dst,
req->cryptlen - authsize, req->iv);
@@ -241,12 +267,11 @@ static void authenc_verify_ahash_done(void *data, int err)
{
struct aead_request *req = data;
- if (err)
- goto out;
-
- err = crypto_authenc_decrypt_tail(req, 0);
-
-out:
+ if (err) {
+ aead_request_complete(req, err);
+ return;
+ }
+ err = crypto_authenc_decrypt_tail(req, CRYPTO_TFM_REQ_MAY_SLEEP);
authenc_request_complete(req, err);
}
@@ -273,7 +298,7 @@ static int crypto_authenc_decrypt(struct aead_request *req)
if (err)
return err;
- return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
+ return crypto_authenc_decrypt_tail(req, 0);
}
static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 21404515dc77ec..a3e1fff55661b7 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/overflow.h>
#include <linux/percpu.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -39,7 +40,7 @@ static void *deflate_alloc_stream(void)
DEFLATE_DEF_MEMLEVEL));
struct deflate_stream *ctx;
- ctx = kvmalloc(sizeof(*ctx) + size, GFP_KERNEL);
+ ctx = kvmalloc(struct_size(ctx, workspace, size), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
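struct_size() (also from <linux/overflow.h>) computes sizeof(*ctx) plus the flexible-array tail and saturates to SIZE_MAX on overflow, so an oversized element count fails the allocation instead of undersizing it. A generic sketch with a made-up structure, n being the element count:

struct blob {
        unsigned int count;
        u8 data[];                      /* flexible array member */
};

struct blob *b = kvmalloc(struct_size(b, data, n), GFP_KERNEL);
if (!b)
        return -ENOMEM;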
diff --git a/crypto/df_sp80090a.c b/crypto/df_sp80090a.c
new file mode 100644
index 00000000000000..dc63b31a93fc2d
--- /dev/null
+++ b/crypto/df_sp80090a.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * NIST SP800-90A DRBG derivation function
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <crypto/aes.h>
+#include <crypto/df_sp80090a.h>
+#include <crypto/internal/drbg.h>
+
+static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx,
+ const unsigned char *key,
+ u8 keylen);
+static void drbg_kcapi_symsetkey(struct crypto_aes_ctx *aesctx,
+ const unsigned char *key, u8 keylen)
+{
+ aes_expandkey(aesctx, key, keylen);
+}
+
+static void drbg_kcapi_sym(struct crypto_aes_ctx *aesctx,
+ unsigned char *outval,
+ const struct drbg_string *in, u8 blocklen_bytes)
+{
+ /* there is only one component in *in */
+ BUG_ON(in->len < blocklen_bytes);
+ aes_encrypt(aesctx, outval, in->buf);
+}
+
+/* BCC function for CTR DRBG as defined in 10.4.3 */
+
+static void drbg_ctr_bcc(struct crypto_aes_ctx *aesctx,
+ unsigned char *out, const unsigned char *key,
+ struct list_head *in,
+ u8 blocklen_bytes,
+ u8 keylen)
+{
+ struct drbg_string *curr = NULL;
+ struct drbg_string data;
+ short cnt = 0;
+
+ drbg_string_fill(&data, out, blocklen_bytes);
+
+ /* 10.4.3 step 2 / 4 */
+ drbg_kcapi_symsetkey(aesctx, key, keylen);
+ list_for_each_entry(curr, in, list) {
+ const unsigned char *pos = curr->buf;
+ size_t len = curr->len;
+ /* 10.4.3 step 4.1 */
+ while (len) {
+ /* 10.4.3 step 4.2 */
+ if (blocklen_bytes == cnt) {
+ cnt = 0;
+ drbg_kcapi_sym(aesctx, out, &data, blocklen_bytes);
+ }
+ out[cnt] ^= *pos;
+ pos++;
+ cnt++;
+ len--;
+ }
+ }
+ /* 10.4.3 step 4.2 for last block */
+ if (cnt)
+ drbg_kcapi_sym(aesctx, out, &data, blocklen_bytes);
+}
+
+/*
+ * scratchpad usage: drbg_ctr_update is interlinked with crypto_drbg_ctr_df
+ * (and drbg_ctr_bcc, but this function does not need any temporary buffers),
+ * the scratchpad is used as follows:
+ * drbg_ctr_update:
+ * temp
+ * start: drbg->scratchpad
+ * length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ * note: the cipher writing into this variable works
+ * blocklen-wise. Now, when the statelen is not a multiple
+ * of blocklen, the generation loop below "spills over"
+ * by at most blocklen. Thus, we need to give sufficient
+ * memory.
+ * df_data
+ * start: drbg->scratchpad +
+ * drbg_statelen(drbg) + drbg_blocklen(drbg)
+ * length: drbg_statelen(drbg)
+ *
+ * crypto_drbg_ctr_df:
+ * pad
+ * start: df_data + drbg_statelen(drbg)
+ * length: drbg_blocklen(drbg)
+ * iv
+ * start: pad + drbg_blocklen(drbg)
+ * length: drbg_blocklen(drbg)
+ * temp
+ * start: iv + drbg_blocklen(drbg)
+ * length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ * note: temp is the buffer that the BCC function operates
+ * on. BCC operates blockwise. drbg_statelen(drbg)
+ * is sufficient when the DRBG state length is a multiple
+ * of the block size. For AES192 (and maybe other ciphers)
+ * this is not correct and the length for temp is
+ * insufficient (yes, that also means for such ciphers,
+ * the final output of all BCC rounds are truncated).
+ * the final output of all BCC rounds is truncated).
+ * possibilities.
+ * refer to crypto_drbg_ctr_df_datalen() to get the required length
+ */
+
+/* Derivation Function for CTR DRBG as defined in 10.4.2 */
+int crypto_drbg_ctr_df(struct crypto_aes_ctx *aesctx,
+ unsigned char *df_data, size_t bytes_to_return,
+ struct list_head *seedlist,
+ u8 blocklen_bytes,
+ u8 statelen)
+{
+ unsigned char L_N[8];
+ /* S3 is input */
+ struct drbg_string S1, S2, S4, cipherin;
+ LIST_HEAD(bcc_list);
+ unsigned char *pad = df_data + statelen;
+ unsigned char *iv = pad + blocklen_bytes;
+ unsigned char *temp = iv + blocklen_bytes;
+ size_t padlen = 0;
+ unsigned int templen = 0;
+ /* 10.4.2 step 7 */
+ unsigned int i = 0;
+ /* 10.4.2 step 8 */
+ const unsigned char *K = (unsigned char *)
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
+ unsigned char *X;
+ size_t generated_len = 0;
+ size_t inputlen = 0;
+ struct drbg_string *seed = NULL;
+ u8 keylen;
+
+ memset(pad, 0, blocklen_bytes);
+ memset(iv, 0, blocklen_bytes);
+ keylen = statelen - blocklen_bytes;
+ /* 10.4.2 step 1 is implicit as we work byte-wise */
+
+ /* 10.4.2 step 2 */
+ if ((512 / 8) < bytes_to_return)
+ return -EINVAL;
+
+ /* 10.4.2 step 2 -- calculate the entire length of all input data */
+ list_for_each_entry(seed, seedlist, list)
+ inputlen += seed->len;
+ drbg_cpu_to_be32(inputlen, &L_N[0]);
+
+ /* 10.4.2 step 3 */
+ drbg_cpu_to_be32(bytes_to_return, &L_N[4]);
+
+ /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
+ padlen = (inputlen + sizeof(L_N) + 1) % (blocklen_bytes);
+ /* wrap the padlen appropriately */
+ if (padlen)
+ padlen = blocklen_bytes - padlen;
+ /*
+ * pad / padlen contains the 0x80 byte and the following zero bytes.
+ * As the calculated padlen value only covers the number of zero
+ * bytes, this value has to be incremented by one for the 0x80 byte.
+ */
+ padlen++;
+ pad[0] = 0x80;
+
+ /* 10.4.2 step 4 -- first fill the linked list and then order it */
+ drbg_string_fill(&S1, iv, blocklen_bytes);
+ list_add_tail(&S1.list, &bcc_list);
+ drbg_string_fill(&S2, L_N, sizeof(L_N));
+ list_add_tail(&S2.list, &bcc_list);
+ list_splice_tail(seedlist, &bcc_list);
+ drbg_string_fill(&S4, pad, padlen);
+ list_add_tail(&S4.list, &bcc_list);
+
+ /* 10.4.2 step 9 */
+ while (templen < (keylen + (blocklen_bytes))) {
+ /*
+ * 10.4.2 step 9.1 - the padding is implicit as the buffer
+ * holds zeros after allocation -- even the increment of i
+ * is irrelevant as the increment remains within length of i
+ */
+ drbg_cpu_to_be32(i, iv);
+ /* 10.4.2 step 9.2 -- BCC and concatenation with temp */
+ drbg_ctr_bcc(aesctx, temp + templen, K, &bcc_list,
+ blocklen_bytes, keylen);
+ /* 10.4.2 step 9.3 */
+ i++;
+ templen += blocklen_bytes;
+ }
+
+ /* 10.4.2 step 11 */
+ X = temp + (keylen);
+ drbg_string_fill(&cipherin, X, blocklen_bytes);
+
+ /* 10.4.2 step 12: overwriting of outval is implemented in next step */
+
+ /* 10.4.2 step 13 */
+ drbg_kcapi_symsetkey(aesctx, temp, keylen);
+ while (generated_len < bytes_to_return) {
+ short blocklen = 0;
+ /*
+ * 10.4.2 step 13.1: the truncation of the key length is
+ * implicit as the key is only drbg_blocklen in size based on
+ * the implementation of the cipher function callback
+ */
+ drbg_kcapi_sym(aesctx, X, &cipherin, blocklen_bytes);
+ blocklen = (blocklen_bytes <
+ (bytes_to_return - generated_len)) ?
+ blocklen_bytes :
+ (bytes_to_return - generated_len);
+ /* 10.4.2 step 13.2 and 14 */
+ memcpy(df_data + generated_len, X, blocklen);
+ generated_len += blocklen;
+ }
+
+ memset(iv, 0, blocklen_bytes);
+ memset(temp, 0, statelen + blocklen_bytes);
+ memset(pad, 0, blocklen_bytes);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_drbg_ctr_df);
+
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Derivation Function conformant to SP800-90A");
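A sketch of how a caller is expected to drive the exported derivation function, modelled on the drbg.c conversion below. The AES-128 sizes and the entropy buffer are illustrative; crypto_drbg_ctr_df_datalen() is the sizing helper referenced in the comment above but defined in the new header rather than in this file.

struct crypto_aes_ctx aesctx;
struct drbg_string seed;
LIST_HEAD(seedlist);
u8 blocklen = 16, statelen = 32;        /* AES-128 CTR DRBG */
u8 *df_data;
int ret;

df_data = kzalloc(crypto_drbg_ctr_df_datalen(statelen, blocklen), GFP_KERNEL);
if (!df_data)
        return -ENOMEM;

drbg_string_fill(&seed, entropy, entropylen);   /* entropy[] is assumed */
list_add_tail(&seed.list, &seedlist);
ret = crypto_drbg_ctr_df(&aesctx, df_data, statelen, &seedlist,
                         blocklen, statelen);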
diff --git a/crypto/drbg.c b/crypto/drbg.c
index dbe4c8bb5ceb73..1d433dae995579 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,6 +98,7 @@
*/
#include <crypto/drbg.h>
+#include <crypto/df_sp80090a.h>
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
@@ -261,26 +262,6 @@ static int drbg_fips_continuous_test(struct drbg_state *drbg,
return 0;
}
-/*
- * Convert an integer into a byte representation of this integer.
- * The byte representation is big-endian
- *
- * @val value to be converted
- * @buf buffer holding the converted integer -- caller must ensure that
- * buffer size is at least 32 bit
- */
-#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
-static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
-{
- struct s {
- __be32 conv;
- };
- struct s *conversion = (struct s *) buf;
-
- conversion->conv = cpu_to_be32(val);
-}
-#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
-
/******************************************************************
* CTR DRBG callback functions
******************************************************************/
@@ -294,10 +275,6 @@ MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
-static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
- const unsigned char *key);
-static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
- const struct drbg_string *in);
static int drbg_init_sym_kernel(struct drbg_state *drbg);
static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
@@ -305,202 +282,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *outbuf, u32 outlen);
#define DRBG_OUTSCRATCHLEN 256
-/* BCC function for CTR DRBG as defined in 10.4.3 */
-static int drbg_ctr_bcc(struct drbg_state *drbg,
- unsigned char *out, const unsigned char *key,
- struct list_head *in)
-{
- int ret = 0;
- struct drbg_string *curr = NULL;
- struct drbg_string data;
- short cnt = 0;
-
- drbg_string_fill(&data, out, drbg_blocklen(drbg));
-
- /* 10.4.3 step 2 / 4 */
- drbg_kcapi_symsetkey(drbg, key);
- list_for_each_entry(curr, in, list) {
- const unsigned char *pos = curr->buf;
- size_t len = curr->len;
- /* 10.4.3 step 4.1 */
- while (len) {
- /* 10.4.3 step 4.2 */
- if (drbg_blocklen(drbg) == cnt) {
- cnt = 0;
- ret = drbg_kcapi_sym(drbg, out, &data);
- if (ret)
- return ret;
- }
- out[cnt] ^= *pos;
- pos++;
- cnt++;
- len--;
- }
- }
- /* 10.4.3 step 4.2 for last block */
- if (cnt)
- ret = drbg_kcapi_sym(drbg, out, &data);
-
- return ret;
-}
-
-/*
- * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df
- * (and drbg_ctr_bcc, but this function does not need any temporary buffers),
- * the scratchpad is used as follows:
- * drbg_ctr_update:
- * temp
- * start: drbg->scratchpad
- * length: drbg_statelen(drbg) + drbg_blocklen(drbg)
- * note: the cipher writing into this variable works
- * blocklen-wise. Now, when the statelen is not a multiple
- * of blocklen, the generateion loop below "spills over"
- * by at most blocklen. Thus, we need to give sufficient
- * memory.
- * df_data
- * start: drbg->scratchpad +
- * drbg_statelen(drbg) + drbg_blocklen(drbg)
- * length: drbg_statelen(drbg)
- *
- * drbg_ctr_df:
- * pad
- * start: df_data + drbg_statelen(drbg)
- * length: drbg_blocklen(drbg)
- * iv
- * start: pad + drbg_blocklen(drbg)
- * length: drbg_blocklen(drbg)
- * temp
- * start: iv + drbg_blocklen(drbg)
- * length: drbg_satelen(drbg) + drbg_blocklen(drbg)
- * note: temp is the buffer that the BCC function operates
- * on. BCC operates blockwise. drbg_statelen(drbg)
- * is sufficient when the DRBG state length is a multiple
- * of the block size. For AES192 (and maybe other ciphers)
- * this is not correct and the length for temp is
- * insufficient (yes, that also means for such ciphers,
- * the final output of all BCC rounds are truncated).
- * Therefore, add drbg_blocklen(drbg) to cover all
- * possibilities.
- */
-
-/* Derivation Function for CTR DRBG as defined in 10.4.2 */
static int drbg_ctr_df(struct drbg_state *drbg,
unsigned char *df_data, size_t bytes_to_return,
struct list_head *seedlist)
{
- int ret = -EFAULT;
- unsigned char L_N[8];
- /* S3 is input */
- struct drbg_string S1, S2, S4, cipherin;
- LIST_HEAD(bcc_list);
- unsigned char *pad = df_data + drbg_statelen(drbg);
- unsigned char *iv = pad + drbg_blocklen(drbg);
- unsigned char *temp = iv + drbg_blocklen(drbg);
- size_t padlen = 0;
- unsigned int templen = 0;
- /* 10.4.2 step 7 */
- unsigned int i = 0;
- /* 10.4.2 step 8 */
- const unsigned char *K = (unsigned char *)
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
- unsigned char *X;
- size_t generated_len = 0;
- size_t inputlen = 0;
- struct drbg_string *seed = NULL;
-
- memset(pad, 0, drbg_blocklen(drbg));
- memset(iv, 0, drbg_blocklen(drbg));
-
- /* 10.4.2 step 1 is implicit as we work byte-wise */
-
- /* 10.4.2 step 2 */
- if ((512/8) < bytes_to_return)
- return -EINVAL;
-
- /* 10.4.2 step 2 -- calculate the entire length of all input data */
- list_for_each_entry(seed, seedlist, list)
- inputlen += seed->len;
- drbg_cpu_to_be32(inputlen, &L_N[0]);
-
- /* 10.4.2 step 3 */
- drbg_cpu_to_be32(bytes_to_return, &L_N[4]);
-
- /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
- padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
- /* wrap the padlen appropriately */
- if (padlen)
- padlen = drbg_blocklen(drbg) - padlen;
- /*
- * pad / padlen contains the 0x80 byte and the following zero bytes.
- * As the calculated padlen value only covers the number of zero
- * bytes, this value has to be incremented by one for the 0x80 byte.
- */
- padlen++;
- pad[0] = 0x80;
-
- /* 10.4.2 step 4 -- first fill the linked list and then order it */
- drbg_string_fill(&S1, iv, drbg_blocklen(drbg));
- list_add_tail(&S1.list, &bcc_list);
- drbg_string_fill(&S2, L_N, sizeof(L_N));
- list_add_tail(&S2.list, &bcc_list);
- list_splice_tail(seedlist, &bcc_list);
- drbg_string_fill(&S4, pad, padlen);
- list_add_tail(&S4.list, &bcc_list);
-
- /* 10.4.2 step 9 */
- while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) {
- /*
- * 10.4.2 step 9.1 - the padding is implicit as the buffer
- * holds zeros after allocation -- even the increment of i
- * is irrelevant as the increment remains within length of i
- */
- drbg_cpu_to_be32(i, iv);
- /* 10.4.2 step 9.2 -- BCC and concatenation with temp */
- ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
- if (ret)
- goto out;
- /* 10.4.2 step 9.3 */
- i++;
- templen += drbg_blocklen(drbg);
- }
-
- /* 10.4.2 step 11 */
- X = temp + (drbg_keylen(drbg));
- drbg_string_fill(&cipherin, X, drbg_blocklen(drbg));
-
- /* 10.4.2 step 12: overwriting of outval is implemented in next step */
-
- /* 10.4.2 step 13 */
- drbg_kcapi_symsetkey(drbg, temp);
- while (generated_len < bytes_to_return) {
- short blocklen = 0;
- /*
- * 10.4.2 step 13.1: the truncation of the key length is
- * implicit as the key is only drbg_blocklen in size based on
- * the implementation of the cipher function callback
- */
- ret = drbg_kcapi_sym(drbg, X, &cipherin);
- if (ret)
- goto out;
- blocklen = (drbg_blocklen(drbg) <
- (bytes_to_return - generated_len)) ?
- drbg_blocklen(drbg) :
- (bytes_to_return - generated_len);
- /* 10.4.2 step 13.2 and 14 */
- memcpy(df_data + generated_len, X, blocklen);
- generated_len += blocklen;
- }
-
- ret = 0;
-
-out:
- memset(iv, 0, drbg_blocklen(drbg));
- memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
- memset(pad, 0, drbg_blocklen(drbg));
- return ret;
+ return crypto_drbg_ctr_df(drbg->priv_data, df_data, drbg_statelen(drbg),
+ seedlist, drbg_blocklen(drbg), drbg_statelen(drbg));
}
/*
@@ -1310,10 +1097,8 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
sb_size = 0;
else if (drbg->core->flags & DRBG_CTR)
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */
- drbg_statelen(drbg) + /* df_data */
- drbg_blocklen(drbg) + /* pad */
- drbg_blocklen(drbg) + /* iv */
- drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */
+ crypto_drbg_ctr_df_datalen(drbg_statelen(drbg),
+ drbg_blocklen(drbg));
else
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
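crypto_drbg_ctr_df_datalen() itself is not visible in this diff. Assuming it only mirrors the df_data/pad/iv/temp layout documented in df_sp80090a.c (and the sb_size arithmetic removed here), it would reduce to something like the following; the real definition lives in the new header and may differ.

/* Hypothetical reconstruction, not the actual header definition. */
static inline size_t crypto_drbg_ctr_df_datalen(size_t statelen, size_t blocklen)
{
        return statelen +               /* df_data */
               blocklen +               /* pad */
               blocklen +               /* iv */
               statelen + blocklen;     /* temp, incl. BCC spill-over */
}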
@@ -1658,7 +1443,6 @@ static void drbg_kcapi_set_entropy(struct crypto_rng *tfm,
#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
struct sdesc {
struct shash_desc shash;
- char ctx[];
};
static int drbg_init_hash_kernel(struct drbg_state *drbg)
@@ -1721,10 +1505,9 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
#ifdef CONFIG_CRYPTO_DRBG_CTR
static int drbg_fini_sym_kernel(struct drbg_state *drbg)
{
- struct crypto_cipher *tfm =
- (struct crypto_cipher *)drbg->priv_data;
- if (tfm)
- crypto_free_cipher(tfm);
+ struct crypto_aes_ctx *aesctx = (struct crypto_aes_ctx *)drbg->priv_data;
+
+ kfree(aesctx);
drbg->priv_data = NULL;
if (drbg->ctr_handle)
@@ -1743,20 +1526,16 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
- struct crypto_cipher *tfm;
+ struct crypto_aes_ctx *aesctx;
struct crypto_skcipher *sk_tfm;
struct skcipher_request *req;
unsigned int alignmask;
char ctr_name[CRYPTO_MAX_ALG_NAME];
- tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
- if (IS_ERR(tfm)) {
- pr_info("DRBG: could not allocate cipher TFM handle: %s\n",
- drbg->core->backend_cra_name);
- return PTR_ERR(tfm);
- }
- BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
- drbg->priv_data = tfm;
+ aesctx = kzalloc(sizeof(*aesctx), GFP_KERNEL);
+ if (!aesctx)
+ return -ENOMEM;
+ drbg->priv_data = aesctx;
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
@@ -1800,25 +1579,6 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return alignmask;
}
-static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
- const unsigned char *key)
-{
- struct crypto_cipher *tfm = drbg->priv_data;
-
- crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
-}
-
-static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
- const struct drbg_string *in)
-{
- struct crypto_cipher *tfm = drbg->priv_data;
-
- /* there is only component in *in */
- BUG_ON(in->len < drbg_blocklen(drbg));
- crypto_cipher_encrypt_one(tfm, outval, in->buf);
- return 0;
-}
-
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
diff --git a/crypto/fips.c b/crypto/fips.c
index e88a604cb42b59..65d2bc070a2636 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -24,7 +24,10 @@ EXPORT_SYMBOL_GPL(fips_fail_notif_chain);
/* Process kernel command-line parameter at boot time. fips=0 or fips=1 */
static int fips_enable(char *str)
{
- fips_enabled = !!simple_strtol(str, NULL, 0);
+ if (kstrtoint(str, 0, &fips_enabled))
+ return 0;
+
+ fips_enabled = !!fips_enabled;
pr_info("fips mode: %s\n", str_enabled_disabled(fips_enabled));
return 1;
}
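kstrtoint() rejects malformed input outright, unlike simple_strtol(), which silently accepts a numeric prefix followed by garbage; returning 0 from the __setup handler on a parse error lets the kernel flag the bad parameter instead of guessing. A minimal sketch of the difference, with strings chosen only for illustration:

int val;

if (kstrtoint("1x", 0, &val))           /* -EINVAL: trailing garbage */
        pr_warn("rejected\n");

if (!kstrtoint("1", 0, &val))           /* ok: val == 1 */
        fips_enabled = !!val;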
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 1d010e2a1b1a2e..be0e248438066c 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -10,25 +10,10 @@
*/
#include <crypto/scatterwalk.h>
-#include <linux/crypto.h>
-#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-enum {
- SKCIPHER_WALK_SLOW = 1 << 0,
- SKCIPHER_WALK_COPY = 1 << 1,
- SKCIPHER_WALK_DIFF = 1 << 2,
- SKCIPHER_WALK_SLEEP = 1 << 3,
-};
-
-static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
-{
- return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-}
void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes)
{
@@ -101,26 +86,97 @@ void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
}
EXPORT_SYMBOL_GPL(memcpy_to_sglist);
+/**
+ * memcpy_sglist() - Copy data from one scatterlist to another
+ * @dst: The destination scatterlist. Can be NULL if @nbytes == 0.
+ * @src: The source scatterlist. Can be NULL if @nbytes == 0.
+ * @nbytes: Number of bytes to copy
+ *
+ * The scatterlists can describe exactly the same memory, in which case this
+ * function is a no-op. No other overlaps are supported.
+ *
+ * Context: Any context
+ */
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct skcipher_walk walk = {};
+ unsigned int src_offset, dst_offset;
- if (unlikely(nbytes == 0)) /* in case sg == NULL */
+ if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
return;
- walk.total = nbytes;
-
- scatterwalk_start(&walk.in, src);
- scatterwalk_start(&walk.out, dst);
+ src_offset = src->offset;
+ dst_offset = dst->offset;
+ for (;;) {
+ /* Compute the length to copy this step. */
+ unsigned int len = min3(src->offset + src->length - src_offset,
+ dst->offset + dst->length - dst_offset,
+ nbytes);
+ struct page *src_page = sg_page(src);
+ struct page *dst_page = sg_page(dst);
+ const void *src_virt;
+ void *dst_virt;
+
+ if (IS_ENABLED(CONFIG_HIGHMEM)) {
+ /* HIGHMEM: we may have to actually map the pages. */
+ const unsigned int src_oip = offset_in_page(src_offset);
+ const unsigned int dst_oip = offset_in_page(dst_offset);
+ const unsigned int limit = PAGE_SIZE;
+
+ /* Further limit len to not cross a page boundary. */
+ len = min3(len, limit - src_oip, limit - dst_oip);
+
+ /* Compute the source and destination pages. */
+ src_page += src_offset / PAGE_SIZE;
+ dst_page += dst_offset / PAGE_SIZE;
+
+ if (src_page != dst_page) {
+ /* Copy between different pages. */
+ memcpy_page(dst_page, dst_oip,
+ src_page, src_oip, len);
+ flush_dcache_page(dst_page);
+ } else if (src_oip != dst_oip) {
+ /* Copy between different parts of same page. */
+ dst_virt = kmap_local_page(dst_page);
+ memcpy(dst_virt + dst_oip, dst_virt + src_oip,
+ len);
+ kunmap_local(dst_virt);
+ flush_dcache_page(dst_page);
+ } /* Else, it's the same memory. No action needed. */
+ } else {
+ /*
+ * !HIGHMEM: no mapping needed. Just work in the linear
+ * buffer of each sg entry. Note that we can cross page
+ * boundaries, as they are not significant in this case.
+ */
+ src_virt = page_address(src_page) + src_offset;
+ dst_virt = page_address(dst_page) + dst_offset;
+ if (src_virt != dst_virt) {
+ memcpy(dst_virt, src_virt, len);
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
+ __scatterwalk_flush_dcache_pages(
+ dst_page, dst_offset, len);
+ } /* Else, it's the same memory. No action needed. */
+ }
+ nbytes -= len;
+ if (nbytes == 0) /* No more to copy? */
+ break;
- skcipher_walk_first(&walk, true);
- do {
- if (walk.src.virt.addr != walk.dst.virt.addr)
- memcpy(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- skcipher_walk_done(&walk, 0);
- } while (walk.nbytes);
+ /*
+ * There's more to copy. Advance the offsets by the length
+ * copied this step, and advance the sg entries as needed.
+ */
+ src_offset += len;
+ if (src_offset >= src->offset + src->length) {
+ src = sg_next(src);
+ src_offset = src->offset;
+ }
+ dst_offset += len;
+ if (dst_offset >= dst->offset + dst->length) {
+ dst = sg_next(dst);
+ dst_offset = dst->offset;
+ }
+ }
}
EXPORT_SYMBOL_GPL(memcpy_sglist);
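For reference, a minimal caller of the rewritten helper; src_buf and dst_buf are assumed kernel buffers, and single-entry scatterlists keep the sketch short.

struct scatterlist src_sg, dst_sg;

sg_init_one(&src_sg, src_buf, len);
sg_init_one(&dst_sg, dst_buf, len);
memcpy_sglist(&dst_sg, &src_sg, len);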
@@ -146,236 +202,3 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
return dst;
}
EXPORT_SYMBOL_GPL(scatterwalk_ffwd);
-
-static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- unsigned alignmask = walk->alignmask;
- unsigned n;
- void *buffer;
-
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (!buffer) {
- /* Min size for a buffer of bsize bytes aligned to alignmask */
- n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- buffer = kzalloc(n, skcipher_walk_gfp(walk));
- if (!buffer)
- return skcipher_walk_done(walk, -ENOMEM);
- walk->buffer = buffer;
- }
-
- buffer = PTR_ALIGN(buffer, alignmask + 1);
- memcpy_from_scatterwalk(buffer, &walk->in, bsize);
- walk->out.__addr = buffer;
- walk->in.__addr = walk->out.addr;
-
- walk->nbytes = bsize;
- walk->flags |= SKCIPHER_WALK_SLOW;
-
- return 0;
-}
-
-static int skcipher_next_copy(struct skcipher_walk *walk)
-{
- void *tmp = walk->page;
-
- scatterwalk_map(&walk->in);
- memcpy(tmp, walk->in.addr, walk->nbytes);
- scatterwalk_unmap(&walk->in);
- /*
- * walk->in is advanced later when the number of bytes actually
- * processed (which might be less than walk->nbytes) is known.
- */
-
- walk->in.__addr = tmp;
- walk->out.__addr = tmp;
- return 0;
-}
-
-static int skcipher_next_fast(struct skcipher_walk *walk)
-{
- unsigned long diff;
-
- diff = offset_in_page(walk->in.offset) -
- offset_in_page(walk->out.offset);
- diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
- (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
-
- scatterwalk_map(&walk->out);
- walk->in.__addr = walk->out.__addr;
-
- if (diff) {
- walk->flags |= SKCIPHER_WALK_DIFF;
- scatterwalk_map(&walk->in);
- }
-
- return 0;
-}
-
-static int skcipher_walk_next(struct skcipher_walk *walk)
-{
- unsigned int bsize;
- unsigned int n;
-
- n = walk->total;
- bsize = min(walk->stride, max(n, walk->blocksize));
- n = scatterwalk_clamp(&walk->in, n);
- n = scatterwalk_clamp(&walk->out, n);
-
- if (unlikely(n < bsize)) {
- if (unlikely(walk->total < walk->blocksize))
- return skcipher_walk_done(walk, -EINVAL);
-
-slow_path:
- return skcipher_next_slow(walk, bsize);
- }
- walk->nbytes = n;
-
- if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
- if (!walk->page) {
- gfp_t gfp = skcipher_walk_gfp(walk);
-
- walk->page = (void *)__get_free_page(gfp);
- if (!walk->page)
- goto slow_path;
- }
- walk->flags |= SKCIPHER_WALK_COPY;
- return skcipher_next_copy(walk);
- }
-
- return skcipher_next_fast(walk);
-}
-
-static int skcipher_copy_iv(struct skcipher_walk *walk)
-{
- unsigned alignmask = walk->alignmask;
- unsigned ivsize = walk->ivsize;
- unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
- unsigned size;
- u8 *iv;
-
- /* Min size for a buffer of stride + ivsize, aligned to alignmask */
- size = aligned_stride + ivsize +
- (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
- if (!walk->buffer)
- return -ENOMEM;
-
- iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
-
- walk->iv = memcpy(iv, walk->iv, walk->ivsize);
- return 0;
-}
-
-int skcipher_walk_first(struct skcipher_walk *walk, bool atomic)
-{
- if (WARN_ON_ONCE(in_hardirq()))
- return -EDEADLK;
-
- walk->flags = atomic ? 0 : SKCIPHER_WALK_SLEEP;
-
- walk->buffer = NULL;
- if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
- int err = skcipher_copy_iv(walk);
- if (err)
- return err;
- }
-
- walk->page = NULL;
-
- return skcipher_walk_next(walk);
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_first);
-
-/**
- * skcipher_walk_done() - finish one step of a skcipher_walk
- * @walk: the skcipher_walk
- * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
- * or a -errno value to terminate the walk due to an error
- *
- * This function cleans up after one step of walking through the source and
- * destination scatterlists, and advances to the next step if applicable.
- * walk->nbytes is set to the number of bytes available in the next step,
- * walk->total is set to the new total number of bytes remaining, and
- * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
- * is no more data, or if an error occurred (i.e. -errno return), then
- * walk->nbytes and walk->total are set to 0 and all resources owned by the
- * skcipher_walk are freed.
- *
- * Return: 0 or a -errno value. If @res was a -errno value then it will be
- * returned, but other errors may occur too.
- */
-int skcipher_walk_done(struct skcipher_walk *walk, int res)
-{
- unsigned int n = walk->nbytes; /* num bytes processed this step */
- unsigned int total = 0; /* new total remaining */
-
- if (!n)
- goto finish;
-
- if (likely(res >= 0)) {
- n -= res; /* subtract num bytes *not* processed */
- total = walk->total - n;
- }
-
- if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
- SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF)))) {
- scatterwalk_advance(&walk->in, n);
- } else if (walk->flags & SKCIPHER_WALK_DIFF) {
- scatterwalk_done_src(&walk->in, n);
- } else if (walk->flags & SKCIPHER_WALK_COPY) {
- scatterwalk_advance(&walk->in, n);
- scatterwalk_map(&walk->out);
- memcpy(walk->out.addr, walk->page, n);
- } else { /* SKCIPHER_WALK_SLOW */
- if (res > 0) {
- /*
- * Didn't process all bytes. Either the algorithm is
- * broken, or this was the last step and it turned out
- * the message wasn't evenly divisible into blocks but
- * the algorithm requires it.
- */
- res = -EINVAL;
- total = 0;
- } else
- memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
- goto dst_done;
- }
-
- scatterwalk_done_dst(&walk->out, n);
-dst_done:
-
- if (res > 0)
- res = 0;
-
- walk->total = total;
- walk->nbytes = 0;
-
- if (total) {
- if (walk->flags & SKCIPHER_WALK_SLEEP)
- cond_resched();
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
- return skcipher_walk_next(walk);
- }
-
-finish:
- /* Short-circuit for the common/fast path. */
- if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
- goto out;
-
- if (walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-
-out:
- return res;
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_done);
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 8fa5d9686d0850..14a820cb06c788 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -17,6 +17,7 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -27,14 +28,258 @@
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
+enum {
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
+};
+
static const struct crypto_type crypto_skcipher_type;
+static int skcipher_walk_next(struct skcipher_walk *walk);
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+ return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
return container_of(alg, struct skcipher_alg, base);
}
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ * returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
+{
+ unsigned int n = walk->nbytes; /* num bytes processed this step */
+ unsigned int total = 0; /* new total remaining */
+
+ if (!n)
+ goto finish;
+
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
+ }
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
+ scatterwalk_advance(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+ scatterwalk_done_src(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_COPY) {
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_map(&walk->out);
+ memcpy(walk->out.addr, walk->page, n);
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
+ /*
+ * Didn't process all bytes. Either the algorithm is
+ * broken, or this was the last step and it turned out
+ * the message wasn't evenly divisible into blocks but
+ * the algorithm requires it.
+ */
+ res = -EINVAL;
+ total = 0;
+ } else
+ memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+ goto dst_done;
+ }
+
+ scatterwalk_done_dst(&walk->out, n);
+dst_done:
+
+ if (res > 0)
+ res = 0;
+
+ walk->total = total;
+ walk->nbytes = 0;
+
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
+ return skcipher_walk_next(walk);
+ }
+
+finish:
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+ goto out;
+
+ if (walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
+
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned n;
+ void *buffer;
+
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
+ }
+
+ buffer = PTR_ALIGN(buffer, alignmask + 1);
+ memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+ walk->out.__addr = buffer;
+ walk->in.__addr = walk->out.addr;
+
+ walk->nbytes = bsize;
+ walk->flags |= SKCIPHER_WALK_SLOW;
+
+ return 0;
+}
+
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+ void *tmp = walk->page;
+
+ scatterwalk_map(&walk->in);
+ memcpy(tmp, walk->in.addr, walk->nbytes);
+ scatterwalk_unmap(&walk->in);
+ /*
+ * walk->in is advanced later when the number of bytes actually
+ * processed (which might be less than walk->nbytes) is known.
+ */
+
+ walk->in.__addr = tmp;
+ walk->out.__addr = tmp;
+ return 0;
+}
+
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+ unsigned long diff;
+
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
+
+ scatterwalk_map(&walk->out);
+ walk->in.__addr = walk->out.__addr;
+
+ if (diff) {
+ walk->flags |= SKCIPHER_WALK_DIFF;
+ scatterwalk_map(&walk->in);
+ }
+
+ return 0;
+}
+
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+ unsigned int bsize;
+ unsigned int n;
+
+ n = walk->total;
+ bsize = min(walk->stride, max(n, walk->blocksize));
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+ if (unlikely(walk->total < walk->blocksize))
+ return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+ return skcipher_next_slow(walk, bsize);
+ }
+ walk->nbytes = n;
+
+ if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+ if (!walk->page) {
+ gfp_t gfp = skcipher_walk_gfp(walk);
+
+ walk->page = (void *)__get_free_page(gfp);
+ if (!walk->page)
+ goto slow_path;
+ }
+ walk->flags |= SKCIPHER_WALK_COPY;
+ return skcipher_next_copy(walk);
+ }
+
+ return skcipher_next_fast(walk);
+}
+
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned ivsize = walk->ivsize;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
+ unsigned size;
+ u8 *iv;
+
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+ if (!walk->buffer)
+ return -ENOMEM;
+
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
+
+ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+}
+
+static int skcipher_walk_first(struct skcipher_walk *walk)
+{
+ if (WARN_ON_ONCE(in_hardirq()))
+ return -EDEADLK;
+
+ walk->buffer = NULL;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = skcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+
+ walk->page = NULL;
+
+ return skcipher_walk_next(walk);
+}
+
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req, bool atomic)
{
@@ -49,8 +294,10 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
- atomic = true;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
@@ -67,7 +314,7 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
else
walk->stride = alg->walksize;
- return skcipher_walk_first(walk, atomic);
+ return skcipher_walk_first(walk);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
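Cipher implementations consume the walk API roughly as follows; this is the conventional pattern, with process_blocks() standing in for the real per-block transform, req being the skcipher_request, and AES_BLOCK_SIZE chosen only for illustration.

struct skcipher_walk walk;
unsigned int nbytes;
int err;

err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) != 0) {
        if (nbytes < walk.total)        /* keep whole blocks until the last step */
                nbytes = round_down(nbytes, AES_BLOCK_SIZE);
        process_blocks(walk.dst.virt.addr, walk.src.virt.addr, nbytes);
        err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;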
@@ -80,8 +327,10 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
- atomic = true;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags = SKCIPHER_WALK_SLEEP;
+ else
+ walk->flags = 0;
if (unlikely(!walk->total))
return 0;
@@ -94,7 +343,7 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
walk->ivsize = crypto_aead_ivsize(tfm);
walk->alignmask = crypto_aead_alignmask(tfm);
- return skcipher_walk_first(walk, atomic);
+ return skcipher_walk_first(walk);
}
int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
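For orientation, the walk helpers rebuilt above are consumed by skcipher drivers in a loop like the sketch below; example_crypt() and the per-chunk processing are hypothetical placeholders, not part of this patch:

static int example_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		unsigned int n = walk.nbytes;

		/* encrypt or decrypt n bytes from walk.src.virt.addr to walk.dst.virt.addr */
		err = skcipher_walk_done(&walk, walk.nbytes - n);
	}
	return err;
}

skcipher_walk_done() is given the number of bytes left unprocessed, so a driver that consumes the whole chunk passes zero and the walk advances to the next scatterlist segment.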
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 32d9eaf2c8af41..62fef100e599cd 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1754,10 +1754,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("hmac(streebog512)"));
break;
- case 150:
- ret = min(ret, tcrypt_test("ansi_cprng"));
- break;
-
case 151:
ret = min(ret, tcrypt_test("rfc4106(gcm(aes))"));
break;
@@ -2264,10 +2260,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
- case 321:
- test_hash_speed("poly1305", sec, poly1305_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
case 322:
test_hash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 7f938ac93e583b..85c3f77bcfb462 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -96,22 +96,4 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
-static struct hash_speed poly1305_speed_template[] = {
- { .blen = 96, .plen = 16, },
- { .blen = 96, .plen = 32, },
- { .blen = 96, .plen = 96, },
- { .blen = 288, .plen = 16, },
- { .blen = 288, .plen = 32, },
- { .blen = 288, .plen = 288, },
- { .blen = 1056, .plen = 32, },
- { .blen = 1056, .plen = 1056, },
- { .blen = 2080, .plen = 32, },
- { .blen = 2080, .plen = 2080, },
- { .blen = 4128, .plen = 4128, },
- { .blen = 8224, .plen = 8224, },
-
- /* End marker */
- { .blen = 0, .plen = 0, }
-};
-
#endif /* _CRYPTO_TCRYPT_H */
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 6fb53978df112e..a302be53896d32 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -117,11 +117,6 @@ struct hash_test_suite {
unsigned int count;
};
-struct cprng_test_suite {
- const struct cprng_testvec *vecs;
- unsigned int count;
-};
-
struct drbg_test_suite {
const struct drbg_testvec *vecs;
unsigned int count;
@@ -154,7 +149,6 @@ struct alg_test_desc {
struct cipher_test_suite cipher;
struct comp_test_suite comp;
struct hash_test_suite hash;
- struct cprng_test_suite cprng;
struct drbg_test_suite drbg;
struct akcipher_test_suite akcipher;
struct sig_test_suite sig;
@@ -3442,68 +3436,6 @@ out:
return ret;
}
-static int test_cprng(struct crypto_rng *tfm,
- const struct cprng_testvec *template,
- unsigned int tcount)
-{
- const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
- int err = 0, i, j, seedsize;
- u8 *seed;
- char result[32];
-
- seedsize = crypto_rng_seedsize(tfm);
-
- seed = kmalloc(seedsize, GFP_KERNEL);
- if (!seed) {
- printk(KERN_ERR "alg: cprng: Failed to allocate seed space "
- "for %s\n", algo);
- return -ENOMEM;
- }
-
- for (i = 0; i < tcount; i++) {
- memset(result, 0, 32);
-
- memcpy(seed, template[i].v, template[i].vlen);
- memcpy(seed + template[i].vlen, template[i].key,
- template[i].klen);
- memcpy(seed + template[i].vlen + template[i].klen,
- template[i].dt, template[i].dtlen);
-
- err = crypto_rng_reset(tfm, seed, seedsize);
- if (err) {
- printk(KERN_ERR "alg: cprng: Failed to reset rng "
- "for %s\n", algo);
- goto out;
- }
-
- for (j = 0; j < template[i].loops; j++) {
- err = crypto_rng_get_bytes(tfm, result,
- template[i].rlen);
- if (err < 0) {
- printk(KERN_ERR "alg: cprng: Failed to obtain "
- "the correct amount of random data for "
- "%s (requested %d)\n", algo,
- template[i].rlen);
- goto out;
- }
- }
-
- err = memcmp(result, template[i].result,
- template[i].rlen);
- if (err) {
- printk(KERN_ERR "alg: cprng: Test %d failed for %s\n",
- i, algo);
- hexdump(result, template[i].rlen);
- err = -EINVAL;
- goto out;
- }
- }
-
-out:
- kfree(seed);
- return err;
-}
-
static int alg_test_cipher(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
@@ -3550,29 +3482,6 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
return err;
}
-static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
- u32 type, u32 mask)
-{
- struct crypto_rng *rng;
- int err;
-
- rng = crypto_alloc_rng(driver, type, mask);
- if (IS_ERR(rng)) {
- if (PTR_ERR(rng) == -ENOENT)
- return 0;
- printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
- "%ld\n", driver, PTR_ERR(rng));
- return PTR_ERR(rng);
- }
-
- err = test_cprng(rng, desc->suite.cprng.vecs, desc->suite.cprng.count);
-
- crypto_free_rng(rng);
-
- return err;
-}
-
-
static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
const char *driver, u32 type, u32 mask)
{
@@ -4171,12 +4080,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.aead = __VECS(aegis128_tv_template)
}
}, {
- .alg = "ansi_cprng",
- .test = alg_test_cprng,
- .suite = {
- .cprng = __VECS(ansi_cprng_aes_tv_template)
- }
- }, {
.alg = "authenc(hmac(md5),ecb(cipher_null))",
.generic_driver = "authenc(hmac-md5-lib,ecb-cipher_null)",
.test = alg_test_aead,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index a3e4695945ca11..80bf5f1b67a646 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -119,18 +119,6 @@ struct aead_testvec {
int crypt_error;
};
-struct cprng_testvec {
- const char *key;
- const char *dt;
- const char *v;
- const char *result;
- unsigned char klen;
- unsigned short dtlen;
- unsigned short vlen;
- unsigned short rlen;
- unsigned short loops;
-};
-
struct drbg_testvec {
const unsigned char *entropy;
size_t entropylen;
@@ -9023,6 +9011,126 @@ static const struct cipher_testvec des_tv_template[] = {
.ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
.ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
.len = 8,
+ }, { /* Weak key */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\xe0\xe0\xe0\xf1\xf1\xf1\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Weak key */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Weak key */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 1a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x01\xfe\x01\xfe\x01\xfe\x01\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 1b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\x01\xfe\x01\xfe\x01\xfe\x01",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 2a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\xe0\x1f\xe0\x0e\xf1\x0e\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 2b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\x1f\xe0\x1f\xf1\x0e\xf1\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 3a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x01\xe0\x01\xe0\x01\xf1\x01\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 3b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\x01\xe0\x01\xf1\x01\xf1\x01",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 4a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\xfe\x1f\xfe\x0e\xfe\x0e\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 4b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\x1f\xfe\x1f\xfe\x0e\xfe\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 5a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x01\x1f\x01\x1f\x01\x0e\x01\x0e",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 5b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\x1f\x01\x1f\x01\x0e\x01\x0e\x01",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 6a */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xe0\xfe\xe0\xfe\xf1\xfe\xf1\xfe",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
+ }, { /* Semi-weak key pair 6b */
+ .setkey_error = -EINVAL,
+ .wk = 1,
+ .key = "\xfe\xe0\xfe\xe0\xfe\xf1\xfe\xf1",
+ .klen = 8,
+ .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
+ .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
+ .len = 8,
}, { /* Two blocks -- for testing encryption across pages */
.key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
.klen = 8,
@@ -22377,100 +22485,6 @@ static const struct aead_testvec aegis128_tv_template[] = {
};
/*
- * ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode)
- * test vectors, taken from Appendix B.2.9 and B.2.10:
- * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
- * Only AES-128 is supported at this time.
- */
-static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
- {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xf9",
- .dtlen = 16,
- .v = "\x80\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x59\x53\x1e\xd1\x3b\xb0\xc0\x55"
- "\x84\x79\x66\x85\xc1\x2f\x76\x41",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfa",
- .dtlen = 16,
- .v = "\xc0\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x7c\x22\x2c\xf4\xca\x8f\xa2\x4c"
- "\x1c\x9c\xb6\x41\xa9\xf3\x22\x0d",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfb",
- .dtlen = 16,
- .v = "\xe0\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x8a\xaa\x00\x39\x66\x67\x5b\xe5"
- "\x29\x14\x28\x81\xa9\x4d\x4e\xc7",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfc",
- .dtlen = 16,
- .v = "\xf0\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x88\xdd\xa4\x56\x30\x24\x23\xe5"
- "\xf6\x9d\xa5\x7e\x7b\x95\xc7\x3a",
- .rlen = 16,
- .loops = 1,
- }, {
- .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
- "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
- .klen = 16,
- .dt = "\xe6\xb3\xbe\x78\x2a\x23\xfa\x62"
- "\xd7\x1d\x4a\xfb\xb0\xe9\x22\xfd",
- .dtlen = 16,
- .v = "\xf8\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .vlen = 16,
- .result = "\x05\x25\x92\x46\x61\x79\xd2\xcb"
- "\x78\xc4\x0b\x14\x0a\x5a\x9a\xc8",
- .rlen = 16,
- .loops = 1,
- }, { /* Monte Carlo Test */
- .key = "\x9f\x5b\x51\x20\x0b\xf3\x34\xb5"
- "\xd8\x2b\xe8\xc3\x72\x55\xc8\x48",
- .klen = 16,
- .dt = "\x63\x76\xbb\xe5\x29\x02\xba\x3b"
- "\x67\xc9\x25\xfa\x70\x1f\x11\xac",
- .dtlen = 16,
- .v = "\x57\x2c\x8e\x76\x87\x26\x47\x97"
- "\x7e\x74\xfb\xdd\xc4\x95\x01\xd1",
- .vlen = 16,
- .result = "\x48\xe9\xbd\x0d\x06\xee\x18\xfb"
- "\xe4\x57\x90\xd5\xc3\xfc\x9b\x73",
- .rlen = 16,
- .loops = 10000,
- },
-};
-
-/*
* SP800-90A DRBG Test vectors from
* http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip
*
diff --git a/crypto/zstd.c b/crypto/zstd.c
index ac318d333b6847..cbbd0413751aea 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
+#include <linux/overflow.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>
#include <crypto/internal/acompress.h>
@@ -25,7 +26,7 @@ struct zstd_ctx {
zstd_dctx *dctx;
size_t wksp_size;
zstd_parameters params;
- u8 wksp[] __aligned(8);
+ u8 wksp[] __aligned(8) __counted_by(wksp_size);
};
static DEFINE_MUTEX(zstd_stream_lock);
@@ -38,13 +39,12 @@ static void *zstd_alloc_stream(void)
params = zstd_get_params(ZSTD_DEF_LEVEL, ZSTD_MAX_SIZE);
- wksp_size = max_t(size_t,
- zstd_cstream_workspace_bound(&params.cParams),
- zstd_dstream_workspace_bound(ZSTD_MAX_SIZE));
+ wksp_size = max(zstd_cstream_workspace_bound(&params.cParams),
+ zstd_dstream_workspace_bound(ZSTD_MAX_SIZE));
if (!wksp_size)
return ERR_PTR(-EINVAL);
- ctx = kvmalloc(sizeof(*ctx) + wksp_size, GFP_KERNEL);
+ ctx = kvmalloc(struct_size(ctx, wksp, wksp_size), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -75,11 +75,6 @@ static int zstd_init(struct crypto_acomp *acomp_tfm)
return ret;
}
-static void zstd_exit(struct crypto_acomp *acomp_tfm)
-{
- crypto_acomp_free_streams(&zstd_streams);
-}
-
static int zstd_compress_one(struct acomp_req *req, struct zstd_ctx *ctx,
const void *src, void *dst, unsigned int *dlen)
{
@@ -297,7 +292,6 @@ static struct acomp_alg zstd_acomp = {
.cra_module = THIS_MODULE,
},
.init = zstd_init,
- .exit = zstd_exit,
.compress = zstd_compress,
.decompress = zstd_decompress,
};
@@ -310,6 +304,7 @@ static int __init zstd_mod_init(void)
static void __exit zstd_mod_fini(void)
{
crypto_unregister_acomp(&zstd_acomp);
+ crypto_acomp_free_streams(&zstd_streams);
}
module_init(zstd_mod_init);
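A side note on the two helpers adopted above, since they recur across the tree: struct_size() computes sizeof(*ctx) plus the flexible-array storage and saturates on overflow, while __counted_by() tells the compiler which member holds the element count so fortified builds can bounds-check array accesses. A minimal, hypothetical illustration (example_buf and example_alloc are not from this patch):

struct example_buf {
	size_t len;
	u8 data[] __counted_by(len);
};

static struct example_buf *example_alloc(size_t len)
{
	struct example_buf *p = kvmalloc(struct_size(p, data, len), GFP_KERNEL);

	if (p)
		p->len = len;	/* the count must be set before data[] is indexed */
	return p;
}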
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index aa2b135e3ee230..6d6ac409efcfaf 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -138,12 +138,11 @@ static const struct of_device_id bcm2835_rng_of_match[] = {
{ .compatible = "brcm,bcm6368-rng"},
{},
};
+MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
static int bcm2835_rng_probe(struct platform_device *pdev)
{
- const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
- const struct of_device_id *rng_id;
struct bcm2835_rng_priv *priv;
int err;
@@ -171,12 +170,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
priv->rng.cleanup = bcm2835_rng_cleanup;
if (dev_of_node(dev)) {
- rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node);
- if (!rng_id)
- return -EINVAL;
+ const struct bcm2835_rng_of_data *of_data;
/* Check for rng init function, execute it */
- of_data = rng_id->data;
+ of_data = of_device_get_match_data(dev);
if (of_data)
priv->mask_interrupts = of_data->mask_interrupts;
}
@@ -191,8 +188,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
return err;
}
-MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
-
static const struct platform_device_id bcm2835_rng_devtype[] = {
{ .name = "bcm2835-rng" },
{ .name = "bcm63xx-rng" },
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 018316f546215a..96d7fe41b373d5 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -341,6 +341,9 @@ static ssize_t rng_current_store(struct device *dev,
if (sysfs_streq(buf, "")) {
err = enable_best_rng();
+ } else if (sysfs_streq(buf, "none")) {
+ cur_rng_set_by_user = 1;
+ drop_current_rng();
} else {
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
@@ -392,7 +395,7 @@ static ssize_t rng_available_show(struct device *dev,
strlcat(buf, rng->name, PAGE_SIZE);
strlcat(buf, " ", PAGE_SIZE);
}
- strlcat(buf, "\n", PAGE_SIZE);
+ strlcat(buf, "none\n", PAGE_SIZE);
mutex_unlock(&rng_mutex);
return strlen(buf);
@@ -542,10 +545,10 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->dying);
/* Adjust quality field to always have a proper value */
- rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
+ rng->quality = min3(default_quality, 1024, rng->quality ?: 1024);
- if (!current_rng ||
- (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
+ if (!cur_rng_set_by_user &&
+ (!current_rng || rng->quality > current_rng->quality)) {
/*
* Set new rng as current as the new rng source
* provides better entropy quality and was not
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index a6688d54984c4d..8d3b5d2890f8b7 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -728,6 +728,7 @@ config CRYPTO_DEV_TEGRA
config CRYPTO_DEV_XILINX_TRNG
tristate "Support for Xilinx True Random Generator"
depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_DF80090A
select CRYPTO_RNG
select HW_RANDOM
help
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 8bc08089f044fe..36a1ebca2e70c1 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -502,6 +502,7 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
ss = algt->ss;
+ j = 0;
digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
@@ -536,7 +537,6 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
goto err_dma_result;
}
- j = 0;
len = areq->nbytes;
sg = areq->src;
i = 0;
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index a895e4289efa0c..9688d116d07e59 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(atmel_i2c_probe);
static int __init atmel_i2c_init(void)
{
- atmel_wq = alloc_workqueue("atmel_wq", 0, 0);
+ atmel_wq = alloc_workqueue("atmel_wq", WQ_PERCPU, 0);
return atmel_wq ? 0 : -ENOMEM;
}
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 75ee065da1ec31..b04d6379244ae8 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -252,7 +252,7 @@ struct artpec6_crypto_dma_descriptors {
};
enum artpec6_crypto_variant {
- ARTPEC6_CRYPTO,
+ ARTPEC6_CRYPTO = 1,
ARTPEC7_CRYPTO,
};
@@ -2842,7 +2842,6 @@ MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
static int artpec6_crypto_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
enum artpec6_crypto_variant variant;
struct artpec6_crypto *ac;
struct device *dev = &pdev->dev;
@@ -2853,12 +2852,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
if (artpec6_crypto_dev)
return -ENODEV;
- match = of_match_node(artpec6_crypto_of_match, dev->of_node);
- if (!match)
+ variant = (enum artpec6_crypto_variant)of_device_get_match_data(dev);
+ if (!variant)
return -EINVAL;
- variant = (enum artpec6_crypto_variant)match->data;
-
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
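The same of_device_get_match_data() conversion shows up in several drivers in this series (bcm2835-rng above, artpec6 here, cesa and sp-platform below). The pattern, sketched with hypothetical names, keeps the per-variant data in the of_device_id table and fetches it in a single call; artpec6 starts its variant enum at 1 precisely so that the NULL return for missing match data stays distinguishable:

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,block-v1", .data = (const void *)1 },
	{ .compatible = "vendor,block-v2", .data = (const void *)2 },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);

	if (!variant)
		return -EINVAL;	/* no match or no .data */
	/* configure the device according to variant */
	return 0;
}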
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 079a22cc9f02be..c18dbac564939c 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,13 +2,14 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
- * Copyright 2024 NXP
+ * Copyright 2024-2025 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
#include <linux/bitfield.h>
#include <linux/device.h>
+#include <keys/trusted-type.h>
#include <soc/fsl/caam-blob.h>
#include "compat.h"
@@ -60,18 +61,27 @@ static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *con
complete(&res->completion);
}
+static u32 check_caam_state(struct device *jrdev)
+{
+ const struct caam_drv_private *ctrlpriv;
+
+ ctrlpriv = dev_get_drvdata(jrdev->parent);
+ return FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
+}
+
int caam_process_blob(struct caam_blob_priv *priv,
struct caam_blob_info *info, bool encap)
{
- const struct caam_drv_private *ctrlpriv;
struct caam_blob_job_result testres;
struct device *jrdev = &priv->jrdev;
dma_addr_t dma_in, dma_out;
int op = OP_PCLID_BLOB;
+ int hwbk_caam_ovhd = 0;
size_t output_len;
u32 *desc;
u32 moo;
int ret;
+ int len;
if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH)
return -EINVAL;
@@ -82,14 +92,29 @@ int caam_process_blob(struct caam_blob_priv *priv,
} else {
op |= OP_TYPE_DECAP_PROTOCOL;
output_len = info->input_len - CAAM_BLOB_OVERHEAD;
+ info->output_len = output_len;
+ }
+
+ if (encap && info->pkey_info.is_pkey) {
+ op |= OP_PCL_BLOB_BLACK;
+ if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
+ op |= OP_PCL_BLOB_EKT;
+ hwbk_caam_ovhd = CAAM_CCM_OVERHEAD;
+ }
+ if ((info->input_len + hwbk_caam_ovhd) > MAX_KEY_SIZE)
+ return -EINVAL;
+
+ len = info->input_len + hwbk_caam_ovhd;
+ } else {
+ len = info->input_len;
}
desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL);
if (!desc)
return -ENOMEM;
- dma_in = dma_map_single(jrdev, info->input, info->input_len,
- DMA_TO_DEVICE);
+ dma_in = dma_map_single(jrdev, info->input, len,
+ encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, dma_in)) {
dev_err(jrdev, "unable to map input DMA buffer\n");
ret = -ENOMEM;
@@ -104,8 +129,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
goto out_unmap_in;
}
- ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
+ moo = check_caam_state(jrdev);
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
@@ -117,18 +141,48 @@ int caam_process_blob(struct caam_blob_priv *priv,
* Class 1 Context DWords 0+1+2+3. The random BK is stored in the
* Class 1 Key Register. Operation Mode is set to AES-CCM.
*/
-
init_job_desc(desc, 0);
+
+ if (encap && info->pkey_info.is_pkey) {
+ /*!1. Key command used to load class 1 key register
+ * from input plain key.
+ */
+ append_key(desc, dma_in, info->input_len,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ /*!2. Fifostore to store protected key from class 1 key register. */
+ if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
+ append_fifo_store(desc, dma_in, info->input_len,
+ LDST_CLASS_1_CCB |
+ FIFOST_TYPE_KEY_CCM_JKEK);
+ } else {
+ append_fifo_store(desc, dma_in, info->input_len,
+ LDST_CLASS_1_CCB |
+ FIFOST_TYPE_KEY_KEK);
+ }
+ /*
+ * JUMP_OFFSET specifies the offset of the JUMP target from
+ * the JUMP command's address in the descriptor buffer.
+ */
+ append_jump(desc, JUMP_COND_NOP | BIT(0) << JUMP_OFFSET_SHIFT);
+ }
+
+ /*!3. Load class 2 key with key modifier. */
append_key_as_imm(desc, info->key_mod, info->key_mod_len,
- info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG);
- append_seq_in_ptr_intlen(desc, dma_in, info->input_len, 0);
- append_seq_out_ptr_intlen(desc, dma_out, output_len, 0);
+ info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /*!4. SEQ IN PTR Command. */
+ append_seq_in_ptr(desc, dma_in, info->input_len, 0);
+
+ /*!5. SEQ OUT PTR Command. */
+ append_seq_out_ptr(desc, dma_out, output_len, 0);
+
+ /*!6. Blob encapsulation/decapsulation PROTOCOL Command. */
append_operation(desc, op);
- print_hex_dump_debug("data@"__stringify(__LINE__)": ",
+ print_hex_dump_debug("data@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 1, info->input,
- info->input_len, false);
- print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ len, false);
+ print_hex_dump_debug("jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 1, desc,
desc_bytes(desc), false);
@@ -139,7 +193,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
if (ret == -EINPROGRESS) {
wait_for_completion(&testres.completion);
ret = testres.err;
- print_hex_dump_debug("output@"__stringify(__LINE__)": ",
+ print_hex_dump_debug("output@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 1, info->output,
output_len, false);
}
@@ -149,10 +203,10 @@ int caam_process_blob(struct caam_blob_priv *priv,
dma_unmap_single(jrdev, dma_out, output_len, DMA_FROM_DEVICE);
out_unmap_in:
- dma_unmap_single(jrdev, dma_in, info->input_len, DMA_TO_DEVICE);
+ dma_unmap_single(jrdev, dma_in, len,
+ encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
out_free:
kfree(desc);
-
return ret;
}
EXPORT_SYMBOL(caam_process_blob);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2cfb1b8d8c7cfc..32a6e6e15ee25c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2019, 2023 NXP
+ * Copyright 2016-2019, 2023, 2025 NXP
*
* Based on talitos crypto API driver.
*
@@ -61,13 +61,16 @@
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
+#include <keys/trusted-type.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/key-type.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <soc/fsl/caam-blob.h>
/*
* crypto alg
@@ -119,12 +122,15 @@ struct caam_ctx {
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t key_dma;
+ u8 protected_key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t protected_key_dma;
enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
bool xts_key_fallback;
+ bool is_blob;
struct crypto_skcipher *fallback;
};
@@ -751,9 +757,14 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ /* Here keylen is the actual key length */
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
+ /* Here the plain key length equals the actual key length */
+ ctx->cdata.plain_keylen = keylen;
+ ctx->cdata.key_cmd_opt = 0;
+
/* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@@ -772,6 +783,62 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int paes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_pkey_info *pkey_info = (struct caam_pkey_info *)key;
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct device *jrdev = ctx->jrdev;
+ int err;
+
+ ctx->cdata.key_inline = false;
+
+ keylen = keylen - CAAM_PKEY_HEADER;
+
+ /* Retrieve the length of key */
+ ctx->cdata.plain_keylen = pkey_info->plain_key_sz;
+
+ /* Retrieve the length of blob*/
+ ctx->cdata.keylen = keylen;
+
+ /* Retrieve the address of the blob */
+ ctx->cdata.key_virt = pkey_info->key_buf;
+
+ /* Validate key length for AES algorithms */
+ err = aes_check_keylen(ctx->cdata.plain_keylen);
+ if (err) {
+ dev_err(jrdev, "bad key length\n");
+ return err;
+ }
+
+ /* set command option */
+ ctx->cdata.key_cmd_opt |= KEY_ENC;
+
+ /* check if the Protected-Key is CCM key */
+ if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+ ctx->cdata.key_cmd_opt |= KEY_EKT;
+
+ memcpy(ctx->key, ctx->cdata.key_virt, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.key_dma = ctx->key_dma;
+
+ if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+ ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+ ctx->cdata.plain_keylen +
+ CAAM_CCM_OVERHEAD,
+ DMA_FROM_DEVICE);
+ else
+ ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+ ctx->cdata.plain_keylen,
+ DMA_FROM_DEVICE);
+
+ ctx->cdata.protected_key_dma = ctx->protected_key_dma;
+ ctx->is_blob = true;
+
+ return 0;
+}
+
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
@@ -1254,7 +1321,9 @@ static void init_skcipher_job(struct skcipher_request *req,
struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
int ivsize = crypto_skcipher_ivsize(skcipher);
- u32 *desc = edesc->hw_desc;
+ u32 *desc = !ctx->is_blob ? edesc->hw_desc :
+ (u32 *)((u8 *)edesc->hw_desc + CAAM_DESC_BYTES_MAX);
+ dma_addr_t desc_dma;
u32 *sh_desc;
u32 in_options = 0, out_options = 0;
dma_addr_t src_dma, dst_dma, ptr;
@@ -1269,11 +1338,6 @@ static void init_skcipher_job(struct skcipher_request *req,
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
- sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
- ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (ivsize || edesc->mapped_src_nents > 1) {
src_dma = edesc->sec4_sg_dma;
@@ -1283,8 +1347,6 @@ static void init_skcipher_job(struct skcipher_request *req,
src_dma = sg_dma_address(req->src);
}
- append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
-
if (likely(req->src == req->dst)) {
dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
out_options = in_options;
@@ -1296,7 +1358,25 @@ static void init_skcipher_job(struct skcipher_request *req,
out_options = LDST_SGF;
}
- append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
+ if (ctx->is_blob) {
+ cnstr_desc_skcipher_enc_dec(desc, &ctx->cdata,
+ src_dma, dst_dma, req->cryptlen + ivsize,
+ in_options, out_options,
+ ivsize, encrypt);
+
+ desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
+
+ cnstr_desc_protected_blob_decap(edesc->hw_desc, &ctx->cdata, desc_dma);
+ } else {
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+ append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
+
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
+ }
}
/*
@@ -1817,6 +1897,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int ret = 0;
+ int len;
/*
* XTS is expected to return an error even for input length = 0
@@ -1842,8 +1923,12 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
crypto_skcipher_decrypt(&rctx->fallback_req);
}
+ len = DESC_JOB_IO_LEN * CAAM_CMD_SZ;
+ if (ctx->is_blob)
+ len += CAAM_DESC_BYTES_MAX;
+
/* allocate extended descriptor */
- edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, len);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1888,6 +1973,27 @@ static struct caam_skcipher_alg driver_algs[] = {
{
.skcipher.base = {
.base = {
+ .cra_name = "cbc(paes)",
+ .cra_driver_name = "cbc-paes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = paes_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+ CAAM_PKEY_HEADER,
+ .max_keysize = AES_MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+ CAAM_PKEY_HEADER,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher.base = {
+ .base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-caam",
.cra_blocksize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 7571e1ac913b1d..04c1105eb1f58c 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -2,12 +2,13 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2019, 2025 NXP
*/
#include "compat.h"
#include "desc_constr.h"
#include "caamalg_desc.h"
+#include <soc/fsl/caam-blob.h>
/*
* For aead functions, read payload and write payload,
@@ -1364,6 +1365,84 @@ static inline void skcipher_append_src_dst(u32 *desc)
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
+void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
+ unsigned int in_options, unsigned int out_options,
+ unsigned int ivsize, const bool encrypt)
+{
+ u32 options = cdata->algtype | OP_ALG_AS_INIT;
+
+ if (encrypt)
+ options |= OP_ALG_ENCRYPT;
+ else
+ options |= OP_ALG_DECRYPT;
+
+ init_job_desc(desc, 0);
+
+ append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
+ JUMP_COND_NOP | JUMP_TEST_ALL | 1);
+
+ append_key(desc, cdata->protected_key_dma, cdata->plain_keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG | cdata->key_cmd_opt);
+
+ append_seq_in_ptr(desc, src, data_sz, in_options);
+
+ append_seq_out_ptr(desc, dst, data_sz, out_options);
+
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB);
+
+ append_operation(desc, options);
+
+ skcipher_append_src_dst(desc);
+
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB);
+
+ print_hex_dump_debug("skcipher_enc_dec job desc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+}
+EXPORT_SYMBOL(cnstr_desc_skcipher_enc_dec);
+
+void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t next_desc_addr)
+{
+ u32 protected_store;
+
+ init_job_desc(desc, 0);
+
+ /* Load key modifier */
+ append_load_as_imm(desc, KEYMOD, sizeof(KEYMOD) - 1,
+ LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY);
+
+ append_seq_in_ptr_intlen(desc, cdata->key_dma,
+ cdata->plain_keylen + CAAM_BLOB_OVERHEAD, 0);
+
+ append_seq_out_ptr_intlen(desc, cdata->protected_key_dma,
+ cdata->plain_keylen, 0);
+
+ protected_store = OP_PCLID_BLOB | OP_PCL_BLOB_BLACK;
+ if ((cdata->key_cmd_opt >> KEY_EKT_OFFSET) & 1)
+ protected_store |= OP_PCL_BLOB_EKT;
+
+ append_operation(desc, OP_TYPE_DECAP_PROTOCOL | protected_store);
+
+ if (next_desc_addr) {
+ append_jump(desc, JUMP_TYPE_NONLOCAL | JUMP_TEST_ALL);
+ append_ptr(desc, next_desc_addr);
+ }
+
+ print_hex_dump_debug("protected blob decap job desc@" __stringify(__LINE__) ":",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+}
+EXPORT_SYMBOL(cnstr_desc_protected_blob_decap);
+
/**
* cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
@@ -1391,7 +1470,8 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
+ | cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
@@ -1466,7 +1546,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
+ | cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index f2893393ba5e77..323490a4a75693 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -2,7 +2,7 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016 NXP
+ * Copyright 2016, 2025 NXP
*/
#ifndef _CAAMALG_DESC_H_
@@ -48,6 +48,9 @@
#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
16 * CAAM_CMD_SZ)
+/* Key modifier for CAAM Protected blobs */
+#define KEYMOD "SECURE_KEY"
+
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize, int era);
@@ -113,4 +116,12 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t next_desc);
+
+void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
+ unsigned int in_options, unsigned int out_options,
+ unsigned int ivsize, const bool encrypt);
+
#endif /* _CAAMALG_DESC_H_ */
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index b3d14a7f4dd147..0eb43c8625164f 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -181,7 +181,9 @@ static inline void test_len(struct hwrng *rng, size_t len, bool wait)
struct device *dev = ctx->ctrldev;
buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL);
-
+ if (!buf)
+ return;
while (len > 0) {
read_len = rng->read(rng, buf, len, wait);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index e13470901586b5..c28e94fcb8c79b 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -4,7 +4,7 @@
* Definitions to support CAAM descriptor instruction generation
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018, 2025 NXP
*/
#ifndef DESC_H
@@ -162,6 +162,7 @@
* Enhanced Encryption of Key
*/
#define KEY_EKT 0x00100000
+#define KEY_EKT_OFFSET 20
/*
* Encrypted with Trusted Key
@@ -403,6 +404,7 @@
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_CCM_JKEK (0x14 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
@@ -1001,6 +1003,11 @@
#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+/* Blob protocol protinfo bits */
+
+#define OP_PCL_BLOB_BLACK 0x0004
+#define OP_PCL_BLOB_EKT 0x0100
+
/* For DTLS - OP_PCLID_DTLS */
#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 824c94d44f943d..2a29dd2c9c8a38 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -3,7 +3,7 @@
* caam descriptor construction helper functions
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2025 NXP
*/
#ifndef DESC_CONSTR_H
@@ -498,17 +498,23 @@ do { \
* @keylen: length of the provided algorithm key, in bytes
* @keylen_pad: padded length of the provided algorithm key, in bytes
* @key_dma: dma (bus) address where algorithm key resides
+ * @protected_key_dma: dma (bus) address where protected key resides
* @key_virt: virtual address where algorithm key resides
* @key_inline: true - key can be inlined in the descriptor; false - key is
* referenced by the descriptor
+ * @plain_keylen: size of the key to be loaded by the CAAM
+ * @key_cmd_opt: optional parameters for KEY command
*/
struct alginfo {
u32 algtype;
unsigned int keylen;
unsigned int keylen_pad;
dma_addr_t key_dma;
+ dma_addr_t protected_key_dma;
const void *key_virt;
bool key_inline;
+ u32 plain_keylen;
+ u32 key_cmd_opt;
};
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index d4e06999af9b7e..a6a76e50ba84f0 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -192,7 +192,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev)
}
/* allocate pf2vf response workqueue */
- ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
+ ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", WQ_PERCPU, 0);
if (!ndev->iov.pf2vf_wq) {
kfree(ndev->iov.vfdev);
ndev->iov.vfdev = NULL;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c531d13d971f3f..246801912e1a53 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -507,7 +507,7 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
u32 trng_value;
- int len = min_t(int, sizeof(trng_value), max);
+ int len = min(sizeof(trng_value), max);
/* Locking is provided by the caller so we can update device
* hwrng-related fields safely
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 6f9d7063257d70..1335a83fe052ea 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -95,7 +95,7 @@ struct sp_device {
struct device *dev;
- struct sp_dev_vdata *dev_vdata;
+ const struct sp_dev_vdata *dev_vdata;
unsigned int ord;
char name[SP_MAX_NAME_LEN];
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index e7bb803912a6d3..8891ceee1d7d05 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -459,6 +459,17 @@ static const struct psp_vdata pspv6 = {
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
};
+static const struct psp_vdata pspv7 = {
+ .tee = &teev2,
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
+};
+
#endif
static const struct sp_dev_vdata dev_vdata[] = {
@@ -525,6 +536,13 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv6,
#endif
},
+ { /* 9 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv7,
+#endif
+ },
+
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
@@ -539,6 +557,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
{ PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] },
+ { PCI_VDEVICE(AMD, 0x115A), (kernel_ulong_t)&dev_vdata[9] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 3933cac1694d4e..3f9843fa77827c 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -52,24 +52,13 @@ static const struct of_device_id sp_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sp_of_match);
-static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
-{
- const struct of_device_id *match;
-
- match = of_match_node(sp_of_match, pdev->dev.of_node);
- if (match && match->data)
- return (struct sp_dev_vdata *)match->data;
-
- return NULL;
-}
-
-static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+static const struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
{
const struct acpi_device_id *match;
match = acpi_match_device(sp_acpi_match, &pdev->dev);
if (match && match->driver_data)
- return (struct sp_dev_vdata *)match->driver_data;
+ return (const struct sp_dev_vdata *)match->driver_data;
return NULL;
}
@@ -123,7 +112,7 @@ static int sp_platform_probe(struct platform_device *pdev)
goto e_err;
sp->dev_specific = sp_platform;
- sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev)
+ sp->dev_vdata = pdev->dev.of_node ? of_device_get_match_data(&pdev->dev)
: sp_get_acpi_version(pdev);
if (!sp->dev_vdata) {
ret = -ENODEV;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 3963bb91321fc1..dc7e0cd51c2596 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1235,6 +1235,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
+ int sg_nents;
dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
@@ -1248,7 +1249,10 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
- areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
+ sg_nents = sg_nents_for_len(src, nbytes);
+ if (sg_nents < 0)
+ return sg_nents;
+ areq_ctx->in_nents = sg_nents;
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 92599152674545..edf36f6add527b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -913,11 +913,10 @@ static void hifn_init_pll(struct hifn_device *dev)
else
pllcfg |= HIFN_PLL_REF_CLK_HBI;
- if (hifn_pll_ref[3] != '\0')
- freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
- else {
+ if (hifn_pll_ref[3] == '\0' ||
+ kstrtouint(hifn_pll_ref + 3, 10, &freq)) {
freq = 66;
- dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
+ dev_info(&dev->pdev->dev, "assuming %u MHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
freq, hifn_pll_ref);
}
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 3b391a1466353d..be25ecbdba695a 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -64,10 +64,10 @@
#define QM_EQE_AEQE_SIZE (2UL << 12)
#define QM_EQC_PHASE_SHIFT 16
-#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
+#define QM_EQE_PHASE(dw0) (((dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK GENMASK(15, 0)
-#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
+#define QM_AEQE_PHASE(dw0) (((dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17
#define QM_AEQE_TYPE_MASK 0xf
#define QM_AEQE_CQN_MASK GENMASK(15, 0)
@@ -976,23 +976,23 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
{
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
struct hisi_qm_poll_data *poll_data = NULL;
+ u32 dw0 = le32_to_cpu(eqe->dw0);
u16 eq_depth = qm->eq_depth;
u16 cqn, eqe_num = 0;
- if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
+ if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) {
atomic64_inc(&qm->debug.dfx.err_irq_cnt);
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
return;
}
- cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+ cqn = dw0 & QM_EQE_CQN_MASK;
if (unlikely(cqn >= qm->qp_num))
return;
poll_data = &qm->poll_data[cqn];
- while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
- cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
- poll_data->qp_finish_id[eqe_num] = cqn;
+ while (QM_EQE_PHASE(dw0) == qm->status.eqc_phase) {
+ poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK;
eqe_num++;
if (qm->status.eq_head == eq_depth - 1) {
@@ -1006,6 +1006,8 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
if (eqe_num == (eq_depth >> 1) - 1)
break;
+
+ dw0 = le32_to_cpu(eqe->dw0);
}
poll_data->eqe_num = eqe_num;
@@ -1098,15 +1100,15 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
{
struct hisi_qm *qm = data;
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+ u32 dw0 = le32_to_cpu(aeqe->dw0);
u16 aeq_depth = qm->aeq_depth;
u32 type, qp_id;
atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
- while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
- type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
- QM_AEQE_TYPE_MASK;
- qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
+ while (QM_AEQE_PHASE(dw0) == qm->status.aeqc_phase) {
+ type = (dw0 >> QM_AEQE_TYPE_SHIFT) & QM_AEQE_TYPE_MASK;
+ qp_id = dw0 & QM_AEQE_CQN_MASK;
switch (type) {
case QM_EQ_OVERFLOW:
@@ -1134,6 +1136,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
aeqe++;
qm->status.aeq_head++;
}
+ dw0 = le32_to_cpu(aeqe->dw0);
}
qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
@@ -1283,6 +1286,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
}
break;
+ /*
+ * Note: The current logic only needs to handle the above three types.
+ * If a new type is added, it must be handled here as well,
+ * otherwise undefined behavior may occur.
+ */
+ default:
+ break;
}
}
@@ -2652,10 +2662,10 @@ static int qm_hw_err_isolate(struct hisi_qm *qm)
}
}
list_add(&hw_err->list, &isolate->qm_hw_errs);
- mutex_unlock(&isolate->isolate_lock);
if (count >= isolate->err_threshold)
isolate->is_isolate = true;
+ mutex_unlock(&isolate->isolate_lock);
return 0;
}
@@ -2664,12 +2674,10 @@ static void qm_hw_err_destroy(struct hisi_qm *qm)
{
struct qm_hw_err *err, *tmp;
- mutex_lock(&qm->isolate_data.isolate_lock);
list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
list_del(&err->list);
kfree(err);
}
- mutex_unlock(&qm->isolate_data.isolate_lock);
}
static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
@@ -2697,10 +2705,12 @@ static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
if (qm->isolate_data.is_isolate)
return -EPERM;
+ mutex_lock(&qm->isolate_data.isolate_lock);
qm->isolate_data.err_threshold = num;
/* After the policy is updated, need to reset the hardware err list */
qm_hw_err_destroy(qm);
+ mutex_unlock(&qm->isolate_data.isolate_lock);
return 0;
}
@@ -2737,7 +2747,10 @@ static void qm_remove_uacce(struct hisi_qm *qm)
struct uacce_device *uacce = qm->uacce;
if (qm->use_sva) {
+ mutex_lock(&qm->isolate_data.isolate_lock);
qm_hw_err_destroy(qm);
+ mutex_unlock(&qm->isolate_data.isolate_lock);
+
uacce_remove(uacce);
qm->uacce = NULL;
}
@@ -3678,6 +3691,7 @@ static void qm_clear_vft_config(struct hisi_qm *qm)
static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
{
struct device *dev = &qm->pdev->dev;
+ struct qm_shaper_factor t_factor;
u32 ir = qos * QM_QOS_RATE;
int ret, total_vfs, i;
@@ -3685,6 +3699,7 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
if (fun_index > total_vfs)
return -EINVAL;
+ memcpy(&t_factor, &qm->factor[fun_index], sizeof(t_factor));
qm->factor[fun_index].func_qos = qos;
ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
@@ -3698,11 +3713,21 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
if (ret) {
dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
- return -EINVAL;
+ goto back_func_qos;
}
}
return 0;
+
+back_func_qos:
+ memcpy(&qm->factor[fun_index], &t_factor, sizeof(t_factor));
+ for (i--; i >= ALG_TYPE_0; i--) {
+ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
+ if (ret)
+ dev_err(dev, "failed to restore shaper vft during rollback!\n");
+ }
+
+ return -EINVAL;
}
static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
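For readers new to this driver, the phase test that QM_EQE_PHASE() and QM_AEQE_PHASE() perform follows the usual completion-ring convention: hardware writes each entry with the current phase bit, and software flips its expected value every time the ring wraps, so only freshly written entries are consumed. A generic, hypothetical sketch of that convention (not HiSilicon code):

struct example_entry {
	__le32 dw0;
};

struct example_ring {
	struct example_entry *entries;
	u16 head;
	u16 depth;
	bool phase;
};

static void example_poll(struct example_ring *r)
{
	for (;;) {
		bool entry_phase = le32_to_cpu(r->entries[r->head].dw0) & BIT(16);

		if (entry_phase != r->phase)
			break;
		/* consume r->entries[r->head] here */
		if (++r->head == r->depth) {
			r->head = 0;
			r->phase = !r->phase;
		}
	}
}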
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 7a9ef2a9972a22..24c7b6ab285ba6 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -245,11 +245,6 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl,
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
- if (IS_ERR(curr_hw_sgl)) {
- dev_err(dev, "Get SGL error!\n");
- ret = -ENOMEM;
- goto err_unmap;
- }
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 23f585219fb4b2..d0058757b0000d 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -805,7 +805,7 @@ static int save_iaa_wq(struct idxd_wq *wq)
if (!cpus_per_iaa)
cpus_per_iaa = 1;
out:
- return 0;
+ return ret;
}
static void remove_iaa_wq(struct idxd_wq *wq)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 35679b21ff63bb..667d5e320f509c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -276,11 +276,11 @@ int adf_notify_fatal_error(struct adf_accel_dev *accel_dev)
int adf_init_aer(void)
{
device_reset_wq = alloc_workqueue("qat_device_reset_wq",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!device_reset_wq)
return -EFAULT;
- device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+ device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", WQ_PERCPU, 0);
if (!device_sriov_wq) {
destroy_workqueue(device_reset_wq);
device_reset_wq = NULL;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index 12e5656136610c..4639d7fd93e6e8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -384,7 +384,8 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
*/
int __init adf_init_misc_wq(void)
{
- adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+ adf_misc_wq = alloc_workqueue("qat_misc_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
return !adf_misc_wq ? -ENOMEM : 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index 31d1ef0cb1f52e..bb904ba4bf840b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure);
int __init adf_init_pf_wq(void)
{
/* Workqueue for PF2VF responses */
- pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
+ pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
return !pf2vf_resp_wq ? -ENOMEM : 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index a4636ec9f9cac0..d0fef20a3df4a2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
*/
int __init adf_init_vf_wq(void)
{
- adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+ adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
return !adf_vf_stop_wq ? -EFAULT : 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 18c3e4416dc51e..06d49cb781ae81 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -200,20 +200,12 @@ qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
- char buf[16] = {0};
- unsigned long ae = 0;
- int i;
-
- strscpy(buf, str, sizeof(buf));
- for (i = 0; i < 16; i++) {
- if (!isdigit(buf[i])) {
- buf[i] = '\0';
- break;
- }
- }
- if ((kstrtoul(buf, 10, &ae)))
- return -EFAULT;
+ unsigned long long ae;
+ char *end;
+ ae = simple_strtoull(str, &end, 10);
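+ /* at most 19 decimal digits: such a value always fits in a u64, so the parse cannot silently wrap */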
+ if (ae > UINT_MAX || str == end || (end - str) > 19)
+ return -EINVAL;
*num = (unsigned int)ae;
return 0;
}
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 9c21f5d835d2b0..301bdf239e7d6c 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -420,7 +420,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
{
const struct mv_cesa_caps *caps = &orion_caps;
const struct mbus_dram_target_info *dram;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct mv_cesa_dev *cesa;
struct mv_cesa_engine *engines;
@@ -433,11 +432,9 @@ static int mv_cesa_probe(struct platform_device *pdev)
}
if (dev->of_node) {
- match = of_match_node(mv_cesa_of_match_table, dev->of_node);
- if (!match || !match->data)
+ caps = of_device_get_match_data(dev);
+ if (!caps)
return -ENOTSUPP;
-
- caps = match->data;
}
cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index ebdf4efa09d4d7..b5cc5401f7044b 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -3,6 +3,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/string.h>
#include <linux/string_choices.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
@@ -458,13 +459,13 @@ static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
u16 rid)
{
char filename[OTX2_CPT_NAME_LENGTH];
- char eng_type[8] = {0};
+ char eng_type[8];
int ret, e, i;
INIT_LIST_HEAD(&fw_info->ucodes);
for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
- strcpy(eng_type, get_eng_type_str(e));
+ strscpy(eng_type, get_eng_type_str(e));
for (i = 0; i < strlen(eng_type); i++)
eng_type[i] = tolower(eng_type[i]);
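
A small sketch of the two-argument strscpy() form used above: it infers the size from the destination array, always NUL-terminates, and returns the copied length or -E2BIG on truncation. Names here are illustrative.

#include <linux/ctype.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

static void example_lowercase_name(const char *src)
{
	char name[8];
	ssize_t n;

	n = strscpy(name, src);
	if (n < 0)			/* -E2BIG: src was truncated */
		n = sizeof(name) - 1;
	for (ssize_t i = 0; i < n; i++)
		name[i] = tolower(name[i]);
	pr_info("engine type: %s\n", name);
}
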
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index e95e84486d9aef..b966f3365b7de8 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -21,7 +21,6 @@
#include "sha.h"
#include "aead.h"
-#define QCE_MAJOR_VERSION5 0x05
#define QCE_QUEUE_LENGTH 1
#define QCE_DEFAULT_MEM_BANDWIDTH 393600
@@ -161,7 +160,7 @@ static int qce_check_version(struct qce_device *qce)
* the driver does not support v5 with minor 0 because it has special
* alignment requirements.
*/
- if (major != QCE_MAJOR_VERSION5 || minor == 0)
+ if (major == 5 && minor == 0)
return -ENODEV;
qce->burst_size = QCE_BAM_BURST_SIZE;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 1dec7aea852dd9..68cafd4741ad3d 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -24,11 +24,13 @@ int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->txchan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->txchan))
- return PTR_ERR(dma->txchan);
+ return dev_err_probe(dev, PTR_ERR(dma->txchan),
+ "Failed to get TX DMA channel\n");
dma->rxchan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->rxchan)) {
- ret = PTR_ERR(dma->rxchan);
+ ret = dev_err_probe(dev, PTR_ERR(dma->rxchan),
+ "Failed to get RX DMA channel\n");
goto error_rx;
}
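
A minimal sketch of the dev_err_probe() pattern adopted here: it returns the error code unchanged, logs real failures, and stays quiet on -EPROBE_DEFER while recording the deferral reason. The helper below is illustrative.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_request_tx(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "tx");
	if (IS_ERR(*chan))
		return dev_err_probe(dev, PTR_ERR(*chan),
				     "Failed to get TX DMA channel\n");
	return 0;
}
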
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 9393e10671c248..e80f9148c0129f 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -321,8 +321,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
algt->stat_req++;
rkc->nreq++;
- ivsize = crypto_skcipher_ivsize(tfm);
- if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ if (areq->iv && ivsize > 0) {
if (rctx->mode & RK_CRYPTO_DEC) {
offset = areq->cryptlen - ivsize;
scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index e6839c7bfb73ac..54b7af4a7aee82 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -325,6 +325,7 @@ static int starfive_hash_digest(struct ahash_request *req)
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
struct starfive_cryp_dev *cryp = ctx->cryp;
+ int sg_len;
memset(rctx, 0, sizeof(struct starfive_cryp_request_ctx));
@@ -333,7 +334,10 @@ static int starfive_hash_digest(struct ahash_request *req)
rctx->in_sg = req->src;
rctx->blksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
rctx->digsize = crypto_ahash_digestsize(tfm);
- rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+ sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+ if (sg_len < 0)
+ return sg_len;
+ rctx->in_sg_len = sg_len;
ctx->rctx = rctx;
return crypto_transfer_hash_request_to_engine(cryp->engine, req);
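
A small sketch of why the sg_nents_for_len() return value must be checked before it lands in an unsigned field such as in_sg_len; the helper name is illustrative.

#include <linux/scatterlist.h>

static int example_count_ents(struct scatterlist *sg, u64 total,
			      unsigned int *out_nents)
{
	int nents = sg_nents_for_len(sg, total);

	/* sg_nents_for_len() returns -EINVAL when the scatterlist does
	 * not cover 'total' bytes. */
	if (nents < 0)
		return nents;
	*out_nents = nents;
	return 0;
}
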
diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig
index d4f91c1e0cb55c..a3692ceec49bc3 100644
--- a/drivers/crypto/ti/Kconfig
+++ b/drivers/crypto/ti/Kconfig
@@ -6,6 +6,7 @@ config CRYPTO_DEV_TI_DTHEV2
select CRYPTO_SKCIPHER
select CRYPTO_ECB
select CRYPTO_CBC
+ select CRYPTO_XTS
help
This enables support for the TI DTHE V2 hw cryptography engine
which can be found on TI K3 SOCs. Selecting this enables use
diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c
index 3547c41fa4ed39..156729ccc50ec2 100644
--- a/drivers/crypto/ti/dthev2-aes.c
+++ b/drivers/crypto/ti/dthev2-aes.c
@@ -25,6 +25,7 @@
// AES Engine
#define DTHE_P_AES_BASE 0x7000
+
#define DTHE_P_AES_KEY1_0 0x0038
#define DTHE_P_AES_KEY1_1 0x003C
#define DTHE_P_AES_KEY1_2 0x0030
@@ -33,6 +34,16 @@
#define DTHE_P_AES_KEY1_5 0x002C
#define DTHE_P_AES_KEY1_6 0x0020
#define DTHE_P_AES_KEY1_7 0x0024
+
+#define DTHE_P_AES_KEY2_0 0x0018
+#define DTHE_P_AES_KEY2_1 0x001C
+#define DTHE_P_AES_KEY2_2 0x0010
+#define DTHE_P_AES_KEY2_3 0x0014
+#define DTHE_P_AES_KEY2_4 0x0008
+#define DTHE_P_AES_KEY2_5 0x000C
+#define DTHE_P_AES_KEY2_6 0x0000
+#define DTHE_P_AES_KEY2_7 0x0004
+
#define DTHE_P_AES_IV_IN_0 0x0040
#define DTHE_P_AES_IV_IN_1 0x0044
#define DTHE_P_AES_IV_IN_2 0x0048
@@ -52,6 +63,7 @@
enum aes_ctrl_mode_masks {
AES_CTRL_ECB_MASK = 0x00,
AES_CTRL_CBC_MASK = BIT(5),
+ AES_CTRL_XTS_MASK = BIT(12) | BIT(11),
};
#define DTHE_AES_CTRL_MODE_CLEAR_MASK ~GENMASK(28, 5)
@@ -88,6 +100,31 @@ static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
+static int dthe_cipher_xts_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ ctx->dev_data = dev_data;
+ ctx->keylen = 0;
+
+ ctx->skcipher_fb = crypto_alloc_sync_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->skcipher_fb)) {
+ dev_err(dev_data->dev, "fallback driver xts(aes) couldn't be loaded\n");
+ return PTR_ERR(ctx->skcipher_fb);
+ }
+
+ return 0;
+}
+
+static void dthe_cipher_xts_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_sync_skcipher(ctx->skcipher_fb);
+}
+
static int dthe_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -119,6 +156,27 @@ static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsig
return dthe_aes_setkey(tfm, key, keylen);
}
+static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != 2 * AES_KEYSIZE_128 &&
+ keylen != 2 * AES_KEYSIZE_192 &&
+ keylen != 2 * AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ctx->aes_mode = DTHE_AES_XTS;
+ ctx->keylen = keylen / 2;
+ memcpy(ctx->key, key, keylen);
+
+ crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->skcipher_fb,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen);
+}
+
static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
struct dthe_aes_req_ctx *rctx,
u32 *iv_in)
@@ -141,6 +199,24 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
writel_relaxed(ctx->key[7], aes_base_reg + DTHE_P_AES_KEY1_7);
}
+ if (ctx->aes_mode == DTHE_AES_XTS) {
+ size_t key2_offset = ctx->keylen / sizeof(u32);
+
+ writel_relaxed(ctx->key[key2_offset + 0], aes_base_reg + DTHE_P_AES_KEY2_0);
+ writel_relaxed(ctx->key[key2_offset + 1], aes_base_reg + DTHE_P_AES_KEY2_1);
+ writel_relaxed(ctx->key[key2_offset + 2], aes_base_reg + DTHE_P_AES_KEY2_2);
+ writel_relaxed(ctx->key[key2_offset + 3], aes_base_reg + DTHE_P_AES_KEY2_3);
+
+ if (ctx->keylen > AES_KEYSIZE_128) {
+ writel_relaxed(ctx->key[key2_offset + 4], aes_base_reg + DTHE_P_AES_KEY2_4);
+ writel_relaxed(ctx->key[key2_offset + 5], aes_base_reg + DTHE_P_AES_KEY2_5);
+ }
+ if (ctx->keylen == AES_KEYSIZE_256) {
+ writel_relaxed(ctx->key[key2_offset + 6], aes_base_reg + DTHE_P_AES_KEY2_6);
+ writel_relaxed(ctx->key[key2_offset + 7], aes_base_reg + DTHE_P_AES_KEY2_7);
+ }
+ }
+
if (rctx->enc)
ctrl_val |= DTHE_AES_CTRL_DIR_ENC;
@@ -160,6 +236,9 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
case DTHE_AES_CBC:
ctrl_val |= AES_CTRL_CBC_MASK;
break;
+ case DTHE_AES_XTS:
+ ctrl_val |= AES_CTRL_XTS_MASK;
+ break;
}
if (iv_in) {
@@ -315,24 +394,45 @@ aes_err:
local_bh_disable();
crypto_finalize_skcipher_request(dev_data->engine, req, ret);
local_bh_enable();
- return ret;
+ return 0;
}
static int dthe_aes_crypt(struct skcipher_request *req)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct dthe_data *dev_data = dthe_get_dev(ctx);
struct crypto_engine *engine;
/*
- * If data is not a multiple of AES_BLOCK_SIZE, need to return -EINVAL
- * If data length input is zero, no need to do any operation.
+ * If data is not a multiple of AES_BLOCK_SIZE:
+ * - need to return -EINVAL for ECB, CBC as they are block ciphers
+ * - need to fall back to software as H/W doesn't support Ciphertext Stealing for XTS
*/
- if (req->cryptlen % AES_BLOCK_SIZE)
+ if (req->cryptlen % AES_BLOCK_SIZE) {
+ if (ctx->aes_mode == DTHE_AES_XTS) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb);
+
+ skcipher_request_set_callback(subreq, skcipher_request_flags(req),
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ return rctx->enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ }
return -EINVAL;
+ }
- if (req->cryptlen == 0)
+ /*
+ * If the input data length is zero, there is nothing to do,
+ * except in XTS mode where the data length must be non-zero.
+ */
+ if (req->cryptlen == 0) {
+ if (ctx->aes_mode == DTHE_AES_XTS)
+ return -EINVAL;
return 0;
+ }
engine = dev_data->engine;
return crypto_transfer_skcipher_request_to_engine(engine, req);
@@ -399,7 +499,32 @@ static struct skcipher_engine_alg cipher_algs[] = {
.cra_module = THIS_MODULE,
},
.op.do_one_request = dthe_aes_run,
- } /* CBC AES */
+ }, /* CBC AES */
+ {
+ .base.init = dthe_cipher_xts_init_tfm,
+ .base.exit = dthe_cipher_xts_exit_tfm,
+ .base.setkey = dthe_aes_xts_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE * 2,
+ .base.max_keysize = AES_MAX_KEY_SIZE * 2,
+ .base.ivsize = AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* XTS AES */
};
int dthe_register_aes_algs(void)
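
An illustrative layout note for the XTS key handling above, assuming a 512-bit xts(aes) key (values follow directly from the setkey and set_ctrl_key code in this hunk).

/*
 * setkey(key, 64):  ctx->keylen = 32, ctx->key[0..15] holds key1 || key2
 * key2_offset       = ctx->keylen / sizeof(u32) = 8
 * key1              = ctx->key[0..7]   -> DTHE_P_AES_KEY1_{0..7}
 * key2              = ctx->key[8..15]  -> DTHE_P_AES_KEY2_{0..7}
 */
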
diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h
index 68c94acda8aaa1..c7a06a4c353ff2 100644
--- a/drivers/crypto/ti/dthev2-common.h
+++ b/drivers/crypto/ti/dthev2-common.h
@@ -27,10 +27,16 @@
#define DTHE_REG_SIZE 4
#define DTHE_DMA_TIMEOUT_MS 2000
+/*
+ * Size of largest possible key (of all algorithms) to be stored in dthe_tfm_ctx
+ * This is currently the keysize of XTS-AES-256 which is 512 bits (64 bytes)
+ */
+#define DTHE_MAX_KEYSIZE (AES_MAX_KEY_SIZE * 2)
enum dthe_aes_mode {
DTHE_AES_ECB = 0,
DTHE_AES_CBC,
+ DTHE_AES_XTS,
};
/* Driver specific struct definitions */
@@ -73,12 +79,14 @@ struct dthe_list {
* @keylen: AES key length
* @key: AES key
* @aes_mode: AES mode
+ * @skcipher_fb: Fallback crypto skcipher handle for AES-XTS mode
*/
struct dthe_tfm_ctx {
struct dthe_data *dev_data;
unsigned int keylen;
- u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ u32 key[DTHE_MAX_KEYSIZE / sizeof(u32)];
enum dthe_aes_mode aes_mode;
+ struct crypto_sync_skcipher *skcipher_fb;
};
/**
diff --git a/drivers/crypto/xilinx/xilinx-trng.c b/drivers/crypto/xilinx/xilinx-trng.c
index 4e4700d68127dc..db0fbb28ff32be 100644
--- a/drivers/crypto/xilinx/xilinx-trng.c
+++ b/drivers/crypto/xilinx/xilinx-trng.c
@@ -8,7 +8,6 @@
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/hw_random.h>
#include <linux/io.h>
@@ -18,10 +17,11 @@
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/string.h>
+#include <crypto/aes.h>
+#include <crypto/df_sp80090a.h>
+#include <crypto/internal/drbg.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/rng.h>
-#include <crypto/aes.h>
/* TRNG Registers Offsets */
#define TRNG_STATUS_OFFSET 0x4U
@@ -59,6 +59,8 @@
struct xilinx_rng {
void __iomem *rng_base;
struct device *dev;
+ unsigned char *scratchpadbuf;
+ struct crypto_aes_ctx *aesctx;
struct mutex lock; /* Protect access to TRNG device */
struct hwrng trng;
};
@@ -182,9 +184,13 @@ static void xtrng_enable_entropy(struct xilinx_rng *rng)
static int xtrng_reseed_internal(struct xilinx_rng *rng)
{
u8 entropy[TRNG_ENTROPY_SEED_LEN_BYTES];
+ struct drbg_string data;
+ LIST_HEAD(seedlist);
u32 val;
int ret;
+ drbg_string_fill(&data, entropy, TRNG_SEED_LEN_BYTES);
+ list_add_tail(&data.list, &seedlist);
memset(entropy, 0, sizeof(entropy));
xtrng_enable_entropy(rng);
@@ -192,9 +198,14 @@ static int xtrng_reseed_internal(struct xilinx_rng *rng)
ret = xtrng_collect_random_data(rng, entropy, TRNG_SEED_LEN_BYTES, true);
if (ret != TRNG_SEED_LEN_BYTES)
return -EINVAL;
+ ret = crypto_drbg_ctr_df(rng->aesctx, rng->scratchpadbuf,
+ TRNG_SEED_LEN_BYTES, &seedlist, AES_BLOCK_SIZE,
+ TRNG_SEED_LEN_BYTES);
+ if (ret)
+ return ret;
xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET,
- (u32 *)entropy, TRNG_NUM_INIT_REGS);
+ (u32 *)rng->scratchpadbuf, TRNG_NUM_INIT_REGS);
/* select reseed operation */
iowrite32(TRNG_CTRL_PRNGXS_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
@@ -324,6 +335,7 @@ static void xtrng_hwrng_unregister(struct hwrng *trng)
static int xtrng_probe(struct platform_device *pdev)
{
struct xilinx_rng *rng;
+ size_t sb_size;
int ret;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
@@ -333,15 +345,26 @@ static int xtrng_probe(struct platform_device *pdev)
rng->dev = &pdev->dev;
rng->rng_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rng->rng_base)) {
- dev_err(&pdev->dev, "Failed to map resource %ld\n", PTR_ERR(rng->rng_base));
+ dev_err(&pdev->dev, "Failed to map resource %pe\n", rng->rng_base);
return PTR_ERR(rng->rng_base);
}
+ rng->aesctx = devm_kzalloc(&pdev->dev, sizeof(*rng->aesctx), GFP_KERNEL);
+ if (!rng->aesctx)
+ return -ENOMEM;
+
+ sb_size = crypto_drbg_ctr_df_datalen(TRNG_SEED_LEN_BYTES, AES_BLOCK_SIZE);
+ rng->scratchpadbuf = devm_kzalloc(&pdev->dev, sb_size, GFP_KERNEL);
+ if (!rng->scratchpadbuf) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
xtrng_trng_reset(rng->rng_base);
ret = xtrng_reseed_internal(rng);
if (ret) {
dev_err(&pdev->dev, "TRNG Seed fail\n");
- return ret;
+ goto end;
}
xilinx_rng_dev = rng;
@@ -349,8 +372,9 @@ static int xtrng_probe(struct platform_device *pdev)
ret = crypto_register_rng(&xtrng_trng_alg);
if (ret) {
dev_err(&pdev->dev, "Crypto Random device registration failed: %d\n", ret);
- return ret;
+ goto end;
}
+
ret = xtrng_hwrng_register(&rng->trng);
if (ret) {
dev_err(&pdev->dev, "HWRNG device registration failed: %d\n", ret);
@@ -363,6 +387,7 @@ static int xtrng_probe(struct platform_device *pdev)
crypto_rng_free:
crypto_unregister_rng(&xtrng_trng_alg);
+end:
return ret;
}
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 0e8a416386780d..8e66a1fa9c7867 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -159,6 +159,21 @@ struct crypto_aead {
struct crypto_tfm base;
};
+struct crypto_sync_aead {
+ struct crypto_aead base;
+};
+
+#define MAX_SYNC_AEAD_REQSIZE 384
+
+#define SYNC_AEAD_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_desc[sizeof(struct aead_request) + \
+ MAX_SYNC_AEAD_REQSIZE \
+ ] CRYPTO_MINALIGN_ATTR; \
+ struct aead_request *name = \
+ (((struct aead_request *)__##name##_desc)->base.tfm = \
+ crypto_sync_aead_tfm((_tfm)), \
+ (void *)__##name##_desc)
+
static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_aead, base);
@@ -180,11 +195,18 @@ static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
*/
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
{
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_sync_aead_tfm(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_tfm(&tfm->base);
+}
+
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
@@ -196,6 +218,11 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
+static inline void crypto_free_sync_aead(struct crypto_sync_aead *tfm)
+{
+ crypto_free_aead(&tfm->base);
+}
+
/**
* crypto_has_aead() - Search for the availability of an aead.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -238,6 +265,11 @@ static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
}
+static inline unsigned int crypto_sync_aead_ivsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_ivsize(&tfm->base);
+}
+
/**
* crypto_aead_authsize() - obtain maximum authentication data size
* @tfm: cipher handle
@@ -255,6 +287,11 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
return tfm->authsize;
}
+static inline unsigned int crypto_sync_aead_authsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_authsize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
{
return alg->maxauthsize;
@@ -265,6 +302,11 @@ static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
}
+static inline unsigned int crypto_sync_aead_maxauthsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_maxauthsize(&tfm->base);
+}
+
/**
* crypto_aead_blocksize() - obtain block size of cipher
* @tfm: cipher handle
@@ -280,6 +322,11 @@ static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
}
+static inline unsigned int crypto_sync_aead_blocksize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_blocksize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
{
return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
@@ -300,6 +347,21 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}
+static inline u32 crypto_sync_aead_get_flags(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_aead_set_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_aead_clear_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_clear_flags(&tfm->base, flags);
+}
+
/**
* crypto_aead_setkey() - set key for cipher
* @tfm: cipher handle
@@ -319,6 +381,12 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen);
+static inline int crypto_sync_aead_setkey(struct crypto_sync_aead *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_aead_setkey(&tfm->base, key, keylen);
+}
+
/**
* crypto_aead_setauthsize() - set authentication data size
* @tfm: cipher handle
@@ -331,11 +399,24 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
*/
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+static inline int crypto_sync_aead_setauthsize(struct crypto_sync_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_aead_setauthsize(&tfm->base, authsize);
+}
+
static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
{
return __crypto_aead_cast(req->base.tfm);
}
+static inline struct crypto_sync_aead *crypto_sync_aead_reqtfm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ return container_of(tfm, struct crypto_sync_aead, base);
+}
+
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -417,6 +498,12 @@ static inline void aead_request_set_tfm(struct aead_request *req,
req->base.tfm = crypto_aead_tfm(tfm);
}
+static inline void aead_request_set_sync_tfm(struct aead_request *req,
+ struct crypto_sync_aead *tfm)
+{
+ aead_request_set_tfm(req, &tfm->base);
+}
+
/**
* aead_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
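
A minimal usage sketch for the sync AEAD helpers introduced above; the choice of "gcm(aes)", the 16-byte tag and the buffer handling are illustrative only.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_sync_aead_encrypt(struct scatterlist *src,
				     struct scatterlist *dst,
				     unsigned int len, u8 *iv,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_sync_aead *tfm;
	int err;

	tfm = crypto_alloc_sync_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_sync_aead_setauthsize(tfm, 16);
	if (!err) {
		/* The on-stack request is bounded by MAX_SYNC_AEAD_REQSIZE. */
		SYNC_AEAD_REQUEST_ON_STACK(req, tfm);

		aead_request_set_sync_tfm(req, tfm);
		aead_request_set_callback(req, 0, NULL, NULL);
		aead_request_set_ad(req, 0);
		aead_request_set_crypt(req, src, dst, len, iv);
		err = crypto_aead_encrypt(req);
	}

	crypto_free_sync_aead(tfm);
	return err;
}
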
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index fc457494063610..05deea9dac5e88 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -107,6 +107,18 @@ struct crypto_queue {
unsigned int max_qlen;
};
+struct scatter_walk {
+ /* Must be the first member, see struct skcipher_walk. */
+ union {
+ void *const addr;
+
+ /* Private API field, do not touch. */
+ union crypto_no_such_thing *__addr;
+ };
+ struct scatterlist *sg;
+ unsigned int offset;
+};
+
struct crypto_attr_alg {
char name[CRYPTO_MAX_ALG_NAME];
};
diff --git a/include/crypto/df_sp80090a.h b/include/crypto/df_sp80090a.h
new file mode 100644
index 00000000000000..6b25305fe61138
--- /dev/null
+++ b/include/crypto/df_sp80090a.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ */
+
+#ifndef _CRYPTO_DF80090A_H
+#define _CRYPTO_DF80090A_H
+
+#include <crypto/internal/cipher.h>
+#include <crypto/aes.h>
+
+static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen)
+{
+ return statelen + /* df_data */
+ blocklen + /* pad */
+ blocklen + /* iv */
+ statelen + blocklen; /* temp */
+}
+
+int crypto_drbg_ctr_df(struct crypto_aes_ctx *aes,
+ unsigned char *df_data,
+ size_t bytes_to_return,
+ struct list_head *seedlist,
+ u8 blocklen_bytes,
+ u8 statelen);
+
+#endif /* _CRYPTO_DF80090A_H */
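
A minimal sketch of feeding raw entropy through crypto_drbg_ctr_df(), mirroring the Xilinx TRNG usage earlier in this series; SEEDLEN and the helper name are assumptions for illustration, and the conditioned output is taken from the start of the scratch buffer as the TRNG driver does.

#include <crypto/aes.h>
#include <crypto/df_sp80090a.h>
#include <crypto/internal/drbg.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

#define SEEDLEN 48	/* illustrative state length */

static int example_condition(struct crypto_aes_ctx *aes,
			     const u8 *raw, u8 *out /* SEEDLEN bytes */)
{
	struct drbg_string in;
	LIST_HEAD(seedlist);
	u8 *scratch;
	int ret;

	scratch = kzalloc(crypto_drbg_ctr_df_datalen(SEEDLEN, AES_BLOCK_SIZE),
			  GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;

	drbg_string_fill(&in, raw, SEEDLEN);
	list_add_tail(&in.list, &seedlist);

	ret = crypto_drbg_ctr_df(aes, scratch, SEEDLEN, &seedlist,
				 AES_BLOCK_SIZE, SEEDLEN);
	if (!ret)
		memcpy(out, scratch, SEEDLEN);
	kfree(scratch);
	return ret;
}
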
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index af5ad51d3eef81..2d42518cbdce80 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
+#include <crypto/internal/drbg.h>
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
@@ -54,30 +55,6 @@
#include <linux/list.h>
#include <linux/workqueue.h>
-/*
- * Concatenation Helper and string operation helper
- *
- * SP800-90A requires the concatenation of different data. To avoid copying
- * buffers around or allocate additional memory, the following data structure
- * is used to point to the original memory with its size. In addition, it
- * is used to build a linked list. The linked list defines the concatenation
- * of individual buffers. The order of memory block referenced in that
- * linked list determines the order of concatenation.
- */
-struct drbg_string {
- const unsigned char *buf;
- size_t len;
- struct list_head list;
-};
-
-static inline void drbg_string_fill(struct drbg_string *string,
- const unsigned char *buf, size_t len)
-{
- string->buf = buf;
- string->len = len;
- INIT_LIST_HEAD(&string->list);
-}
-
struct drbg_state;
typedef uint32_t drbg_flag_t;
diff --git a/include/crypto/internal/drbg.h b/include/crypto/internal/drbg.h
new file mode 100644
index 00000000000000..371e52dcee6c57
--- /dev/null
+++ b/include/crypto/internal/drbg.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * NIST SP800-90A DRBG derivation function
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _INTERNAL_DRBG_H
+#define _INTERNAL_DRBG_H
+
+/*
+ * Convert an integer into a byte representation of this integer.
+ * The byte representation is big-endian
+ *
+ * @val value to be converted
+ * @buf buffer holding the converted integer -- caller must ensure that
+ * buffer size is at least 32 bit
+ */
+static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
+{
+ struct s {
+ __be32 conv;
+ };
+ struct s *conversion = (struct s *)buf;
+
+ conversion->conv = cpu_to_be32(val);
+}
+
+/*
+ * Concatenation Helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocate additional memory, the following data structure
+ * is used to point to the original memory with its size. In addition, it
+ * is used to build a linked list. The linked list defines the concatenation
+ * of individual buffers. The order of memory block referenced in that
+ * linked list determines the order of concatenation.
+ */
+struct drbg_string {
+ const unsigned char *buf;
+ size_t len;
+ struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+ const unsigned char *buf, size_t len)
+{
+ string->buf = buf;
+ string->len = len;
+ INIT_LIST_HEAD(&string->list);
+}
+
+#endif //_INTERNAL_DRBG_H
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index d5aa535263f650..0cad8e7364c8e1 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -10,7 +10,6 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
-#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/types.h>
@@ -55,6 +54,47 @@ struct crypto_lskcipher_spawn {
struct crypto_spawn base;
};
+struct skcipher_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int nbytes;
+ unsigned int total;
+
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int blocksize;
+ unsigned int stride;
+ unsigned int alignmask;
+};
+
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
@@ -171,6 +211,7 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req,
bool atomic);
@@ -181,6 +222,11 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
struct aead_request *__restrict req,
bool atomic);
+static inline void skcipher_walk_abort(struct skcipher_walk *walk)
+{
+ skcipher_walk_done(walk, -ECANCELED);
+}
+
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index f8224cc390f8dc..d451b54b322a1c 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -169,12 +169,11 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
*
* The reset function completely re-initializes the random number generator
* referenced by the cipher handle by clearing the current state. The new state
- * is initialized with the caller provided seed or automatically, depending
- * on the random number generator type (the ANSI X9.31 RNG requires
- * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
- * The seed is provided as a parameter to this function call. The provided seed
- * should have the length of the seed size defined for the random number
- * generator as defined by crypto_rng_seedsize.
+ * is initialized with the caller provided seed or automatically, depending on
+ * the random number generator type. (The SP800-90A DRBGs perform an automatic
+ * seeding.) The seed is provided as a parameter to this function call. The
+ * provided seed should have the length of the seed size defined for the random
+ * number generator as defined by crypto_rng_seedsize.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 83d14376ff2bc8..624fab589c2c71 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -11,64 +11,11 @@
#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H
-#include <linux/errno.h>
+#include <crypto/algapi.h>
+
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#include <linux/types.h>
-
-struct scatter_walk {
- /* Must be the first member, see struct skcipher_walk. */
- union {
- void *const addr;
-
- /* Private API field, do not touch. */
- union crypto_no_such_thing *__addr;
- };
- struct scatterlist *sg;
- unsigned int offset;
-};
-
-struct skcipher_walk {
- union {
- /* Virtual address of the source. */
- struct {
- struct {
- const void *const addr;
- } virt;
- } src;
-
- /* Private field for the API, do not use. */
- struct scatter_walk in;
- };
-
- union {
- /* Virtual address of the destination. */
- struct {
- struct {
- void *const addr;
- } virt;
- } dst;
-
- /* Private field for the API, do not use. */
- struct scatter_walk out;
- };
-
- unsigned int nbytes;
- unsigned int total;
-
- u8 *page;
- u8 *buffer;
- u8 *oiv;
- void *iv;
-
- unsigned int ivsize;
-
- int flags;
- unsigned int blocksize;
- unsigned int stride;
- unsigned int alignmask;
-};
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg, int num)
@@ -227,6 +174,34 @@ static inline void scatterwalk_done_src(struct scatter_walk *walk,
scatterwalk_advance(walk, nbytes);
}
+/*
+ * Flush the dcache of any pages that overlap the region
+ * [offset, offset + nbytes) relative to base_page.
+ *
+ * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
+ * that all relevant code (including the call to sg_page() in the caller, if
+ * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
+ */
+static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
+ unsigned int offset,
+ unsigned int nbytes)
+{
+ unsigned int num_pages;
+
+ base_page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+
+ /*
+ * This is an overflow-safe version of
+ * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
+ */
+ num_pages = nbytes / PAGE_SIZE;
+ num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
+
+ for (unsigned int i = 0; i < num_pages; i++)
+ flush_dcache_page(base_page + i);
+}
+
/**
* scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
* @walk: the scatter_walk
@@ -240,27 +215,9 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk,
unsigned int nbytes)
{
scatterwalk_unmap(walk);
- /*
- * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
- * relying on flush_dcache_page() being a no-op when not implemented,
- * since otherwise the BUG_ON in sg_page() does not get optimized out.
- * This also avoids having to consider whether the loop would get
- * reliably optimized out or not.
- */
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
- struct page *base_page;
- unsigned int offset;
- int start, end, i;
-
- base_page = sg_page(walk->sg);
- offset = walk->offset;
- start = offset >> PAGE_SHIFT;
- end = start + (nbytes >> PAGE_SHIFT);
- end += (offset_in_page(offset) + offset_in_page(nbytes) +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
- for (i = start; i < end; i++)
- flush_dcache_page(base_page + i);
- }
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
+ __scatterwalk_flush_dcache_pages(sg_page(walk->sg),
+ walk->offset, nbytes);
scatterwalk_advance(walk, nbytes);
}
@@ -296,12 +253,4 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len);
-int skcipher_walk_first(struct skcipher_walk *walk, bool atomic);
-int skcipher_walk_done(struct skcipher_walk *walk, int res);
-
-static inline void skcipher_walk_abort(struct skcipher_walk *walk)
-{
- skcipher_walk_done(walk, -ECANCELED);
-}
-
#endif /* _CRYPTO_SCATTERWALK_H */
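
A worked example for the overflow-safe page count in __scatterwalk_flush_dcache_pages() above, assuming 4 KiB pages and illustrative values.

/*
 * offset = 4095, nbytes = 8194:
 *
 *   naive:  DIV_ROUND_UP(4095 + 8194, 4096)                    = 4
 *   above:  8194 / 4096 + DIV_ROUND_UP(4095 + 8194 % 4096, 4096)
 *         = 2           + DIV_ROUND_UP(4097, 4096)             = 2 + 2 = 4
 *
 * Splitting the division keeps every intermediate sum below 2 * PAGE_SIZE,
 * so the computation cannot overflow even for very large nbytes.
 */
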
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 69a13e1e5b2e5d..1b91c8f98688d1 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -49,7 +49,7 @@ enum asymmetric_payload_bits {
*/
struct asymmetric_key_id {
unsigned short len;
- unsigned char data[];
+ unsigned char data[] __counted_by(len);
};
struct asymmetric_key_ids {
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 05a221ce79a6f5..08e664b21f5a28 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -355,12 +355,25 @@ static inline void rht_unlock(struct bucket_table *tbl,
local_irq_restore(flags);
}
-static inline struct rhash_head *__rht_ptr(
- struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
+enum rht_lookup_freq {
+ RHT_LOOKUP_NORMAL,
+ RHT_LOOKUP_LIKELY,
+};
+
+static __always_inline struct rhash_head *__rht_ptr(
+ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
{
- return (struct rhash_head *)
- ((unsigned long)p & ~BIT(0) ?:
- (unsigned long)RHT_NULLS_MARKER(bkt));
+ unsigned long p_val = (unsigned long)p & ~BIT(0);
+
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
+
+ if (freq == RHT_LOOKUP_LIKELY)
+ return (struct rhash_head *)
+ (likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt));
+ else
+ return (struct rhash_head *)
+ (p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt));
}
/*
@@ -370,10 +383,17 @@ static inline struct rhash_head *__rht_ptr(
* rht_ptr_exclusive() dereferences in a context where exclusive
* access is guaranteed, such as when destroying the table.
*/
+static __always_inline struct rhash_head *__rht_ptr_rcu(
+ struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
+{
+ return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq);
+}
+
static inline struct rhash_head *rht_ptr_rcu(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference_all(*bkt), bkt);
+ return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr(
@@ -381,13 +401,15 @@ static inline struct rhash_head *rht_ptr(
struct bucket_table *tbl,
unsigned int hash)
{
- return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
+ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr_exclusive(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
+ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
@@ -588,7 +610,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
/* Internal function, do not use. */
static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
- const struct rhashtable_params params)
+ const struct rhashtable_params params,
+ const enum rht_lookup_freq freq)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -599,12 +622,13 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhash_head *he;
unsigned int hash;
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
bkt = rht_bucket(tbl, hash);
do {
- rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
+ rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) {
if (params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -643,11 +667,22 @@ static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? rht_obj(ht, he) : NULL;
}
+static __always_inline void *rhashtable_lookup_likely(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? rht_obj(ht, he) : NULL;
+}
+
/**
* rhashtable_lookup_fast - search hash table, without RCU read lock
* @ht: hash table
@@ -693,11 +728,22 @@ static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
+static __always_inline struct rhlist_head *rhltable_lookup_likely(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
/* Internal function, please use rhashtable_insert_fast() instead. This
* function returns the existing element already in hashes if there is a clash,
* otherwise it returns an error via ERR_PTR().
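
A minimal usage sketch for the new rhashtable_lookup_likely() variant; the object layout and params below are illustrative, and as with rhashtable_lookup() the caller must hold rcu_read_lock().

#include <linux/rhashtable.h>

struct example_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct example_obj, key),
	.head_offset	= offsetof(struct example_obj, node),
};

static struct example_obj *example_find(struct rhashtable *ht, u32 key)
{
	/* Same semantics as rhashtable_lookup(), but the generated code
	 * assumes a non-empty bucket on the hot path. */
	return rhashtable_lookup_likely(ht, &key, example_params);
}
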
diff --git a/include/soc/fsl/caam-blob.h b/include/soc/fsl/caam-blob.h
index 937cac52f36d84..922f7ec3e23137 100644
--- a/include/soc/fsl/caam-blob.h
+++ b/include/soc/fsl/caam-blob.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024-2025 NXP
*/
#ifndef __CAAM_BLOB_GEN
@@ -12,11 +13,34 @@
#define CAAM_BLOB_KEYMOD_LENGTH 16
#define CAAM_BLOB_OVERHEAD (32 + 16)
#define CAAM_BLOB_MAX_LEN 4096
+#define CAAM_ENC_ALGO_CCM 0x1
+#define CAAM_ENC_ALGO_ECB 0x2
+#define CAAM_NONCE_SIZE 6
+#define CAAM_ICV_SIZE 6
+#define CAAM_CCM_OVERHEAD (CAAM_NONCE_SIZE + CAAM_ICV_SIZE)
struct caam_blob_priv;
/**
+ * struct caam_pkey_info - information for CAAM protected key
+ * @is_pkey: flag to identify, if the key is protected.
+ * @key_enc_algo: identifies the algorithm, ccm or ecb
+ * @plain_key_sz: size of plain key.
+ * @key_buf: contains key data
+ */
+struct caam_pkey_info {
+ u8 is_pkey;
+ u8 key_enc_algo;
+ u16 plain_key_sz;
+ u8 key_buf[];
+} __packed;
+
+/* sizeof struct caam_pkey_info */
+#define CAAM_PKEY_HEADER 4
+
+/**
* struct caam_blob_info - information for CAAM blobbing
+ * @pkey_info: protected key information
* @input: pointer to input buffer (must be DMAable)
* @input_len: length of @input buffer in bytes.
* @output: pointer to output buffer (must be DMAable)
@@ -26,6 +50,8 @@ struct caam_blob_priv;
* May not exceed %CAAM_BLOB_KEYMOD_LENGTH
*/
struct caam_blob_info {
+ struct caam_pkey_info pkey_info;
+
void *input;
size_t input_len;
diff --git a/kernel/padata.c b/kernel/padata.c
index f4def028c48c08..aa66d91e20f9c1 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -506,12 +506,6 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
padata_works_free(&works);
}
-static void __padata_list_init(struct padata_list *pd_list)
-{
- INIT_LIST_HEAD(&pd_list->list);
- spin_lock_init(&pd_list->lock);
-}
-
/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
@@ -521,7 +515,8 @@ static void padata_init_squeues(struct parallel_data *pd)
for_each_cpu(cpu, pd->cpumask.cbcpu) {
squeue = per_cpu_ptr(pd->squeue, cpu);
squeue->pd = pd;
- __padata_list_init(&squeue->serial);
+ INIT_LIST_HEAD(&squeue->serial.list);
+ spin_lock_init(&squeue->serial.lock);
INIT_WORK(&squeue->work, padata_serial_worker);
}
}
@@ -534,7 +529,8 @@ static void padata_init_reorder_list(struct parallel_data *pd)
for_each_cpu(cpu, pd->cpumask.pcpu) {
list = per_cpu_ptr(pd->reorder_list, cpu);
- __padata_list_init(list);
+ INIT_LIST_HEAD(&list->list);
+ spin_lock_init(&list->lock);
}
}
diff --git a/lib/crypto/mpi/mpicoder.c b/lib/crypto/mpi/mpicoder.c
index 47f6939599b33a..bf716a03c7045e 100644
--- a/lib/crypto/mpi/mpicoder.c
+++ b/lib/crypto/mpi/mpicoder.c
@@ -398,7 +398,7 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
while (sg_miter_next(&miter)) {
buff = miter.addr;
- len = min_t(unsigned, miter.length, nbytes);
+ len = min(miter.length, nbytes);
nbytes -= len;
for (x = 0; x < len; x++) {
diff --git a/security/keys/trusted-keys/trusted_caam.c b/security/keys/trusted-keys/trusted_caam.c
index e3415c520c0a4d..601943ce0d60f7 100644
--- a/security/keys/trusted-keys/trusted_caam.c
+++ b/security/keys/trusted-keys/trusted_caam.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2025 NXP
*/
#include <keys/trusted_caam.h>
#include <keys/trusted-type.h>
#include <linux/build_bug.h>
#include <linux/key-type.h>
+#include <linux/parser.h>
#include <soc/fsl/caam-blob.h>
static struct caam_blob_priv *blobifier;
@@ -16,6 +18,77 @@ static struct caam_blob_priv *blobifier;
static_assert(MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD <= CAAM_BLOB_MAX_LEN);
static_assert(MAX_BLOB_SIZE <= CAAM_BLOB_MAX_LEN);
+enum {
+ opt_err,
+ opt_key_enc_algo,
+};
+
+static const match_table_t key_tokens = {
+ {opt_key_enc_algo, "key_enc_algo=%s"},
+ {opt_err, NULL}
+};
+
+#ifdef CAAM_DEBUG
+static inline void dump_options(const struct caam_pkey_info *pkey_info)
+{
+ pr_info("key encryption algo %d\n", pkey_info->key_enc_algo);
+}
+#else
+static inline void dump_options(const struct caam_pkey_info *pkey_info)
+{
+}
+#endif
+
+static int get_pkey_options(char *c,
+ struct caam_pkey_info *pkey_info)
+{
+ substring_t args[MAX_OPT_ARGS];
+ unsigned long token_mask = 0;
+ u16 key_enc_algo;
+ char *p = c;
+ int token;
+ int res;
+
+ if (!c)
+ return 0;
+
+ while ((p = strsep(&c, " \t"))) {
+ if (*p == '\0' || *p == ' ' || *p == '\t')
+ continue;
+ token = match_token(p, key_tokens, args);
+ if (test_and_set_bit(token, &token_mask))
+ return -EINVAL;
+
+ switch (token) {
+ case opt_key_enc_algo:
+ res = kstrtou16(args[0].from, 16, &key_enc_algo);
+ if (res < 0)
+ return -EINVAL;
+ pkey_info->key_enc_algo = key_enc_algo;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static bool is_key_pkey(char **datablob)
+{
+ char *c = NULL;
+
+ do {
+ /* From the second argument onwards, determine
+ * whether the key is tied to HW.
+ */
+ c = strsep(datablob, " \t");
+ if (c && (strcmp(c, "pk") == 0))
+ return true;
+ } while (c);
+
+ return false;
+}
+
static int trusted_caam_seal(struct trusted_key_payload *p, char *datablob)
{
int ret;
@@ -25,11 +98,30 @@ static int trusted_caam_seal(struct trusted_key_payload *p, char *datablob)
.key_mod = KEYMOD, .key_mod_len = sizeof(KEYMOD) - 1,
};
+ /*
+ * If it is to be treated as protected key,
+ * read next arguments too.
+ */
+ if (is_key_pkey(&datablob)) {
+ info.pkey_info.plain_key_sz = p->key_len;
+ info.pkey_info.is_pkey = 1;
+ ret = get_pkey_options(datablob, &info.pkey_info);
+ if (ret < 0)
+ return 0;
+ dump_options(&info.pkey_info);
+ }
+
ret = caam_encap_blob(blobifier, &info);
if (ret)
return ret;
p->blob_len = info.output_len;
+ if (info.pkey_info.is_pkey) {
+ p->key_len = p->blob_len + sizeof(struct caam_pkey_info);
+ memcpy(p->key, &info.pkey_info, sizeof(struct caam_pkey_info));
+ memcpy(p->key + sizeof(struct caam_pkey_info), p->blob, p->blob_len);
+ }
+
return 0;
}
@@ -42,11 +134,27 @@ static int trusted_caam_unseal(struct trusted_key_payload *p, char *datablob)
.key_mod = KEYMOD, .key_mod_len = sizeof(KEYMOD) - 1,
};
+ if (is_key_pkey(&datablob)) {
+ info.pkey_info.plain_key_sz = p->blob_len - CAAM_BLOB_OVERHEAD;
+ info.pkey_info.is_pkey = 1;
+ ret = get_pkey_options(datablob, &info.pkey_info);
+ if (ret < 0)
+ return 0;
+ dump_options(&info.pkey_info);
+
+ p->key_len = p->blob_len + sizeof(struct caam_pkey_info);
+ memcpy(p->key, &info.pkey_info, sizeof(struct caam_pkey_info));
+ memcpy(p->key + sizeof(struct caam_pkey_info), p->blob, p->blob_len);
+
+ return 0;
+ }
+
ret = caam_decap_blob(blobifier, &info);
if (ret)
return ret;
p->key_len = info.output_len;
+
return 0;
}