-rw-r--r--  Documentation/ABI/testing/sysfs-driver-qat | 46
-rw-r--r--  Documentation/devicetree/bindings/crypto/qcom-qce.yaml | 50
-rw-r--r--  Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml | 70
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/arm/crypto/sha1_neon_glue.c | 12
-rw-r--r--  arch/arm/crypto/sha256_neon_glue.c | 12
-rw-r--r--  arch/arm/crypto/sha512-neon-glue.c | 12
-rw-r--r--  arch/arm64/crypto/sha256-glue.c | 3
-rw-r--r--  arch/x86/platform/efi/quirks.c | 8
-rw-r--r--  arch/x86/platform/efi/runtime-map.c | 2
-rw-r--r--  crypto/Kconfig | 60
-rw-r--r--  crypto/Makefile | 4
-rw-r--r--  crypto/aegis-neon.h | 17
-rw-r--r--  crypto/aegis128-neon-inner.c | 1
-rw-r--r--  crypto/aegis128-neon.c | 12
-rw-r--r--  crypto/ahash.c | 9
-rw-r--r--  crypto/akcipher.c | 124
-rw-r--r--  crypto/api.c | 27
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 321
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c | 29
-rw-r--r--  crypto/cipher.c | 28
-rw-r--r--  crypto/cmac.c | 36
-rw-r--r--  crypto/hmac.c | 1
-rw-r--r--  crypto/internal.h | 22
-rw-r--r--  crypto/jitterentropy-kcapi.c | 190
-rw-r--r--  crypto/jitterentropy-testing.c | 294
-rw-r--r--  crypto/jitterentropy.c | 154
-rw-r--r--  crypto/jitterentropy.h | 20
-rw-r--r--  crypto/rsa.c | 36
-rw-r--r--  crypto/shash.c | 12
-rw-r--r--  crypto/sig.c | 157
-rw-r--r--  crypto/sm2.c | 106
-rw-r--r--  drivers/char/hw_random/Kconfig | 27
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/cn10k-rng.c | 63
-rw-r--r--  drivers/char/hw_random/histb-rng.c (renamed from drivers/crypto/hisilicon/trng/trng-stb.c) | 83
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 53
-rw-r--r--  drivers/char/hw_random/st-rng.c | 21
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 10
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/atmel-ecc.c | 2
-rw-r--r--  drivers/crypto/atmel-sha204a.c | 2
-rw-r--r--  drivers/crypto/caam/Kconfig | 9
-rw-r--r--  drivers/crypto/caam/caamrng.c | 48
-rw-r--r--  drivers/crypto/caam/ctrl.c | 274
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/regs.h | 14
-rw-r--r--  drivers/crypto/ccp/platform-access.c | 5
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 43
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 7
-rw-r--r--  drivers/crypto/hisilicon/Makefile | 2
-rw-r--r--  drivers/crypto/hisilicon/trng/Makefile | 3
-rw-r--r--  drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 229
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 45
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_drv.c | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_engine.c | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg.c | 24
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h | 8
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dbgfs.c | 69
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dbgfs.h | 29
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 60
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_algs.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_asym_algs.c | 14
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_uclo.c | 8
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c | 12
-rw-r--r--  drivers/crypto/marvell/cesa/cipher.c | 2
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 15
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c | 3
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 34
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 33
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 7
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 41
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c | 247
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 10
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf.h | 1
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c | 8
-rw-r--r--  drivers/crypto/n2_core.c | 9
-rw-r--r--  drivers/crypto/nx/Makefile | 2
-rw-r--r--  drivers/crypto/nx/nx.h | 4
-rw-r--r--  drivers/crypto/sa2ul.h | 2
-rw-r--r--  drivers/crypto/starfive/Kconfig | 20
-rw-r--r--  drivers/crypto/starfive/Makefile | 4
-rw-r--r--  drivers/crypto/starfive/jh7110-cryp.c | 258
-rw-r--r--  drivers/crypto/starfive/jh7110-cryp.h | 172
-rw-r--r--  drivers/crypto/starfive/jh7110-hash.c | 899
-rw-r--r--  drivers/crypto/starfive/jh7110-rsa.c | 617
-rw-r--r--  drivers/firmware/efi/efi.c | 1
-rw-r--r--  drivers/firmware/efi/esrt.c | 6
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 6
-rw-r--r--  drivers/firmware/efi/vars.c | 12
-rw-r--r--  drivers/md/dm-crypt.c | 15
-rw-r--r--  fs/efivarfs/super.c | 39
-rw-r--r--  fs/nfsd/nfs4xdr.c | 10
-rw-r--r--  fs/smb/client/cifs_debug.c | 17
-rw-r--r--  fs/smb/client/cifs_dfs_ref.c | 20
-rw-r--r--  fs/smb/client/cifsfs.c | 30
-rw-r--r--  fs/smb/client/cifsglob.h | 10
-rw-r--r--  fs/smb/client/cifsproto.h | 4
-rw-r--r--  fs/smb/client/cifssmb.c | 211
-rw-r--r--  fs/smb/client/connect.c | 92
-rw-r--r--  fs/smb/client/dfs.c | 96
-rw-r--r--  fs/smb/client/dfs.h | 19
-rw-r--r--  fs/smb/client/dfs_cache.c | 8
-rw-r--r--  fs/smb/client/file.c | 25
-rw-r--r--  fs/smb/client/fs_context.c | 59
-rw-r--r--  fs/smb/client/inode.c | 4
-rw-r--r--  fs/smb/client/misc.c | 55
-rw-r--r--  fs/smb/client/smb2inode.c | 9
-rw-r--r--  fs/smb/client/smb2ops.c | 31
-rw-r--r--  fs/smb/client/smb2pdu.c | 6
-rw-r--r--  fs/smb/client/smb2transport.c | 12
-rw-r--r--  fs/smb/client/trace.h | 20
-rw-r--r--  fs/smb/client/transport.c | 20
-rw-r--r--  fs/smb/server/mgmt/tree_connect.c | 11
-rw-r--r--  fs/smb/server/mgmt/tree_connect.h | 3
-rw-r--r--  fs/smb/server/smb2pdu.c | 121
-rw-r--r--  fs/smb/server/smb_common.c | 8
-rw-r--r--  fs/smb/server/smb_common.h | 2
-rw-r--r--  fs/smb/server/smbacl.c | 2
-rw-r--r--  fs/smb/server/transport_ipc.c | 4
-rw-r--r--  fs/smb/server/vfs.c | 12
-rw-r--r--  fs/smb/server/vfs.h | 2
-rw-r--r--  include/crypto/akcipher.h | 36
-rw-r--r--  include/crypto/algapi.h | 1
-rw-r--r--  include/crypto/engine.h | 2
-rw-r--r--  include/crypto/hash.h | 3
-rw-r--r--  include/crypto/internal/cipher.h | 2
-rw-r--r--  include/crypto/internal/hash.h | 12
-rw-r--r--  include/crypto/internal/sig.h | 17
-rw-r--r--  include/crypto/public_key.h | 2
-rw-r--r--  include/crypto/sha2.h | 2
-rw-r--r--  include/crypto/sha256_base.h | 50
-rw-r--r--  include/crypto/sig.h | 140
-rw-r--r--  include/crypto/sm2.h | 21
-rw-r--r--  include/keys/asymmetric-parser.h | 2
-rw-r--r--  include/linux/crypto.h | 3
-rw-r--r--  include/linux/efi.h | 11
-rw-r--r--  include/linux/mm.h | 9
-rw-r--r--  include/uapi/linux/wireless.h | 2
-rw-r--r--  kernel/livepatch/transition.c | 2
-rw-r--r--  kernel/pid.c | 2
-rw-r--r--  kernel/pid_namespace.c | 2
-rw-r--r--  lib/crypto/sha256.c | 77
-rw-r--r--  mm/mmap.c | 107
-rw-r--r--  mm/mremap.c | 28
-rw-r--r--  mm/nommu.c | 16
162 files changed, 5594 insertions(+), 1495 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat
index 087842b1969e..ef6d6c57105e 100644
--- a/Documentation/ABI/testing/sysfs-driver-qat
+++ b/Documentation/ABI/testing/sysfs-driver-qat
@@ -27,7 +27,18 @@ Description: (RW) Reports the current configuration of the QAT device.
* sym;asym: the device is configured for running crypto
services
+ * asym;sym: identical to sym;asym
* dc: the device is configured for running compression services
+ * sym: the device is configured for running symmetric crypto
+ services
+ * asym: the device is configured for running asymmetric crypto
+ services
+ * asym;dc: the device is configured for running asymmetric
+ crypto services and compression services
+ * dc;asym: identical to asym;dc
+ * sym;dc: the device is configured for running symmetric crypto
+ services and compression services
+ * dc;sym: identical to sym;dc
It is possible to set the configuration only if the device
is in the `down` state (see /sys/bus/pci/devices/<BDF>/qat/state)
@@ -47,3 +58,38 @@ Description: (RW) Reports the current configuration of the QAT device.
dc
This attribute is only available for qat_4xxx devices.
+
+What: /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled
+Date: June 2023
+KernelVersion: 6.5
+Description:	(RW) This configuration option provides a way to force the device to remain
+		in the `MAX` power state.
+		If idle support is enabled, the device will transition to the `MIN` power state
+		when idle; otherwise it will stay in the `MAX` power state.
+ Write to the file to enable or disable idle support.
+
+ The values are:
+
+ * 0: idle support is disabled
+ * 1: idle support is enabled
+
+ Default value is 1.
+
+ It is possible to set the pm_idle_enabled value only if the device
+ is in the `down` state (see /sys/bus/pci/devices/<BDF>/qat/state)
+
+	The following example shows how to change the pm_idle_enabled
+	attribute of a device::
+
+ # cat /sys/bus/pci/devices/<BDF>/qat/state
+ up
+ # cat /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled
+ 1
+ # echo down > /sys/bus/pci/devices/<BDF>/qat/state
+ # echo 0 > /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled
+ # echo up > /sys/bus/pci/devices/<BDF>/qat/state
+ # cat /sys/bus/pci/devices/<BDF>/qat/pm_idle_enabled
+ 0
+
+ This attribute is only available for qat_4xxx devices.
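
The state/pm_idle_enabled/state sequence shown above can also be driven
programmatically. A minimal userspace sketch; the BDF below is a placeholder
and not part of the ABI:

    /* Disable QAT idle support: take the device down, flip the attribute,
     * bring the device back up. Needs root; the BDF is a placeholder. */
    #include <stdio.h>

    static int write_attr(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
    }

    int main(void)
    {
        const char *qat = "/sys/bus/pci/devices/0000:6b:00.0/qat";
        char path[128];

        snprintf(path, sizeof(path), "%s/state", qat);
        if (write_attr(path, "down"))
            return 1;
        snprintf(path, sizeof(path), "%s/pm_idle_enabled", qat);
        if (write_attr(path, "0"))
            return 1;
        snprintf(path, sizeof(path), "%s/state", qat);
        return write_attr(path, "up") ? 1 : 0;
    }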
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
index e375bd981300..bb828068c3b8 100644
--- a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
@@ -26,10 +26,18 @@ properties:
- items:
- enum:
+ - qcom,ipq4019-qce
+ - qcom,sm8150-qce
+ - const: qcom,qce
+
+ - items:
+ - enum:
- qcom,ipq6018-qce
- qcom,ipq8074-qce
- qcom,msm8996-qce
+ - qcom,qcm2290-qce
- qcom,sdm845-qce
+ - qcom,sm6115-qce
- const: qcom,ipq4019-qce
- const: qcom,qce
@@ -46,16 +54,12 @@ properties:
maxItems: 1
clocks:
- items:
- - description: iface clocks register interface.
- - description: bus clocks data transfer interface.
- - description: core clocks rest of the crypto block.
+ minItems: 1
+ maxItems: 3
clock-names:
- items:
- - const: iface
- - const: bus
- - const: core
+ minItems: 1
+ maxItems: 3
iommus:
minItems: 1
@@ -89,9 +93,37 @@ allOf:
enum:
- qcom,crypto-v5.1
- qcom,crypto-v5.4
- - qcom,ipq4019-qce
+ - qcom,ipq6018-qce
+ - qcom,ipq8074-qce
+ - qcom,msm8996-qce
+ - qcom,sdm845-qce
+ then:
+ properties:
+ clocks:
+ maxItems: 3
+ clock-names:
+ items:
+ - const: iface
+ - const: bus
+ - const: core
+ required:
+ - clocks
+ - clock-names
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,qcm2290-qce
+ - qcom,sm6115-qce
then:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ items:
+ - const: core
required:
- clocks
- clock-names
diff --git a/Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml b/Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml
new file mode 100644
index 000000000000..71a2876bd6e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/starfive,jh7110-crypto.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/starfive,jh7110-crypto.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: StarFive Cryptographic Module
+
+maintainers:
+ - Jia Jie Ho <[email protected]>
+ - William Qiu <[email protected]>
+
+properties:
+ compatible:
+ const: starfive,jh7110-crypto
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Hardware reference clock
+ - description: AHB reference clock
+
+ clock-names:
+ items:
+ - const: hclk
+ - const: ahb
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ items:
+ - description: TX DMA channel
+ - description: RX DMA channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - dmas
+ - dma-names
+
+additionalProperties: false
+
+examples:
+ - |
+ crypto: crypto@16000000 {
+ compatible = "starfive,jh7110-crypto";
+ reg = <0x16000000 0x4000>;
+ clocks = <&clk 15>, <&clk 16>;
+ clock-names = "hclk", "ahb";
+ interrupts = <28>;
+ resets = <&reset 3>;
+ dmas = <&dma 1 2>,
+ <&dma 0 2>;
+ dma-names = "tx", "rx";
+ };
+...
diff --git a/MAINTAINERS b/MAINTAINERS
index 910a3bd814b7..308b5250b7de 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -20265,6 +20265,13 @@ F: Documentation/devicetree/bindings/clock/starfive,jh71*.yaml
F: drivers/clk/starfive/clk-starfive-jh71*
F: include/dt-bindings/clock/starfive?jh71*.h
+STARFIVE CRYPTO DRIVER
+M: Jia Jie Ho <[email protected]>
+M: William Qiu <[email protected]>
+S: Supported
+F: Documentation/devicetree/bindings/crypto/starfive*
+F: drivers/crypto/starfive/
+
STARFIVE JH71X0 PINCTRL DRIVERS
M: Emil Renner Berthing <[email protected]>
M: Jianlong Huang <[email protected]>
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index cfe36ae0f3f5..9c70b87e69f7 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -26,8 +26,8 @@
#include "sha1.h"
-asmlinkage void sha1_transform_neon(void *state_h, const char *data,
- unsigned int rounds);
+asmlinkage void sha1_transform_neon(struct sha1_state *state_h,
+ const u8 *data, int rounds);
static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -39,8 +39,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
return sha1_update_arm(desc, data, len);
kernel_neon_begin();
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_transform_neon);
+ sha1_base_do_update(desc, data, len, sha1_transform_neon);
kernel_neon_end();
return 0;
@@ -54,9 +53,8 @@ static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
kernel_neon_begin();
if (len)
- sha1_base_do_update(desc, data, len,
- (sha1_block_fn *)sha1_transform_neon);
- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon);
+ sha1_base_do_update(desc, data, len, sha1_transform_neon);
+ sha1_base_do_finalize(desc, sha1_transform_neon);
kernel_neon_end();
return sha1_base_finish(desc, out);
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 701706262ef3..ccdcfff71910 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -21,8 +21,8 @@
#include "sha256_glue.h"
-asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
- unsigned int num_blks);
+asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest,
+ const u8 *data, int num_blks);
static int crypto_sha256_neon_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -34,8 +34,7 @@ static int crypto_sha256_neon_update(struct shash_desc *desc, const u8 *data,
return crypto_sha256_arm_update(desc, data, len);
kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order_neon);
+ sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
kernel_neon_end();
return 0;
@@ -50,9 +49,8 @@ static int crypto_sha256_neon_finup(struct shash_desc *desc, const u8 *data,
kernel_neon_begin();
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order_neon);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order_neon);
+ sha256_block_data_order_neon);
+ sha256_base_do_finalize(desc, sha256_block_data_order_neon);
kernel_neon_end();
return sha256_base_finish(desc, out);
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index c879ad32db51..c6e58fe475ac 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -20,8 +20,8 @@
MODULE_ALIAS_CRYPTO("sha384-neon");
MODULE_ALIAS_CRYPTO("sha512-neon");
-asmlinkage void sha512_block_data_order_neon(u64 *state, u8 const *src,
- int blocks);
+asmlinkage void sha512_block_data_order_neon(struct sha512_state *state,
+ const u8 *src, int blocks);
static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -33,8 +33,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
return sha512_arm_update(desc, data, len);
kernel_neon_begin();
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order_neon);
+ sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
kernel_neon_end();
return 0;
@@ -49,9 +48,8 @@ static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
kernel_neon_begin();
if (len)
sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order_neon);
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order_neon);
+ sha512_block_data_order_neon);
+ sha512_base_do_finalize(desc, sha512_block_data_order_neon);
kernel_neon_end();
return sha512_base_finish(desc, out);
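
The three ARM glue changes above share one pattern: the NEON assembly routines
used to carry loosely typed prototypes and were cast to sha*_block_fn at every
call site, a mismatch that prototype warnings and indirect-call type checking
(such as CFI) object to. A sketch of the before/after shape for the sha1 case,
assuming the usual kernel headers:

    #include <linux/linkage.h>
    #include <crypto/sha1_base.h>  /* sha1_block_fn, sha1_base_do_update() */

    /* Before: void pointers plus a cast at each use, so the indirect
     * call's type no longer matched the callee:
     *
     *   asmlinkage void sha1_transform_neon(void *state_h,
     *                                       const char *data,
     *                                       unsigned int rounds);
     *   sha1_base_do_update(desc, data, len,
     *                       (sha1_block_fn *)sha1_transform_neon);
     *
     * After: the assembly entry point is declared with the exact
     * sha1_block_fn signature, so it is passed without a cast: */
    asmlinkage void sha1_transform_neon(struct sha1_state *state_h,
                                        const u8 *data, int rounds);

    static void update_example(struct shash_desc *desc, const u8 *data,
                               unsigned int len)
    {
        sha1_base_do_update(desc, data, len, sha1_transform_neon);
    }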
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 9462f6088b3f..9b5c86e07a9a 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -12,8 +12,9 @@
#include <crypto/internal/simd.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
-#include <linux/types.h>
+#include <linux/module.h>
#include <linux/string.h>
+#include <linux/types.h>
MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
MODULE_AUTHOR("Andy Polyakov <[email protected]>");
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index b0b848d6933a..f0cc00032751 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -114,6 +114,14 @@ void efi_delete_dummy_variable(void)
EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
}
+u64 efivar_reserved_space(void)
+{
+ if (efi_no_storage_paranoia)
+ return 0;
+ return EFI_MIN_RESERVE;
+}
+EXPORT_SYMBOL_GPL(efivar_reserved_space);
+
/*
* In the nonblocking case we do not attempt to perform garbage
* collection if we do not have enough free space. Rather, we do the
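
efivar_reserved_space() makes the EFI_MIN_RESERVE holdback (zero when
efi_no_storage_paranoia is set) visible outside the quirk code; per the
diffstat it is consumed on the efivarfs side. A sketch of the consumer
pattern, with a hypothetical helper name:

    #include <linux/efi.h>

    /* Hypothetical: space a caller may actually use for EFI variables
     * once the x86 quirk's reserve has been held back. */
    static u64 efivar_usable_space(u64 remaining_space)
    {
        u64 reserved = efivar_reserved_space();

        return remaining_space > reserved ? remaining_space - reserved : 0;
    }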
diff --git a/arch/x86/platform/efi/runtime-map.c b/arch/x86/platform/efi/runtime-map.c
index bbee682ef8cd..a6f02cef3ca2 100644
--- a/arch/x86/platform/efi/runtime-map.c
+++ b/arch/x86/platform/efi/runtime-map.c
@@ -93,7 +93,7 @@ static void map_release(struct kobject *kobj)
kfree(entry);
}
-static struct kobj_type __refdata map_ktype = {
+static const struct kobj_type __refconst map_ktype = {
.sysfs_ops = &map_attr_ops,
.default_groups = def_groups,
.release = map_release,
diff --git a/crypto/Kconfig b/crypto/Kconfig
index a0e080d5f6ae..650b1b3620d8 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -71,8 +71,15 @@ config CRYPTO_AEAD
config CRYPTO_AEAD2
tristate
select CRYPTO_ALGAPI2
- select CRYPTO_NULL2
- select CRYPTO_RNG2
+
+config CRYPTO_SIG
+ tristate
+ select CRYPTO_SIG2
+ select CRYPTO_ALGAPI
+
+config CRYPTO_SIG2
+ tristate
+ select CRYPTO_ALGAPI2
config CRYPTO_SKCIPHER
tristate
@@ -82,7 +89,6 @@ config CRYPTO_SKCIPHER
config CRYPTO_SKCIPHER2
tristate
select CRYPTO_ALGAPI2
- select CRYPTO_RNG2
config CRYPTO_HASH
tristate
@@ -143,12 +149,14 @@ config CRYPTO_MANAGER
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
+ select CRYPTO_ACOMP2
select CRYPTO_AEAD2
- select CRYPTO_HASH2
- select CRYPTO_SKCIPHER2
select CRYPTO_AKCIPHER2
+ select CRYPTO_SIG2
+ select CRYPTO_HASH2
select CRYPTO_KPP2
- select CRYPTO_ACOMP2
+ select CRYPTO_RNG2
+ select CRYPTO_SKCIPHER2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
@@ -833,13 +841,16 @@ config CRYPTO_GCM
This is required for IPSec ESP (XFRM_ESP).
-config CRYPTO_SEQIV
- tristate "Sequence Number IV Generator"
+config CRYPTO_GENIV
+ tristate
select CRYPTO_AEAD
- select CRYPTO_SKCIPHER
select CRYPTO_NULL
- select CRYPTO_RNG_DEFAULT
select CRYPTO_MANAGER
+ select CRYPTO_RNG_DEFAULT
+
+config CRYPTO_SEQIV
+ tristate "Sequence Number IV Generator"
+ select CRYPTO_GENIV
help
Sequence Number IV generator
@@ -850,10 +861,7 @@ config CRYPTO_SEQIV
config CRYPTO_ECHAINIV
tristate "Encrypted Chain IV Generator"
- select CRYPTO_AEAD
- select CRYPTO_NULL
- select CRYPTO_RNG_DEFAULT
- select CRYPTO_MANAGER
+ select CRYPTO_GENIV
help
Encrypted Chain IV generator
@@ -1277,6 +1285,7 @@ endif # if CRYPTO_DRBG_MENU
config CRYPTO_JITTERENTROPY
tristate "CPU Jitter Non-Deterministic RNG (Random Number Generator)"
select CRYPTO_RNG
+ select CRYPTO_SHA3
help
CPU Jitter RNG (Random Number Generator) from the Jitterentropy library
@@ -1287,6 +1296,26 @@ config CRYPTO_JITTERENTROPY
See https://www.chronox.de/jent.html
+config CRYPTO_JITTERENTROPY_TESTINTERFACE
+ bool "CPU Jitter RNG Test Interface"
+ depends on CRYPTO_JITTERENTROPY
+ help
+ The test interface allows a privileged process to capture
+ the raw unconditioned high resolution time stamp noise that
+ is collected by the Jitter RNG for statistical analysis. As
+ this data is used at the same time to generate random bits,
+ the Jitter RNG operates in an insecure mode as long as the
+ recording is enabled. This interface therefore is only
+ intended for testing purposes and is not suitable for
+ production systems.
+
+ The raw noise data can be obtained using the jent_raw_hires
+ debugfs file. Using the option
+ jitterentropy_testing.boot_raw_hires_test=1 the raw noise of
+ the first 1000 entropy events since boot can be sampled.
+
+ If unsure, select N.
+
config CRYPTO_KDF800108_CTR
tristate
select CRYPTO_HMAC
@@ -1372,6 +1401,9 @@ config CRYPTO_STATS
help
Enable the gathering of crypto stats.
+ Enabling this option reduces the performance of the crypto API. It
+ should only be enabled when there is actually a use case for it.
+
This collects data sizes, numbers of requests, and numbers
of errors processed by:
- AEAD ciphers (encrypt, decrypt)
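
The CRYPTO_JITTERENTROPY_TESTINTERFACE help text above describes sampling raw
noise through debugfs. A hedged userspace sketch; the exact path and the
32-bit record size are assumptions based on the test-interface code later in
this series, and recording must be enabled (for instance via
jitterentropy_testing.boot_raw_hires_test=1):

    /* Dump raw hires time-stamp samples for offline statistical
     * analysis. Run as root. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/debug/jitterentropy_testing/jent_raw_hires",
                        "rb");
        unsigned int sample;

        if (!f)
            return 1;
        while (fread(&sample, sizeof(sample), 1, f) == 1)
            printf("%u\n", sample);
        fclose(f);
        return 0;
    }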
diff --git a/crypto/Makefile b/crypto/Makefile
index d0126c915834..953a7e105e58 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -14,7 +14,7 @@ crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y)
obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
-obj-$(CONFIG_CRYPTO_AEAD2) += geniv.o
+obj-$(CONFIG_CRYPTO_GENIV) += geniv.o
obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
@@ -25,6 +25,7 @@ crypto_hash-y += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
+obj-$(CONFIG_CRYPTO_SIG2) += sig.o
obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
dh_generic-y := dh.o
@@ -171,6 +172,7 @@ CFLAGS_jitterentropy.o = -O0
KASAN_SANITIZE_jitterentropy.o = n
UBSAN_SANITIZE_jitterentropy.o = n
jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
+obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
diff --git a/crypto/aegis-neon.h b/crypto/aegis-neon.h
new file mode 100644
index 000000000000..61e5614b45de
--- /dev/null
+++ b/crypto/aegis-neon.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef _AEGIS_NEON_H
+#define _AEGIS_NEON_H
+
+void crypto_aegis128_init_neon(void *state, const void *key, const void *iv);
+void crypto_aegis128_update_neon(void *state, const void *msg);
+void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
+ unsigned int size);
+void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
+ unsigned int size);
+int crypto_aegis128_final_neon(void *state, void *tag_xor,
+ unsigned int assoclen,
+ unsigned int cryptlen,
+ unsigned int authsize);
+
+#endif
diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c
index 7de485907d81..b6a52a386b22 100644
--- a/crypto/aegis128-neon-inner.c
+++ b/crypto/aegis128-neon-inner.c
@@ -16,6 +16,7 @@
#define AEGIS_BLOCK_SIZE 16
#include <stddef.h>
+#include "aegis-neon.h"
extern int aegis128_have_aes_insn;
diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c
index a7856915ec85..9ee50549e823 100644
--- a/crypto/aegis128-neon.c
+++ b/crypto/aegis128-neon.c
@@ -7,17 +7,7 @@
#include <asm/neon.h>
#include "aegis.h"
-
-void crypto_aegis128_init_neon(void *state, const void *key, const void *iv);
-void crypto_aegis128_update_neon(void *state, const void *msg);
-void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
- unsigned int size);
-void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
- unsigned int size);
-int crypto_aegis128_final_neon(void *state, void *tag_xor,
- unsigned int assoclen,
- unsigned int cryptlen,
- unsigned int authsize);
+#include "aegis-neon.h"
int aegis128_have_aes_insn __ro_after_init;
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 324651040446..709ef0940799 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -31,12 +31,6 @@ struct ahash_request_priv {
void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
-static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
-{
- return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
- halg);
-}
-
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int alignmask = walk->alignmask;
@@ -432,6 +426,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
hash->setkey = ahash_nosetkey;
+ crypto_ahash_set_statesize(hash, alg->halg.statesize);
+
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_init_shash_ops_async(tfm);
@@ -573,6 +569,7 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
nhash->import = hash->import;
nhash->setkey = hash->setkey;
nhash->reqsize = hash->reqsize;
+ nhash->statesize = hash->statesize;
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_clone_shash_ops_async(nhash, hash);
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 7960ceb528c3..52813f0b19e4 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -17,6 +18,8 @@
#include "internal.h"
+#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
+
static int __maybe_unused crypto_akcipher_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -105,7 +108,7 @@ static const struct crypto_type crypto_akcipher_type = {
.report_stat = crypto_akcipher_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
.tfmsize = offsetof(struct crypto_akcipher, base),
};
@@ -186,5 +189,124 @@ int akcipher_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(akcipher_register_instance);
+int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data)
+{
+ unsigned int reqsize = crypto_akcipher_reqsize(data->tfm);
+ struct akcipher_request *req;
+ struct scatterlist *sg;
+ unsigned int mlen;
+ unsigned int len;
+ u8 *buf;
+
+ if (data->dst)
+ mlen = max(data->slen, data->dlen);
+ else
+ mlen = data->slen + data->dlen;
+
+ len = sizeof(*req) + reqsize + mlen;
+ if (len < mlen)
+ return -EOVERFLOW;
+
+ req = kzalloc(len, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ data->req = req;
+ akcipher_request_set_tfm(req, data->tfm);
+
+ buf = (u8 *)(req + 1) + reqsize;
+ data->buf = buf;
+ memcpy(buf, data->src, data->slen);
+
+ sg = &data->sg;
+ sg_init_one(sg, buf, mlen);
+ akcipher_request_set_crypt(req, sg, data->dst ? sg : NULL,
+ data->slen, data->dlen);
+
+ crypto_init_wait(&data->cwait);
+ akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &data->cwait);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_akcipher_sync_prep);
+
+int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err)
+{
+ err = crypto_wait_req(err, &data->cwait);
+ if (data->dst)
+ memcpy(data->dst, data->buf, data->dlen);
+ data->dlen = data->req->dst_len;
+ kfree_sensitive(data->req);
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_akcipher_sync_post);
+
+int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct crypto_akcipher_sync_data data = {
+ .tfm = tfm,
+ .src = src,
+ .dst = dst,
+ .slen = slen,
+ .dlen = dlen,
+ };
+
+ return crypto_akcipher_sync_prep(&data) ?:
+ crypto_akcipher_sync_post(&data,
+ crypto_akcipher_encrypt(data.req));
+}
+EXPORT_SYMBOL_GPL(crypto_akcipher_sync_encrypt);
+
+int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct crypto_akcipher_sync_data data = {
+ .tfm = tfm,
+ .src = src,
+ .dst = dst,
+ .slen = slen,
+ .dlen = dlen,
+ };
+
+ return crypto_akcipher_sync_prep(&data) ?:
+ crypto_akcipher_sync_post(&data,
+ crypto_akcipher_decrypt(data.req)) ?:
+ data.dlen;
+}
+EXPORT_SYMBOL_GPL(crypto_akcipher_sync_decrypt);
+
+static void crypto_exit_akcipher_ops_sig(struct crypto_tfm *tfm)
+{
+ struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_akcipher(*ctx);
+}
+
+int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm)
+{
+ struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *calg = tfm->__crt_alg;
+ struct crypto_akcipher *akcipher;
+
+ if (!crypto_mod_get(calg))
+ return -EAGAIN;
+
+ akcipher = crypto_create_tfm(calg, &crypto_akcipher_type);
+ if (IS_ERR(akcipher)) {
+ crypto_mod_put(calg);
+ return PTR_ERR(akcipher);
+ }
+
+ *ctx = akcipher;
+ tfm->exit = crypto_exit_akcipher_ops_sig;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_init_akcipher_ops_sig);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic public key cipher type");
diff --git a/crypto/api.c b/crypto/api.c
index d375e8cd770d..b9cc0c906efe 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -345,15 +345,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
-static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- const struct crypto_type *type_obj = tfm->__crt_alg->cra_type;
-
- if (type_obj)
- return type_obj->init(tfm, type, mask);
- return 0;
-}
-
static void crypto_exit_ops(struct crypto_tfm *tfm)
{
const struct crypto_type *type = tfm->__crt_alg->cra_type;
@@ -395,25 +386,21 @@ void crypto_shoot_alg(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
-struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
- u32 mask)
+struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
+ u32 mask, gfp_t gfp)
{
struct crypto_tfm *tfm = NULL;
unsigned int tfm_size;
int err = -ENOMEM;
tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
- tfm = kzalloc(tfm_size, GFP_KERNEL);
+ tfm = kzalloc(tfm_size, gfp);
if (tfm == NULL)
goto out_err;
tfm->__crt_alg = alg;
refcount_set(&tfm->refcnt, 1);
- err = crypto_init_ops(tfm, type, mask);
- if (err)
- goto out_free_tfm;
-
if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
goto cra_init_failed;
@@ -421,7 +408,6 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
cra_init_failed:
crypto_exit_ops(tfm);
-out_free_tfm:
if (err == -EAGAIN)
crypto_shoot_alg(alg);
kfree(tfm);
@@ -430,6 +416,13 @@ out_err:
out:
return tfm;
}
+EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp);
+
+struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
+ u32 mask)
+{
+ return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
/*
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 50c933f86b21..e787598cb3f7 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -8,18 +8,17 @@
*/
#define pr_fmt(fmt) "PKEY: "fmt
-#include <linux/module.h>
-#include <linux/export.h>
+#include <crypto/akcipher.h>
+#include <crypto/public_key.h>
+#include <crypto/sig.h>
+#include <keys/asymmetric-subtype.h>
+#include <linux/asn1.h>
+#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/seq_file.h>
-#include <linux/scatterlist.h>
-#include <linux/asn1.h>
-#include <keys/asymmetric-subtype.h>
-#include <crypto/public_key.h>
-#include <crypto/akcipher.h>
-#include <crypto/sm2.h>
-#include <crypto/sm3_base.h>
+#include <linux/slab.h>
+#include <linux/string.h>
MODULE_DESCRIPTION("In-software asymmetric public-key subtype");
MODULE_AUTHOR("Red Hat, Inc.");
@@ -67,10 +66,13 @@ static void public_key_destroy(void *payload0, void *payload3)
static int
software_key_determine_akcipher(const struct public_key *pkey,
const char *encoding, const char *hash_algo,
- char alg_name[CRYPTO_MAX_ALG_NAME])
+ char alg_name[CRYPTO_MAX_ALG_NAME], bool *sig,
+ enum kernel_pkey_operation op)
{
int n;
+ *sig = true;
+
if (!encoding)
return -EINVAL;
@@ -79,14 +81,18 @@ software_key_determine_akcipher(const struct public_key *pkey,
* RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2].
*/
if (strcmp(encoding, "pkcs1") == 0) {
- if (!hash_algo)
+ if (!hash_algo) {
+ *sig = false;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s)",
pkey->pkey_algo);
- else
+ } else {
+ *sig = op == kernel_pkey_sign ||
+ op == kernel_pkey_verify;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s,%s)",
pkey->pkey_algo, hash_algo);
+ }
return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
}
if (strcmp(encoding, "raw") != 0)
@@ -97,6 +103,7 @@ software_key_determine_akcipher(const struct public_key *pkey,
*/
if (hash_algo)
return -EINVAL;
+ *sig = false;
} else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
if (strcmp(encoding, "x962") != 0)
return -EINVAL;
@@ -154,37 +161,70 @@ static int software_key_query(const struct kernel_pkey_params *params,
struct crypto_akcipher *tfm;
struct public_key *pkey = params->key->payload.data[asym_crypto];
char alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_sig *sig;
u8 *key, *ptr;
int ret, len;
+ bool issig;
ret = software_key_determine_akcipher(pkey, params->encoding,
- params->hash_algo, alg_name);
+ params->hash_algo, alg_name,
+ &issig, kernel_pkey_sign);
if (ret < 0)
return ret;
- tfm = crypto_alloc_akcipher(alg_name, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- ret = -ENOMEM;
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
if (!key)
- goto error_free_tfm;
+ return -ENOMEM;
+
memcpy(key, pkey->key, pkey->keylen);
ptr = key + pkey->keylen;
ptr = pkey_pack_u32(ptr, pkey->algo);
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
- if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
- else
- ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
- if (ret < 0)
- goto error_free_key;
+ if (issig) {
+ sig = crypto_alloc_sig(alg_name, 0, 0);
+ if (IS_ERR(sig))
+ goto error_free_key;
+
+ if (pkey->key_is_private)
+ ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
+ else
+ ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
+ if (ret < 0)
+ goto error_free_tfm;
+
+ len = crypto_sig_maxsize(sig);
+
+ info->supported_ops = KEYCTL_SUPPORTS_VERIFY;
+ if (pkey->key_is_private)
+ info->supported_ops |= KEYCTL_SUPPORTS_SIGN;
+
+ if (strcmp(params->encoding, "pkcs1") == 0) {
+ info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT;
+ if (pkey->key_is_private)
+ info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
+ }
+ } else {
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm))
+ goto error_free_key;
+
+ if (pkey->key_is_private)
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
+ else
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
+ if (ret < 0)
+ goto error_free_tfm;
+
+ len = crypto_akcipher_maxsize(tfm);
+
+ info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT;
+ if (pkey->key_is_private)
+ info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
+ }
- len = crypto_akcipher_maxsize(tfm);
info->key_size = len * 8;
if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
@@ -210,17 +250,16 @@ static int software_key_query(const struct kernel_pkey_params *params,
info->max_enc_size = len;
info->max_dec_size = len;
- info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT |
- KEYCTL_SUPPORTS_VERIFY);
- if (pkey->key_is_private)
- info->supported_ops |= (KEYCTL_SUPPORTS_DECRYPT |
- KEYCTL_SUPPORTS_SIGN);
+
ret = 0;
+error_free_tfm:
+ if (issig)
+ crypto_free_sig(sig);
+ else
+ crypto_free_akcipher(tfm);
error_free_key:
kfree(key);
-error_free_tfm:
- crypto_free_akcipher(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
@@ -232,34 +271,26 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
const void *in, void *out)
{
const struct public_key *pkey = params->key->payload.data[asym_crypto];
- struct akcipher_request *req;
- struct crypto_akcipher *tfm;
- struct crypto_wait cwait;
- struct scatterlist in_sg, out_sg;
char alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_akcipher *tfm;
+ struct crypto_sig *sig;
char *key, *ptr;
+ bool issig;
+ int ksz;
int ret;
pr_devel("==>%s()\n", __func__);
ret = software_key_determine_akcipher(pkey, params->encoding,
- params->hash_algo, alg_name);
+ params->hash_algo, alg_name,
+ &issig, params->op);
if (ret < 0)
return ret;
- tfm = crypto_alloc_akcipher(alg_name, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- ret = -ENOMEM;
- req = akcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req)
- goto error_free_tfm;
-
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
if (!key)
- goto error_free_req;
+ return -ENOMEM;
memcpy(key, pkey->key, pkey->keylen);
ptr = key + pkey->keylen;
@@ -267,123 +298,84 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
- if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
- else
- ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
- if (ret)
- goto error_free_key;
+ if (issig) {
+ sig = crypto_alloc_sig(alg_name, 0, 0);
+ if (IS_ERR(sig))
+ goto error_free_key;
+
+ if (pkey->key_is_private)
+ ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
+ else
+ ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
+ if (ret)
+ goto error_free_tfm;
+
+ ksz = crypto_sig_maxsize(sig);
+ } else {
+ tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ if (IS_ERR(tfm))
+ goto error_free_key;
+
+ if (pkey->key_is_private)
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
+ else
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
+ if (ret)
+ goto error_free_tfm;
+
+ ksz = crypto_akcipher_maxsize(tfm);
+ }
- sg_init_one(&in_sg, in, params->in_len);
- sg_init_one(&out_sg, out, params->out_len);
- akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
- params->out_len);
- crypto_init_wait(&cwait);
- akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &cwait);
+ ret = -EINVAL;
/* Perform the encryption calculation. */
switch (params->op) {
case kernel_pkey_encrypt:
- ret = crypto_akcipher_encrypt(req);
+ if (issig)
+ break;
+ ret = crypto_akcipher_sync_encrypt(tfm, in, params->in_len,
+ out, params->out_len);
break;
case kernel_pkey_decrypt:
- ret = crypto_akcipher_decrypt(req);
+ if (issig)
+ break;
+ ret = crypto_akcipher_sync_decrypt(tfm, in, params->in_len,
+ out, params->out_len);
break;
case kernel_pkey_sign:
- ret = crypto_akcipher_sign(req);
+ if (!issig)
+ break;
+ ret = crypto_sig_sign(sig, in, params->in_len,
+ out, params->out_len);
break;
default:
BUG();
}
- ret = crypto_wait_req(ret, &cwait);
if (ret == 0)
- ret = req->dst_len;
+ ret = ksz;
+error_free_tfm:
+ if (issig)
+ crypto_free_sig(sig);
+ else
+ crypto_free_akcipher(tfm);
error_free_key:
kfree(key);
-error_free_req:
- akcipher_request_free(req);
-error_free_tfm:
- crypto_free_akcipher(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
-#if IS_REACHABLE(CONFIG_CRYPTO_SM2)
-static int cert_sig_digest_update(const struct public_key_signature *sig,
- struct crypto_akcipher *tfm_pkey)
-{
- struct crypto_shash *tfm;
- struct shash_desc *desc;
- size_t desc_size;
- unsigned char dgst[SM3_DIGEST_SIZE];
- int ret;
-
- BUG_ON(!sig->data);
-
- /* SM2 signatures always use the SM3 hash algorithm */
- if (!sig->hash_algo || strcmp(sig->hash_algo, "sm3") != 0)
- return -EINVAL;
-
- ret = sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID,
- SM2_DEFAULT_USERID_LEN, dgst);
- if (ret)
- return ret;
-
- tfm = crypto_alloc_shash(sig->hash_algo, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
- desc = kzalloc(desc_size, GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto error_free_tfm;
- }
-
- desc->tfm = tfm;
-
- ret = crypto_shash_init(desc);
- if (ret < 0)
- goto error_free_desc;
-
- ret = crypto_shash_update(desc, dgst, SM3_DIGEST_SIZE);
- if (ret < 0)
- goto error_free_desc;
-
- ret = crypto_shash_finup(desc, sig->data, sig->data_size, sig->digest);
-
-error_free_desc:
- kfree(desc);
-error_free_tfm:
- crypto_free_shash(tfm);
- return ret;
-}
-#else
-static inline int cert_sig_digest_update(
- const struct public_key_signature *sig,
- struct crypto_akcipher *tfm_pkey)
-{
- return -ENOTSUPP;
-}
-#endif /* ! IS_REACHABLE(CONFIG_CRYPTO_SM2) */
-
/*
* Verify a signature using a public key.
*/
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig)
{
- struct crypto_wait cwait;
- struct crypto_akcipher *tfm;
- struct akcipher_request *req;
- struct scatterlist src_sg;
char alg_name[CRYPTO_MAX_ALG_NAME];
- char *buf, *ptr;
- size_t buf_len;
+ struct crypto_sig *tfm;
+ char *key, *ptr;
+ bool issig;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -408,63 +400,40 @@ int public_key_verify_signature(const struct public_key *pkey,
}
ret = software_key_determine_akcipher(pkey, sig->encoding,
- sig->hash_algo, alg_name);
+ sig->hash_algo, alg_name,
+ &issig, kernel_pkey_verify);
if (ret < 0)
return ret;
- tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+ tfm = crypto_alloc_sig(alg_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
- ret = -ENOMEM;
- req = akcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req)
+ key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+ GFP_KERNEL);
+ if (!key)
goto error_free_tfm;
- buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
- sig->s_size + sig->digest_size);
-
- buf = kmalloc(buf_len, GFP_KERNEL);
- if (!buf)
- goto error_free_req;
-
- memcpy(buf, pkey->key, pkey->keylen);
- ptr = buf + pkey->keylen;
+ memcpy(key, pkey->key, pkey->keylen);
+ ptr = key + pkey->keylen;
ptr = pkey_pack_u32(ptr, pkey->algo);
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen);
+ ret = crypto_sig_set_privkey(tfm, key, pkey->keylen);
else
- ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen);
+ ret = crypto_sig_set_pubkey(tfm, key, pkey->keylen);
if (ret)
- goto error_free_buf;
+ goto error_free_key;
- if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) {
- ret = cert_sig_digest_update(sig, tfm);
- if (ret)
- goto error_free_buf;
- }
+ ret = crypto_sig_verify(tfm, sig->s, sig->s_size,
+ sig->digest, sig->digest_size);
- memcpy(buf, sig->s, sig->s_size);
- memcpy(buf + sig->s_size, sig->digest, sig->digest_size);
-
- sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size);
- akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size,
- sig->digest_size);
- crypto_init_wait(&cwait);
- akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP,
- crypto_req_done, &cwait);
- ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
-
-error_free_buf:
- kfree(buf);
-error_free_req:
- akcipher_request_free(req);
+error_free_key:
+ kfree(key);
error_free_tfm:
- crypto_free_akcipher(tfm);
+ crypto_free_sig(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
if (WARN_ON_ONCE(ret > 0))
ret = -EINVAL;
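
After this rework, signature verification goes through the new sig API rather
than a hand-built akcipher request. Stripped to its essentials, the calling
pattern looks roughly like this; the algorithm string and buffers are
illustrative:

    #include <crypto/sig.h>
    #include <linux/err.h>

    static int verify_digest(const void *key, unsigned int keylen,
                             const void *s, unsigned int s_size,
                             const void *digest, unsigned int digest_size)
    {
        struct crypto_sig *tfm;
        int ret;

        tfm = crypto_alloc_sig("pkcs1pad(rsa,sha256)", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        ret = crypto_sig_set_pubkey(tfm, key, keylen);
        if (!ret)
            ret = crypto_sig_verify(tfm, s, s_size,
                                    digest, digest_size);

        crypto_free_sig(tfm);
        return ret;
    }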
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 0b4943a4592b..6fdfc82e23a8 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -6,13 +6,15 @@
*/
#define pr_fmt(fmt) "X.509: "fmt
+#include <crypto/hash.h>
+#include <crypto/sm2.h>
+#include <keys/asymmetric-parser.h>
+#include <keys/asymmetric-subtype.h>
+#include <keys/system_keyring.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <keys/asymmetric-subtype.h>
-#include <keys/asymmetric-parser.h>
-#include <keys/system_keyring.h>
-#include <crypto/hash.h>
+#include <linux/string.h>
#include "asymmetric_keys.h"
#include "x509_parser.h"
@@ -30,9 +32,6 @@ int x509_get_sig_params(struct x509_certificate *cert)
pr_devel("==>%s()\n", __func__);
- sig->data = cert->tbs;
- sig->data_size = cert->tbs_size;
-
sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL);
if (!sig->s)
return -ENOMEM;
@@ -65,7 +64,21 @@ int x509_get_sig_params(struct x509_certificate *cert)
desc->tfm = tfm;
- ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
+ if (strcmp(cert->pub->pkey_algo, "sm2") == 0) {
+ ret = strcmp(sig->hash_algo, "sm3") != 0 ? -EINVAL :
+ crypto_shash_init(desc) ?:
+ sm2_compute_z_digest(desc, cert->pub->key,
+ cert->pub->keylen, sig->digest) ?:
+ crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, sig->digest,
+ sig->digest_size) ?:
+ crypto_shash_finup(desc, cert->tbs, cert->tbs_size,
+ sig->digest);
+ } else {
+ ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size,
+ sig->digest);
+ }
+
if (ret < 0)
goto error_2;
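
The chained "?:" operators above use the GNU C conditional with the middle
operand omitted: "a ?: b" yields a unless a is zero, so each 0-on-success
crypto call runs only while every earlier one succeeded, and the first error
is returned as-is. A toy illustration:

    #include <linux/errno.h>

    static int step(int err)
    {
        return err;
    }

    static int chain_example(void)
    {
        return step(0) ?:       /* runs, returns 0: keep going  */
               step(0) ?:       /* runs, returns 0: keep going  */
               step(-EINVAL) ?: /* runs, nonzero: short-circuit */
               step(0);         /* never evaluated              */
    }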
diff --git a/crypto/cipher.c b/crypto/cipher.c
index b47141ed4a9f..47c77a3e5978 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -90,3 +90,31 @@ void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
cipher_crypt_one(tfm, dst, src, false);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL);
+
+struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher)
+{
+ struct crypto_tfm *tfm = crypto_cipher_tfm(cipher);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto_cipher *ncipher;
+ struct crypto_tfm *ntfm;
+
+ if (alg->cra_init)
+ return ERR_PTR(-ENOSYS);
+
+ if (unlikely(!crypto_mod_get(alg)))
+ return ERR_PTR(-ESTALE);
+
+ ntfm = __crypto_alloc_tfmgfp(alg, CRYPTO_ALG_TYPE_CIPHER,
+ CRYPTO_ALG_TYPE_MASK, GFP_ATOMIC);
+ if (IS_ERR(ntfm)) {
+ crypto_mod_put(alg);
+ return ERR_CAST(ntfm);
+ }
+
+ ntfm->crt_flags = tfm->crt_flags;
+
+ ncipher = __crypto_cipher_cast(ntfm);
+
+ return ncipher;
+}
+EXPORT_SYMBOL_GPL(crypto_clone_cipher);
diff --git a/crypto/cmac.c b/crypto/cmac.c
index f4a5d3bfb376..fce6b0f58e88 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -198,13 +198,14 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
return 0;
}
-static int cmac_init_tfm(struct crypto_tfm *tfm)
+static int cmac_init_tfm(struct crypto_shash *tfm)
{
+ struct shash_instance *inst = shash_alg_instance(tfm);
+ struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
+ struct crypto_cipher_spawn *spawn;
struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
- struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ spawn = shash_instance_ctx(inst);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
@@ -212,11 +213,26 @@ static int cmac_init_tfm(struct crypto_tfm *tfm)
ctx->child = cipher;
return 0;
-};
+}
+
+static int cmac_clone_tfm(struct crypto_shash *tfm, struct crypto_shash *otfm)
+{
+ struct cmac_tfm_ctx *octx = crypto_shash_ctx(otfm);
+ struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
+ struct crypto_cipher *cipher;
+
+ cipher = crypto_clone_cipher(octx->child);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
-static void cmac_exit_tfm(struct crypto_tfm *tfm)
+ return 0;
+}
+
+static void cmac_exit_tfm(struct crypto_shash *tfm)
{
- struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
crypto_free_cipher(ctx->child);
}
@@ -274,13 +290,13 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
~(crypto_tfm_ctx_alignment() - 1))
+ alg->cra_blocksize * 2;
- inst->alg.base.cra_init = cmac_init_tfm;
- inst->alg.base.cra_exit = cmac_exit_tfm;
-
inst->alg.init = crypto_cmac_digest_init;
inst->alg.update = crypto_cmac_digest_update;
inst->alg.final = crypto_cmac_digest_final;
inst->alg.setkey = crypto_cmac_digest_setkey;
+ inst->alg.init_tfm = cmac_init_tfm;
+ inst->alg.clone_tfm = cmac_clone_tfm;
+ inst->alg.exit_tfm = cmac_exit_tfm;
inst->free = shash_free_singlespawn_instance;
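
With init_tfm/clone_tfm/exit_tfm moved into the shash_alg, a keyed cmac
transform can be duplicated without repeating setkey, and crypto_clone_cipher()
above allocates with GFP_ATOMIC so the clone also works from non-sleeping
contexts. A hedged sketch of a caller, assuming the parent is already keyed:

    #include <crypto/hash.h>
    #include <linux/err.h>

    static int mac_with_clone(struct crypto_shash *parent, const u8 *data,
                              unsigned int len, u8 *out)
    {
        struct crypto_shash *tfm = crypto_clone_shash(parent);
        int ret;

        if (IS_ERR(tfm))
            return PTR_ERR(tfm);  /* e.g. -ENOSYS if not cloneable */

        {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            ret = crypto_shash_digest(desc, data, len, out);
            shash_desc_zero(desc);
        }

        crypto_free_shash(tfm);
        return ret;
    }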
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 09a7872b4060..ea93f4c55f25 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -177,6 +177,7 @@ static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src)
static void hmac_exit_tfm(struct crypto_shash *parent)
{
struct hmac_ctx *ctx = hmac_ctx(parent);
+
crypto_free_shash(ctx->hash);
}
diff --git a/crypto/internal.h b/crypto/internal.h
index 8dd746b1130b..63e59240d5fb 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -18,9 +18,12 @@
#include <linux/numa.h>
#include <linux/refcount.h>
#include <linux/rwsem.h>
+#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/types.h>
+struct akcipher_request;
+struct crypto_akcipher;
struct crypto_instance;
struct crypto_template;
@@ -32,6 +35,19 @@ struct crypto_larval {
bool test_started;
};
+struct crypto_akcipher_sync_data {
+ struct crypto_akcipher *tfm;
+ const void *src;
+ void *dst;
+ unsigned int slen;
+ unsigned int dlen;
+
+ struct akcipher_request *req;
+ struct crypto_wait cwait;
+ struct scatterlist sg;
+ u8 *buf;
+};
+
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -102,6 +118,8 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg);
+struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
+ u32 mask, gfp_t gfp);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm_node(struct crypto_alg *alg,
@@ -109,6 +127,10 @@ void *crypto_create_tfm_node(struct crypto_alg *alg,
void *crypto_clone_tfm(const struct crypto_type *frontend,
struct crypto_tfm *otfm);
+int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data);
+int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err);
+int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm);
+
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
{
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index b9edfaa51b27..7d1463a1562a 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -2,7 +2,7 @@
* Non-physical true random number generator based on timing jitter --
* Linux Kernel Crypto API specific code
*
- * Copyright Stephan Mueller <[email protected]>, 2015
+ * Copyright Stephan Mueller <[email protected]>, 2015 - 2023
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,6 +37,8 @@
* DAMAGE.
*/
+#include <crypto/hash.h>
+#include <crypto/sha3.h>
#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -46,6 +48,8 @@
#include "jitterentropy.h"
+#define JENT_CONDITIONING_HASH "sha3-256-generic"
+
/***************************************************************************
* Helper function
***************************************************************************/
@@ -60,11 +64,6 @@ void jent_zfree(void *ptr)
kfree_sensitive(ptr);
}
-void jent_memcpy(void *dest, const void *src, unsigned int n)
-{
- memcpy(dest, src, n);
-}
-
/*
* Obtain a high-resolution time stamp value. The time stamp is used to measure
* the execution time of a given code path and its variations. Hence, the time
@@ -89,6 +88,92 @@ void jent_get_nstime(__u64 *out)
tmp = ktime_get_ns();
*out = tmp;
+ jent_raw_hires_entropy_store(tmp);
+}
+
+int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
+ unsigned int addtl_len, __u64 hash_loop_cnt,
+ unsigned int stuck)
+{
+ struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
+ SHASH_DESC_ON_STACK(desc, hash_state_desc->tfm);
+ u8 intermediary[SHA3_256_DIGEST_SIZE];
+ __u64 j = 0;
+ int ret;
+
+ desc->tfm = hash_state_desc->tfm;
+
+ if (sizeof(intermediary) != crypto_shash_digestsize(desc->tfm)) {
+ pr_warn_ratelimited("Unexpected digest size\n");
+ return -EINVAL;
+ }
+
+ /*
+ * This loop fills a buffer which is injected into the entropy pool.
+ * The main reason for this loop is to execute something over which we
+ * can perform a timing measurement. The injection of the resulting
+ * data into the pool is performed to ensure the result is used and
+ * the compiler cannot optimize the loop away in case the result is not
+	 * used at all. Yet that data is considered "additional input" in
+	 * the terminology of SP800-90A and is credited with no entropy.
+	 *
+	 * Note, it does not matter which or how much data we inject; we are
+	 * interested in one Keccak1600 compression operation performed with
+	 * the crypto_shash_final.
+ */
+ for (j = 0; j < hash_loop_cnt; j++) {
+ ret = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, intermediary,
+ sizeof(intermediary)) ?:
+ crypto_shash_finup(desc, addtl, addtl_len, intermediary);
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * Inject the data from the previous loop into the pool. This data is
+ * not considered to contain any entropy, but it stirs the pool a bit.
+ */
+ ret = crypto_shash_update(desc, intermediary, sizeof(intermediary));
+ if (ret)
+ goto err;
+
+ /*
+ * Insert the time stamp into the hash context representing the pool.
+ *
+ * If the time stamp is stuck, do not finally insert the value into the
+ * entropy pool. Although this operation should not do any harm even
+ * when the time stamp has no entropy, SP800-90B requires that any
+ * conditioning operation to have an identical amount of input data
+ * according to section 3.1.5.
+ */
+ if (!stuck) {
+ ret = crypto_shash_update(hash_state_desc, (u8 *)&time,
+ sizeof(__u64));
+ }
+
+err:
+ shash_desc_zero(desc);
+ memzero_explicit(intermediary, sizeof(intermediary));
+
+ return ret;
+}
+
+int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len)
+{
+ struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
+ u8 jent_block[SHA3_256_DIGEST_SIZE];
+ /* Obtain data from entropy pool and re-initialize it */
+ int ret = crypto_shash_final(hash_state_desc, jent_block) ?:
+ crypto_shash_init(hash_state_desc) ?:
+ crypto_shash_update(hash_state_desc, jent_block,
+ sizeof(jent_block));
+
+ if (!ret && dst_len)
+ memcpy(dst, jent_block, dst_len);
+
+ memzero_explicit(jent_block, sizeof(jent_block));
+ return ret;
}
/***************************************************************************
@@ -98,32 +183,82 @@ void jent_get_nstime(__u64 *out)
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
+ struct crypto_shash *tfm;
+ struct shash_desc *sdesc;
};
-static int jent_kcapi_init(struct crypto_tfm *tfm)
+static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
{
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
- int ret = 0;
- rng->entropy_collector = jent_entropy_collector_alloc(1, 0);
- if (!rng->entropy_collector)
- ret = -ENOMEM;
+ spin_lock(&rng->jent_lock);
- spin_lock_init(&rng->jent_lock);
- return ret;
-}
+ if (rng->sdesc) {
+ shash_desc_zero(rng->sdesc);
+ kfree(rng->sdesc);
+ }
+ rng->sdesc = NULL;
-static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
-{
- struct jitterentropy *rng = crypto_tfm_ctx(tfm);
+ if (rng->tfm)
+ crypto_free_shash(rng->tfm);
+ rng->tfm = NULL;
- spin_lock(&rng->jent_lock);
if (rng->entropy_collector)
jent_entropy_collector_free(rng->entropy_collector);
rng->entropy_collector = NULL;
spin_unlock(&rng->jent_lock);
}
+static int jent_kcapi_init(struct crypto_tfm *tfm)
+{
+ struct jitterentropy *rng = crypto_tfm_ctx(tfm);
+ struct crypto_shash *hash;
+ struct shash_desc *sdesc;
+ int size, ret = 0;
+
+ spin_lock_init(&rng->jent_lock);
+
+ /*
+ * Use SHA3-256 as conditioner. We allocate only the generic
+	 * implementation as we are not interested in high performance. The
+ * execution time of the SHA3 operation is measured and adds to the
+ * Jitter RNG's unpredictable behavior. If we have a slower hash
+ * implementation, the execution timing variations are larger. When
+ * using a fast implementation, we would need to call it more often
+ * as its variations are lower.
+ */
+ hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
+ if (IS_ERR(hash)) {
+ pr_err("Cannot allocate conditioning digest\n");
+ return PTR_ERR(hash);
+ }
+ rng->tfm = hash;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sdesc->tfm = hash;
+ crypto_shash_init(sdesc);
+ rng->sdesc = sdesc;
+
+ rng->entropy_collector = jent_entropy_collector_alloc(1, 0, sdesc);
+ if (!rng->entropy_collector) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ jent_kcapi_cleanup(tfm);
+ return ret;
+}
+
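The descriptor sizing above follows the common kernel pattern for heap-allocated shash descriptors: crypto_shash_descsize() bytes of algorithm-private state live directly behind the struct shash_desc header, so both are allocated in one chunk. A hedged sketch of that pattern; the helper name is illustrative, not part of this patch:

#include <crypto/hash.h>
#include <linux/slab.h>

/* Illustrative helper: allocate header and per-request state together
 * and bind the descriptor to its transform. */
static struct shash_desc *alloc_sdesc(struct crypto_shash *tfm, gfp_t gfp)
{
	struct shash_desc *desc;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), gfp);
	if (!desc)
		return NULL;

	desc->tfm = tfm;
	return desc;
}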
static int jent_kcapi_random(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *rdata, unsigned int dlen)
@@ -180,20 +315,34 @@ static struct rng_alg jent_alg = {
.cra_module = THIS_MODULE,
.cra_init = jent_kcapi_init,
.cra_exit = jent_kcapi_cleanup,
-
}
};
static int __init jent_mod_init(void)
{
+ SHASH_DESC_ON_STACK(desc, tfm);
+ struct crypto_shash *tfm;
int ret = 0;
- ret = jent_entropy_init();
+ jent_testing_init();
+
+ tfm = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
+ if (IS_ERR(tfm)) {
+ jent_testing_exit();
+ return PTR_ERR(tfm);
+ }
+
+ desc->tfm = tfm;
+ crypto_shash_init(desc);
+ ret = jent_entropy_init(desc);
+ shash_desc_zero(desc);
+ crypto_free_shash(tfm);
if (ret) {
/* Handle permanent health test error */
if (fips_enabled)
panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
+ jent_testing_exit();
pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
return -EFAULT;
}
@@ -202,6 +351,7 @@ static int __init jent_mod_init(void)
static void __exit jent_mod_exit(void)
{
+ jent_testing_exit();
crypto_unregister_rng(&jent_alg);
}
diff --git a/crypto/jitterentropy-testing.c b/crypto/jitterentropy-testing.c
new file mode 100644
index 000000000000..5cb6a77b8e3b
--- /dev/null
+++ b/crypto/jitterentropy-testing.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Test interface for Jitter RNG.
+ *
+ * Copyright (C) 2023, Stephan Mueller <[email protected]>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "jitterentropy.h"
+
+#define JENT_TEST_RINGBUFFER_SIZE (1<<10)
+#define JENT_TEST_RINGBUFFER_MASK (JENT_TEST_RINGBUFFER_SIZE - 1)
+
+struct jent_testing {
+ u32 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE];
+ u32 rb_reader;
+ atomic_t rb_writer;
+ atomic_t jent_testing_enabled;
+ spinlock_t lock;
+ wait_queue_head_t read_wait;
+};
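
The ring buffer relies on its size being a power of two: the writer is a free-running counter and a slot index is obtained by masking, so wrap-around needs no modulo or branch. A self-contained sketch of the index arithmetic:

/* Power-of-two ring indexing: only the masked counter value addresses
 * the buffer; equal masked reader and writer positions mean empty. */
#define RB_SIZE	(1 << 10)
#define RB_MASK	(RB_SIZE - 1)

static unsigned int rb_slot(unsigned int counter)
{
	return counter & RB_MASK;	/* same as counter % RB_SIZE */
}

static int rb_empty(unsigned int reader, unsigned int writer)
{
	return rb_slot(reader) == rb_slot(writer);
}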
+
+static struct dentry *jent_raw_debugfs_root;
+
+/*************************** Generic Data Handling ****************************/
+
+/*
+ * boot variable:
+ * 0 ==> No boot test, gathering of runtime data allowed
+ * 1 ==> Boot test enabled and ready for collecting data, gathering runtime
+ * data is disabled
+ * 2 ==> Boot test completed and disabled, gathering of runtime data is
+ * disabled
+ */
+
+static void jent_testing_reset(struct jent_testing *data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ data->rb_reader = 0;
+ atomic_set(&data->rb_writer, 0);
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void jent_testing_data_init(struct jent_testing *data, u32 boot)
+{
+ /*
+ * The boot time testing implies we have a running test. If the
+	 * caller wants to clear it, the boot_test flag has to be unset
+	 * at runtime via sysfs to enable regular runtime testing.
+ */
+ if (boot)
+ return;
+
+ jent_testing_reset(data);
+ atomic_set(&data->jent_testing_enabled, 1);
+ pr_warn("Enabling data collection\n");
+}
+
+static void jent_testing_fini(struct jent_testing *data, u32 boot)
+{
+ /* If we have boot data, we do not reset yet to allow data to be read */
+ if (boot)
+ return;
+
+ atomic_set(&data->jent_testing_enabled, 0);
+ jent_testing_reset(data);
+ pr_warn("Disabling data collection\n");
+}
+
+static bool jent_testing_store(struct jent_testing *data, u32 value,
+ u32 *boot)
+{
+ unsigned long flags;
+
+ if (!atomic_read(&data->jent_testing_enabled) && (*boot != 1))
+ return false;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /*
+ * Disable entropy testing for boot time testing after ring buffer
+ * is filled.
+ */
+ if (*boot) {
+ if (((u32)atomic_read(&data->rb_writer)) >
+ JENT_TEST_RINGBUFFER_SIZE) {
+ *boot = 2;
+ pr_warn_once("One time data collection test disabled\n");
+ spin_unlock_irqrestore(&data->lock, flags);
+ return false;
+ }
+
+ if (atomic_read(&data->rb_writer) == 1)
+ pr_warn("One time data collection test enabled\n");
+ }
+
+ data->jent_testing_rb[((u32)atomic_read(&data->rb_writer)) &
+ JENT_TEST_RINGBUFFER_MASK] = value;
+ atomic_inc(&data->rb_writer);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (wq_has_sleeper(&data->read_wait))
+ wake_up_interruptible(&data->read_wait);
+
+ return true;
+}
+
+static bool jent_testing_have_data(struct jent_testing *data)
+{
+ return ((((u32)atomic_read(&data->rb_writer)) &
+ JENT_TEST_RINGBUFFER_MASK) !=
+ (data->rb_reader & JENT_TEST_RINGBUFFER_MASK));
+}
+
+static int jent_testing_reader(struct jent_testing *data, u32 *boot,
+ u8 *outbuf, u32 outbuflen)
+{
+ unsigned long flags;
+ int collected_data = 0;
+
+ jent_testing_data_init(data, *boot);
+
+ while (outbuflen) {
+ u32 writer = (u32)atomic_read(&data->rb_writer);
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* We have no data or reached the writer. */
+ if (!writer || (writer == data->rb_reader)) {
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /*
+ * Now we gathered all boot data, enable regular data
+ * collection.
+ */
+ if (*boot) {
+ *boot = 0;
+ goto out;
+ }
+
+ wait_event_interruptible(data->read_wait,
+ jent_testing_have_data(data));
+ if (signal_pending(current)) {
+ collected_data = -ERESTARTSYS;
+ goto out;
+ }
+
+ continue;
+ }
+
+ /* We copy out word-wise */
+ if (outbuflen < sizeof(u32)) {
+ spin_unlock_irqrestore(&data->lock, flags);
+ goto out;
+ }
+
+ memcpy(outbuf, &data->jent_testing_rb[data->rb_reader],
+ sizeof(u32));
+ data->rb_reader++;
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ outbuf += sizeof(u32);
+ outbuflen -= sizeof(u32);
+ collected_data += sizeof(u32);
+ }
+
+out:
+ jent_testing_fini(data, *boot);
+ return collected_data;
+}
+
+static int jent_testing_extract_user(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos,
+ int (*reader)(u8 *outbuf, u32 outbuflen))
+{
+ u8 *tmp, *tmp_aligned;
+ int ret = 0, large_request = (nbytes > 256);
+
+ if (!nbytes)
+ return 0;
+
+ /*
+ * The intention of this interface is for collecting at least
+ * 1000 samples due to the SP800-90B requirements. So, we make no
+	 * effort to avoid allocating more memory than is actually needed
+ * by the user. Hence, we allocate sufficient memory to always hold
+ * that amount of data.
+ */
+ tmp = kmalloc(JENT_TEST_RINGBUFFER_SIZE + sizeof(u32), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp_aligned = PTR_ALIGN(tmp, sizeof(u32));
+
+ while (nbytes) {
+ int i;
+
+ if (large_request && need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ }
+
+ i = min_t(int, nbytes, JENT_TEST_RINGBUFFER_SIZE);
+ i = reader(tmp_aligned, i);
+ if (i <= 0) {
+ if (i < 0)
+ ret = i;
+ break;
+ }
+ if (copy_to_user(buf, tmp_aligned, i)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+
+ kfree_sensitive(tmp);
+
+ if (ret > 0)
+ *ppos += ret;
+
+ return ret;
+}
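
From user space the collected samples can be pulled straight from the debugfs file registered below. A minimal reader sketch; the mount point /sys/kernel/debug and the directory name derived from KBUILD_MODNAME are assumptions:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* path assumes debugfs at /sys/kernel/debug and a module
	 * directory named after KBUILD_MODNAME (assumption) */
	int fd = open("/sys/kernel/debug/jitterentropy_testing/jent_raw_hires",
		      O_RDONLY);
	uint32_t sample;

	if (fd < 0)
		return 1;
	/* samples are copied out word-wise, one u32 per timing event */
	while (read(fd, &sample, sizeof(sample)) == sizeof(sample))
		printf("%u\n", sample);
	close(fd);
	return 0;
}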
+
+/************** Raw High-Resolution Timer Entropy Data Handling **************/
+
+static u32 boot_raw_hires_test;
+module_param(boot_raw_hires_test, uint, 0644);
+MODULE_PARM_DESC(boot_raw_hires_test,
+ "Enable gathering boot time high resolution timer entropy of the first Jitter RNG entropy events");
+
+static struct jent_testing jent_raw_hires = {
+ .rb_reader = 0,
+ .rb_writer = ATOMIC_INIT(0),
+ .lock = __SPIN_LOCK_UNLOCKED(jent_raw_hires.lock),
+ .read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait)
+};
+
+int jent_raw_hires_entropy_store(__u32 value)
+{
+ return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test);
+}
+EXPORT_SYMBOL(jent_raw_hires_entropy_store);
+
+static int jent_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen)
+{
+ return jent_testing_reader(&jent_raw_hires, &boot_raw_hires_test,
+ outbuf, outbuflen);
+}
+
+static ssize_t jent_raw_hires_read(struct file *file, char __user *to,
+ size_t count, loff_t *ppos)
+{
+ return jent_testing_extract_user(file, to, count, ppos,
+ jent_raw_hires_entropy_reader);
+}
+
+static const struct file_operations jent_raw_hires_fops = {
+ .owner = THIS_MODULE,
+ .read = jent_raw_hires_read,
+};
+
+/******************************* Initialization *******************************/
+
+void jent_testing_init(void)
+{
+ jent_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ debugfs_create_file_unsafe("jent_raw_hires", 0400,
+ jent_raw_debugfs_root, NULL,
+ &jent_raw_hires_fops);
+}
+EXPORT_SYMBOL(jent_testing_init);
+
+void jent_testing_exit(void)
+{
+ debugfs_remove_recursive(jent_raw_debugfs_root);
+}
+EXPORT_SYMBOL(jent_testing_exit);
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 22f48bf4c6f5..c7d7f2caa779 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -2,7 +2,7 @@
* Non-physical true random number generator based on timing jitter --
* Jitter RNG standalone code.
*
- * Copyright Stephan Mueller <[email protected]>, 2015 - 2020
+ * Copyright Stephan Mueller <[email protected]>, 2015 - 2023
*
* Design
* ======
@@ -47,7 +47,7 @@
/*
* This Jitterentropy RNG is based on the jitterentropy library
- * version 2.2.0 provided at https://www.chronox.de/jent.html
+ * version 3.4.0 provided at https://www.chronox.de/jent.html
*/
#ifdef __OPTIMIZE__
@@ -57,21 +57,22 @@
typedef unsigned long long __u64;
typedef long long __s64;
typedef unsigned int __u32;
+typedef unsigned char u8;
#define NULL ((void *) 0)
/* The entropy pool */
struct rand_data {
+ /* SHA3-256 is used as conditioner */
+#define DATA_SIZE_BITS 256
/* all data values that are vital to maintain the security
* of the RNG are marked as SENSITIVE. A user must not
* access that information while the RNG executes its loops to
* calculate the next random value. */
- __u64 data; /* SENSITIVE Actual random number */
- __u64 old_data; /* SENSITIVE Previous random number */
- __u64 prev_time; /* SENSITIVE Previous time stamp */
-#define DATA_SIZE_BITS ((sizeof(__u64)) * 8)
- __u64 last_delta; /* SENSITIVE stuck test */
- __s64 last_delta2; /* SENSITIVE stuck test */
- unsigned int osr; /* Oversample rate */
+ void *hash_state; /* SENSITIVE hash state entropy pool */
+ __u64 prev_time; /* SENSITIVE Previous time stamp */
+ __u64 last_delta; /* SENSITIVE stuck test */
+ __s64 last_delta2; /* SENSITIVE stuck test */
+ unsigned int osr; /* Oversample rate */
#define JENT_MEMORY_BLOCKS 64
#define JENT_MEMORY_BLOCKSIZE 32
#define JENT_MEMORY_ACCESSLOOPS 128
@@ -117,7 +118,6 @@ struct rand_data {
* zero). */
#define JENT_ESTUCK 8 /* Too many stuck results during init. */
#define JENT_EHEALTH 9 /* Health test failed during initialization */
-#define JENT_ERCT 10 /* RCT failed during initialization */
/*
* The output n bits can receive more than n bits of min entropy, of course,
@@ -302,15 +302,13 @@ static int jent_permanent_health_failure(struct rand_data *ec)
* an entropy collection.
*
* Input:
- * @ec entropy collector struct -- may be NULL
* @bits is the number of low bits of the timer to consider
* @min is the number of bits we shift the timer value to the right at
* the end to make sure we have a guaranteed minimum value
*
* @return Newly calculated loop counter
*/
-static __u64 jent_loop_shuffle(struct rand_data *ec,
- unsigned int bits, unsigned int min)
+static __u64 jent_loop_shuffle(unsigned int bits, unsigned int min)
{
__u64 time = 0;
__u64 shuffle = 0;
@@ -318,12 +316,7 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
unsigned int mask = (1<<bits) - 1;
jent_get_nstime(&time);
- /*
- * Mix the current state of the random number into the shuffle
- * calculation to balance that shuffle a bit more.
- */
- if (ec)
- time ^= ec->data;
+
/*
* We fold the time value as much as possible to ensure that as many
* bits of the time stamp are included as possible.
@@ -345,81 +338,32 @@ static __u64 jent_loop_shuffle(struct rand_data *ec,
* execution time jitter
*
* This function injects the individual bits of the time value into the
- * entropy pool using an LFSR.
+ * entropy pool using a hash.
*
- * The code is deliberately inefficient with respect to the bit shifting
- * and shall stay that way. This function is the root cause why the code
- * shall be compiled without optimization. This function not only acts as
- * folding operation, but this function's execution is used to measure
- * the CPU execution time jitter. Any change to the loop in this function
- * implies that careful retesting must be done.
- *
- * @ec [in] entropy collector struct
- * @time [in] time stamp to be injected
- * @loop_cnt [in] if a value not equal to 0 is set, use the given value as
- * number of loops to perform the folding
- * @stuck [in] Is the time stamp identified as stuck?
+ * ec [in] entropy collector
+ * time [in] time stamp to be injected
+ * stuck [in] Is the time stamp identified as stuck?
*
* Output:
- * updated ec->data
- *
- * @return Number of loops the folding operation is performed
+ * updated hash context in the entropy collector or error code
*/
-static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt,
- int stuck)
+static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck)
{
- unsigned int i;
- __u64 j = 0;
- __u64 new = 0;
-#define MAX_FOLD_LOOP_BIT 4
-#define MIN_FOLD_LOOP_BIT 0
- __u64 fold_loop_cnt =
- jent_loop_shuffle(ec, MAX_FOLD_LOOP_BIT, MIN_FOLD_LOOP_BIT);
-
- /*
- * testing purposes -- allow test app to set the counter, not
- * needed during runtime
- */
- if (loop_cnt)
- fold_loop_cnt = loop_cnt;
- for (j = 0; j < fold_loop_cnt; j++) {
- new = ec->data;
- for (i = 1; (DATA_SIZE_BITS) >= i; i++) {
- __u64 tmp = time << (DATA_SIZE_BITS - i);
-
- tmp = tmp >> (DATA_SIZE_BITS - 1);
-
- /*
- * Fibonacci LSFR with polynomial of
- * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
- * primitive according to
- * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
- * (the shift values are the polynomial values minus one
- * due to counting bits from 0 to 63). As the current
- * position is always the LSB, the polynomial only needs
- * to shift data in from the left without wrap.
- */
- tmp ^= ((new >> 63) & 1);
- tmp ^= ((new >> 60) & 1);
- tmp ^= ((new >> 55) & 1);
- tmp ^= ((new >> 30) & 1);
- tmp ^= ((new >> 27) & 1);
- tmp ^= ((new >> 22) & 1);
- new <<= 1;
- new ^= tmp;
- }
- }
-
- /*
- * If the time stamp is stuck, do not finally insert the value into
- * the entropy pool. Although this operation should not do any harm
- * even when the time stamp has no entropy, SP800-90B requires that
- * any conditioning operation (SP800-90B considers the LFSR to be a
- * conditioning operation) to have an identical amount of input
- * data according to section 3.1.5.
- */
- if (!stuck)
- ec->data = new;
+#define SHA3_HASH_LOOP (1<<3)
+ struct {
+ int rct_count;
+ unsigned int apt_observations;
+ unsigned int apt_count;
+ unsigned int apt_base;
+ } addtl = {
+ ec->rct_count,
+ ec->apt_observations,
+ ec->apt_count,
+ ec->apt_base
+ };
+
+ return jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl),
+ SHA3_HASH_LOOP, stuck);
}
/*
@@ -453,7 +397,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
#define MAX_ACC_LOOP_BIT 7
#define MIN_ACC_LOOP_BIT 0
__u64 acc_loop_cnt =
- jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);
+ jent_loop_shuffle(MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);
if (NULL == ec || NULL == ec->mem)
return;
@@ -521,14 +465,15 @@ static int jent_measure_jitter(struct rand_data *ec)
stuck = jent_stuck(ec, current_delta);
/* Now call the next noise sources which also injects the data */
- jent_lfsr_time(ec, current_delta, 0, stuck);
+ if (jent_condition_data(ec, current_delta, stuck))
+ stuck = 1;
return stuck;
}
/*
* Generator of one 64 bit random number
- * Function fills rand_data->data
+ * Function fills rand_data->hash_state
*
* @ec [in] Reference to entropy collector
*/
@@ -575,7 +520,7 @@ static void jent_gen_entropy(struct rand_data *ec)
* @return 0 when request is fulfilled or an error
*
* The following error codes can occur:
- * -1 entropy_collector is NULL
+ * -1 entropy_collector is NULL or the generation failed
* -2 Intermittent health failure
* -3 Permanent health failure
*/
@@ -605,7 +550,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
* Perform startup health tests and return permanent
* error if it fails.
*/
- if (jent_entropy_init())
+ if (jent_entropy_init(ec->hash_state))
return -3;
return -2;
@@ -615,7 +560,8 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
tocopy = (DATA_SIZE_BITS / 8);
else
tocopy = len;
- jent_memcpy(p, &ec->data, tocopy);
+ if (jent_read_random_block(ec->hash_state, p, tocopy))
+ return -1;
len -= tocopy;
p += tocopy;
@@ -629,7 +575,8 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
***************************************************************************/
struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
- unsigned int flags)
+ unsigned int flags,
+ void *hash_state)
{
struct rand_data *entropy_collector;
@@ -656,6 +603,8 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
osr = 1; /* minimum sampling rate is 1 */
entropy_collector->osr = osr;
+ entropy_collector->hash_state = hash_state;
+
/* fill the data pad with non-zero values */
jent_gen_entropy(entropy_collector);
@@ -669,7 +618,7 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector)
jent_zfree(entropy_collector);
}
-int jent_entropy_init(void)
+int jent_entropy_init(void *hash_state)
{
int i;
__u64 delta_sum = 0;
@@ -682,6 +631,7 @@ int jent_entropy_init(void)
/* Required for RCT */
ec.osr = 1;
+ ec.hash_state = hash_state;
/* We could perform statistical tests here, but the problem is
* that we only have a few loop counts to do testing. These
@@ -719,7 +669,7 @@ int jent_entropy_init(void)
/* Invoke core entropy collection logic */
jent_get_nstime(&time);
ec.prev_time = time;
- jent_lfsr_time(&ec, time, 0, 0);
+ jent_condition_data(&ec, time, 0);
jent_get_nstime(&time2);
/* test whether timer works */
@@ -762,14 +712,12 @@ int jent_entropy_init(void)
if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
jent_apt_reset(&ec,
delta & JENT_APT_WORD_MASK);
- if (jent_health_failure(&ec))
- return JENT_EHEALTH;
}
}
- /* Validate RCT */
- if (jent_rct_failure(&ec))
- return JENT_ERCT;
+ /* Validate health test result */
+ if (jent_health_failure(&ec))
+ return JENT_EHEALTH;
/* test whether we have an increasing timer */
if (!(time2 > time))
diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h
index 5cc583f6bc6b..4c92176ea2b1 100644
--- a/crypto/jitterentropy.h
+++ b/crypto/jitterentropy.h
@@ -2,14 +2,28 @@
extern void *jent_zalloc(unsigned int len);
extern void jent_zfree(void *ptr);
-extern void jent_memcpy(void *dest, const void *src, unsigned int n);
extern void jent_get_nstime(__u64 *out);
+extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
+ unsigned int addtl_len, __u64 hash_loop_cnt,
+ unsigned int stuck);
+int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len);
struct rand_data;
-extern int jent_entropy_init(void);
+extern int jent_entropy_init(void *hash_state);
extern int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len);
extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
- unsigned int flags);
+ unsigned int flags,
+ void *hash_state);
extern void jent_entropy_collector_free(struct rand_data *entropy_collector);
+
+#ifdef CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE
+int jent_raw_hires_entropy_store(__u32 value);
+void jent_testing_init(void);
+void jent_testing_exit(void);
+#else /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
+static inline int jent_raw_hires_entropy_store(__u32 value) { return 0; }
+static inline void jent_testing_init(void) { }
+static inline void jent_testing_exit(void) { }
+#endif /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
diff --git a/crypto/rsa.c b/crypto/rsa.c
index c50f2d2a4d06..c79613cdce6e 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -205,6 +205,32 @@ static int rsa_check_key_length(unsigned int len)
return -EINVAL;
}
+static int rsa_check_exponent_fips(MPI e)
+{
+ MPI e_max = NULL;
+
+ /* check if odd */
+	if (!mpi_test_bit(e, 0))
+		return -EINVAL;
+
+	/* check if 2^16 < e < 2^256 */
+	if (mpi_cmp_ui(e, 65536) <= 0)
+		return -EINVAL;
+
+	e_max = mpi_alloc(0);
+	if (!e_max)
+		return -ENOMEM;
+
+	mpi_set_bit(e_max, 256);
+
+ if (mpi_cmp(e, e_max) >= 0) {
+ mpi_free(e_max);
+ return -EINVAL;
+ }
+
+ mpi_free(e_max);
+ return 0;
+}
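
The checks above amount to: e is odd, e > 2^16 and e < 2^256, making 65537 the smallest acceptable exponent. A simplified model of the predicate for exponents that fit into 64 bits; the real code operates on arbitrary-precision MPI values and additionally enforces the 2^256 upper bound:

#include <stdbool.h>
#include <stdint.h>

/* Simplified rsa_check_exponent_fips() for machine-word exponents. */
static bool exponent_ok(uint64_t e)
{
	if (!(e & 1))		/* must be odd */
		return false;
	if (e <= 65536)		/* must satisfy e > 2^16 */
		return false;
	return true;		/* e = 65537 is the smallest valid value */
}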
+
static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
@@ -232,6 +258,11 @@ static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
return -EINVAL;
}
+ if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) {
+ rsa_free_mpi_key(mpi_key);
+ return -EINVAL;
+ }
+
return 0;
err:
@@ -290,6 +321,11 @@ static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
return -EINVAL;
}
+ if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) {
+ rsa_free_mpi_key(mpi_key);
+ return -EINVAL;
+ }
+
return 0;
err:
diff --git a/crypto/shash.c b/crypto/shash.c
index 717b42df3495..1fadb6b59bdc 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -597,7 +597,7 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
return hash;
}
- if (!alg->clone_tfm)
+ if (!alg->clone_tfm && (alg->init_tfm || alg->base.cra_init))
return ERR_PTR(-ENOSYS);
nhash = crypto_clone_tfm(&crypto_shash_type, tfm);
@@ -606,10 +606,12 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
nhash->descsize = hash->descsize;
- err = alg->clone_tfm(nhash, hash);
- if (err) {
- crypto_free_shash(nhash);
- return ERR_PTR(err);
+ if (alg->clone_tfm) {
+ err = alg->clone_tfm(nhash, hash);
+ if (err) {
+ crypto_free_shash(nhash);
+ return ERR_PTR(err);
+ }
}
return nhash;
diff --git a/crypto/sig.c b/crypto/sig.c
new file mode 100644
index 000000000000..b48c18ec65cd
--- /dev/null
+++ b/crypto/sig.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <[email protected]>
+ */
+
+#include <crypto/akcipher.h>
+#include <crypto/internal/sig.h>
+#include <linux/cryptouser.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <net/netlink.h>
+
+#include "internal.h"
+
+#define CRYPTO_ALG_TYPE_SIG_MASK 0x0000000e
+
+static const struct crypto_type crypto_sig_type;
+
+static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_sig, base);
+}
+
+static int crypto_sig_init_tfm(struct crypto_tfm *tfm)
+{
+ if (tfm->__crt_alg->cra_type != &crypto_sig_type)
+ return crypto_init_akcipher_ops_sig(tfm);
+
+ return 0;
+}
+
+static void __maybe_unused crypto_sig_show(struct seq_file *m,
+ struct crypto_alg *alg)
+{
+ seq_puts(m, "type : sig\n");
+}
+
+static int __maybe_unused crypto_sig_report(struct sk_buff *skb,
+ struct crypto_alg *alg)
+{
+ struct crypto_report_akcipher rsig = {};
+
+ strscpy(rsig.type, "sig", sizeof(rsig.type));
+
+ return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig);
+}
+
+static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb,
+ struct crypto_alg *alg)
+{
+ struct crypto_stat_akcipher rsig = {};
+
+ strscpy(rsig.type, "sig", sizeof(rsig.type));
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig);
+}
+
+static const struct crypto_type crypto_sig_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_sig_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_sig_show,
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_USER)
+ .report = crypto_sig_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_sig_report_stat,
+#endif
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_SIG_MASK,
+ .type = CRYPTO_ALG_TYPE_SIG,
+ .tfmsize = offsetof(struct crypto_sig, base),
+};
+
+struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_sig_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_sig);
+
+int crypto_sig_maxsize(struct crypto_sig *tfm)
+{
+ struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+
+ return crypto_akcipher_maxsize(*ctx);
+}
+EXPORT_SYMBOL_GPL(crypto_sig_maxsize);
+
+int crypto_sig_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+ struct crypto_akcipher_sync_data data = {
+ .tfm = *ctx,
+ .src = src,
+ .dst = dst,
+ .slen = slen,
+ .dlen = dlen,
+ };
+
+ return crypto_akcipher_sync_prep(&data) ?:
+ crypto_akcipher_sync_post(&data,
+ crypto_akcipher_sign(data.req));
+}
+EXPORT_SYMBOL_GPL(crypto_sig_sign);
+
+int crypto_sig_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
+{
+ struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+ struct crypto_akcipher_sync_data data = {
+ .tfm = *ctx,
+ .src = src,
+ .slen = slen,
+ .dlen = dlen,
+ };
+ int err;
+
+ err = crypto_akcipher_sync_prep(&data);
+ if (err)
+ return err;
+
+ memcpy(data.buf + slen, digest, dlen);
+
+ return crypto_akcipher_sync_post(&data,
+ crypto_akcipher_verify(data.req));
+}
+EXPORT_SYMBOL_GPL(crypto_sig_verify);
+
+int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+
+ return crypto_akcipher_set_pub_key(*ctx, key, keylen);
+}
+EXPORT_SYMBOL_GPL(crypto_sig_set_pubkey);
+
+int crypto_sig_set_privkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
+
+ return crypto_akcipher_set_priv_key(*ctx, key, keylen);
+}
+EXPORT_SYMBOL_GPL(crypto_sig_set_privkey);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Public Key Signature Algorithms");
diff --git a/crypto/sm2.c b/crypto/sm2.c
index ed9307dac3d1..285b3cb7c0bc 100644
--- a/crypto/sm2.c
+++ b/crypto/sm2.c
@@ -13,11 +13,14 @@
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
-#include <crypto/sm3.h>
#include <crypto/rng.h>
#include <crypto/sm2.h>
#include "sm2signature.asn1.h"
+/* The default user id as specified in GM/T 0009-2012 */
+#define SM2_DEFAULT_USERID "1234567812345678"
+#define SM2_DEFAULT_USERID_LEN 16
+
#define MPI_NBYTES(m) ((mpi_get_nbits(m) + 7) / 8)
struct ecc_domain_parms {
@@ -60,6 +63,9 @@ static const struct ecc_domain_parms sm2_ecp = {
.h = 1
};
+static int __sm2_set_pub_key(struct mpi_ec_ctx *ec,
+ const void *key, unsigned int keylen);
+
static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
{
const struct ecc_domain_parms *ecp = &sm2_ecp;
@@ -213,12 +219,13 @@ int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
return 0;
}
-static int sm2_z_digest_update(struct sm3_state *sctx,
- MPI m, unsigned int pbytes)
+static int sm2_z_digest_update(struct shash_desc *desc,
+ MPI m, unsigned int pbytes)
{
static const unsigned char zero[32];
unsigned char *in;
unsigned int inlen;
+ int err;
in = mpi_get_buffer(m, &inlen, NULL);
if (!in)
@@ -226,21 +233,22 @@ static int sm2_z_digest_update(struct sm3_state *sctx,
if (inlen < pbytes) {
/* padding with zero */
- sm3_update(sctx, zero, pbytes - inlen);
- sm3_update(sctx, in, inlen);
+ err = crypto_shash_update(desc, zero, pbytes - inlen) ?:
+ crypto_shash_update(desc, in, inlen);
} else if (inlen > pbytes) {
/* skip the starting zero */
- sm3_update(sctx, in + inlen - pbytes, pbytes);
+ err = crypto_shash_update(desc, in + inlen - pbytes, pbytes);
} else {
- sm3_update(sctx, in, inlen);
+ err = crypto_shash_update(desc, in, inlen);
}
kfree(in);
- return 0;
+ return err;
}
-static int sm2_z_digest_update_point(struct sm3_state *sctx,
- MPI_POINT point, struct mpi_ec_ctx *ec, unsigned int pbytes)
+static int sm2_z_digest_update_point(struct shash_desc *desc,
+ MPI_POINT point, struct mpi_ec_ctx *ec,
+ unsigned int pbytes)
{
MPI x, y;
int ret = -EINVAL;
@@ -248,50 +256,68 @@ static int sm2_z_digest_update_point(struct sm3_state *sctx,
x = mpi_new(0);
y = mpi_new(0);
- if (!mpi_ec_get_affine(x, y, point, ec) &&
- !sm2_z_digest_update(sctx, x, pbytes) &&
- !sm2_z_digest_update(sctx, y, pbytes))
- ret = 0;
+ ret = mpi_ec_get_affine(x, y, point, ec) ? -EINVAL :
+ sm2_z_digest_update(desc, x, pbytes) ?:
+ sm2_z_digest_update(desc, y, pbytes);
mpi_free(x);
mpi_free(y);
return ret;
}
-int sm2_compute_z_digest(struct crypto_akcipher *tfm,
- const unsigned char *id, size_t id_len,
- unsigned char dgst[SM3_DIGEST_SIZE])
+int sm2_compute_z_digest(struct shash_desc *desc,
+ const void *key, unsigned int keylen, void *dgst)
{
- struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
- uint16_t bits_len;
- unsigned char entl[2];
- struct sm3_state sctx;
+ struct mpi_ec_ctx *ec;
+ unsigned int bits_len;
unsigned int pbytes;
+ u8 entl[2];
+ int err;
- if (id_len > (USHRT_MAX / 8) || !ec->Q)
- return -EINVAL;
+ ec = kmalloc(sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ err = __sm2_set_pub_key(ec, key, keylen);
+ if (err)
+ goto out_free_ec;
- bits_len = (uint16_t)(id_len * 8);
+ bits_len = SM2_DEFAULT_USERID_LEN * 8;
entl[0] = bits_len >> 8;
entl[1] = bits_len & 0xff;
pbytes = MPI_NBYTES(ec->p);
/* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */
- sm3_init(&sctx);
- sm3_update(&sctx, entl, 2);
- sm3_update(&sctx, id, id_len);
-
- if (sm2_z_digest_update(&sctx, ec->a, pbytes) ||
- sm2_z_digest_update(&sctx, ec->b, pbytes) ||
- sm2_z_digest_update_point(&sctx, ec->G, ec, pbytes) ||
- sm2_z_digest_update_point(&sctx, ec->Q, ec, pbytes))
- return -EINVAL;
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out_deinit_ec;
- sm3_final(&sctx, dgst);
- return 0;
+ err = crypto_shash_update(desc, entl, 2);
+ if (err)
+ goto out_deinit_ec;
+
+ err = crypto_shash_update(desc, SM2_DEFAULT_USERID,
+ SM2_DEFAULT_USERID_LEN);
+ if (err)
+ goto out_deinit_ec;
+
+ err = sm2_z_digest_update(desc, ec->a, pbytes) ?:
+ sm2_z_digest_update(desc, ec->b, pbytes) ?:
+ sm2_z_digest_update_point(desc, ec->G, ec, pbytes) ?:
+ sm2_z_digest_update_point(desc, ec->Q, ec, pbytes);
+ if (err)
+ goto out_deinit_ec;
+
+ err = crypto_shash_final(desc, dgst);
+
+out_deinit_ec:
+ sm2_ec_ctx_deinit(ec);
+out_free_ec:
+ kfree(ec);
+ return err;
}
-EXPORT_SYMBOL(sm2_compute_z_digest);
+EXPORT_SYMBOL_GPL(sm2_compute_z_digest);
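
ENTLA in the hash input above is simply the ID bit length encoded as two big-endian bytes; for the fixed 16-byte default ID that is 128 bits, i.e. 0x00 0x80. A tiny sketch of the encoding:

/* ENTLA encoding: ID bit length as two big-endian bytes.
 * For the 16-byte default ID, 16 * 8 = 128 = 0x0080. */
static void sm2_entl(unsigned int id_len_bytes, unsigned char entl[2])
{
	unsigned int bits = id_len_bytes * 8;

	entl[0] = bits >> 8;	/* 0x00 for the default ID */
	entl[1] = bits & 0xff;	/* 0x80 for the default ID */
}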
static int _sm2_verify(struct mpi_ec_ctx *ec, MPI hash, MPI sig_r, MPI sig_s)
{
@@ -391,6 +417,14 @@ static int sm2_set_pub_key(struct crypto_akcipher *tfm,
const void *key, unsigned int keylen)
{
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
+
+ return __sm2_set_pub_key(ec, key, keylen);
+
+}
+
+static int __sm2_set_pub_key(struct mpi_ec_ctx *ec,
+ const void *key, unsigned int keylen)
+{
MPI a;
int rc;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 4fdf07ae3c54..e0b3786ca51b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -335,9 +335,20 @@ config HW_RANDOM_HISI
If unsure, say Y.
+config HW_RANDOM_HISTB
+ tristate "Hisilicon STB Random Number Generator support"
+ depends on ARCH_HISI || COMPILE_TEST
+ default ARCH_HISI
+ help
+ This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Hisilicon Hi37xx SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called histb-rng.
+
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
- depends on HW_RANDOM && ARCH_STI
+ depends on HW_RANDOM && (ARCH_STI || COMPILE_TEST)
help
This driver provides kernel-side support for the Random Number
Generator hardware found on STi series of SoCs.
@@ -400,9 +411,9 @@ config HW_RANDOM_POLARFIRE_SOC
config HW_RANDOM_MESON
tristate "Amlogic Meson Random Number Generator support"
- depends on HW_RANDOM
depends on ARCH_MESON || COMPILE_TEST
- default y
+ depends on HAS_IOMEM && OF
+ default HW_RANDOM if ARCH_MESON
help
This driver provides kernel-side support for the Random Number
Generator hardware found on Amlogic Meson SoCs.
@@ -427,9 +438,9 @@ config HW_RANDOM_CAVIUM
config HW_RANDOM_MTK
tristate "Mediatek Random Number Generator support"
- depends on HW_RANDOM
depends on ARCH_MEDIATEK || COMPILE_TEST
- default y
+ depends on HAS_IOMEM && OF
+ default HW_RANDOM if ARCH_MEDIATEK
help
This driver provides kernel-side support for the Random Number
Generator hardware found on Mediatek SoCs.
@@ -456,7 +467,8 @@ config HW_RANDOM_S390
config HW_RANDOM_EXYNOS
tristate "Samsung Exynos True Random Number Generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
- default HW_RANDOM
+ depends on HAS_IOMEM
+ default HW_RANDOM if ARCH_EXYNOS
help
This driver provides support for the True Random Number
Generator available in Exynos SoCs.
@@ -483,7 +495,8 @@ config HW_RANDOM_OPTEE
config HW_RANDOM_NPCM
tristate "NPCM Random Number Generator support"
depends on ARCH_NPCM || COMPILE_TEST
- default HW_RANDOM
+ depends on HAS_IOMEM
+ default HW_RANDOM if ARCH_NPCM
help
This driver provides support for the Random Number
Generator hardware available in Nuvoton NPCM SoCs.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 09bde4a0f971..32549a1186dc 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
+obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index c1193f85982c..0cd7e1a8e499 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -23,14 +23,49 @@
#define RNM_PF_RANDOM 0x400
#define RNM_TRNG_RESULT 0x408
+/* Extended TRNG Read and Status Registers */
+#define RNM_PF_TRNG_DAT 0x1000
+#define RNM_PF_TRNG_RES 0x1008
+
struct cn10k_rng {
void __iomem *reg_base;
struct hwrng ops;
struct pci_dev *pdev;
+ /* Octeon CN10K-A A0/A1, CNF10K-A A0/A1 and CNF10K-B A0/B0
+	 * do not support the extended TRNG registers
+ */
+ bool extended_trng_regs;
};
#define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f
+#define PCI_SUBSYS_DEVID_CN10K_A_RNG 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_RNG 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_RNG 0xBC00
+
+static bool cn10k_is_extended_trng_regs_supported(struct pci_dev *pdev)
+{
+ /* CN10K-A A0/A1 */
+ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RNG) &&
+ (!pdev->revision || (pdev->revision & 0xff) == 0x50 ||
+ (pdev->revision & 0xff) == 0x51))
+ return false;
+
+ /* CNF10K-A A0 */
+ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_RNG) &&
+ (!pdev->revision || (pdev->revision & 0xff) == 0x60 ||
+ (pdev->revision & 0xff) == 0x61))
+ return false;
+
+ /* CNF10K-B A0/B0 */
+ if ((pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_RNG) &&
+ (!pdev->revision || (pdev->revision & 0xff) == 0x70 ||
+ (pdev->revision & 0xff) == 0x74))
+ return false;
+
+ return true;
+}
+
static unsigned long reset_rng_health_state(struct cn10k_rng *rng)
{
struct arm_smccc_res res;
@@ -63,9 +98,23 @@ static int check_rng_health(struct cn10k_rng *rng)
return 0;
}
-static void cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
+/* Returns true when valid data is available, otherwise returns false */
+static bool cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
{
+ u16 retry_count = 0;
u64 upper, lower;
+ u64 status;
+
+ if (rng->extended_trng_regs) {
+ do {
+ *value = readq(rng->reg_base + RNM_PF_TRNG_DAT);
+ if (*value)
+ return true;
+ status = readq(rng->reg_base + RNM_PF_TRNG_RES);
+ if (!status && (retry_count++ > 0x1000))
+ return false;
+ } while (!status);
+ }
*value = readq(rng->reg_base + RNM_PF_RANDOM);
@@ -82,6 +131,7 @@ static void cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
*value = (upper & 0xFFFFFFFF00000000) | (lower & 0xFFFFFFFF);
}
+ return true;
}
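
The extended-register path is a bounded poll: a non-zero word from RNM_PF_TRNG_DAT is valid data, an all-zero word falls back to checking the RNM_PF_TRNG_RES status, and the loop gives up after roughly 0x1000 consecutive idle reads. The same pattern in isolation; the helper name is illustrative:

#include <linux/io.h>
#include <linux/types.h>

#define RNM_PF_TRNG_DAT	0x1000
#define RNM_PF_TRNG_RES	0x1008

/* Bounded-poll sketch of the extended TRNG read path. A raised
 * status with zero data exits the loop so the caller can fall back
 * to the legacy register. */
static bool trng_poll_data(void __iomem *base, u64 *value)
{
	u16 retries = 0;
	u64 status;

	do {
		*value = readq(base + RNM_PF_TRNG_DAT);
		if (*value)
			return true;
		status = readq(base + RNM_PF_TRNG_RES);
		if (!status && retries++ > 0x1000)
			return false;	/* engine idle for too long */
	} while (!status);

	return false;
}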
static int cn10k_rng_read(struct hwrng *hwrng, void *data,
@@ -100,7 +150,8 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data,
size = max;
while (size >= 8) {
- cn10k_read_trng(rng, &value);
+ if (!cn10k_read_trng(rng, &value))
+ goto out;
*((u64 *)pos) = value;
size -= 8;
@@ -108,7 +159,8 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data,
}
if (size > 0) {
- cn10k_read_trng(rng, &value);
+ if (!cn10k_read_trng(rng, &value))
+ goto out;
while (size > 0) {
*pos = (u8)value;
@@ -118,6 +170,7 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data,
}
}
+out:
return max - size;
}
@@ -144,9 +197,11 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!rng->ops.name)
return -ENOMEM;
- rng->ops.read = cn10k_rng_read;
+ rng->ops.read = cn10k_rng_read;
rng->ops.priv = (unsigned long)rng;
+ rng->extended_trng_regs = cn10k_is_extended_trng_regs_supported(pdev);
+
reset_rng_health_state(rng);
err = devm_hwrng_register(&pdev->dev, &rng->ops);
diff --git a/drivers/crypto/hisilicon/trng/trng-stb.c b/drivers/char/hw_random/histb-rng.c
index 29200a7d3d81..f652e1135e4b 100644
--- a/drivers/crypto/hisilicon/trng/trng-stb.c
+++ b/drivers/char/hw_random/histb-rng.c
@@ -1,31 +1,27 @@
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
- * Device driver for True RNG in HiSTB SoCs
- *
* Copyright (c) 2023 David Yang
*/
-#include <crypto/internal/rng.h>
-#include <linux/device.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
-#define HISTB_TRNG_CTRL 0x0
+#define RNG_CTRL 0x0
#define RNG_SOURCE GENMASK(1, 0)
#define DROP_ENABLE BIT(5)
#define POST_PROCESS_ENABLE BIT(7)
#define POST_PROCESS_DEPTH GENMASK(15, 8)
-#define HISTB_TRNG_NUMBER 0x4
-#define HISTB_TRNG_STAT 0x8
+#define RNG_NUMBER 0x4
+#define RNG_STAT 0x8
#define DATA_COUNT GENMASK(2, 0) /* max 4 */
-struct histb_trng_priv {
+struct histb_rng_priv {
struct hwrng rng;
void __iomem *base;
};
@@ -35,19 +31,19 @@ struct histb_trng_priv {
* depth = 1 -> ~1ms
* depth = 255 -> ~16ms
*/
-static int histb_trng_wait(void __iomem *base)
+static int histb_rng_wait(void __iomem *base)
{
u32 val;
- return readl_relaxed_poll_timeout(base + HISTB_TRNG_STAT, val,
+ return readl_relaxed_poll_timeout(base + RNG_STAT, val,
val & DATA_COUNT, 1000, 30 * 1000);
}
-static void histb_trng_init(void __iomem *base, unsigned int depth)
+static void histb_rng_init(void __iomem *base, unsigned int depth)
{
u32 val;
- val = readl_relaxed(base + HISTB_TRNG_CTRL);
+ val = readl_relaxed(base + RNG_CTRL);
val &= ~RNG_SOURCE;
val |= 2;
@@ -58,72 +54,72 @@ static void histb_trng_init(void __iomem *base, unsigned int depth)
val |= POST_PROCESS_ENABLE;
val |= DROP_ENABLE;
- writel_relaxed(val, base + HISTB_TRNG_CTRL);
+ writel_relaxed(val, base + RNG_CTRL);
}
-static int histb_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+static int histb_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
- struct histb_trng_priv *priv = container_of(rng, typeof(*priv), rng);
+ struct histb_rng_priv *priv = container_of(rng, typeof(*priv), rng);
void __iomem *base = priv->base;
for (int i = 0; i < max; i += sizeof(u32)) {
- if (!(readl_relaxed(base + HISTB_TRNG_STAT) & DATA_COUNT)) {
+ if (!(readl_relaxed(base + RNG_STAT) & DATA_COUNT)) {
if (!wait)
return i;
- if (histb_trng_wait(base)) {
+ if (histb_rng_wait(base)) {
pr_err("failed to generate random number, generated %d\n",
i);
return i ? i : -ETIMEDOUT;
}
}
- *(u32 *) (data + i) = readl_relaxed(base + HISTB_TRNG_NUMBER);
+ *(u32 *) (data + i) = readl_relaxed(base + RNG_NUMBER);
}
return max;
}
-static unsigned int histb_trng_get_depth(void __iomem *base)
+static unsigned int histb_rng_get_depth(void __iomem *base)
{
- return (readl_relaxed(base + HISTB_TRNG_CTRL) & POST_PROCESS_DEPTH) >> 8;
+ return (readl_relaxed(base + RNG_CTRL) & POST_PROCESS_DEPTH) >> 8;
}
static ssize_t
depth_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct histb_trng_priv *priv = dev_get_drvdata(dev);
+ struct histb_rng_priv *priv = dev_get_drvdata(dev);
void __iomem *base = priv->base;
- return sprintf(buf, "%d\n", histb_trng_get_depth(base));
+ return sprintf(buf, "%d\n", histb_rng_get_depth(base));
}
static ssize_t
depth_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct histb_trng_priv *priv = dev_get_drvdata(dev);
+ struct histb_rng_priv *priv = dev_get_drvdata(dev);
void __iomem *base = priv->base;
unsigned int depth;
if (kstrtouint(buf, 0, &depth))
return -ERANGE;
- histb_trng_init(base, depth);
+ histb_rng_init(base, depth);
return count;
}
static DEVICE_ATTR_RW(depth);
-static struct attribute *histb_trng_attrs[] = {
+static struct attribute *histb_rng_attrs[] = {
&dev_attr_depth.attr,
NULL,
};
-ATTRIBUTE_GROUPS(histb_trng);
+ATTRIBUTE_GROUPS(histb_rng);
-static int histb_trng_probe(struct platform_device *pdev)
+static int histb_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct histb_trng_priv *priv;
+ struct histb_rng_priv *priv;
void __iomem *base;
int ret;
@@ -133,17 +129,17 @@ static int histb_trng_probe(struct platform_device *pdev)
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
- return -ENOMEM;
+ return PTR_ERR(base);
- histb_trng_init(base, 144);
- if (histb_trng_wait(base)) {
+ histb_rng_init(base, 144);
+ if (histb_rng_wait(base)) {
dev_err(dev, "cannot bring up device\n");
return -ENODEV;
}
priv->base = base;
priv->rng.name = pdev->name;
- priv->rng.read = histb_trng_read;
+ priv->rng.read = histb_rng_read;
ret = devm_hwrng_register(dev, &priv->rng);
if (ret) {
dev_err(dev, "failed to register hwrng: %d\n", ret);
@@ -155,22 +151,23 @@ static int histb_trng_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id histb_trng_of_match[] = {
- { .compatible = "hisilicon,histb-trng", },
+static const struct of_device_id histb_rng_of_match[] = {
+ { .compatible = "hisilicon,histb-rng", },
{ }
};
+MODULE_DEVICE_TABLE(of, histb_rng_of_match);
-static struct platform_driver histb_trng_driver = {
- .probe = histb_trng_probe,
+static struct platform_driver histb_rng_driver = {
+ .probe = histb_rng_probe,
.driver = {
- .name = "histb-trng",
- .of_match_table = histb_trng_of_match,
- .dev_groups = histb_trng_groups,
+ .name = "histb-rng",
+ .of_match_table = histb_rng_of_match,
+ .dev_groups = histb_rng_groups,
},
};
-module_platform_driver(histb_trng_driver);
+module_platform_driver(histb_rng_driver);
-MODULE_DESCRIPTION("HiSTB True RNG");
+MODULE_DESCRIPTION("Hisilicon STB random number generator driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("David Yang <[email protected]>");
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index a1c24148ed31..bf07f17f78c8 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -17,6 +17,7 @@
#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/io.h>
+#include <linux/bitfield.h>
#define RNGC_VER_ID 0x0000
#define RNGC_COMMAND 0x0004
@@ -26,7 +27,7 @@
#define RNGC_FIFO 0x0014
/* the fields in the ver id register */
-#define RNGC_TYPE_SHIFT 28
+#define RNG_TYPE GENMASK(31, 28)
#define RNGC_VER_MAJ_SHIFT 8
/* the rng_type field */
@@ -34,20 +35,19 @@
#define RNGC_TYPE_RNGC 0x2
-#define RNGC_CMD_CLR_ERR 0x00000020
-#define RNGC_CMD_CLR_INT 0x00000010
-#define RNGC_CMD_SEED 0x00000002
-#define RNGC_CMD_SELF_TEST 0x00000001
+#define RNGC_CMD_CLR_ERR BIT(5)
+#define RNGC_CMD_CLR_INT BIT(4)
+#define RNGC_CMD_SEED BIT(1)
+#define RNGC_CMD_SELF_TEST BIT(0)
-#define RNGC_CTRL_MASK_ERROR 0x00000040
-#define RNGC_CTRL_MASK_DONE 0x00000020
-#define RNGC_CTRL_AUTO_SEED 0x00000010
+#define RNGC_CTRL_MASK_ERROR BIT(6)
+#define RNGC_CTRL_MASK_DONE BIT(5)
+#define RNGC_CTRL_AUTO_SEED BIT(4)
-#define RNGC_STATUS_ERROR 0x00010000
-#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
-#define RNGC_STATUS_FIFO_LEVEL_SHIFT 8
-#define RNGC_STATUS_SEED_DONE 0x00000020
-#define RNGC_STATUS_ST_DONE 0x00000010
+#define RNGC_STATUS_ERROR BIT(16)
+#define RNGC_STATUS_FIFO_LEVEL_MASK GENMASK(11, 8)
+#define RNGC_STATUS_SEED_DONE BIT(5)
+#define RNGC_STATUS_ST_DONE BIT(4)
#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008
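
The GENMASK() define above pairs with FIELD_GET() in the probe path below; the conversion is purely mechanical, since FIELD_GET() derives the shift amount from the mask. A minimal sketch of the equivalence:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* FIELD_GET() makes the old explicit RNGC_TYPE_SHIFT unnecessary. */
static u32 rng_type_of(u32 ver_id)
{
	return FIELD_GET(GENMASK(31, 28), ver_id); /* == (ver_id >> 28) & 0xf */
}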
@@ -110,7 +110,7 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
- ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
imx_rngc_irq_mask_clear(rngc);
if (!ret)
return -ETIMEDOUT;
@@ -122,7 +122,6 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
unsigned int status;
- unsigned int level;
int retval = 0;
while (max >= sizeof(u32)) {
@@ -132,11 +131,7 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
if (status & RNGC_STATUS_ERROR)
break;
- /* how many random numbers are in FIFO? [0-16] */
- level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >>
- RNGC_STATUS_FIFO_LEVEL_SHIFT;
-
- if (level) {
+ if (status & RNGC_STATUS_FIFO_LEVEL_MASK) {
/* retrieve a random number from FIFO */
*(u32 *)data = readl(rngc->base + RNGC_FIFO);
@@ -187,9 +182,7 @@ static int imx_rngc_init(struct hwrng *rng)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
- ret = wait_for_completion_timeout(&rngc->rng_op_done,
- RNGC_TIMEOUT);
-
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
if (!ret) {
ret = -ETIMEDOUT;
goto err;
@@ -229,7 +222,7 @@ static void imx_rngc_cleanup(struct hwrng *rng)
imx_rngc_irq_mask_clear(rngc);
}
-static int imx_rngc_probe(struct platform_device *pdev)
+static int __init imx_rngc_probe(struct platform_device *pdev)
{
struct imx_rngc *rngc;
int ret;
@@ -256,7 +249,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
return irq;
ver_id = readl(rngc->base + RNGC_VER_ID);
- rng_type = ver_id >> RNGC_TYPE_SHIFT;
+ rng_type = FIELD_GET(RNG_TYPE, ver_id);
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
@@ -305,7 +298,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused imx_rngc_suspend(struct device *dev)
+static int imx_rngc_suspend(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -314,7 +307,7 @@ static int __maybe_unused imx_rngc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused imx_rngc_resume(struct device *dev)
+static int imx_rngc_resume(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -323,10 +316,10 @@ static int __maybe_unused imx_rngc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
static const struct of_device_id imx_rngc_dt_ids[] = {
- { .compatible = "fsl,imx25-rngb", .data = NULL, },
+ { .compatible = "fsl,imx25-rngb" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
@@ -334,7 +327,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .pm = &imx_rngc_pm_ops,
+ .pm = pm_sleep_ptr(&imx_rngc_pm_ops),
.of_match_table = imx_rngc_dt_ids,
},
};
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 15ba1e6fae4d..6e9dfac9fc9f 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -42,7 +42,6 @@
struct st_rng_data {
void __iomem *base;
- struct clk *clk;
struct hwrng ops;
};
@@ -85,26 +84,18 @@ static int st_rng_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
ddata->ops.priv = (unsigned long)ddata;
ddata->ops.read = st_rng_read;
ddata->ops.name = pdev->name;
ddata->base = base;
- ddata->clk = clk;
-
- dev_set_drvdata(&pdev->dev, ddata);
ret = devm_hwrng_register(&pdev->dev, &ddata->ops);
if (ret) {
dev_err(&pdev->dev, "Failed to register HW RNG\n");
- clk_disable_unprepare(clk);
return ret;
}
@@ -113,15 +104,6 @@ static int st_rng_probe(struct platform_device *pdev)
return 0;
}
-static int st_rng_remove(struct platform_device *pdev)
-{
- struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev);
-
- clk_disable_unprepare(ddata->clk);
-
- return 0;
-}
-
static const struct of_device_id st_rng_match[] __maybe_unused = {
{ .compatible = "st,rng" },
{},
@@ -134,7 +116,6 @@ static struct platform_driver st_rng_driver = {
.of_match_table = of_match_ptr(st_rng_match),
},
.probe = st_rng_probe,
- .remove = st_rng_remove
};
module_platform_driver(st_rng_driver);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index f7690e0f92ed..e41a84e6b4b5 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -4,6 +4,7 @@
* Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
*/
+#include <asm/barrier.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/scatterlist.h>
@@ -37,13 +38,13 @@ struct virtrng_info {
static void random_recv_done(struct virtqueue *vq)
{
struct virtrng_info *vi = vq->vdev->priv;
+ unsigned int len;
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
+ if (!virtqueue_get_buf(vi->vq, &len))
return;
- vi->data_idx = 0;
-
+ smp_store_release(&vi->data_avail, len);
complete(&vi->have_data);
}
@@ -52,7 +53,6 @@ static void request_entropy(struct virtrng_info *vi)
struct scatterlist sg;
reinit_completion(&vi->have_data);
- vi->data_avail = 0;
vi->data_idx = 0;
sg_init_one(&sg, vi->data, sizeof(vi->data));
@@ -88,7 +88,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
read = 0;
/* copy available data */
- if (vi->data_avail) {
+ if (smp_load_acquire(&vi->data_avail)) {
chunk = copy_data(vi, buf, size);
size -= chunk;
read += chunk;
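
The smp_store_release()/smp_load_acquire() pair introduced here is the standard message-passing idiom: the completion callback publishes the byte count only after the device has filled the buffer, and a reader that observes a non-zero count is guaranteed to see that buffer content. In isolation; buffer and counter names are illustrative:

#include <asm/barrier.h>
#include <linux/string.h>
#include <linux/types.h>

static u8 payload[64];
static unsigned int avail;

/* Producer: fill the buffer first, then publish the length. The
 * release barrier orders the payload writes before the counter. */
static void publish(unsigned int len)
{
	/* ... payload[0..len) filled here ... */
	smp_store_release(&avail, len);
}

/* Consumer: a non-zero length read with acquire semantics guarantees
 * the payload writes before the matching release are visible. */
static unsigned int consume(u8 *dst)
{
	unsigned int len = smp_load_acquire(&avail);

	if (len)
		memcpy(dst, payload, len);
	return len;
}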
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9c440cd0fed0..9f5b2d28bff5 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -807,5 +807,6 @@ config CRYPTO_DEV_SA2UL
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/aspeed/Kconfig"
+source "drivers/crypto/starfive/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 51d36701e785..d859d6a5f3a4 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -50,3 +50,4 @@ obj-y += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
+obj-y += starfive/
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index aac64b555204..432beabd79e6 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -389,7 +389,7 @@ static struct i2c_driver atmel_ecc_driver = {
.name = "atmel-ecc",
.of_match_table = of_match_ptr(atmel_ecc_dt_ids),
},
- .probe_new = atmel_ecc_probe,
+ .probe = atmel_ecc_probe,
.remove = atmel_ecc_remove,
.id_table = atmel_ecc_id,
};
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 44a185a84760..c77f482d2a97 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -141,7 +141,7 @@ static const struct i2c_device_id atmel_sha204a_id[] = {
MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);
static struct i2c_driver atmel_sha204a_driver = {
- .probe_new = atmel_sha204a_probe,
+ .probe = atmel_sha204a_probe,
.remove = atmel_sha204a_remove,
.id_table = atmel_sha204a_id,
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index ec6a9e6ad4d2..c631f99e415f 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -162,6 +162,15 @@ config CRYPTO_DEV_FSL_CAAM_PRNG_API
config CRYPTO_DEV_FSL_CAAM_BLOB_GEN
bool
+config CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ bool "Test caam rng"
+ select CRYPTO_DEV_FSL_CAAM_RNG_API
+ help
+ Selecting this will enable a self-test to run for the
+ caam RNG.
+ This test is several minutes long and executes
+ just before the RNG is registered with the hw_random API.
+
endif # CRYPTO_DEV_FSL_CAAM_JR
endif # CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 50eb55da45c2..b3d14a7f4dd1 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -172,6 +172,50 @@ static void caam_cleanup(struct hwrng *rng)
kfifo_free(&ctx->fifo);
}
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+static inline void test_len(struct hwrng *rng, size_t len, bool wait)
+{
+ u8 *buf;
+ int read_len;
+ struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
+ struct device *dev = ctx->ctrldev;
+
+	buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL);
+	if (!buf)
+		return;
+
+ while (len > 0) {
+ read_len = rng->read(rng, buf, len, wait);
+
+ if (read_len < 0 || (read_len == 0 && wait)) {
+ dev_err(dev, "RNG Read FAILED received %d bytes\n",
+ read_len);
+ kfree(buf);
+ return;
+ }
+
+ print_hex_dump_debug("random bytes@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ buf, read_len, 1);
+
+ len = len - read_len;
+ }
+
+ kfree(buf);
+}
+
+static inline void test_mode_once(struct hwrng *rng, bool wait)
+{
+ test_len(rng, 32, wait);
+ test_len(rng, 64, wait);
+ test_len(rng, 128, wait);
+}
+
+static void self_test(struct hwrng *rng)
+{
+ pr_info("Executing RNG SELF-TEST with wait\n");
+ test_mode_once(rng, true);
+}
+#endif
+
static int caam_init(struct hwrng *rng)
{
struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
@@ -258,6 +302,10 @@ int caam_rng_init(struct device *ctrldev)
return ret;
}
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ self_test(&ctx->rng);
+#endif
+
devres_close_group(ctrldev, caam_rng_init);
return 0;
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bedcc2ab3a00..ff9ddbbca377 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -79,6 +79,15 @@ static void build_deinstantiation_desc(u32 *desc, int handle)
append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
+static const struct of_device_id imx8m_machine_match[] = {
+ { .compatible = "fsl,imx8mm", },
+ { .compatible = "fsl,imx8mn", },
+ { .compatible = "fsl,imx8mp", },
+ { .compatible = "fsl,imx8mq", },
+ { .compatible = "fsl,imx8ulp", },
+ { }
+};
+
/*
* run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
* the software (no JR/QI used).
@@ -105,10 +114,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
* Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
* and the following steps should be performed regardless
*/
- of_machine_is_compatible("fsl,imx8mq") ||
- of_machine_is_compatible("fsl,imx8mm") ||
- of_machine_is_compatible("fsl,imx8mn") ||
- of_machine_is_compatible("fsl,imx8mp")) {
+ of_match_node(imx8m_machine_match, of_root)) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -344,16 +350,15 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
/*
* kick_trng - sets the various parameters for enabling the initialization
* of the RNG4 block in CAAM
- * @pdev - pointer to the platform device
+ * @dev - pointer to the controller device
* @ent_delay - Defines the length (in system clocks) of each entropy sample.
*/
-static void kick_trng(struct platform_device *pdev, int ent_delay)
+static void kick_trng(struct device *dev, int ent_delay)
{
- struct device *ctrldev = &pdev->dev;
- struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
struct caam_ctrl __iomem *ctrl;
struct rng4tst __iomem *r4tst;
- u32 val;
+ u32 val, rtsdctl;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
r4tst = &ctrl->r4tst[0];
@@ -369,26 +374,38 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
* Performance-wise, it does not make sense to
* set the delay to a value that is lower
* than the last one that worked (i.e. the state handles
- * were instantiated properly. Thus, instead of wasting
- * time trying to set the values controlling the sample
- * frequency, the function simply returns.
+ * were instantiated properly).
+ */
+ rtsdctl = rd_reg32(&r4tst->rtsdctl);
+ val = (rtsdctl & RTSDCTL_ENT_DLY_MASK) >> RTSDCTL_ENT_DLY_SHIFT;
+ if (ent_delay > val) {
+ val = ent_delay;
+ /* min. freq. count, equal to 1/4 of the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmin, val >> 2);
+ /* max. freq. count, equal to 16 times the entropy sample length */
+ wr_reg32(&r4tst->rtfrqmax, val << 4);
+ }
+
+ wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) |
+ RTSDCTL_SAMP_SIZE_VAL);
+
+ /*
+ * To avoid reprogramming the self-test parameters over and over again,
+ * use RTSDCTL[SAMP_SIZE] as an indicator.
*/
- val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
- >> RTSDCTL_ENT_DLY_SHIFT;
- if (ent_delay <= val)
- goto start_rng;
-
- val = rd_reg32(&r4tst->rtsdctl);
- val = (val & ~RTSDCTL_ENT_DLY_MASK) |
- (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
- wr_reg32(&r4tst->rtsdctl, val);
- /* min. freq. count, equal to 1/4 of the entropy sample length */
- wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
- /* disable maximum frequency count */
- wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
- /* read the control register */
- val = rd_reg32(&r4tst->rtmctl);
-start_rng:
+ if ((rtsdctl & RTSDCTL_SAMP_SIZE_MASK) != RTSDCTL_SAMP_SIZE_VAL) {
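+ /*
+ * Statistical check limits (monobit, poker and run-length
+ * tests) matching the 512 sample size programmed above.
+ */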
+ wr_reg32(&r4tst->rtscmisc, (2 << 16) | 32);
+ wr_reg32(&r4tst->rtpkrrng, 570);
+ wr_reg32(&r4tst->rtpkrmax, 1600);
+ wr_reg32(&r4tst->rtscml, (122 << 16) | 317);
+ wr_reg32(&r4tst->rtscrl[0], (80 << 16) | 107);
+ wr_reg32(&r4tst->rtscrl[1], (57 << 16) | 62);
+ wr_reg32(&r4tst->rtscrl[2], (39 << 16) | 39);
+ wr_reg32(&r4tst->rtscrl[3], (27 << 16) | 26);
+ wr_reg32(&r4tst->rtscrl[4], (19 << 16) | 18);
+ wr_reg32(&r4tst->rtscrl[5], (18 << 16) | 17);
+ }
+
/*
* select raw sampling in both entropy shifter
* and statistical checker; put RNG4 into run mode
@@ -618,10 +635,115 @@ static bool needs_entropy_delay_adjustment(void)
return false;
}
+static int caam_ctrl_rng_init(struct device *dev)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ int ret, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ u8 rng_vid;
+
+ if (ctrlpriv->era < 10) {
+ struct caam_perfmon __iomem *perfmon;
+
+ perfmon = ctrlpriv->total_jobrs ?
+ (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
+ (struct caam_perfmon __iomem *)&ctrl->perfmon;
+
+ rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ } else {
+ struct version_regs __iomem *vreg;
+
+ vreg = ctrlpriv->total_jobrs ?
+ (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
+ (struct version_regs __iomem *)&ctrl->vreg;
+
+ rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
+ CHA_VER_VID_SHIFT;
+ }
+
+ /*
+ * If SEC has RNG version >= 4 and the RNG state handle has not
+ * already been instantiated, do the RNG instantiation.
+ * On SoCs with a Management Complex, the RNG is managed by MC f/w.
+ */
+ if (!(ctrlpriv->mc_en && ctrlpriv->pr_support) && rng_vid >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&ctrl->r4tst[0].rdsta);
+ /*
+ * If the secure keys (TDKEK, JDKEK, TDSK), were already
+ * generated, signal this to the function that is instantiating
+ * the state handles. An error would occur if RNG4 attempts
+ * to regenerate these keys before the next POR.
+ */
+ gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+ ctrlpriv->rng4_sh_init &= RDSTA_MASK;
+ do {
+ int inst_handles =
+ rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
+ /*
+ * If either SH was instantiated by somebody else
+ * (e.g. u-boot) then it is assumed that the entropy
+ * parameters are properly set and thus the function
+ * setting these (kick_trng(...)) is skipped.
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
+ if (needs_entropy_delay_adjustment())
+ ent_delay = 12000;
+ if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ dev_info(dev,
+ "Entropy delay = %u\n",
+ ent_delay);
+ kick_trng(dev, ent_delay);
+ ent_delay += 400;
+ }
+ /*
+ * if instantiate_rng(...) fails, the loop will rerun
+ * and the kick_trng(...) function will modify the
+ * upper and lower limits of the entropy sampling
+ * interval, leading to a successful initialization of
+ * the RNG.
+ */
+ ret = instantiate_rng(dev, inst_handles,
+ gen_sk);
+ /*
+ * Entropy delay is determined via TRNG characterization.
+ * TRNG characterization is run across different voltages
+ * and temperatures.
+ * If worst case value for ent_dly is identified,
+ * the loop can be skipped for that platform.
+ */
+ if (needs_entropy_delay_adjustment())
+ break;
+ if (ret == -EAGAIN)
+ /*
+ * if here, the loop will rerun,
+ * so don't hog the CPU
+ */
+ cpu_relax();
+ } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
+ if (ret) {
+ dev_err(dev, "failed to instantiate RNG");
+ return ret;
+ }
+ /*
+ * Set handles initialized by this module as the complement of
+ * the already initialized ones
+ */
+ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
+
+ /* Enable RDB bit so that RNG works faster */
+ clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
+ }
+
+ return 0;
+}
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ int ret, ring;
u64 caam_id;
const struct soc_device_attribute *imx_soc_match;
struct device *dev;
@@ -631,10 +753,8 @@ static int caam_probe(struct platform_device *pdev)
struct caam_perfmon __iomem *perfmon;
struct dentry *dfs_root;
u32 scfgr, comp_params;
- u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
- bool pr_support = false;
bool reg_access = true;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
@@ -646,6 +766,9 @@ static int caam_probe(struct platform_device *pdev)
nprop = pdev->dev.of_node;
imx_soc_match = soc_device_match(caam_imx_soc_table);
+ if (!imx_soc_match && of_match_node(imx8m_machine_match, of_root))
+ return -EPROBE_DEFER;
+
caam_imx = (bool)imx_soc_match;
if (imx_soc_match) {
@@ -770,7 +893,8 @@ static int caam_probe(struct platform_device *pdev)
mc_version = fsl_mc_get_version();
if (mc_version)
- pr_support = check_version(mc_version, 10, 20, 0);
+ ctrlpriv->pr_support = check_version(mc_version, 10, 20, 0);
else
return -EPROBE_DEFER;
}
@@ -861,9 +985,6 @@ set_dma_mask:
return -ENOMEM;
}
- if (!reg_access)
- goto report_live;
-
comp_params = rd_reg32(&perfmon->comp_parms_ls);
ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);
@@ -873,8 +994,6 @@ set_dma_mask:
* check both here.
*/
if (ctrlpriv->era < 10) {
- rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
- CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
(rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK);
} else {
@@ -884,91 +1003,16 @@ set_dma_mask:
(struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
(struct version_regs __iomem *)&ctrl->vreg;
- rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
- CHA_VER_VID_SHIFT;
ctrlpriv->blob_present = ctrlpriv->blob_present &&
(rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK);
}
- /*
- * If SEC has RNG version >= 4 and RNG state handle has not been
- * already instantiated, do RNG instantiation
- * In case of SoCs with Management Complex, RNG is managed by MC f/w.
- */
- if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) {
- ctrlpriv->rng4_sh_init =
- rd_reg32(&ctrl->r4tst[0].rdsta);
- /*
- * If the secure keys (TDKEK, JDKEK, TDSK), were already
- * generated, signal this to the function that is instantiating
- * the state handles. An error would occur if RNG4 attempts
- * to regenerate these keys before the next POR.
- */
- gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
- ctrlpriv->rng4_sh_init &= RDSTA_MASK;
- do {
- int inst_handles =
- rd_reg32(&ctrl->r4tst[0].rdsta) &
- RDSTA_MASK;
- /*
- * If either SH were instantiated by somebody else
- * (e.g. u-boot) then it is assumed that the entropy
- * parameters are properly set and thus the function
- * setting these (kick_trng(...)) is skipped.
- * Also, if a handle was instantiated, do not change
- * the TRNG parameters.
- */
- if (needs_entropy_delay_adjustment())
- ent_delay = 12000;
- if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
- dev_info(dev,
- "Entropy delay = %u\n",
- ent_delay);
- kick_trng(pdev, ent_delay);
- ent_delay += 400;
- }
- /*
- * if instantiate_rng(...) fails, the loop will rerun
- * and the kick_trng(...) function will modify the
- * upper and lower limits of the entropy sampling
- * interval, leading to a successful initialization of
- * the RNG.
- */
- ret = instantiate_rng(dev, inst_handles,
- gen_sk);
- /*
- * Entropy delay is determined via TRNG characterization.
- * TRNG characterization is run across different voltages
- * and temperatures.
- * If worst case value for ent_dly is identified,
- * the loop can be skipped for that platform.
- */
- if (needs_entropy_delay_adjustment())
- break;
- if (ret == -EAGAIN)
- /*
- * if here, the loop will rerun,
- * so don't hog the CPU
- */
- cpu_relax();
- } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
- if (ret) {
- dev_err(dev, "failed to instantiate RNG");
+ if (reg_access) {
+ ret = caam_ctrl_rng_init(dev);
+ if (ret)
return ret;
- }
- /*
- * Set handles initialized by this module as the complement of
- * the already initialized ones
- */
- ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
-
- /* Enable RDB bit so that RNG works faster */
- clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
}
-report_live:
- /* NOTE: RTIC detection ought to go here, around Si time */
-
caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
(u64)rd_reg32(&perfmon->caam_id_ls);
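
kick_trng() now always rewrites RTSDCTL and derives the frequency-count limits from the entropy delay. A minimal sketch of that arithmetic, reusing the shift and sample-size constants from regs.h (printf() stands in for wr_reg32(); not part of the patch):

    /*
     * Sketch of the kick_trng() arithmetic: pack the entropy delay and
     * sample size into RTSDCTL and derive the frequency-count limits.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RTSDCTL_ENT_DLY_SHIFT   16
    #define RTSDCTL_SAMP_SIZE_VAL   512

    int main(void)
    {
            uint32_t ent_delay = 3200;      /* RTSDCTL_ENT_DLY_MIN */
            uint32_t rtsdctl = (ent_delay << RTSDCTL_ENT_DLY_SHIFT) |
                               RTSDCTL_SAMP_SIZE_VAL;

            printf("rtsdctl  = 0x%08" PRIx32 "\n", rtsdctl);
            printf("rtfrqmin = %" PRIu32 "\n", ent_delay >> 2); /* 1/4 sample len */
            printf("rtfrqmax = %" PRIu32 "\n", ent_delay << 4); /* 16x sample len */
            return 0;
    }
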
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 86ed1b91c22d..b4f7bf77f487 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -95,6 +95,7 @@ struct caam_drv_private {
u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
u8 optee_en; /* Nonzero if OP-TEE f/w is active */
+ bool pr_support; /* RNG prediction resistance available */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
int era; /* CAAM Era (internal HW revision) */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 66928f8a0c4b..189e74c21f0c 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -3,7 +3,7 @@
* CAAM hardware register-level view
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018, 2023 NXP
*/
#ifndef REGS_H
@@ -523,6 +523,8 @@ struct rng4tst {
#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
#define RTSDCTL_ENT_DLY_MIN 3200
#define RTSDCTL_ENT_DLY_MAX 12800
+#define RTSDCTL_SAMP_SIZE_MASK 0xffff
+#define RTSDCTL_SAMP_SIZE_VAL 512
u32 rtsdctl; /* seed control register */
union {
u32 rtsblim; /* PRGM=1: sparse bit limit register */
@@ -534,7 +536,15 @@ struct rng4tst {
u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
u32 rtfrqcnt; /* PRGM=0: freq. count register */
};
- u32 rsvd1[40];
+ union {
+ u32 rtscmc; /* statistical check run monobit count */
+ u32 rtscml; /* statistical check run monobit limit */
+ };
+ union {
+ u32 rtscrc[6]; /* statistical check run length count */
+ u32 rtscrl[6]; /* statistical check run length limit */
+ };
+ u32 rsvd1[33];
#define RDSTA_SKVT 0x80000000
#define RDSTA_SKVN 0x40000000
#define RDSTA_PR0 BIT(4)
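
The two new unions take seven 32-bit words out of the previously reserved area, so rsvd1 shrinks from 40 to 33 entries and the register map keeps its size. A compile-time sketch of that invariant, using a reduced stand-in for the tail of struct rng4tst (not part of the patch):

    /*
     * Compile-time check that carving the statistical-check registers
     * out of the reserved area leaves the register block the same size.
     */
    #include <stdint.h>

    struct tail_old {
            uint32_t rsvd1[40];
    };

    struct tail_new {
            union {
                    uint32_t rtscmc;        /* monobit count */
                    uint32_t rtscml;        /* monobit limit */
            };
            union {
                    uint32_t rtscrc[6];     /* run length count */
                    uint32_t rtscrl[6];     /* run length limit */
            };
            uint32_t rsvd1[33];
    };

    _Static_assert(sizeof(struct tail_old) == sizeof(struct tail_new),
                   "statistical-check registers must fit in the reserved area");

    int main(void) { return 0; }
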
diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c
index 939c924fc383..94367bc49e35 100644
--- a/drivers/crypto/ccp/platform-access.c
+++ b/drivers/crypto/ccp/platform-access.c
@@ -67,6 +67,11 @@ int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
return -ENODEV;
pa_dev = psp->platform_access_data;
+
+ if (!pa_dev->vdata->cmdresp_reg || !pa_dev->vdata->cmdbuff_addr_lo_reg ||
+ !pa_dev->vdata->cmdbuff_addr_hi_reg)
+ return -ENODEV;
+
cmd = psp->io_regs + pa_dev->vdata->cmdresp_reg;
lo = psp->io_regs + pa_dev->vdata->cmdbuff_addr_lo_reg;
hi = psp->io_regs + pa_dev->vdata->cmdbuff_addr_hi_reg;
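
With pa_v2 providing only the doorbell registers, the message path has to treat zero register offsets as "service absent". A reduced, runnable sketch of that guard (types and offsets illustrative; not part of the patch):

    /*
     * Sketch of the optional-register guard: a vdata table may leave
     * some register offsets at zero, and callers must treat that as
     * "not present" rather than dereferencing io_regs + 0.
     */
    #include <errno.h>
    #include <stdio.h>

    struct pa_vdata {
            unsigned int cmdresp_reg;
            unsigned int cmdbuff_addr_lo_reg;
            unsigned int cmdbuff_addr_hi_reg;
    };

    static int send_msg(const struct pa_vdata *v)
    {
            if (!v->cmdresp_reg || !v->cmdbuff_addr_lo_reg ||
                !v->cmdbuff_addr_hi_reg)
                    return -ENODEV; /* mailbox not wired up on this SKU */
            /* ... program cmd/lo/hi registers here ... */
            return 0;
    }

    int main(void)
    {
            struct pa_vdata v1 = { 0x10570, 0x10574, 0x10578 };
            struct pa_vdata v2 = { 0 };     /* doorbell-only variant */

            printf("v1: %d\n", send_msg(&v1));      /* 0 */
            printf("v2: %d\n", send_msg(&v2));      /* -ENODEV */
            return 0;
    }
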
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index aa15bc4cac2b..b603ad9b8341 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -361,6 +361,14 @@ static const struct tee_vdata teev1 = {
.ring_rptr_reg = 0x10554, /* C2PMSG_21 */
};
+static const struct tee_vdata teev2 = {
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .ring_wptr_reg = 0x10950, /* C2PMSG_20 */
+ .ring_rptr_reg = 0x10954, /* C2PMSG_21 */
+};
+
static const struct platform_access_vdata pa_v1 = {
.cmdresp_reg = 0x10570, /* C2PMSG_28 */
.cmdbuff_addr_lo_reg = 0x10574, /* C2PMSG_29 */
@@ -369,6 +377,11 @@ static const struct platform_access_vdata pa_v1 = {
.doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */
};
+static const struct platform_access_vdata pa_v2 = {
+ .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */
+ .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */
+};
+
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
.feature_reg = 0x105fc, /* C2PMSG_63 */
@@ -399,6 +412,22 @@ static const struct psp_vdata pspv4 = {
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
};
+static const struct psp_vdata pspv5 = {
+ .tee = &teev2,
+ .platform_access = &pa_v2,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
+};
+
+static const struct psp_vdata pspv6 = {
+ .sev = &sevv2,
+ .tee = &teev2,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
+};
+
#endif
static const struct sp_dev_vdata dev_vdata[] = {
@@ -453,6 +482,18 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv3,
#endif
},
+ { /* 7 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv5,
+#endif
+ },
+ { /* 8 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv6,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
@@ -463,6 +504,8 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
+ { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
{ 0, }
};
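
The two new PCI IDs select their per-device data through the index stored in driver_data. A reduced sketch of that table lookup (values illustrative; not part of the patch):

    /*
     * Sketch of the driver_data lookup used in the PCI probe path: the
     * ID table stores an index (cast through kernel_ulong_t upstream)
     * that selects the per-device vdata.
     */
    #include <stdio.h>

    struct sp_dev_vdata { int bar; const char *psp; };

    static const struct sp_dev_vdata dev_vdata[] = {
            [7] = { .bar = 2, .psp = "pspv5" },
            [8] = { .bar = 2, .psp = "pspv6" },
    };

    struct pci_id { unsigned int device; unsigned long driver_data; };

    static const struct pci_id sp_pci_table[] = {
            { 0x17E0, 7 },
            { 0x156E, 8 },
    };

    int main(void)
    {
            for (unsigned int i = 0;
                 i < sizeof(sp_pci_table) / sizeof(*sp_pci_table); i++) {
                    const struct sp_dev_vdata *v =
                            &dev_vdata[sp_pci_table[i].driver_data];

                    printf("dev 0x%04x -> bar %d, %s\n",
                           sp_pci_table[i].device, v->bar, v->psp);
            }
            return 0;
    }
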
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index e8690c223584..4137a8bf131f 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -82,10 +82,3 @@ config CRYPTO_DEV_HISI_TRNG
select CRYPTO_RNG
help
Support for HiSilicon TRNG Driver.
-
-config CRYPTO_DEV_HISTB_TRNG
- tristate "Support for HiSTB TRNG Driver"
- depends on ARCH_HISI || COMPILE_TEST
- select HW_RANDOM
- help
- Support for HiSTB TRNG Driver.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
index fc51e0edec69..8595a5a5d228 100644
--- a/drivers/crypto/hisilicon/Makefile
+++ b/drivers/crypto/hisilicon/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_CRYPTO_DEV_HISI_SEC2) += sec2/
obj-$(CONFIG_CRYPTO_DEV_HISI_QM) += hisi_qm.o
hisi_qm-objs = qm.o sgl.o debugfs.o
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += zip/
-obj-y += trng/
+obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += trng/
diff --git a/drivers/crypto/hisilicon/trng/Makefile b/drivers/crypto/hisilicon/trng/Makefile
index cf20b057c66b..d909079f351c 100644
--- a/drivers/crypto/hisilicon/trng/Makefile
+++ b/drivers/crypto/hisilicon/trng/Makefile
@@ -1,5 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_TRNG) += hisi-trng-v2.o
hisi-trng-v2-objs = trng.o
-
-obj-$(CONFIG_CRYPTO_DEV_HISTB_TRNG) += histb-trng.o
-histb-trng-objs += trng-stb.o
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index ed15379a9818..4a18095ae5d8 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -1175,9 +1175,9 @@ static int aead_perform(struct aead_request *req, int encrypt,
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
- crypt->icv_rev_aes = dma;
if (unlikely(!req_ctx->hmac_virt))
goto free_buf_dst;
+ crypt->icv_rev_aes = dma;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
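
The fix above records the DMA handle only after the allocation has been checked, so the error path never sees a handle for a failed allocation. A generic sketch of the allocate/check/record ordering, with malloc() standing in for dma_pool_alloc() (not part of the patch):

    /*
     * Sketch of the ordering the ixp4xx fix enforces: only publish a
     * handle once the allocation is known to have succeeded.
     */
    #include <stdlib.h>

    struct crypt_ctl { void *icv_rev_aes; };

    static int setup(struct crypt_ctl *crypt)
    {
            void *virt = malloc(12);        /* stand-in allocation */

            if (!virt)
                    return -1;              /* handle never recorded */
            crypt->icv_rev_aes = virt;      /* record only after check */
            return 0;
    }

    int main(void)
    {
            struct crypt_ctl crypt = { 0 };

            if (setup(&crypt))
                    return EXIT_FAILURE;
            free(crypt.icv_rev_aes);
            return EXIT_SUCCESS;
    }
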
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 7324b86a4f40..e543a9e24a06 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -11,37 +11,76 @@
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
+enum adf_fw_objs {
+ ADF_FW_SYM_OBJ,
+ ADF_FW_ASYM_OBJ,
+ ADF_FW_DC_OBJ,
+ ADF_FW_ADMIN_OBJ,
+};
+
+static const char * const adf_4xxx_fw_objs[] = {
+ [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ,
+ [ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_4XXX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_4XXX_ADMIN_OBJ,
+};
+
+static const char * const adf_402xx_fw_objs[] = {
+ [ADF_FW_SYM_OBJ] = ADF_402XX_SYM_OBJ,
+ [ADF_FW_ASYM_OBJ] = ADF_402XX_ASYM_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_402XX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
+};
+
struct adf_fw_config {
u32 ae_mask;
- char *obj_name;
+ enum adf_fw_objs obj;
};
-static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
- {0xF0, ADF_4XXX_SYM_OBJ},
- {0xF, ADF_4XXX_ASYM_OBJ},
- {0x100, ADF_4XXX_ADMIN_OBJ},
+static const struct adf_fw_config adf_fw_cy_config[] = {
+ {0xF0, ADF_FW_SYM_OBJ},
+ {0xF, ADF_FW_ASYM_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
};
-static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
- {0xF0, ADF_4XXX_DC_OBJ},
- {0xF, ADF_4XXX_DC_OBJ},
- {0x100, ADF_4XXX_ADMIN_OBJ},
+static const struct adf_fw_config adf_fw_dc_config[] = {
+ {0xF0, ADF_FW_DC_OBJ},
+ {0xF, ADF_FW_DC_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
};
-static struct adf_fw_config adf_402xx_fw_cy_config[] = {
- {0xF0, ADF_402XX_SYM_OBJ},
- {0xF, ADF_402XX_ASYM_OBJ},
- {0x100, ADF_402XX_ADMIN_OBJ},
+static const struct adf_fw_config adf_fw_sym_config[] = {
+ {0xF0, ADF_FW_SYM_OBJ},
+ {0xF, ADF_FW_SYM_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
};
-static struct adf_fw_config adf_402xx_fw_dc_config[] = {
- {0xF0, ADF_402XX_DC_OBJ},
- {0xF, ADF_402XX_DC_OBJ},
- {0x100, ADF_402XX_ADMIN_OBJ},
+static const struct adf_fw_config adf_fw_asym_config[] = {
+ {0xF0, ADF_FW_ASYM_OBJ},
+ {0xF, ADF_FW_ASYM_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
};
+static const struct adf_fw_config adf_fw_asym_dc_config[] = {
+ {0xF0, ADF_FW_ASYM_OBJ},
+ {0xF, ADF_FW_DC_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_dc_config[] = {
+ {0xF0, ADF_FW_SYM_OBJ},
+ {0xF, ADF_FW_DC_OBJ},
+ {0x100, ADF_FW_ADMIN_OBJ},
+};
+
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
+static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
+
/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map_cy[ADF_4XXX_MAX_ACCELENGINES] = {
+static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
0x5555555, 0x5555555, 0x5555555, 0x5555555,
0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
0x0
@@ -61,12 +100,26 @@ static struct adf_hw_device_class adf_4xxx_class = {
enum dev_services {
SVC_CY = 0,
+ SVC_CY2,
SVC_DC,
+ SVC_SYM,
+ SVC_ASYM,
+ SVC_DC_ASYM,
+ SVC_ASYM_DC,
+ SVC_DC_SYM,
+ SVC_SYM_DC,
};
static const char *const dev_cfg_services[] = {
[SVC_CY] = ADF_CFG_CY,
+ [SVC_CY2] = ADF_CFG_ASYM_SYM,
[SVC_DC] = ADF_CFG_DC,
+ [SVC_SYM] = ADF_CFG_SYM,
+ [SVC_ASYM] = ADF_CFG_ASYM,
+ [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
+ [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
+ [SVC_DC_SYM] = ADF_CFG_DC_SYM,
+ [SVC_SYM_DC] = ADF_CFG_SYM_DC,
};
static int get_service_enabled(struct adf_accel_dev *accel_dev)
@@ -156,45 +209,50 @@ static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
- u32 capabilities_cy, capabilities_dc;
+ u32 capabilities_sym, capabilities_asym, capabilities_dc;
u32 fusectl1;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
- capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
- ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CIPHER |
ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
ICP_ACCEL_CAPABILITIES_SHA3 |
ICP_ACCEL_CAPABILITIES_SHA3_EXT |
ICP_ACCEL_CAPABILITIES_HKDF |
- ICP_ACCEL_CAPABILITIES_ECEDMONT |
ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
ICP_ACCEL_CAPABILITIES_AES_V2;
/* A set bit in fusectl1 means the feature is OFF in this SKU */
if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
+
if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
+
if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
+
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
- capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
}
capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
@@ -211,12 +269,23 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
switch (get_service_enabled(accel_dev)) {
case SVC_CY:
- return capabilities_cy;
+ case SVC_CY2:
+ return capabilities_sym | capabilities_asym;
case SVC_DC:
return capabilities_dc;
+ case SVC_SYM:
+ return capabilities_sym;
+ case SVC_ASYM:
+ return capabilities_asym;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return capabilities_asym | capabilities_dc;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return capabilities_sym | capabilities_dc;
+ default:
+ return 0;
}
-
- return 0;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
@@ -227,13 +296,11 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
switch (get_service_enabled(accel_dev)) {
- case SVC_CY:
- return thrd_to_arb_map_cy;
case SVC_DC:
return thrd_to_arb_map_dc;
+ default:
+ return default_thrd_to_arb_map;
}
-
- return NULL;
}
static void get_arb_info(struct arb_info *arb_info)
@@ -304,47 +371,83 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
static u32 uof_get_num_objs(void)
{
- BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
- ARRAY_SIZE(adf_4xxx_fw_dc_config),
- "Size mismatch between adf_4xxx_fw_*_config arrays");
-
- return ARRAY_SIZE(adf_4xxx_fw_cy_config);
+ return ARRAY_SIZE(adf_fw_cy_config);
}
-static char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+ const char * const fw_objs[], int num_objs)
{
+ int id;
+
switch (get_service_enabled(accel_dev)) {
case SVC_CY:
- return adf_4xxx_fw_cy_config[obj_num].obj_name;
+ case SVC_CY2:
+ id = adf_fw_cy_config[obj_num].obj;
+ break;
case SVC_DC:
- return adf_4xxx_fw_dc_config[obj_num].obj_name;
+ id = adf_fw_dc_config[obj_num].obj;
+ break;
+ case SVC_SYM:
+ id = adf_fw_sym_config[obj_num].obj;
+ break;
+ case SVC_ASYM:
+ id = adf_fw_asym_config[obj_num].obj;
+ break;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ id = adf_fw_asym_dc_config[obj_num].obj;
+ break;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ id = adf_fw_sym_dc_config[obj_num].obj;
+ break;
+ default:
+ id = -EINVAL;
+ break;
}
- return NULL;
+ if (id < 0 || id >= num_objs)
+ return NULL;
+
+ return fw_objs[id];
}
-static char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+static const char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
- switch (get_service_enabled(accel_dev)) {
- case SVC_CY:
- return adf_402xx_fw_cy_config[obj_num].obj_name;
- case SVC_DC:
- return adf_402xx_fw_dc_config[obj_num].obj_name;
- }
+ int num_fw_objs = ARRAY_SIZE(adf_4xxx_fw_objs);
- return NULL;
+ return uof_get_name(accel_dev, obj_num, adf_4xxx_fw_objs, num_fw_objs);
+}
+
+static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_402xx_fw_objs);
+
+ return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs);
}
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
switch (get_service_enabled(accel_dev)) {
case SVC_CY:
- return adf_4xxx_fw_cy_config[obj_num].ae_mask;
+ return adf_fw_cy_config[obj_num].ae_mask;
case SVC_DC:
- return adf_4xxx_fw_dc_config[obj_num].ae_mask;
+ return adf_fw_dc_config[obj_num].ae_mask;
+ case SVC_CY2:
+ return adf_fw_cy_config[obj_num].ae_mask;
+ case SVC_SYM:
+ return adf_fw_sym_config[obj_num].ae_mask;
+ case SVC_ASYM:
+ return adf_fw_asym_config[obj_num].ae_mask;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return adf_fw_asym_dc_config[obj_num].ae_mask;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return adf_fw_sym_dc_config[obj_num].ae_mask;
+ default:
+ return 0;
}
-
- return 0;
}
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
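
The refactor replaces per-SKU config arrays with shared service configs that store an enum ID, which each SKU then resolves through its own name table. A reduced sketch of the bounds-checked lookup (filenames illustrative; not part of the patch):

    /*
     * Sketch of the enum-indexed firmware-name lookup: service configs
     * store an enum ID, and each SKU supplies its own name table.
     */
    #include <stdio.h>

    enum fw_objs { FW_SYM, FW_ASYM, FW_DC, FW_ADMIN, FW_MAX };

    static const char * const fw_4xxx[] = {
            [FW_SYM]   = "sym_obj.bin",     /* illustrative names */
            [FW_ASYM]  = "asym_obj.bin",
            [FW_DC]    = "dc_obj.bin",
            [FW_ADMIN] = "admin_obj.bin",
    };

    static const char *uof_name(int id, const char * const objs[], int num)
    {
            if (id < 0 || id >= num)        /* bounds-checked index */
                    return NULL;
            return objs[id];
    }

    int main(void)
    {
            printf("%s\n", uof_name(FW_ASYM, fw_4xxx, FW_MAX));
            printf("%p\n", (void *)uof_name(FW_MAX, fw_4xxx, FW_MAX)); /* NULL */
            return 0;
    }
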
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index 085e259c245a..e5b314d2b60e 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -72,7 +72,7 @@ enum icp_qat_4xxx_slice_mask {
ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
- ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
+ ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7),
};
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index ceb87327a5fe..1a15600361d0 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -7,6 +7,7 @@
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
#include "adf_4xxx_hw_data.h"
#include "qat_compression.h"
@@ -24,11 +25,25 @@ MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
enum configs {
DEV_CFG_CY = 0,
DEV_CFG_DC,
+ DEV_CFG_SYM,
+ DEV_CFG_ASYM,
+ DEV_CFG_ASYM_SYM,
+ DEV_CFG_ASYM_DC,
+ DEV_CFG_DC_ASYM,
+ DEV_CFG_SYM_DC,
+ DEV_CFG_DC_SYM,
};
static const char * const services_operations[] = {
ADF_CFG_CY,
ADF_CFG_DC,
+ ADF_CFG_SYM,
+ ADF_CFG_ASYM,
+ ADF_CFG_ASYM_SYM,
+ ADF_CFG_ASYM_DC,
+ ADF_CFG_DC_ASYM,
+ ADF_CFG_SYM_DC,
+ ADF_CFG_DC_SYM,
};
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
@@ -37,8 +52,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
adf_clean_hw_data_4xxx(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -241,6 +256,21 @@ err:
return ret;
}
+static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ unsigned long val;
+ int ret;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+}
+
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
@@ -265,11 +295,15 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
switch (ret) {
case DEV_CFG_CY:
+ case DEV_CFG_ASYM_SYM:
ret = adf_crypto_dev_config(accel_dev);
break;
case DEV_CFG_DC:
ret = adf_comp_dev_config(accel_dev);
break;
+ default:
+ ret = adf_no_dev_config(accel_dev);
+ break;
}
if (ret)
@@ -289,7 +323,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
struct adf_bar *bar;
@@ -348,12 +381,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -410,6 +437,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index bb4dca735ab5..468c9102093f 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_c3xxx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -65,8 +66,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -75,7 +76,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -142,12 +142,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -199,6 +193,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_free_reg;
}
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
index e8cc10f64134..d5a0ecca9d0b 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_c3xxxvf_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -64,8 +65,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
@@ -76,7 +77,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -123,12 +123,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -173,6 +167,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index ca18ae14c099..0186921be936 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_c62x_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -65,8 +66,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -75,7 +76,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -142,12 +142,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -199,6 +193,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_free_reg;
}
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
index 37566309df94..c9ae6c0d0dca 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_c62xvf_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -64,8 +65,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
@@ -76,7 +77,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -123,12 +123,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -173,6 +167,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 1fb8d50f509f..38de3aba6e8c 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -27,7 +27,9 @@ intel_qat-objs := adf_cfg.o \
qat_hal.o \
qat_bl.o
-intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
+ adf_dbgfs.o
+
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index bd19e6460899..0399417b91fc 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -202,7 +202,7 @@ struct adf_hw_device_data {
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
- char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_num_objs)(void);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
index 4ce2b666929e..6be064dc64c8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
@@ -13,7 +13,7 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct icp_qat_fw_loader_handle *loader;
- char *obj_name;
+ const char *obj_name;
u32 num_objs;
u32 ae_mask;
int i;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 3b6184c35081..118775ee02f2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -286,7 +286,6 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
-EXPORT_SYMBOL_GPL(adf_init_admin_pm);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
index 1931e5b37f2b..8836f015c39c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
@@ -74,15 +74,30 @@ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
INIT_LIST_HEAD(&dev_cfg_data->sec_list);
init_rwsem(&dev_cfg_data->lock);
accel_dev->cfg = dev_cfg_data;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
- /* accel_dev->debugfs_dir should always be non-NULL here */
- dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ dev_cfg_data->debug = debugfs_create_file("dev_cfg", 0400,
accel_dev->debugfs_dir,
dev_cfg_data,
&qat_dev_cfg_fops);
- return 0;
}
-EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+
+void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+ if (!dev_cfg_data)
+ return;
+
+ debugfs_remove(dev_cfg_data->debug);
+ dev_cfg_data->debug = NULL;
+}
static void adf_cfg_section_del_all(struct list_head *head);
@@ -116,7 +131,6 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
- debugfs_remove(dev_cfg_data->debug);
kfree(dev_cfg_data);
accel_dev->cfg = NULL;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
index 376cde61a60e..c0c9052b2213 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
@@ -31,6 +31,8 @@ struct adf_cfg_device_data {
int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev);
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 5d8c3bdb258c..3ae1e5caee0e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -25,7 +25,15 @@
#define ADF_DC "Dc"
#define ADF_CFG_DC "dc"
#define ADF_CFG_CY "sym;asym"
+#define ADF_CFG_SYM "sym"
+#define ADF_CFG_ASYM "asym"
+#define ADF_CFG_ASYM_SYM "asym;sym"
+#define ADF_CFG_ASYM_DC "asym;dc"
+#define ADF_CFG_DC_ASYM "dc;asym"
+#define ADF_CFG_SYM_DC "sym;dc"
+#define ADF_CFG_DC_SYM "dc;sym"
#define ADF_SERVICES_ENABLED "ServicesEnabled"
+#define ADF_PM_IDLE_SUPPORT "PmIdleSupport"
#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index db79759bee61..b8132eb9bc2a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -187,7 +187,7 @@ void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle);
int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
int mem_size);
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
- void *addr_ptr, u32 mem_size, char *obj_name);
+ void *addr_ptr, u32 mem_size, const char *obj_name);
int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
unsigned int cfg_ae_mask);
int adf_init_misc_wq(void);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
new file mode 100644
index 000000000000..d0a2f892e6eb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+#include "adf_dbgfs.h"
+
+/**
+ * adf_dbgfs_init() - add persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function creates debugfs entries that are persistent through a device
+ * state change (from up to down or vice versa).
+ */
+void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
+{
+ char name[ADF_DEVICE_NAME_LENGTH];
+ void *ret;
+
+ /* Create dev top level debugfs entry */
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ accel_dev->hw_device->dev_class->name,
+ pci_name(accel_dev->accel_pci_dev.pci_dev));
+
+ ret = debugfs_create_dir(name, NULL);
+ if (IS_ERR_OR_NULL(ret))
+ return;
+
+ accel_dev->debugfs_dir = ret;
+
+ adf_cfg_dev_dbgfs_add(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_dbgfs_init);
+
+/**
+ * adf_dbgfs_exit() - remove persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ */
+void adf_dbgfs_exit(struct adf_accel_dev *accel_dev)
+{
+ adf_cfg_dev_dbgfs_rm(accel_dev);
+ debugfs_remove(accel_dev->debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(adf_dbgfs_exit);
+
+/**
+ * adf_dbgfs_add() - add non-persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function creates debugfs entries that are not persistent through
+ * a device state change (from up to down or vice versa).
+ */
+void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->debugfs_dir)
+ return;
+}
+
+/**
+ * adf_dbgfs_rm() - remove non-persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ */
+void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->debugfs_dir)
+ return;
+}
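
adf_dbgfs.c splits debugfs entries into a persistent set (init/exit, tied to probe/remove) and a transient set (add/rm, tied to each up/down transition). A stub sketch of the intended call ordering (all functions are stand-ins; not part of the patch):

    /*
     * Stub sketch of the debugfs entry lifecycle introduced here:
     * init/exit bracket the whole device lifetime, add/rm bracket
     * each up/down transition.
     */
    #include <stdio.h>

    static void dbgfs_init(void) { puts("create persistent entries"); }
    static void dbgfs_add(void)  { puts("create transient entries"); }
    static void dbgfs_rm(void)   { puts("remove transient entries"); }
    static void dbgfs_exit(void) { puts("remove persistent entries"); }

    int main(void)
    {
            dbgfs_init();   /* probe: top-level dir + dev_cfg */
            dbgfs_add();    /* adf_dev_start() */
            dbgfs_rm();     /* adf_dev_stop() */
            dbgfs_add();    /* device brought up again */
            dbgfs_rm();
            dbgfs_exit();   /* remove: everything torn down */
            return 0;
    }
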
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.h
new file mode 100644
index 000000000000..e0cb2c2a2ed0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_DBGFS_H
+#define ADF_DBGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+void adf_dbgfs_init(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_rm(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_exit(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_dbgfs_exit(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
index 7037c0892a8a..34c6cd8e27c0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
@@ -23,15 +23,25 @@ struct adf_gen4_pm_data {
static int send_host_msg(struct adf_accel_dev *accel_dev)
{
+ char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ bool pm_idle_support;
u32 msg;
+ int ret;
msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
if (msg & ADF_GEN4_PM_MSG_PENDING)
return -EBUSY;
+ adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_PM_IDLE_SUPPORT, pm_idle_support_cfg);
+ ret = kstrtobool(pm_idle_support_cfg, &pm_idle_support);
+ if (ret)
+ pm_idle_support = true;
+
/* Send HOST_MSG */
- msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
+ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK,
+ pm_idle_support ? PM_SET_MIN : PM_NO_CHANGE);
msg |= ADF_GEN4_PM_MSG_PENDING;
ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
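
send_host_msg() now reads PmIdleSupport and falls back to "enabled" whenever the key is missing or unparsable. A sketch of that parse-with-default pattern, with parse_bool() standing in for kstrtobool() (not part of the patch):

    /*
     * Sketch of the "default to enabled" parse in send_host_msg(): if
     * the config value is missing or malformed, treat idle support as on.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static int parse_bool(const char *s, bool *res)
    {
            if (!strcmp(s, "1") || !strcmp(s, "y")) { *res = true;  return 0; }
            if (!strcmp(s, "0") || !strcmp(s, "n")) { *res = false; return 0; }
            return -1;      /* unparsable, like kstrtobool() -EINVAL */
    }

    static bool pm_idle_support(const char *cfg)
    {
            bool val;

            if (parse_bool(cfg, &val))
                    return true;    /* missing/garbled key: keep idle on */
            return val;
    }

    int main(void)
    {
            printf("%d %d %d\n", pm_idle_support("0"), pm_idle_support("1"),
                   pm_idle_support(""));    /* 0 1 1 */
            return 0;
    }
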
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
index f8f8a9ee29e5..dd112923e006 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
@@ -37,6 +37,7 @@
#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0)
#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7)
+#define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1)
int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 0985f64ab11a..826179c98524 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -7,6 +7,7 @@
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
+#include "adf_dbgfs.h"
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
@@ -216,6 +217,9 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
return -EFAULT;
}
+
+ adf_dbgfs_add(accel_dev);
+
return 0;
}
@@ -240,6 +244,8 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
!test_bit(ADF_STATUS_STARTING, &accel_dev->status))
return;
+ adf_dbgfs_rm(accel_dev);
+
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index 3eb6611ab1b1..a74d2f930367 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -78,6 +78,13 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
static const char * const services_operations[] = {
ADF_CFG_CY,
ADF_CFG_DC,
+ ADF_CFG_SYM,
+ ADF_CFG_ASYM,
+ ADF_CFG_ASYM_SYM,
+ ADF_CFG_ASYM_DC,
+ ADF_CFG_DC_ASYM,
+ ADF_CFG_SYM_DC,
+ ADF_CFG_DC_SYM,
};
static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
@@ -145,12 +152,65 @@ static ssize_t cfg_services_store(struct device *dev, struct device_attribute *a
return count;
}
+static ssize_t pm_idle_enabled_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char pm_idle_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_PM_IDLE_SUPPORT, pm_idle_enabled);
+ if (ret)
+ return sysfs_emit(buf, "1\n");
+
+ return sysfs_emit(buf, "%s\n", pm_idle_enabled);
+}
+
+static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long pm_idle_enabled_cfg_val;
+ struct adf_accel_dev *accel_dev;
+ bool pm_idle_enabled;
+ int ret;
+
+ ret = kstrtobool(buf, &pm_idle_enabled);
+ if (ret)
+ return ret;
+
+ pm_idle_enabled_cfg_val = pm_idle_enabled;
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(dev, "Device qat_dev%d must be down to set pm_idle_enabled.\n",
+ accel_dev->accel_id);
+ return -EINVAL;
+ }
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_PM_IDLE_SUPPORT, &pm_idle_enabled_cfg_val,
+ ADF_DEC);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(pm_idle_enabled);
+
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);
static struct attribute *qat_attrs[] = {
&dev_attr_state.attr,
&dev_attr_cfg_services.attr,
+ &dev_attr_pm_idle_enabled.attr,
NULL,
};
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index 4042739bb6fa..a65059e56248 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -87,8 +87,7 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3),
ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4),
ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5),
- ICP_ACCEL_CAPABILITIES_LZS_COMPRESSION = BIT(6),
- ICP_ACCEL_CAPABILITIES_RAND = BIT(7),
+ /* Bits 6-7 are currently reserved */
ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
/* Bits 10-11 are currently reserved */
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 538dcbfbcd26..3c4bba4a8779 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -106,7 +106,6 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
default:
return -EFAULT;
}
- return -EFAULT;
}
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
index 935a7e012946..4128200a9032 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
@@ -170,15 +170,14 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
}
areq->dst_len = req->ctx.dh->p_size;
+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+ DMA_FROM_DEVICE);
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
kfree_sensitive(req->dst_align);
}
- dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
- DMA_FROM_DEVICE);
-
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
@@ -521,12 +520,14 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- kfree_sensitive(req->src_align);
-
dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
DMA_TO_DEVICE);
+ kfree_sensitive(req->src_align);
+
areq->dst_len = req->ctx.rsa->key_sz;
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+ DMA_FROM_DEVICE);
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
@@ -534,9 +535,6 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
kfree_sensitive(req->dst_align);
}
- dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
- DMA_FROM_DEVICE);
-
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
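
Both callback hunks move the DMA_FROM_DEVICE unmap ahead of the CPU copy out of the result buffer: per the DMA API, a streaming buffer must be unmapped (or synced) before the CPU reads the device's output, or stale cache lines may be observed on non-coherent systems. A stub sketch of the ordering (stand-in functions; not part of the patch):

    /*
     * Stub sketch of the unmap-before-read rule these hunks enforce:
     * unmapping a DMA_FROM_DEVICE buffer invalidates stale CPU cache
     * lines, so the CPU must not read the device's output before it.
     */
    #include <stdio.h>

    static void dma_unmap_from_device(void) { puts("invalidate CPU cache"); }
    static void copy_result_to_caller(void) { puts("CPU reads DMA buffer"); }

    int main(void)
    {
            /* wrong order: CPU may read stale cache lines */
            /* copy_result_to_caller(); dma_unmap_from_device(); */

            /* fixed order, as in qat_dh_cb()/qat_rsa_cb() after this patch */
            dma_unmap_from_device();
            copy_result_to_caller();
            return 0;
    }
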
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 3ba8ca20b3d7..ce837bcc1cab 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1685,7 +1685,7 @@ static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
}
static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
- char *obj_name, char **obj_ptr,
+ const char *obj_name, char **obj_ptr,
unsigned int *obj_size)
{
struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
@@ -1837,8 +1837,8 @@ static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_mof_file_hdr *mof_ptr,
- u32 mof_size, char *obj_name, char **obj_ptr,
- unsigned int *obj_size)
+ u32 mof_size, const char *obj_name,
+ char **obj_ptr, unsigned int *obj_size)
{
struct icp_qat_mof_chunkhdr *mof_chunkhdr;
unsigned int file_id = mof_ptr->file_id;
@@ -1888,7 +1888,7 @@ static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
}
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
- void *addr_ptr, u32 mem_size, char *obj_name)
+ void *addr_ptr, u32 mem_size, const char *obj_name)
{
char *obj_addr;
u32 obj_size;
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index e18860ab5c8e..1e748e8ce12d 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_dh895xcc_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -65,8 +66,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -75,7 +76,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -140,12 +140,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -199,6 +193,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err_free_reg;
}
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, true);
if (ret)
goto out_err_dev_stop;
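Note: the driver-side debugfs boilerplate above (the snprintf of the directory name plus debugfs_create_dir()/debugfs_remove()) moves behind adf_dbgfs_init()/adf_dbgfs_exit(), so all QAT drivers share one implementation and one teardown order. One plausible minimal shape for the init helper, sketched only to show what the drivers stopped doing by hand (the real body lives in adf_dbgfs.c, not in this section):

    void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
    {
        char name[ADF_DEVICE_NAME_LENGTH];

        /* same qat_<class>_<pci-id> naming the drivers used inline */
        snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
                 accel_dev->hw_device->dev_class->name,
                 pci_name(accel_to_pci_dev(accel_dev)));
        accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
    }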
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index 96854a1cd87e..fefb85ceaeb9 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -16,6 +16,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
+#include <adf_dbgfs.h>
#include "adf_dh895xccvf_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
@@ -64,8 +65,8 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
- debugfs_remove(accel_dev->debugfs_dir);
pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
adf_devmgr_rm_dev(accel_dev, pf);
}
@@ -76,7 +77,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *pf;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
unsigned long bar_mask;
int ret;
@@ -123,12 +123,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
- /* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, pci_name(pdev));
-
- accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
@@ -173,6 +167,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->vf.msg_received);
+ adf_dbgfs_init(accel_dev);
+
ret = adf_dev_up(accel_dev, false);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index c6f2fa753b7c..0f37dfd42d85 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -297,7 +297,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
err = verify_skcipher_des3_key(cipher, key);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 6019066a6451..46b778bbbee4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -40,11 +40,26 @@ enum otx2_cpt_eng_type {
};
/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
+#define MBOX_MSG_RX_INLINE_IPSEC_LF_CFG 0xBFE
#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
#define MBOX_MSG_GET_CAPS 0xBFD
#define MBOX_MSG_GET_KVF_LIMITS 0xBFC
/*
+ * Message request to configure a CPT LF for inline inbound IPsec.
+ * This message is only used between the CPT PF and its VFs.
+ */
+struct otx2_cpt_rx_inline_lf_cfg {
+ struct mbox_msghdr hdr;
+ u16 sso_pf_func;
+ u16 param1;
+ u16 param2;
+ u16 opcode;
+ u32 credit;
+ u32 reserved;
+};
+
+/*
* Message request and response to get engine group number
* which has attached a given type of engines (SE, AE, IE)
 * These messages are only used between CPT PF <=> CPT VF
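Note: struct otx2_cpt_rx_inline_lf_cfg is the payload of the new MBOX_MSG_RX_INLINE_IPSEC_LF_CFG request. A sketch of how a VF-side sender might fill it is below; the surrounding variables (mbox, sso_pf_func) and the zeroed optional fields are illustrative assumptions, not taken from this diff:

    struct otx2_cpt_rx_inline_lf_cfg *req;

    req = (struct otx2_cpt_rx_inline_lf_cfg *)
          otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
                                  sizeof(struct msg_rsp));
    if (!req)
        return -ENOMEM;
    req->hdr.id = MBOX_MSG_RX_INLINE_IPSEC_LF_CFG;
    req->hdr.sig = OTX2_MBOX_REQ_SIG;
    req->sso_pf_func = sso_pf_func;    /* SSO PF_FUNC receiving completions */
    req->opcode = 0;                   /* 0 lets the PF pick its default */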
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 115997475beb..273ee5352a50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -141,6 +141,8 @@ int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = 0;
req->cptlfs = lfs->lfs_num;
+ req->cpt_blkaddr = lfs->blkaddr;
+ req->modify = 1;
ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
if (ret)
return ret;
@@ -168,6 +170,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
req->hdr.id = MBOX_MSG_DETACH_RESOURCES;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = 0;
+ req->cptlfs = 1;
ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index 71e5f79431af..6edd27ff8c4e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -13,10 +13,10 @@ static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
{
union otx2_cptx_lf_done_wait done_wait;
- done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
- OTX2_CPT_LF_DONE_WAIT);
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
done_wait.s.time_wait = time_wait;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}
@@ -24,10 +24,10 @@ static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
{
union otx2_cptx_lf_done_wait done_wait;
- done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
- OTX2_CPT_LF_DONE_WAIT);
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
done_wait.s.num_wait = num_wait;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}
@@ -147,7 +147,7 @@ static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
irq_misc.s.nwrp = 0x1;
for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg,
irq_misc.u);
}
@@ -157,7 +157,7 @@ static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
/* Enable done interrupts */
for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
/* Enable Misc interrupts */
cptlf_set_misc_intrs(lfs, true);
@@ -168,7 +168,7 @@ static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
int slot;
for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
cptlf_set_misc_intrs(lfs, false);
}
@@ -177,7 +177,7 @@ static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
{
union otx2_cptx_lf_done irq_cnt;
- irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE);
return irq_cnt.s.done;
}
@@ -189,8 +189,8 @@ static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
struct device *dev;
dev = &lf->lfs->pdev->dev;
- irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
- OTX2_CPT_LF_MISC_INT);
+ irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_MISC_INT);
irq_misc_ack.u = 0x0;
if (irq_misc.s.fault) {
@@ -222,7 +222,7 @@ static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
}
/* Acknowledge interrupts */
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
return IRQ_HANDLED;
@@ -237,13 +237,13 @@ static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
/* Read the number of completed requests */
irq_cnt = cptlf_read_done_cnt(lf);
if (irq_cnt) {
- done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
lf->slot, OTX2_CPT_LF_DONE_WAIT);
/* Acknowledge the number of completed requests */
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_ACK, irq_cnt);
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_DONE_WAIT, done_wait.u);
if (unlikely(!lf->wqe)) {
dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
@@ -393,7 +393,7 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
OTX2_CPT_LMT_LF_LMTLINEX(0));
lfs->lf[slot].ioreg = lfs->reg_base +
- OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
+ OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot,
OTX2_CPT_LF_NQX(0));
}
/* Send request to attach LFs */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 4fcaf61a70e3..5302fe3d0e6f 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -180,7 +180,7 @@ static inline void otx2_cptlf_set_iqueues_base_addr(
for (slot = 0; slot < lfs->lfs_num; slot++) {
lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
- otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
OTX2_CPT_LF_Q_BASE, lf_q_base.u);
}
}
@@ -191,7 +191,7 @@ static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
OTX2_CPT_EXTRA_SIZE_DIV40;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
}
@@ -207,15 +207,16 @@ static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
{
union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 };
union otx2_cptx_lf_inprog lf_inprog;
+ u8 blkaddr = lf->lfs->blkaddr;
int timeout = 20;
/* Disable instructions enqueuing */
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_CTL, lf_ctl.u);
/* Wait for instruction queue to become empty */
do {
- lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr,
lf->slot, OTX2_CPT_LF_INPROG);
if (!lf_inprog.s.inflight)
break;
@@ -234,7 +235,7 @@ static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
* the queue should be empty at this point
*/
lf_inprog.s.eena = 0x0;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_INPROG, lf_inprog.u);
}
@@ -249,14 +250,15 @@ static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
bool enable)
{
+ u8 blkaddr = lf->lfs->blkaddr;
union otx2_cptx_lf_ctl lf_ctl;
- lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_CTL);
/* Set iqueue's enqueuing */
lf_ctl.s.ena = enable ? 0x1 : 0x0;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_CTL, lf_ctl.u);
}
@@ -269,13 +271,14 @@ static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
bool enable)
{
union otx2_cptx_lf_inprog lf_inprog;
+ u8 blkaddr = lf->lfs->blkaddr;
- lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_INPROG);
/* Set iqueue's execution */
lf_inprog.s.eena = enable ? 0x1 : 0x0;
- otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
OTX2_CPT_LF_INPROG, lf_inprog.u);
}
@@ -364,6 +367,18 @@ static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
}
+static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
+ struct pci_dev *pdev,
+ void __iomem *reg_base,
+ struct otx2_mbox *mbox,
+ int blkaddr)
+{
+ lfs->pdev = pdev;
+ lfs->reg_base = reg_base;
+ lfs->mbox = mbox;
+ lfs->blkaddr = blkaddr;
+}
+
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
int lfs_num);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
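Note: otx2_cptlf_set_dev_info() replaces four open-coded assignments at every call site; the blkaddr argument is what lets the same LF code drive either CPT block. The PF mailbox handler later in this diff uses it exactly like this:

    otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
                            &cptpf->afpf_mbox, BLKADDR_CPT0);
    ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp,
                          OTX2_CPT_QUEUE_HI_PRIO, 1);

For CPT1 the same two calls are repeated with &cptpf->cpt1_lfs and BLKADDR_CPT1.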
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 936174b012e8..a209ec5af381 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -31,6 +31,7 @@ struct otx2_cptpf_dev {
struct otx2_cptvf_info vf[OTX2_CPT_MAX_VFS_NUM];
struct otx2_cpt_eng_grps eng_grps;/* Engine groups information */
struct otx2_cptlfs_info lfs; /* CPT LFs attached to this PF */
+ struct otx2_cptlfs_info cpt1_lfs; /* CPT1 LFs attached to this PF */
/* HW capabilities for each engine type */
union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
bool is_eng_caps_discovered;
@@ -40,6 +41,9 @@ struct otx2_cptpf_dev {
struct work_struct afpf_mbox_work;
struct workqueue_struct *afpf_mbox_wq;
+ struct otx2_mbox afpf_mbox_up;
+ struct work_struct afpf_mbox_up_work;
+
/* VF <=> PF mbox */
struct otx2_mbox vfpf_mbox;
struct workqueue_struct *vfpf_mbox_wq;
@@ -52,8 +56,10 @@ struct otx2_cptpf_dev {
u8 pf_id; /* RVU PF number */
u8 max_vfs; /* Maximum number of VFs supported by CPT */
u8 enabled_vfs; /* Number of enabled VFs */
+ u8 sso_pf_func_ovrd; /* SSO PF_FUNC override bit */
u8 kvf_limits; /* Kernel crypto limits */
bool has_cpt1;
+ u8 rsrc_req_blkaddr;
/* Devlink */
struct devlink *dl;
@@ -61,6 +67,7 @@ struct otx2_cptpf_dev {
irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
void otx2_cptpf_afpf_mbox_handler(struct work_struct *work);
+void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work);
irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg);
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 30e6acfc93d9..e34223daa327 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -13,6 +13,8 @@
#define OTX2_CPT_DRV_NAME "rvu_cptpf"
#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
+#define CPT_UC_RID_CN9K_B0 1
+
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
{
@@ -473,10 +475,19 @@ static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
if (err)
goto error;
+ err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
+ pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
+ if (err)
+ goto mbox_cleanup;
+
INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+ INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
mutex_init(&cptpf->lock);
+
return 0;
+mbox_cleanup:
+ otx2_mbox_destroy(&cptpf->afpf_mbox);
error:
destroy_workqueue(cptpf->afpf_mbox_wq);
return err;
@@ -486,6 +497,33 @@ static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
destroy_workqueue(cptpf->afpf_mbox_wq);
otx2_mbox_destroy(&cptpf->afpf_mbox);
+ otx2_mbox_destroy(&cptpf->afpf_mbox_up);
+}
+
+static ssize_t sso_pf_func_ovrd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
+}
+
+static ssize_t sso_pf_func_ovrd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ u8 sso_pf_func_ovrd;
+
+	if (cptpf->pdev->revision != CPT_UC_RID_CN9K_B0)
+		return count;
+
+ if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
+ return -EINVAL;
+
+ cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;
+
+ return count;
}
static ssize_t kvf_limits_show(struct device *dev,
@@ -518,8 +556,11 @@ static ssize_t kvf_limits_store(struct device *dev,
}
static DEVICE_ATTR_RW(kvf_limits);
+static DEVICE_ATTR_RW(sso_pf_func_ovrd);
+
static struct attribute *cptpf_attrs[] = {
&dev_attr_kvf_limits.attr,
+ &dev_attr_sso_pf_func_ovrd.attr,
NULL
};
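Note: DEVICE_ATTR_RW() works by token pasting: DEVICE_ATTR_RW(sso_pf_func_ovrd) declares dev_attr_sso_pf_func_ovrd and wires its .show/.store to the sso_pf_func_ovrd_show()/sso_pf_func_ovrd_store() handlers defined above, which is why the attribute name and both handler prefixes must match. A minimal sketch of the same pattern for a hypothetical knob:

    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
        return sprintf(buf, "%d\n", 0);    /* report the current value */
    }

    static ssize_t foo_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
    {
        return count;    /* accept the write */
    }
    static DEVICE_ATTR_RW(foo);    /* emits dev_attr_foo */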
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index dee0aa60b698..480b3720f15a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -5,6 +5,20 @@
#include "otx2_cptpf.h"
#include "rvu_reg.h"
+/* Fast-path IPsec opcode with in-place processing */
+#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
+#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))
+
+static u8 cpt_inline_rx_opcode(struct pci_dev *pdev)
+{
+	if (is_dev_otx2(pdev))
+		return CPT_INLINE_RX_OPCODE;
+
+	return CN10K_CPT_INLINE_RX_OPCODE;
+}
+
/*
 * CPT PF driver version. It will be incremented by 1 for every feature
* addition in CPT mailbox messages.
@@ -112,6 +126,139 @@ static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
return 0;
}
+static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
+ int sso_pf_func, u8 slot)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ struct pci_dev *pdev = cptpf->pdev;
+
+ req = (struct cpt_inline_ipsec_cfg_msg *)
+ otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ memset(req, 0, sizeof(*req));
+ req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ req->dir = CPT_INLINE_INBOUND;
+ req->slot = slot;
+ req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
+ req->sso_pf_func = sso_pf_func;
+ req->enable = 1;
+
+ return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+}
+
+static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
+ struct otx2_cpt_rx_inline_lf_cfg *req)
+{
+ struct nix_inline_ipsec_cfg *nix_req;
+ struct pci_dev *pdev = cptpf->pdev;
+ int ret;
+
+ nix_req = (struct nix_inline_ipsec_cfg *)
+ otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+ sizeof(*nix_req),
+ sizeof(struct msg_rsp));
+ if (nix_req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ memset(nix_req, 0, sizeof(*nix_req));
+ nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
+ nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ nix_req->enable = 1;
+ if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
+ nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
+ else
+ nix_req->cpt_credit = req->credit - 1;
+ nix_req->gen_cfg.egrp = egrp;
+ if (req->opcode)
+ nix_req->gen_cfg.opcode = req->opcode;
+ else
+ nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
+ nix_req->gen_cfg.param1 = req->param1;
+ nix_req->gen_cfg.param2 = req->param2;
+ nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ nix_req->inst_qsel.cpt_slot = 0;
+ ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+ if (ret)
+ return ret;
+
+ if (cptpf->has_cpt1) {
+ ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
+ if (ret)
+ return ret;
+ }
+
+ return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
+}
+
+static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
+ u8 egrp;
+ int ret;
+
+ cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
+ if (cptpf->lfs.lfs_num) {
+ dev_err(&cptpf->pdev->dev,
+ "LF is already configured for RX inline ipsec.\n");
+ return -EEXIST;
+ }
+ /*
+	 * Allow LFs to execute requests destined only for the IE_TYPES engine
+	 * group, and set each LF's queue priority to high
+ */
+ egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
+ if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(&cptpf->pdev->dev,
+ "Engine group for inline ipsec is not available\n");
+ return -ENOENT;
+ }
+
+ otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO,
+ 1);
+ if (ret) {
+ dev_err(&cptpf->pdev->dev,
+ "LF configuration failed for RX inline ipsec.\n");
+ return ret;
+ }
+
+ if (cptpf->has_cpt1) {
+ cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
+ otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
+ cptpf->reg_base, &cptpf->afpf_mbox,
+ BLKADDR_CPT1);
+ ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp,
+ OTX2_CPT_QUEUE_HI_PRIO, 1);
+ if (ret) {
+ dev_err(&cptpf->pdev->dev,
+ "LF configuration failed for RX inline ipsec.\n");
+ goto lf_cleanup;
+ }
+ cptpf->rsrc_req_blkaddr = 0;
+ }
+
+ ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
+ if (ret)
+ goto lf1_cleanup;
+
+ return 0;
+
+lf1_cleanup:
+ otx2_cptlf_shutdown(&cptpf->cpt1_lfs);
+lf_cleanup:
+ otx2_cptlf_shutdown(&cptpf->lfs);
+ return ret;
+}
+
static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req, int size)
@@ -132,6 +279,10 @@ static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
case MBOX_MSG_GET_KVF_LIMITS:
err = handle_msg_kvf_limits(cptpf, vf, req);
break;
+ case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
+ err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
+ break;
+
default:
err = forward_to_af(cptpf, vf, req, size);
break;
@@ -224,14 +375,28 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
{
struct otx2_cptpf_dev *cptpf = arg;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
u64 intr;
/* Read the interrupt bits */
intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
if (intr & 0x1ULL) {
- /* Schedule work queue function to process the MBOX request */
- queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+ mbox = &cptpf->afpf_mbox;
+ mdev = &mbox->dev[0];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+
+ mbox = &cptpf->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+			/* Schedule work queue function to process the MBOX up messages */
+ queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
/* Clear and ack the interrupt */
otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
0x1ULL);
@@ -242,6 +407,7 @@ irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *msg)
{
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
struct device *dev = &cptpf->pdev->dev;
struct cpt_rd_wr_reg_msg *rsp_rd_wr;
@@ -254,6 +420,8 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
msg->sig, msg->id);
return;
}
+ if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
+ lfs = &cptpf->cpt1_lfs;
switch (msg->id) {
case MBOX_MSG_READY:
@@ -273,11 +441,14 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
break;
case MBOX_MSG_ATTACH_RESOURCES:
if (!msg->rc)
- cptpf->lfs.are_lfs_attached = 1;
+ lfs->are_lfs_attached = 1;
break;
case MBOX_MSG_DETACH_RESOURCES:
if (!msg->rc)
- cptpf->lfs.are_lfs_attached = 0;
+ lfs->are_lfs_attached = 0;
+ break;
+ case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
+ case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
break;
default:
@@ -367,3 +538,71 @@ void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
}
otx2_mbox_reset(afpf_mbox, 0);
}
+
+static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+ struct msg_rsp *rsp;
+
+ if (cptpf->lfs.lfs_num)
+ lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
+ &lfs->lf[0]);
+
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
+ sizeof(*rsp));
+ if (!rsp)
+ return;
+
+ rsp->hdr.id = msg->id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = 0;
+ rsp->hdr.rc = 0;
+}
+
+static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CPT_INST_LMTST:
+ handle_msg_cpt_inst_lmtst(cptpf, msg);
+ break;
+ default:
+ otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
+ }
+}
+
+void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, i;
+
+ cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
+ mbox = &cptpf->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ process_afpf_mbox_up_msg(cptpf, msg);
+
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+ otx2_mbox_msg_send(mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1577986677f6..1958b797a421 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1504,11 +1504,9 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
if (ret)
goto delete_grps;
- lfs->pdev = pdev;
- lfs->reg_base = cptpf->reg_base;
- lfs->mbox = &cptpf->afpf_mbox;
- lfs->blkaddr = BLKADDR_CPT0;
- ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
+ otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
goto delete_grps;
@@ -1562,7 +1560,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
free_result:
kfree(result);
lf_cleanup:
- otx2_cptlf_shutdown(&cptpf->lfs);
+ otx2_cptlf_shutdown(lfs);
delete_grps:
delete_engine_grps(pdev, &cptpf->eng_grps);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
index 4207e2236903..994291e90da1 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -19,6 +19,7 @@ struct otx2_cptvf_dev {
struct otx2_mbox pfvf_mbox;
struct work_struct pfvf_mbox_work;
struct workqueue_struct *pfvf_mbox_wq;
+ int blkaddr;
void *bbuf_base;
unsigned long cap_flag;
};
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 6023a7adb70c..bac729c885f9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -277,12 +277,11 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
if (ret)
return ret;
- lfs->reg_base = cptvf->reg_base;
- lfs->pdev = cptvf->pdev;
- lfs->mbox = &cptvf->pfvf_mbox;
-
lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
num_online_cpus();
+
+ otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
+ &cptvf->pfvf_mbox, cptvf->blkaddr);
ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
lfs_num);
if (ret)
@@ -380,6 +379,7 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
if (ret)
goto destroy_pfvf_mbox;
+ cptvf->blkaddr = BLKADDR_CPT0;
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 20d0dcd50344..4f6ca229ee5e 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
@@ -1795,11 +1796,9 @@ static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
struct spu_mdesc_info *ip,
const char *node_name)
{
- const unsigned int *reg;
- u64 node;
+ u64 node, reg;
- reg = of_get_property(dev->dev.of_node, "reg", NULL);
- if (!reg)
+ if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
return -ENODEV;
mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
@@ -1810,7 +1809,7 @@ static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
if (!name || strcmp(name, node_name))
continue;
chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
- if (!chdl || (*chdl != *reg))
+ if (!chdl || (*chdl != reg))
continue;
ip->cfg_handle = *chdl;
return get_irq_props(mdesc, node, ip);
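Note: the reg parsing above changes from dereferencing a raw of_get_property() pointer, which silently assumed a single 32-bit cell, to of_property_read_reg(), which honours the node's address cell count and returns the first reg entry as a u64. The call shape, as a standalone sketch:

    u64 addr;

    if (of_property_read_reg(np, 0, &addr, NULL) < 0)
        return -ENODEV;    /* node has no usable reg entry */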
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index d00181a26dd6..483cef62acee 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
nx-crypto-objs := nx.o \
- nx_debugfs.o \
nx-aes-cbc.o \
nx-aes-ecb.o \
nx-aes-gcm.o \
@@ -11,6 +10,7 @@ nx-crypto-objs := nx.o \
nx-sha256.o \
nx-sha512.o
+nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index c6233173c612..2697baebb6a3 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -170,8 +170,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
void nx_debugfs_init(struct nx_crypto_driver *);
void nx_debugfs_fini(struct nx_crypto_driver *);
#else
-#define NX_DEBUGFS_INIT(drv) (0)
-#define NX_DEBUGFS_FINI(drv) (0)
+#define NX_DEBUGFS_INIT(drv) do {} while (0)
+#define NX_DEBUGFS_FINI(drv) do {} while (0)
#endif
#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
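Note: the stub macros switch from (0) to do {} while (0) because the stubs are used as statements: the old expression form can trigger statement-with-no-effect warnings, and the do/while form stays safe inside an unbraced if/else. A sketch of why the idiom matters:

    #define NX_DEBUGFS_INIT(drv)    do {} while (0)

    if (debug)
        NX_DEBUGFS_INIT(drv);    /* expands to one complete statement */
    else
        other_path();            /* else still binds to the if */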
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index 92bf97232a29..12c17a68d350 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -170,7 +170,7 @@ struct sa_tfm_ctx;
* the following range, so avoid using it.
*/
#define SA_UNSAFE_DATA_SZ_MIN 240
-#define SA_UNSAFE_DATA_SZ_MAX 256
+#define SA_UNSAFE_DATA_SZ_MAX 255
struct sa_match_data;
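Note: shrinking SA_UNSAFE_DATA_SZ_MAX from 256 to 255 matters if the driver compares the bound inclusively: 240..255 is then the erratum window, and the old value of 256 also diverted one safe length. A sketch of such a guard, assuming an inclusive comparison and a hypothetical fallback helper (the check itself is outside this hunk):

    /* lengths inside the unsafe window bypass the engine */
    if (len >= SA_UNSAFE_DATA_SZ_MIN && len <= SA_UNSAFE_DATA_SZ_MAX)
        return use_sw_fallback(req);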
diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig
new file mode 100644
index 000000000000..df745fcb09df
--- /dev/null
+++ b/drivers/crypto/starfive/Kconfig
@@ -0,0 +1,20 @@
+#
+# StarFive crypto drivers configuration
+#
+
+config CRYPTO_DEV_JH7110
+ tristate "StarFive JH7110 cryptographic engine driver"
+ depends on SOC_STARFIVE || AMBA_PL08X || COMPILE_TEST
+ depends on HAS_DMA
+ select CRYPTO_ENGINE
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_SM3_GENERIC
+ select CRYPTO_RSA
+ help
+ Support for StarFive JH7110 crypto hardware acceleration engine.
+	  This module provides acceleration for public key algorithms,
+	  skciphers, AEAD and hash functions.
+
+ If you choose 'M' here, this module will be called jh7110-crypto.
diff --git a/drivers/crypto/starfive/Makefile b/drivers/crypto/starfive/Makefile
new file mode 100644
index 000000000000..98b01d2f1ccf
--- /dev/null
+++ b/drivers/crypto/starfive/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_DEV_JH7110) += jh7110-crypto.o
+jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
new file mode 100644
index 000000000000..cc43556b6c80
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cryptographic API.
+ *
+ * Support for StarFive hardware cryptographic engine.
+ * Copyright (c) 2022 StarFive Technology
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "jh7110-cryp.h"
+
+#define DRIVER_NAME "jh7110-crypto"
+
+struct starfive_dev_list {
+ struct list_head dev_list;
+ spinlock_t lock; /* protect dev_list */
+};
+
+static struct starfive_dev_list dev_list = {
+ .dev_list = LIST_HEAD_INIT(dev_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(dev_list.lock),
+};
+
+struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = NULL, *tmp;
+
+ spin_lock_bh(&dev_list.lock);
+ if (!ctx->cryp) {
+ list_for_each_entry(tmp, &dev_list.dev_list, list) {
+ cryp = tmp;
+ break;
+ }
+ ctx->cryp = cryp;
+ } else {
+ cryp = ctx->cryp;
+ }
+
+ spin_unlock_bh(&dev_list.lock);
+
+ return cryp;
+}
+
+static int starfive_dma_init(struct starfive_cryp_dev *cryp)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ cryp->tx = dma_request_chan(cryp->dev, "tx");
+ if (IS_ERR(cryp->tx))
+ return dev_err_probe(cryp->dev, PTR_ERR(cryp->tx),
+ "Error requesting tx dma channel.\n");
+
+ cryp->rx = dma_request_chan(cryp->dev, "rx");
+ if (IS_ERR(cryp->rx)) {
+ dma_release_channel(cryp->tx);
+ return dev_err_probe(cryp->dev, PTR_ERR(cryp->rx),
+ "Error requesting rx dma channel.\n");
+ }
+
+ return 0;
+}
+
+static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp)
+{
+ dma_release_channel(cryp->tx);
+ dma_release_channel(cryp->rx);
+}
+
+static irqreturn_t starfive_cryp_irq(int irq, void *priv)
+{
+	u32 status;
+	u32 mask;
+	struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv;
+
+	/* Snapshot the pending flags once; masking must not clobber them */
+	status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET);
+	if (status & STARFIVE_IE_FLAG_HASH_DONE) {
+		mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+		mask |= STARFIVE_IE_MASK_HASH_DONE;
+		writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
+		tasklet_schedule(&cryp->hash_done);
+	}
+
+	if (status & STARFIVE_IE_FLAG_PKA_DONE) {
+		mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+		mask |= STARFIVE_IE_MASK_PKA_DONE;
+		writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
+		complete(&cryp->pka_done);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int starfive_cryp_probe(struct platform_device *pdev)
+{
+ struct starfive_cryp_dev *cryp;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
+ if (!cryp)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cryp);
+ cryp->dev = &pdev->dev;
+
+ cryp->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(cryp->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base),
+ "Error remapping memory for platform device\n");
+
+ tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp);
+
+ cryp->phys_base = res->start;
+ cryp->dma_maxburst = 32;
+
+ cryp->hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(cryp->hclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cryp->hclk),
+ "Error getting hardware reference clock\n");
+
+ cryp->ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(cryp->ahb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cryp->ahb),
+ "Error getting ahb reference clock\n");
+
+ cryp->rst = devm_reset_control_get_shared(cryp->dev, NULL);
+ if (IS_ERR(cryp->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst),
+ "Error getting hardware reset line\n");
+
+ init_completion(&cryp->pka_done);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name,
+ (void *)cryp);
+ if (ret)
+		return dev_err_probe(&pdev->dev, ret,
+				     "Failed to register interrupt handler\n");
+
+ clk_prepare_enable(cryp->hclk);
+ clk_prepare_enable(cryp->ahb);
+ reset_control_deassert(cryp->rst);
+
+ spin_lock(&dev_list.lock);
+ list_add(&cryp->list, &dev_list.dev_list);
+ spin_unlock(&dev_list.lock);
+
+ ret = starfive_dma_init(cryp);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto err_probe_defer;
+ else
+ goto err_dma_init;
+ }
+
+ /* Initialize crypto engine */
+ cryp->engine = crypto_engine_alloc_init(&pdev->dev, 1);
+ if (!cryp->engine) {
+ ret = -ENOMEM;
+ goto err_engine;
+ }
+
+ ret = crypto_engine_start(cryp->engine);
+ if (ret)
+ goto err_engine_start;
+
+ ret = starfive_hash_register_algs();
+ if (ret)
+ goto err_algs_hash;
+
+ ret = starfive_rsa_register_algs();
+ if (ret)
+ goto err_algs_rsa;
+
+ return 0;
+
+err_algs_rsa:
+ starfive_hash_unregister_algs();
+err_algs_hash:
+ crypto_engine_stop(cryp->engine);
+err_engine_start:
+ crypto_engine_exit(cryp->engine);
+err_engine:
+ starfive_dma_cleanup(cryp);
+err_dma_init:
+ spin_lock(&dev_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&dev_list.lock);
+
+ clk_disable_unprepare(cryp->hclk);
+ clk_disable_unprepare(cryp->ahb);
+ reset_control_assert(cryp->rst);
+
+ tasklet_kill(&cryp->hash_done);
+err_probe_defer:
+ return ret;
+}
+
+static int starfive_cryp_remove(struct platform_device *pdev)
+{
+ struct starfive_cryp_dev *cryp = platform_get_drvdata(pdev);
+
+ starfive_hash_unregister_algs();
+ starfive_rsa_unregister_algs();
+
+ tasklet_kill(&cryp->hash_done);
+
+ crypto_engine_stop(cryp->engine);
+ crypto_engine_exit(cryp->engine);
+
+ starfive_dma_cleanup(cryp);
+
+ spin_lock(&dev_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&dev_list.lock);
+
+ clk_disable_unprepare(cryp->hclk);
+ clk_disable_unprepare(cryp->ahb);
+ reset_control_assert(cryp->rst);
+
+ return 0;
+}
+
+static const struct of_device_id starfive_dt_ids[] __maybe_unused = {
+ { .compatible = "starfive,jh7110-crypto", .data = NULL},
+ {},
+};
+MODULE_DEVICE_TABLE(of, starfive_dt_ids);
+
+static struct platform_driver starfive_cryp_driver = {
+ .probe = starfive_cryp_probe,
+ .remove = starfive_cryp_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = starfive_dt_ids,
+ },
+};
+
+module_platform_driver(starfive_cryp_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("StarFive JH7110 Cryptographic Module");
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
new file mode 100644
index 000000000000..0cdcffc0d7d4
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __STARFIVE_STR_H__
+#define __STARFIVE_STR_H__
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#include <crypto/engine.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
+
+#define STARFIVE_ALG_CR_OFFSET 0x0
+#define STARFIVE_ALG_FIFO_OFFSET 0x4
+#define STARFIVE_IE_MASK_OFFSET 0x8
+#define STARFIVE_IE_FLAG_OFFSET 0xc
+#define STARFIVE_DMA_IN_LEN_OFFSET 0x10
+#define STARFIVE_DMA_OUT_LEN_OFFSET 0x14
+
+#define STARFIVE_IE_MASK_HASH_DONE 0x4
+#define STARFIVE_IE_MASK_PKA_DONE 0x8
+#define STARFIVE_IE_FLAG_HASH_DONE 0x4
+#define STARFIVE_IE_FLAG_PKA_DONE 0x8
+
+#define STARFIVE_MSG_BUFFER_SIZE SZ_16K
+#define MAX_KEY_SIZE SHA512_BLOCK_SIZE
+
+union starfive_hash_csr {
+ u32 v;
+ struct {
+ u32 start :1;
+ u32 reset :1;
+ u32 ie :1;
+ u32 firstb :1;
+#define STARFIVE_HASH_SM3 0x0
+#define STARFIVE_HASH_SHA224 0x3
+#define STARFIVE_HASH_SHA256 0x4
+#define STARFIVE_HASH_SHA384 0x5
+#define STARFIVE_HASH_SHA512 0x6
+#define STARFIVE_HASH_MODE_MASK 0x7
+ u32 mode :3;
+ u32 rsvd_1 :1;
+ u32 final :1;
+ u32 rsvd_2 :2;
+#define STARFIVE_HASH_HMAC_FLAGS 0x800
+ u32 hmac :1;
+ u32 rsvd_3 :1;
+#define STARFIVE_HASH_KEY_DONE BIT(13)
+ u32 key_done :1;
+ u32 key_flag :1;
+ u32 hmac_done :1;
+#define STARFIVE_HASH_BUSY BIT(16)
+ u32 busy :1;
+ u32 hashdone :1;
+ u32 rsvd_4 :14;
+ };
+};
+
+union starfive_pka_cacr {
+ u32 v;
+ struct {
+ u32 start :1;
+ u32 reset :1;
+ u32 ie :1;
+ u32 rsvd_0 :1;
+ u32 fifo_mode :1;
+ u32 not_r2 :1;
+ u32 ecc_sub :1;
+ u32 pre_expf :1;
+ u32 cmd :4;
+ u32 rsvd_1 :1;
+ u32 ctrl_dummy :1;
+ u32 ctrl_false :1;
+ u32 cln_done :1;
+ u32 opsize :6;
+ u32 rsvd_2 :2;
+ u32 exposize :6;
+ u32 rsvd_3 :1;
+ u32 bigendian :1;
+ };
+};
+
+struct starfive_rsa_key {
+ u8 *n;
+ u8 *e;
+ u8 *d;
+ int e_bitlen;
+ int d_bitlen;
+ int bitlen;
+ size_t key_sz;
+};
+
+union starfive_alg_cr {
+ u32 v;
+ struct {
+ u32 start :1;
+ u32 aes_dma_en :1;
+ u32 rsvd_0 :1;
+ u32 hash_dma_en :1;
+ u32 alg_done :1;
+ u32 rsvd_1 :3;
+ u32 clear :1;
+ u32 rsvd_2 :23;
+ };
+};
+
+struct starfive_cryp_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct starfive_cryp_dev *cryp;
+ struct starfive_cryp_request_ctx *rctx;
+
+ unsigned int hash_mode;
+ u8 key[MAX_KEY_SIZE];
+ int keylen;
+ bool is_hmac;
+ struct starfive_rsa_key rsa_key;
+ struct crypto_akcipher *akcipher_fbk;
+ struct crypto_ahash *ahash_fbk;
+};
+
+struct starfive_cryp_dev {
+ struct list_head list;
+ struct device *dev;
+ struct clk *hclk;
+ struct clk *ahb;
+ struct reset_control *rst;
+
+ void __iomem *base;
+ phys_addr_t phys_base;
+
+ u32 dma_maxburst;
+ struct dma_chan *tx;
+ struct dma_chan *rx;
+ struct dma_slave_config cfg_in;
+ struct dma_slave_config cfg_out;
+ struct crypto_engine *engine;
+ struct tasklet_struct hash_done;
+ struct completion pka_done;
+ int err;
+ union starfive_alg_cr alg_cr;
+ union {
+ struct ahash_request *hreq;
+ } req;
+};
+
+struct starfive_cryp_request_ctx {
+ union {
+ union starfive_hash_csr hash;
+ union starfive_pka_cacr pka;
+ } csr;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
+ struct ahash_request ahash_fbk_req;
+ size_t total;
+ size_t nents;
+ unsigned int blksize;
+ unsigned int digsize;
+ unsigned long in_sg_len;
+ u8 rsa_data[] __aligned(sizeof(u32));
+};
+
+struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx);
+
+int starfive_hash_register_algs(void);
+void starfive_hash_unregister_algs(void);
+
+int starfive_rsa_register_algs(void);
+void starfive_rsa_unregister_algs(void);
+
+void starfive_hash_done_task(unsigned long param);
+#endif
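Note: the bitfield unions in this header let the driver build a register image field by field and issue it as a single MMIO store through the .v overlay. A sketch of starting a first SHA-256 block this way, using the field names and mode values defined above:

    union starfive_hash_csr csr = { .v = 0 };

    csr.mode = STARFIVE_HASH_SHA256;    /* 3-bit algorithm select */
    csr.start = 1;
    csr.firstb = 1;                     /* first block of the message */
    writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR);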
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
new file mode 100644
index 000000000000..5064150b8a1c
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hash function and HMAC support for StarFive driver
+ *
+ * Copyright (c) 2022 StarFive Technology
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/dma-direct.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/amba/pl080.h>
+
+#include <crypto/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "jh7110-cryp.h"
+
+#define STARFIVE_HASH_REGS_OFFSET 0x300
+#define STARFIVE_HASH_SHACSR (STARFIVE_HASH_REGS_OFFSET + 0x0)
+#define STARFIVE_HASH_SHAWDR (STARFIVE_HASH_REGS_OFFSET + 0x4)
+#define STARFIVE_HASH_SHARDR (STARFIVE_HASH_REGS_OFFSET + 0x8)
+#define STARFIVE_HASH_SHAWSR (STARFIVE_HASH_REGS_OFFSET + 0xC)
+#define STARFIVE_HASH_SHAWLEN3 (STARFIVE_HASH_REGS_OFFSET + 0x10)
+#define STARFIVE_HASH_SHAWLEN2 (STARFIVE_HASH_REGS_OFFSET + 0x14)
+#define STARFIVE_HASH_SHAWLEN1 (STARFIVE_HASH_REGS_OFFSET + 0x18)
+#define STARFIVE_HASH_SHAWLEN0 (STARFIVE_HASH_REGS_OFFSET + 0x1C)
+#define STARFIVE_HASH_SHAWKR (STARFIVE_HASH_REGS_OFFSET + 0x20)
+#define STARFIVE_HASH_SHAWKLEN (STARFIVE_HASH_REGS_OFFSET + 0x24)
+
+#define STARFIVE_HASH_BUFLEN SHA512_BLOCK_SIZE
+#define STARFIVE_HASH_RESET 0x2
+
+static inline int starfive_hash_wait_busy(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
+ !(status & STARFIVE_HASH_BUSY), 10, 100000);
+}
+
+static inline int starfive_hash_wait_key_done(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
+ (status & STARFIVE_HASH_KEY_DONE), 10, 100000);
+}
+
+static int starfive_hash_hmac_key(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ int klen = ctx->keylen, loop;
+ unsigned int *key = (unsigned int *)ctx->key;
+ unsigned char *cl;
+
+ writel(ctx->keylen, cryp->base + STARFIVE_HASH_SHAWKLEN);
+
+ rctx->csr.hash.hmac = 1;
+ rctx->csr.hash.key_flag = 1;
+
+ writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+
+ for (loop = 0; loop < klen / sizeof(unsigned int); loop++, key++)
+ writel(*key, cryp->base + STARFIVE_HASH_SHAWKR);
+
+ if (klen & 0x3) {
+ cl = (unsigned char *)key;
+ for (loop = 0; loop < (klen & 0x3); loop++, cl++)
+ writeb(*cl, cryp->base + STARFIVE_HASH_SHAWKR);
+ }
+
+ if (starfive_hash_wait_key_done(ctx))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "starfive_hash_wait_key_done error\n");
+
+ return 0;
+}
+
+static void starfive_hash_start(void *param)
+{
+ struct starfive_cryp_ctx *ctx = param;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ union starfive_alg_cr alg_cr;
+ union starfive_hash_csr csr;
+ u32 stat;
+
+ dma_unmap_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
+
+ alg_cr.v = 0;
+ alg_cr.clear = 1;
+
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ csr.v = readl(cryp->base + STARFIVE_HASH_SHACSR);
+ csr.firstb = 0;
+ csr.final = 1;
+
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_HASH_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR);
+}
+
+static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct dma_async_tx_descriptor *in_desc;
+ union starfive_alg_cr alg_cr;
+ int total_len;
+ int ret;
+
+ if (!rctx->total) {
+ starfive_hash_start(ctx);
+ return 0;
+ }
+
+ writel(rctx->total, cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
+
+	/* The FIFO consumes whole 32-bit words, so round the DMA length up */
+	total_len = ALIGN(rctx->total, sizeof(u32));
+	sg_dma_len(rctx->in_sg) = total_len;
+
+ alg_cr.v = 0;
+ alg_cr.start = 1;
+ alg_cr.hash_dma_en = 1;
+
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ ret = dma_map_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
+ if (!ret)
+ return dev_err_probe(cryp->dev, -EINVAL, "dma_map_sg() error\n");
+
+ cryp->cfg_in.direction = DMA_MEM_TO_DEV;
+ cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_in.src_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET;
+
+ dmaengine_slave_config(cryp->tx, &cryp->cfg_in);
+
+ in_desc = dmaengine_prep_slave_sg(cryp->tx, rctx->in_sg,
+ ret, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!in_desc)
+ return -EINVAL;
+
+ in_desc->callback = starfive_hash_start;
+ in_desc->callback_param = ctx;
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(cryp->tx);
+
+ return 0;
+}
+
+static int starfive_hash_xmit(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ int ret = 0;
+
+ rctx->csr.hash.v = 0;
+ rctx->csr.hash.reset = 1;
+ writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+
+ if (starfive_hash_wait_busy(ctx))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting engine.\n");
+
+ rctx->csr.hash.v = 0;
+ rctx->csr.hash.mode = ctx->hash_mode;
+ rctx->csr.hash.ie = 1;
+
+ if (ctx->is_hmac) {
+ ret = starfive_hash_hmac_key(ctx);
+ if (ret)
+ return ret;
+ } else {
+ rctx->csr.hash.start = 1;
+ rctx->csr.hash.firstb = 1;
+ writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+ }
+
+ return starfive_hash_xmit_dma(ctx);
+}
+
+static int starfive_hash_copy_hash(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	int count;
+	int mlen;
+	u32 *data;
+
+ if (!req->result)
+ return 0;
+
+ mlen = rctx->digsize / sizeof(u32);
+ data = (u32 *)req->result;
+
+ for (count = 0; count < mlen; count++)
+ data[count] = readl(ctx->cryp->base + STARFIVE_HASH_SHARDR);
+
+ return 0;
+}
+
+void starfive_hash_done_task(unsigned long param)
+{
+ struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
+ int err = cryp->err;
+
+ if (!err)
+ err = starfive_hash_copy_hash(cryp->req.hreq);
+
+	/* Reset to clear hash_done in irq register */
+ writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
+
+ crypto_finalize_hash_request(cryp->engine, cryp->req.hreq, err);
+}
+
+static int starfive_hash_check_aligned(struct scatterlist *sg, size_t total, size_t align)
+{
+ int len = 0;
+
+ if (!total)
+ return 0;
+
+ if (!IS_ALIGNED(total, align))
+ return -EINVAL;
+
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(sg->length, align))
+ return -EINVAL;
+
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+
+ if (len != total)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ return starfive_hash_xmit(ctx);
+}
+
+static int starfive_hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_init(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_update(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_update(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_final(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_final(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_finup(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_finup(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_digest_fb(struct ahash_request *req)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags,
+ req->base.complete, req->base.data);
+
+ ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
+ req->result, req->nbytes);
+
+ return crypto_ahash_digest(&rctx->ahash_fbk_req);
+}
+
+static int starfive_hash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ memset(rctx, 0, sizeof(struct starfive_cryp_request_ctx));
+
+ cryp->req.hreq = req;
+ rctx->total = req->nbytes;
+ rctx->in_sg = req->src;
+ rctx->blksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ rctx->digsize = crypto_ahash_digestsize(tfm);
+ rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+ ctx->rctx = rctx;
+
+ if (starfive_hash_check_aligned(rctx->in_sg, rctx->total, rctx->blksize))
+ return starfive_hash_digest_fb(req);
+
+ return crypto_transfer_hash_request_to_engine(cryp->engine, req);
+}
+
+static int starfive_hash_export(struct ahash_request *req, void *out)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ return crypto_ahash_export(&rctx->ahash_fbk_req, out);
+}
+
+static int starfive_hash_import(struct ahash_request *req, const void *in)
+{
+ struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
+ ahash_request_set_callback(&rctx->ahash_fbk_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+
+ return crypto_ahash_import(&rctx->ahash_fbk_req, in);
+}
+
+static int starfive_hash_init_tfm(struct crypto_ahash *hash,
+ const char *alg_name,
+ unsigned int mode)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+
+ if (!ctx->cryp)
+ return -ENODEV;
+
+ ctx->ahash_fbk = crypto_alloc_ahash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->ahash_fbk))
+ return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->ahash_fbk),
+ "starfive_hash: Could not load fallback driver.\n");
+
+ crypto_ahash_set_statesize(hash, crypto_ahash_statesize(ctx->ahash_fbk));
+ crypto_ahash_set_reqsize(hash, sizeof(struct starfive_cryp_request_ctx) +
+ crypto_ahash_reqsize(ctx->ahash_fbk));
+
+ ctx->keylen = 0;
+ ctx->hash_mode = mode;
+
+ ctx->enginectx.op.do_one_request = starfive_hash_one_request;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void starfive_hash_exit_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ crypto_free_ahash(ctx->ahash_fbk);
+
+ ctx->ahash_fbk = NULL;
+ ctx->enginectx.op.do_one_request = NULL;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+}
+
+static int starfive_hash_long_setkey(struct starfive_cryp_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ const char *alg_name)
+{
+ struct crypto_wait wait;
+ struct ahash_request *req;
+ struct scatterlist sg;
+ struct crypto_ahash *ahash_tfm;
+ u8 *buf;
+ int ret;
+
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+ if (IS_ERR(ahash_tfm))
+ return PTR_ERR(ahash_tfm);
+
+ req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_free_ahash;
+ }
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+ buf = kzalloc(keylen + STARFIVE_HASH_BUFLEN, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_free_req;
+ }
+
+ memcpy(buf, key, keylen);
+ sg_init_one(&sg, buf, keylen);
+ ahash_request_set_crypt(req, &sg, ctx->key, keylen);
+
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
+ kfree_sensitive(buf); /* buf held key material; wipe before freeing */
+err_free_req:
+ ahash_request_free(req);
+err_free_ahash:
+ crypto_free_ahash(ahash_tfm);
+ return ret;
+}
+
+static int starfive_hash_setkey(struct crypto_ahash *hash,
+ const u8 *key, unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+ unsigned int digestsize = crypto_ahash_digestsize(hash);
+ unsigned int blocksize = crypto_ahash_blocksize(hash);
+ const char *alg_name;
+ int ret;
+
+ ret = crypto_ahash_setkey(ctx->ahash_fbk, key, keylen);
+ if (ret)
+ return ret;
+
+ if (keylen <= blocksize) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ return 0;
+ }
+
+ ctx->keylen = digestsize;
+
+ switch (digestsize) {
+ case SHA224_DIGEST_SIZE:
+ alg_name = "sha224-starfive";
+ break;
+ case SHA256_DIGEST_SIZE:
+ if (ctx->hash_mode == STARFIVE_HASH_SM3)
+ alg_name = "sm3-starfive";
+ else
+ alg_name = "sha256-starfive";
+ break;
+ case SHA384_DIGEST_SIZE:
+ alg_name = "sha384-starfive";
+ break;
+ case SHA512_DIGEST_SIZE:
+ alg_name = "sha512-starfive";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return starfive_hash_long_setkey(ctx, key, keylen, alg_name);
+}
+
+static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sha224-generic",
+ STARFIVE_HASH_SHA224);
+}
+
+static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sha256-generic",
+ STARFIVE_HASH_SHA256);
+}
+
+static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sha384-generic",
+ STARFIVE_HASH_SHA384);
+}
+
+static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sha512-generic",
+ STARFIVE_HASH_SHA512);
+}
+
+static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
+{
+ return starfive_hash_init_tfm(hash, "sm3-generic",
+ STARFIVE_HASH_SM3);
+}
+
+static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
+ STARFIVE_HASH_SHA224);
+}
+
+static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
+ STARFIVE_HASH_SHA256);
+}
+
+static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
+ STARFIVE_HASH_SHA384);
+}
+
+static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
+ STARFIVE_HASH_SHA512);
+}
+
+static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash)
+{
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
+
+ ctx->is_hmac = true;
+
+ return starfive_hash_init_tfm(hash, "hmac(sm3-generic)",
+ STARFIVE_HASH_SM3);
+}
+
+static struct ahash_alg algs_sha2_sm3[] = {
+{
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sha224_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sha224_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "sha224-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sha256_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sha256_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "sha256-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sha384_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sha384_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "sha384-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sha512_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sha512_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "sha512-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_sm3_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct sm3_state),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "sm3-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+}, {
+ .init = starfive_hash_init,
+ .update = starfive_hash_update,
+ .final = starfive_hash_final,
+ .finup = starfive_hash_finup,
+ .digest = starfive_hash_digest,
+ .export = starfive_hash_export,
+ .import = starfive_hash_import,
+ .init_tfm = starfive_hmac_sm3_init_tfm,
+ .exit_tfm = starfive_hash_exit_tfm,
+ .setkey = starfive_hash_setkey,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = sizeof(struct sm3_state),
+ .base = {
+ .cra_name = "hmac(sm3)",
+ .cra_driver_name = "sm3-hmac-starfive",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ }
+ }
+},
+};
+
+int starfive_hash_register_algs(void)
+{
+ return crypto_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+}
+
+void starfive_hash_unregister_algs(void)
+{
+ crypto_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+}
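
The setkey path above follows the RFC 2104 rule for HMAC keys: a key no longer than the block size is stored verbatim, while a longer key is first digested down to digestsize via starfive_hash_long_setkey(). Below is a minimal, runnable userspace sketch of that branch; toy_digest() is a hypothetical stand-in, not the driver's hardware hash.

#include <stdio.h>
#include <string.h>

#define BLOCKSIZE  64	/* e.g. SHA-256 block size */
#define DIGESTSIZE 32	/* e.g. SHA-256 digest size */

/* Hypothetical stand-in for the digest used by the long-key path. */
static void toy_digest(const unsigned char *in, size_t len,
		       unsigned char out[DIGESTSIZE])
{
	memset(out, 0, DIGESTSIZE);
	for (size_t i = 0; i < len; i++)
		out[i % DIGESTSIZE] ^= in[i];
}

/* Mirrors the keylen <= blocksize branch in starfive_hash_setkey(). */
static size_t prepare_hmac_key(const unsigned char *key, size_t keylen,
			       unsigned char out[BLOCKSIZE])
{
	if (keylen <= BLOCKSIZE) {
		memcpy(out, key, keylen);
		return keylen;		/* short key: used verbatim */
	}
	toy_digest(key, keylen, out);	/* long key: hashed down first */
	return DIGESTSIZE;
}

int main(void)
{
	unsigned char key[100], out[BLOCKSIZE];

	memset(key, 0xab, sizeof(key));
	printf("effective key length: %zu\n",
	       prepare_hmac_key(key, sizeof(key), out));
	return 0;
}
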
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
new file mode 100644
index 000000000000..f31bbd825f88
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive Public Key Algorithm (PKA) acceleration driver
+ *
+ * Copyright (c) 2022 StarFive Technology
+ */
+
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direct.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/scatterwalk.h>
+
+#include "jh7110-cryp.h"
+
+#define STARFIVE_PKA_REGS_OFFSET 0x400
+#define STARFIVE_PKA_CACR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x0)
+#define STARFIVE_PKA_CASR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x4)
+#define STARFIVE_PKA_CAAR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x8)
+#define STARFIVE_PKA_CAER_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x108)
+#define STARFIVE_PKA_CANR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x208)
+
+// precompute Montgomery constants: R^2 mod N and N0'
+#define CRYPTO_CMD_PRE 0x0
+// A * R mod N ==> A
+#define CRYPTO_CMD_ARN 0x5
+// A * E * R mod N ==> A
+#define CRYPTO_CMD_AERN 0x6
+// A * A * R mod N ==> A
+#define CRYPTO_CMD_AARN 0x7
+
+#define STARFIVE_RSA_MAX_KEYSZ 256
+#define STARFIVE_RSA_RESET 0x2
+
+static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ return wait_for_completion_timeout(&cryp->pka_done,
+ usecs_to_jiffies(100000));
+}
+
+static inline void starfive_pka_irq_mask_clear(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 stat;
+
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_PKA_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+
+ reinit_completion(&cryp->pka_done);
+}
+
+static void starfive_rsa_free_key(struct starfive_rsa_key *key)
+{
+ /* kfree_sensitive() is NULL-safe, so no per-pointer checks are needed */
+ kfree_sensitive(key->d);
+ kfree_sensitive(key->e);
+ kfree_sensitive(key->n);
+ memset(key, 0, sizeof(*key));
+}
+
+static unsigned int starfive_rsa_get_nbit(u8 *pa, u32 snum, int key_sz)
+{
+ u32 i;
+ u8 value;
+
+ i = snum >> 3;
+
+ value = pa[key_sz - i - 1];
+ value >>= snum & 0x7;
+ value &= 0x1;
+
+ return value;
+}
+
+static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
+ u32 *out, u32 *in, u8 mont,
+ u32 *mod, int bit_len)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ int count = rctx->total / sizeof(u32) - 1;
+ int loop;
+ u32 temp;
+ u8 opsize;
+
+ opsize = (bit_len - 1) >> 5;
+ rctx->csr.pka.v = 0;
+
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ for (loop = 0; loop <= opsize; loop++)
+ writel(mod[opsize - loop], cryp->base + STARFIVE_PKA_CANR_OFFSET + loop * 4);
+
+ if (mont) {
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_PRE;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.not_r2 = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ return -ETIMEDOUT;
+
+ for (loop = 0; loop <= opsize; loop++)
+ writel(in[opsize - loop], cryp->base + STARFIVE_PKA_CAAR_OFFSET + loop * 4);
+
+ writel(0x1000000, cryp->base + STARFIVE_PKA_CAER_OFFSET);
+
+ for (loop = 1; loop <= opsize; loop++)
+ writel(0, cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4);
+
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_AERN;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ return -ETIMEDOUT;
+ } else {
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_PRE;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.pre_expf = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ return -ETIMEDOUT;
+
+ for (loop = 0; loop <= count; loop++)
+ writel(in[count - loop], cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4);
+
+ /* pad with 0 up to opsize */
+ for (loop = count + 1; loop <= opsize; loop++)
+ writel(0, cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4);
+
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_ARN;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ return -ETIMEDOUT;
+ }
+
+ for (loop = 0; loop <= opsize; loop++) {
+ temp = readl(cryp->base + STARFIVE_PKA_CAAR_OFFSET + 0x4 * loop);
+ out[opsize - loop] = temp;
+ }
+
+ return 0;
+}
+
+static int starfive_rsa_cpu_start(struct starfive_cryp_ctx *ctx, u32 *result,
+ u8 *de, u32 *n, int key_sz)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+ u32 temp;
+ int ret = 0;
+ int opsize, mlen, loop;
+ unsigned int *mta;
+
+ opsize = (key_sz - 1) >> 2;
+
+ mta = kmalloc(key_sz, GFP_KERNEL);
+ if (!mta)
+ return -ENOMEM;
+
+ ret = starfive_rsa_montgomery_form(ctx, mta, (u32 *)rctx->rsa_data,
+ 0, n, key_sz << 3);
+ if (ret) {
+ dev_err_probe(cryp->dev, ret, "Conversion to Montgomery failed");
+ goto rsa_err;
+ }
+
+ for (loop = 0; loop <= opsize; loop++)
+ writel(mta[opsize - loop],
+ cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4);
+
+ for (loop = key->bitlen - 1; loop > 0; loop--) {
+ mlen = starfive_rsa_get_nbit(de, loop - 1, key_sz);
+
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_AARN;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ ret = -ETIMEDOUT;
+ if (!starfive_pka_wait_done(ctx))
+ goto rsa_err;
+
+ if (mlen) {
+ rctx->csr.pka.v = 0;
+ rctx->csr.pka.cln_done = 1;
+ rctx->csr.pka.opsize = opsize;
+ rctx->csr.pka.exposize = opsize;
+ rctx->csr.pka.cmd = CRYPTO_CMD_AERN;
+ rctx->csr.pka.start = 1;
+ rctx->csr.pka.ie = 1;
+
+ starfive_pka_irq_mask_clear(ctx);
+ writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ if (!starfive_pka_wait_done(ctx))
+ goto rsa_err;
+ }
+ }
+
+ for (loop = 0; loop <= opsize; loop++) {
+ temp = readl(cryp->base + STARFIVE_PKA_CAAR_OFFSET + 0x4 * loop);
+ result[opsize - loop] = temp;
+ }
+
+ ret = starfive_rsa_montgomery_form(ctx, result, result, 1, n, key_sz << 3);
+ if (ret)
+ dev_err_probe(cryp->dev, ret, "Conversion from Montgomery failed");
+rsa_err:
+ kfree(mta);
+ return ret;
+}
+
+static int starfive_rsa_start(struct starfive_cryp_ctx *ctx, u8 *result,
+ u8 *de, u8 *n, int key_sz)
+{
+ return starfive_rsa_cpu_start(ctx, (u32 *)result, de, (u32 *)n, key_sz);
+}
+
+static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+ int ret = 0;
+
+ writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+
+ rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents,
+ rctx->rsa_data, rctx->total);
+
+ if (enc) {
+ key->bitlen = key->e_bitlen;
+ ret = starfive_rsa_start(ctx, rctx->rsa_data, key->e,
+ key->n, key->key_sz);
+ } else {
+ key->bitlen = key->d_bitlen;
+ ret = starfive_rsa_start(ctx, rctx->rsa_data, key->d,
+ key->n, key->key_sz);
+ }
+
+ if (ret)
+ goto err_rsa_crypt;
+
+ sg_copy_buffer(rctx->out_sg, sg_nents(rctx->out_sg),
+ rctx->rsa_data, key->key_sz, 0, 0);
+
+err_rsa_crypt:
+ writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+ kfree(rctx->rsa_data);
+ return ret;
+}
+
+static int starfive_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+ struct starfive_cryp_request_ctx *rctx = akcipher_request_ctx(req);
+ int ret;
+
+ if (!key->key_sz) {
+ akcipher_request_set_tfm(req, ctx->akcipher_fbk);
+ ret = crypto_akcipher_encrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (unlikely(!key->n || !key->e))
+ return -EINVAL;
+
+ if (req->dst_len < key->key_sz)
+ return dev_err_probe(cryp->dev, -EOVERFLOW,
+ "Output buffer length less than parameter n\n");
+
+ rctx->in_sg = req->src;
+ rctx->out_sg = req->dst;
+ rctx->total = req->src_len;
+ rctx->nents = sg_nents(rctx->in_sg);
+ ctx->rctx = rctx;
+
+ return starfive_rsa_enc_core(ctx, 1);
+}
+
+static int starfive_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+ struct starfive_cryp_request_ctx *rctx = akcipher_request_ctx(req);
+ int ret;
+
+ if (!key->key_sz) {
+ akcipher_request_set_tfm(req, ctx->akcipher_fbk);
+ ret = crypto_akcipher_decrypt(req);
+ akcipher_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ if (unlikely(!key->n || !key->d))
+ return -EINVAL;
+
+ if (req->dst_len < key->key_sz)
+ return dev_err_probe(cryp->dev, -EOVERFLOW,
+ "Output buffer length less than parameter n\n");
+
+ rctx->in_sg = req->src;
+ rctx->out_sg = req->dst;
+ ctx->rctx = rctx;
+ rctx->total = req->src_len;
+
+ return starfive_rsa_enc_core(ctx, 0);
+}
+
+static int starfive_rsa_set_n(struct starfive_rsa_key *rsa_key,
+ const char *value, size_t vlen)
+{
+ const char *ptr = value;
+ unsigned int bitslen;
+ int ret;
+
+ while (!*ptr && vlen) {
+ ptr++;
+ vlen--;
+ }
+ rsa_key->key_sz = vlen;
+ bitslen = rsa_key->key_sz << 3;
+
+ /* reject an empty modulus or a size that is not a multiple of 32 bits */
+ if (!vlen || (bitslen & 0x1f))
+ return -EINVAL;
+
+ ret = -ENOMEM;
+ rsa_key->n = kmemdup(ptr, rsa_key->key_sz, GFP_KERNEL);
+ if (!rsa_key->n)
+ goto err;
+
+ return 0;
+ err:
+ rsa_key->key_sz = 0;
+ rsa_key->n = NULL;
+ starfive_rsa_free_key(rsa_key);
+ return ret;
+}
+
+static int starfive_rsa_set_e(struct starfive_rsa_key *rsa_key,
+ const char *value, size_t vlen)
+{
+ const char *ptr = value;
+ unsigned char pt;
+ int loop;
+
+ while (!*ptr && vlen) {
+ ptr++;
+ vlen--;
+ }
+
+ if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz) {
+ rsa_key->e = NULL;
+ return -EINVAL;
+ }
+ pt = *ptr; /* safe to dereference: vlen is non-zero here */
+
+ rsa_key->e = kzalloc(rsa_key->key_sz, GFP_KERNEL);
+ if (!rsa_key->e)
+ return -ENOMEM;
+
+ for (loop = 8; loop > 0; loop--) {
+ if (pt >> (loop - 1))
+ break;
+ }
+
+ rsa_key->e_bitlen = (vlen - 1) * 8 + loop;
+
+ memcpy(rsa_key->e + (rsa_key->key_sz - vlen), ptr, vlen);
+
+ return 0;
+}
+
+static int starfive_rsa_set_d(struct starfive_rsa_key *rsa_key,
+ const char *value, size_t vlen)
+{
+ const char *ptr = value;
+ unsigned char pt;
+ int loop;
+ int ret;
+
+ while (!*ptr && vlen) {
+ ptr++;
+ vlen--;
+ }
+
+ ret = -EINVAL;
+ if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz)
+ goto err;
+ pt = *ptr; /* safe to dereference: vlen is non-zero here */
+
+ ret = -ENOMEM;
+ rsa_key->d = kzalloc(rsa_key->key_sz, GFP_KERNEL);
+ if (!rsa_key->d)
+ goto err;
+
+ for (loop = 8; loop > 0; loop--) {
+ if (pt >> (loop - 1))
+ break;
+ }
+
+ rsa_key->d_bitlen = (vlen - 1) * 8 + loop;
+
+ memcpy(rsa_key->d + (rsa_key->key_sz - vlen), ptr, vlen);
+
+ return 0;
+ err:
+ rsa_key->d = NULL;
+ return ret;
+}
+
+static int starfive_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key = {NULL};
+ struct starfive_rsa_key *rsa_key = &ctx->rsa_key;
+ int ret;
+
+ if (private)
+ ret = rsa_parse_priv_key(&raw_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&raw_key, key, keylen);
+ if (ret < 0)
+ goto err;
+
+ starfive_rsa_free_key(rsa_key);
+
+ /* Use fallback for a modulus larger than 256 bytes (+1 possible leading zero byte) */
+ if (raw_key.n_sz > STARFIVE_RSA_MAX_KEYSZ + 1)
+ return 0;
+
+ ret = starfive_rsa_set_n(rsa_key, raw_key.n, raw_key.n_sz);
+ if (ret)
+ return ret;
+
+ ret = starfive_rsa_set_e(rsa_key, raw_key.e, raw_key.e_sz);
+ if (ret)
+ goto err;
+
+ if (private) {
+ ret = starfive_rsa_set_d(rsa_key, raw_key.d, raw_key.d_sz);
+ if (ret)
+ goto err;
+ }
+
+ if (!rsa_key->n || !rsa_key->e) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (private && !rsa_key->d) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+ err:
+ starfive_rsa_free_key(rsa_key);
+ return ret;
+}
+
+static int starfive_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_pub_key(ctx->akcipher_fbk, key, keylen);
+ if (ret)
+ return ret;
+
+ return starfive_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int starfive_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_priv_key(ctx->akcipher_fbk, key, keylen);
+ if (ret)
+ return ret;
+
+ return starfive_rsa_setkey(tfm, key, keylen, true);
+}
+
+static unsigned int starfive_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ if (ctx->rsa_key.key_sz)
+ return ctx->rsa_key.key_sz;
+
+ return crypto_akcipher_maxsize(ctx->akcipher_fbk);
+}
+
+static int starfive_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ ctx->akcipher_fbk = crypto_alloc_akcipher("rsa-generic", 0, 0);
+ if (IS_ERR(ctx->akcipher_fbk))
+ return PTR_ERR(ctx->akcipher_fbk);
+
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+ if (!ctx->cryp) {
+ crypto_free_akcipher(ctx->akcipher_fbk);
+ return -ENODEV;
+ }
+
+ akcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
+ sizeof(struct crypto_akcipher) + 32);
+
+ return 0;
+}
+
+static void starfive_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct starfive_rsa_key *key = &ctx->rsa_key;
+
+ crypto_free_akcipher(ctx->akcipher_fbk);
+ starfive_rsa_free_key(key);
+}
+
+static struct akcipher_alg starfive_rsa = {
+ .encrypt = starfive_rsa_enc,
+ .decrypt = starfive_rsa_dec,
+ .sign = starfive_rsa_dec,
+ .verify = starfive_rsa_enc,
+ .set_pub_key = starfive_rsa_set_pub_key,
+ .set_priv_key = starfive_rsa_set_priv_key,
+ .max_size = starfive_rsa_max_size,
+ .init = starfive_rsa_init_tfm,
+ .exit = starfive_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "starfive-rsa",
+ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_priority = 3000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ },
+};
+
+int starfive_rsa_register_algs(void)
+{
+ return crypto_register_akcipher(&starfive_rsa);
+}
+
+void starfive_rsa_unregister_algs(void)
+{
+ crypto_unregister_akcipher(&starfive_rsa);
+}
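
starfive_rsa_cpu_start() above performs modular exponentiation as a left-to-right binary ladder in the Montgomery domain: every exponent bit costs one squaring (CRYPTO_CMD_AARN) and each set bit adds one multiplication (CRYPTO_CMD_AERN), with starfive_rsa_get_nbit() pulling bits out of the big-endian key buffer. The self-contained sketch below reproduces the same ladder with plain (non-Montgomery) 64-bit arithmetic; it is illustrative only, not the hardware flow.

#include <stdint.h>
#include <stdio.h>

/* Mirrors starfive_rsa_get_nbit(): bit snum of a big-endian byte array. */
static unsigned int get_bit(const uint8_t *pa, uint32_t snum, int key_sz)
{
	uint8_t value = pa[key_sz - (snum >> 3) - 1];

	return (value >> (snum & 0x7)) & 0x1;
}

/* Left-to-right square-and-multiply, as in starfive_rsa_cpu_start(). */
static uint64_t modexp(uint64_t base, const uint8_t *exp, int exp_sz,
		       int bitlen, uint64_t n)
{
	uint64_t acc = base % n;	/* top exponent bit is assumed set */

	for (int loop = bitlen - 1; loop > 0; loop--) {
		acc = (acc * acc) % n;			/* AARN: square */
		if (get_bit(exp, loop - 1, exp_sz))
			acc = (acc * base) % n;		/* AERN: multiply */
	}
	return acc;
}

int main(void)
{
	const uint8_t e[] = { 0x01, 0x00, 0x01 };	/* 65537, big endian */

	/* 5^65537 mod 1000003 */
	printf("%llu\n",
	       (unsigned long long)modexp(5, e, sizeof(e), 17, 1000003));
	return 0;
}
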
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 3a6ee7bb06f1..1599f1176842 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -214,6 +214,7 @@ static int generic_ops_register(void)
generic_ops.get_variable = efi.get_variable;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
+ generic_ops.query_variable_info = efi.query_variable_info;
if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
generic_ops.set_variable = efi.set_variable;
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 87729c365be1..7a81c0ce4780 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -95,10 +95,6 @@ static ssize_t esre_attr_show(struct kobject *kobj,
struct esre_entry *entry = to_entry(kobj);
struct esre_attribute *attr = to_attr(_attr);
- /* Don't tell normal users what firmware versions we've got... */
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
return attr->show(entry, buf);
}
@@ -156,7 +152,7 @@ static void esre_release(struct kobject *kobj)
kfree(entry);
}
-static struct kobj_type esre1_ktype = {
+static const struct kobj_type esre1_ktype = {
.release = esre_release,
.sysfs_ops = &esre_attr_ops,
.default_groups = esre1_groups,
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 1e0203d74691..732984295295 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -378,6 +378,9 @@ efi_status_t efi_exit_boot_services(void *handle, void *priv,
struct efi_boot_memmap *map;
efi_status_t status;
+ if (efi_disable_pci_dma)
+ efi_pci_disable_bridge_busmaster();
+
status = efi_get_memory_map(&map, true);
if (status != EFI_SUCCESS)
return status;
@@ -388,9 +391,6 @@ efi_status_t efi_exit_boot_services(void *handle, void *priv,
return status;
}
- if (efi_disable_pci_dma)
- efi_pci_disable_bridge_busmaster();
-
status = efi_bs_call(exit_boot_services, handle, map->map_key);
if (status == EFI_INVALID_PARAMETER) {
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index bfc5fa6aa47b..e9dc7116daf1 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -245,3 +245,15 @@ efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
return status;
}
EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR);
+
+efi_status_t efivar_query_variable_info(u32 attr,
+ u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ if (!__efivars->ops->query_variable_info)
+ return EFI_UNSUPPORTED;
+ return __efivars->ops->query_variable_info(attr, storage_space,
+ remaining_space, max_variable_size);
+}
+EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, EFIVAR);
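
efivar_query_variable_info() above uses the common optional-ops pattern: a NULL callback in the ops table means the backend cannot answer, so the wrapper returns EFI_UNSUPPORTED rather than dereferencing a NULL pointer. A minimal sketch of the pattern follows; the names and status values are illustrative, not the real EFI definitions.

#include <stdio.h>

#define EFI_SUCCESS     0
#define EFI_UNSUPPORTED 3	/* illustrative; real EFI status values differ */

struct backend_ops {
	/* optional: NULL when the backend lacks the capability */
	int (*query_info)(unsigned long long *total, unsigned long long *avail);
};

/* Mirrors efivar_query_variable_info(): guard, then the indirect call. */
static int query_info_wrapper(const struct backend_ops *ops,
			      unsigned long long *total,
			      unsigned long long *avail)
{
	if (!ops->query_info)
		return EFI_UNSUPPORTED;
	return ops->query_info(total, avail);
}

static int fake_query(unsigned long long *total, unsigned long long *avail)
{
	*total = 1 << 20;
	*avail = 1 << 19;
	return EFI_SUCCESS;
}

int main(void)
{
	struct backend_ops with = { .query_info = fake_query };
	struct backend_ops without = { 0 };
	unsigned long long t, a;

	printf("with handler: %d\n", query_info_wrapper(&with, &t, &a));
	printf("without handler: %d\n", query_info_wrapper(&without, &t, &a));
	return 0;
}
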
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 27f4aa8d10bd..1dc6227d353e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -31,10 +31,10 @@
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
-#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
+#include <crypto/utils.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <linux/key-type.h>
#include <keys/user-type.h>
@@ -745,16 +745,23 @@ static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
- u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
+ struct crypto_skcipher *tfm = any_tfm(cc);
struct skcipher_request *req;
struct scatterlist src, dst;
DECLARE_CRYPTO_WAIT(wait);
+ unsigned int reqsize;
int err;
+ u8 *buf;
- req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
+ reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64));
+
+ req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
if (!req)
return -ENOMEM;
+ skcipher_request_set_tfm(req, tfm);
+
+ buf = (u8 *)req + reqsize;
memset(buf, 0, cc->iv_size);
*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
@@ -763,7 +770,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
- skcipher_request_free(req);
+ kfree_sensitive(req);
return err;
}
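
The dm-crypt hunk above replaces a fixed on-stack IV buffer with one heap allocation that holds the skcipher request followed by the IV, so a single kfree_sensitive() wipes and frees both. The layout trick is rounding the request size up to the buffer's required alignment and placing the buffer at that offset. A userspace sketch of the same co-allocation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct fake_request {		/* stand-in for the skcipher request */
	char opaque[40];
};

int main(void)
{
	size_t iv_size = 16;
	/* request size rounded up so the trailing buffer is 8-byte aligned;
	 * __alignof__ is the GCC spelling the kernel code uses */
	size_t reqsize = ALIGN_UP(sizeof(struct fake_request),
				  __alignof__(uint64_t));
	struct fake_request *req = malloc(reqsize + iv_size);
	uint8_t *buf;

	if (!req)
		return 1;
	buf = (uint8_t *)req + reqsize;	/* IV lives right after the request */
	memset(buf, 0, iv_size);
	*(uint64_t *)buf = 12345;	/* e.g. the sector number */

	printf("req=%p buf=%p (offset %zu)\n",
	       (void *)req, (void *)buf, reqsize);
	free(req);			/* one free covers both */
	return 0;
}
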
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 482d612b716b..e028fafa04f3 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -13,6 +13,7 @@
#include <linux/ucs2_string.h>
#include <linux/slab.h>
#include <linux/magic.h>
+#include <linux/statfs.h>
#include "internal.h"
@@ -23,8 +24,44 @@ static void efivarfs_evict_inode(struct inode *inode)
clear_inode(inode);
}
+static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ const u32 attr = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS;
+ u64 storage_space, remaining_space, max_variable_size;
+ efi_status_t status;
+
+ status = efivar_query_variable_info(attr, &storage_space, &remaining_space,
+ &max_variable_size);
+ if (status != EFI_SUCCESS)
+ return efi_status_to_err(status);
+
+ /*
+ * This is not a normal filesystem, so there is no point in pretending it
+ * has a block size; we set f_bsize to 1 so that the exact byte counts
+ * returned by EFI QueryVariableInfo can be reported in f_blocks and f_bfree.
+ */
+ buf->f_bsize = 1;
+ buf->f_namelen = NAME_MAX;
+ buf->f_blocks = storage_space;
+ buf->f_bfree = remaining_space;
+ buf->f_type = dentry->d_sb->s_magic;
+
+ /*
+ * In f_bavail we report the free space that the kernel will allow
+ * writing while the x86 storage_paranoia quirk is active. To use more,
+ * boot the kernel with efi_no_storage_paranoia.
+ */
+ if (remaining_space > efivar_reserved_space())
+ buf->f_bavail = remaining_space - efivar_reserved_space();
+ else
+ buf->f_bavail = 0;
+
+ return 0;
+}
static const struct super_operations efivarfs_ops = {
- .statfs = simple_statfs,
+ .statfs = efivarfs_statfs,
.drop_inode = generic_delete_inode,
.evict_inode = efivarfs_evict_inode,
};
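
With f_bsize set to 1, f_blocks and f_bfree carry raw byte counts from QueryVariableInfo, and f_bavail only needs to subtract the quirk's reserved space without underflowing. The clamp reduces to:

#include <stdio.h>

/* f_bavail = remaining minus reserved, clamped at zero (no underflow). */
static unsigned long long bavail(unsigned long long remaining,
				 unsigned long long reserved)
{
	return remaining > reserved ? remaining - reserved : 0;
}

int main(void)
{
	printf("%llu\n", bavail(8192, 5120));	/* 3072 */
	printf("%llu\n", bavail(4096, 5120));	/* 0, not a huge wrapped value */
	return 0;
}
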
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 26b1343c8035..b30dca7de8cc 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3370,6 +3370,11 @@ out_acl:
if (status)
goto out;
}
+ if (bmval1 & FATTR4_WORD1_TIME_CREATE) {
+ status = nfsd4_encode_nfstime4(xdr, &stat.btime);
+ if (status)
+ goto out;
+ }
if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
p = xdr_reserve_space(xdr, 12);
if (!p)
@@ -3386,11 +3391,6 @@ out_acl:
if (status)
goto out;
}
- if (bmval1 & FATTR4_WORD1_TIME_CREATE) {
- status = nfsd4_encode_nfstime4(xdr, &stat.btime);
- if (status)
- goto out;
- }
if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
u64 ino = stat.ino;
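
The nfs4xdr change above is purely a reordering fix: the fattr4 encoding lists attribute values in increasing attribute-number order, so TIME_CREATE must be emitted before TIME_DELTA rather than after TIME_MODIFY. The sketch below walks a bitmask in ascending bit order; the bit positions are illustrative, not the real FATTR4_WORD1_* values.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit positions, for illustration only. */
static const char *const names[] = {
	[11] = "TIME_CREATE", [12] = "TIME_DELTA", [14] = "TIME_MODIFY",
};

int main(void)
{
	uint32_t bmval1 = (1u << 14) | (1u << 12) | (1u << 11);

	/* Strip the lowest set bit each iteration: ascending order. */
	while (bmval1) {
		unsigned int bit = __builtin_ctz(bmval1);

		if (names[bit])
			printf("encode %s\n", names[bit]);
		bmval1 &= bmval1 - 1;
	}
	return 0;
}
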
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index b279f745466e..fb4162a52844 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -122,6 +122,12 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
seq_puts(m, " nosparse");
if (tcon->need_reconnect)
seq_puts(m, "\tDISCONNECTED ");
+ spin_lock(&tcon->tc_lock);
+ if (tcon->origin_fullpath) {
+ seq_printf(m, "\n\tDFS origin fullpath: %s",
+ tcon->origin_fullpath);
+ }
+ spin_unlock(&tcon->tc_lock);
seq_putc(m, '\n');
}
@@ -330,6 +336,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
spin_lock(&server->srv_lock);
if (server->hostname)
seq_printf(m, "Hostname: %s ", server->hostname);
+ seq_printf(m, "\nClientGUID: %pUL", server->client_guid);
spin_unlock(&server->srv_lock);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (!server->rdma)
@@ -427,13 +434,9 @@ skip_rdma:
seq_printf(m, "\nIn Send: %d In MaxReq Wait: %d",
atomic_read(&server->in_send),
atomic_read(&server->num_waiters));
- if (IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)) {
- if (server->origin_fullpath)
- seq_printf(m, "\nDFS origin full path: %s",
- server->origin_fullpath);
- if (server->leaf_fullpath)
- seq_printf(m, "\nDFS leaf full path: %s",
- server->leaf_fullpath);
+ if (server->leaf_fullpath) {
+ seq_printf(m, "\nDFS leaf full path: %s",
+ server->leaf_fullpath);
}
seq_printf(m, "\n\n\tSessions: ");
diff --git a/fs/smb/client/cifs_dfs_ref.c b/fs/smb/client/cifs_dfs_ref.c
index 0329a907bdfe..b1c2499b1c3b 100644
--- a/fs/smb/client/cifs_dfs_ref.c
+++ b/fs/smb/client/cifs_dfs_ref.c
@@ -118,12 +118,12 @@ cifs_build_devname(char *nodename, const char *prepath)
return dev;
}
-static int set_dest_addr(struct smb3_fs_context *ctx, const char *full_path)
+static int set_dest_addr(struct smb3_fs_context *ctx)
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
int rc;
- rc = dns_resolve_server_name_to_ip(full_path, addr, NULL);
+ rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL);
if (!rc)
cifs_set_port(addr, ctx->port);
return rc;
@@ -171,10 +171,9 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path)
mnt = ERR_CAST(full_path);
goto out;
}
- cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
tmp = *cur_ctx;
- tmp.source = full_path;
+ tmp.source = NULL;
tmp.leaf_fullpath = NULL;
tmp.UNC = tmp.prepath = NULL;
tmp.dfs_root_ses = NULL;
@@ -185,13 +184,22 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path)
goto out;
}
- rc = set_dest_addr(ctx, full_path);
+ rc = smb3_parse_devname(full_path, ctx);
if (rc) {
mnt = ERR_PTR(rc);
goto out;
}
- rc = smb3_parse_devname(full_path, ctx);
+ ctx->source = smb3_fs_context_fullpath(ctx, '/');
+ if (IS_ERR(ctx->source)) {
+ mnt = ERR_CAST(ctx->source);
+ ctx->source = NULL;
+ goto out;
+ }
+ cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s dstaddr=%pISpc\n",
+ __func__, ctx->source, ctx->UNC, ctx->prepath, &ctx->dstaddr);
+
+ rc = set_dest_addr(ctx);
if (!rc)
mnt = fc_mount(fc);
else
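
After this change the automount path reparses the referral with smb3_parse_devname() and rebuilds ctx->source from the parsed UNC and prepath, which is what the removed build_unc_path_to_root() in dfs.c (further below) did by hand. A standalone sketch of that concatenation, assuming the \\HOST\SHARE[\path] shape documented in cifsglob.h:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Joins UNC and prepath with a chosen separator and normalizes delimiters,
 * loosely mirroring smb3_fs_context_fullpath() / build_unc_path_to_root(). */
static char *build_fullpath(const char *unc, const char *prepath, char sep)
{
	size_t unc_len = strlen(unc);
	size_t pplen = prepath ? strlen(prepath) + 1 : 0;
	char *path = malloc(unc_len + pplen + 1);
	char *p;

	if (!path)
		return NULL;
	memcpy(path, unc, unc_len);
	p = path + unc_len;
	if (pplen) {
		*p++ = sep;
		memcpy(p, prepath, pplen - 1);
		p += pplen - 1;
	}
	*p = '\0';
	for (p = path; *p; p++)		/* normalize both delimiter styles */
		if (*p == '/' || *p == '\\')
			*p = sep;
	return path;
}

int main(void)
{
	char *s = build_fullpath("\\\\srv\\share", "dir/sub", '/');

	printf("%s\n", s);	/* //srv/share/dir/sub */
	free(s);
	return 0;
}
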
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 4f4492eb975f..a4d8b0ea1c8c 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -688,6 +688,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",noautotune");
if (tcon->ses->server->noblocksnd)
seq_puts(s, ",noblocksend");
+ if (tcon->ses->server->nosharesock)
+ seq_puts(s, ",nosharesock");
if (tcon->snapshot_time)
seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
@@ -884,11 +886,11 @@ struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
int flags, struct smb3_fs_context *old_ctx)
{
- int rc;
- struct super_block *sb = NULL;
- struct cifs_sb_info *cifs_sb = NULL;
struct cifs_mnt_data mnt_data;
+ struct cifs_sb_info *cifs_sb;
+ struct super_block *sb;
struct dentry *root;
+ int rc;
if (cifsFYI) {
cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
@@ -897,11 +899,9 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
cifs_info("Attempting to mount %s\n", old_ctx->source);
}
- cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
- if (cifs_sb == NULL) {
- root = ERR_PTR(-ENOMEM);
- goto out;
- }
+ cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
+ if (!cifs_sb)
+ return ERR_PTR(-ENOMEM);
cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
if (!cifs_sb->ctx) {
@@ -938,10 +938,8 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
if (IS_ERR(sb)) {
- root = ERR_CAST(sb);
cifs_umount(cifs_sb);
- cifs_sb = NULL;
- goto out;
+ return ERR_CAST(sb);
}
if (sb->s_root) {
@@ -972,13 +970,9 @@ out_super:
deactivate_locked_super(sb);
return root;
out:
- if (cifs_sb) {
- if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
- kfree(cifs_sb->prepath);
- smb3_cleanup_fs_context(cifs_sb->ctx);
- kfree(cifs_sb);
- }
- }
+ kfree(cifs_sb->prepath);
+ smb3_cleanup_fs_context(cifs_sb->ctx);
+ kfree(cifs_sb);
return root;
}
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index b212a4e16b39..ca2da713c5fe 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -736,23 +736,20 @@ struct TCP_Server_Info {
#endif
struct mutex refpath_lock; /* protects leaf_fullpath */
/*
- * origin_fullpath: Canonical copy of smb3_fs_context::source.
- * It is used for matching existing DFS tcons.
- *
* leaf_fullpath: Canonical DFS referral path related to this
* connection.
* It is used in DFS cache refresher, reconnect and may
* change due to nested DFS links.
*
- * Both protected by @refpath_lock and @srv_lock. The @refpath_lock is
- * mosly used for not requiring a copy of @leaf_fullpath when getting
+ * Protected by @refpath_lock and @srv_lock. The @refpath_lock is
+ * mostly used for not requiring a copy of @leaf_fullpath when getting
* cached or new DFS referrals (which might also sleep during I/O).
* While @srv_lock is held for making string and NULL comparisons against
* both fields as in mount(2) and cache refresh.
*
* format: \\HOST\SHARE[\OPTIONAL PATH]
*/
- char *origin_fullpath, *leaf_fullpath;
+ char *leaf_fullpath;
};
static inline bool is_smb1(struct TCP_Server_Info *server)
@@ -1205,6 +1202,7 @@ struct cifs_tcon {
struct delayed_work dfs_cache_work;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
+ char *origin_fullpath; /* canonical copy of smb3_fs_context::source */
};
/*
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index d127aded2f28..1d71d658e167 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -85,6 +85,8 @@ extern void release_mid(struct mid_q_entry *mid);
extern void cifs_wake_up_task(struct mid_q_entry *mid);
extern int cifs_handle_standard(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
+extern char *smb3_fs_context_fullpath(const struct smb3_fs_context *ctx,
+ char dirsep);
extern int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx);
extern int smb3_parse_opt(const char *options, const char *key, char **val);
extern int cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs);
@@ -650,7 +652,7 @@ int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
int resp_buftype,
struct cifs_search_info *srch_inf);
-struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server);
+struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon);
void cifs_put_tcp_super(struct super_block *sb);
int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix);
char *extract_hostname(const char *unc);
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 9d963caec35c..19f7385abeec 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -3958,11 +3958,12 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
TRANSACTION2_FFIRST_REQ *pSMB = NULL;
TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
T2_FFIRST_RSP_PARMS *parms;
- int rc = 0;
+ struct nls_table *nls_codepage;
+ unsigned int lnoff;
+ __u16 params, byte_count;
int bytes_returned = 0;
int name_len, remap;
- __u16 params, byte_count;
- struct nls_table *nls_codepage;
+ int rc = 0;
cifs_dbg(FYI, "In FindFirst for %s\n", searchName);
@@ -4043,63 +4044,52 @@ findFirstRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->stats.cifs_stats.num_ffirst);
- if (rc) {/* BB add logic to retry regular search if Unix search
- rejected unexpectedly by server */
- /* BB Add code to handle unsupported level rc */
+ if (rc) {
+ /*
+ * BB: add logic to retry regular search if Unix search rejected
+ * unexpectedly by server.
+ */
+ /* BB: add code to handle unsupported level rc */
cifs_dbg(FYI, "Error in FindFirst = %d\n", rc);
-
cifs_buf_release(pSMB);
-
- /* BB eventually could optimize out free and realloc of buf */
- /* for this case */
+ /*
+ * BB: eventually could optimize out free and realloc of buf for
+ * this case.
+ */
if (rc == -EAGAIN)
goto findFirstRetry;
- } else { /* decode response */
- /* BB remember to free buffer if error BB */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
- if (rc == 0) {
- unsigned int lnoff;
-
- if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
- psrch_inf->unicode = true;
- else
- psrch_inf->unicode = false;
-
- psrch_inf->ntwrk_buf_start = (char *)pSMBr;
- psrch_inf->smallBuf = false;
- psrch_inf->srch_entries_start =
- (char *) &pSMBr->hdr.Protocol +
- le16_to_cpu(pSMBr->t2.DataOffset);
- parms = (T2_FFIRST_RSP_PARMS *)((char *) &pSMBr->hdr.Protocol +
- le16_to_cpu(pSMBr->t2.ParameterOffset));
-
- if (parms->EndofSearch)
- psrch_inf->endOfSearch = true;
- else
- psrch_inf->endOfSearch = false;
-
- psrch_inf->entries_in_buffer =
- le16_to_cpu(parms->SearchCount);
- psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
- psrch_inf->entries_in_buffer;
- lnoff = le16_to_cpu(parms->LastNameOffset);
- if (CIFSMaxBufSize < lnoff) {
- cifs_dbg(VFS, "ignoring corrupt resume name\n");
- psrch_inf->last_entry = NULL;
- return rc;
- }
-
- psrch_inf->last_entry = psrch_inf->srch_entries_start +
- lnoff;
-
- if (pnetfid)
- *pnetfid = parms->SearchHandle;
- } else {
- cifs_buf_release(pSMB);
- }
+ return rc;
+ }
+ /* decode response */
+ rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ if (rc) {
+ cifs_buf_release(pSMB);
+ return rc;
}
- return rc;
+ psrch_inf->unicode = !!(pSMBr->hdr.Flags2 & SMBFLG2_UNICODE);
+ psrch_inf->ntwrk_buf_start = (char *)pSMBr;
+ psrch_inf->smallBuf = false;
+ psrch_inf->srch_entries_start = (char *)&pSMBr->hdr.Protocol +
+ le16_to_cpu(pSMBr->t2.DataOffset);
+
+ parms = (T2_FFIRST_RSP_PARMS *)((char *)&pSMBr->hdr.Protocol +
+ le16_to_cpu(pSMBr->t2.ParameterOffset));
+ psrch_inf->endOfSearch = !!parms->EndofSearch;
+
+ psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount);
+ psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
+ psrch_inf->entries_in_buffer;
+ lnoff = le16_to_cpu(parms->LastNameOffset);
+ if (CIFSMaxBufSize < lnoff) {
+ cifs_dbg(VFS, "ignoring corrupt resume name\n");
+ psrch_inf->last_entry = NULL;
+ } else {
+ psrch_inf->last_entry = psrch_inf->srch_entries_start + lnoff;
+ if (pnetfid)
+ *pnetfid = parms->SearchHandle;
+ }
+ return 0;
}
int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
@@ -4109,11 +4099,12 @@ int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
T2_FNEXT_RSP_PARMS *parms;
- char *response_data;
- int rc = 0;
- int bytes_returned;
unsigned int name_len;
+ unsigned int lnoff;
__u16 params, byte_count;
+ char *response_data;
+ int bytes_returned;
+ int rc = 0;
cifs_dbg(FYI, "In FindNext\n");
@@ -4158,8 +4149,8 @@ int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
pSMB->ResumeFileName[name_len] = 0;
pSMB->ResumeFileName[name_len+1] = 0;
} else {
- rc = -EINVAL;
- goto FNext2_err_exit;
+ cifs_buf_release(pSMB);
+ return -EINVAL;
}
byte_count = params + 1 /* pad */ ;
pSMB->TotalParameterCount = cpu_to_le16(params);
@@ -4170,71 +4161,61 @@ int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->stats.cifs_stats.num_fnext);
+
if (rc) {
+ cifs_buf_release(pSMB);
if (rc == -EBADF) {
psrch_inf->endOfSearch = true;
- cifs_buf_release(pSMB);
rc = 0; /* search probably was closed at end of search*/
- } else
+ } else {
cifs_dbg(FYI, "FindNext returned = %d\n", rc);
- } else { /* decode response */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
-
- if (rc == 0) {
- unsigned int lnoff;
-
- /* BB fixme add lock for file (srch_info) struct here */
- if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
- psrch_inf->unicode = true;
- else
- psrch_inf->unicode = false;
- response_data = (char *) &pSMBr->hdr.Protocol +
- le16_to_cpu(pSMBr->t2.ParameterOffset);
- parms = (T2_FNEXT_RSP_PARMS *)response_data;
- response_data = (char *)&pSMBr->hdr.Protocol +
- le16_to_cpu(pSMBr->t2.DataOffset);
- if (psrch_inf->smallBuf)
- cifs_small_buf_release(
- psrch_inf->ntwrk_buf_start);
- else
- cifs_buf_release(psrch_inf->ntwrk_buf_start);
- psrch_inf->srch_entries_start = response_data;
- psrch_inf->ntwrk_buf_start = (char *)pSMB;
- psrch_inf->smallBuf = false;
- if (parms->EndofSearch)
- psrch_inf->endOfSearch = true;
- else
- psrch_inf->endOfSearch = false;
- psrch_inf->entries_in_buffer =
- le16_to_cpu(parms->SearchCount);
- psrch_inf->index_of_last_entry +=
- psrch_inf->entries_in_buffer;
- lnoff = le16_to_cpu(parms->LastNameOffset);
- if (CIFSMaxBufSize < lnoff) {
- cifs_dbg(VFS, "ignoring corrupt resume name\n");
- psrch_inf->last_entry = NULL;
- return rc;
- } else
- psrch_inf->last_entry =
- psrch_inf->srch_entries_start + lnoff;
-
-/* cifs_dbg(FYI, "fnxt2 entries in buf %d index_of_last %d\n",
- psrch_inf->entries_in_buffer, psrch_inf->index_of_last_entry); */
-
- /* BB fixme add unlock here */
}
-
+ return rc;
}
- /* BB On error, should we leave previous search buf (and count and
- last entry fields) intact or free the previous one? */
-
- /* Note: On -EAGAIN error only caller can retry on handle based calls
- since file handle passed in no longer valid */
-FNext2_err_exit:
- if (rc != 0)
+ /* decode response */
+ rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ if (rc) {
cifs_buf_release(pSMB);
- return rc;
+ return rc;
+ }
+ /* BB fixme add lock for file (srch_info) struct here */
+ psrch_inf->unicode = !!(pSMBr->hdr.Flags2 & SMBFLG2_UNICODE);
+ response_data = (char *)&pSMBr->hdr.Protocol +
+ le16_to_cpu(pSMBr->t2.ParameterOffset);
+ parms = (T2_FNEXT_RSP_PARMS *)response_data;
+ response_data = (char *)&pSMBr->hdr.Protocol +
+ le16_to_cpu(pSMBr->t2.DataOffset);
+
+ if (psrch_inf->smallBuf)
+ cifs_small_buf_release(psrch_inf->ntwrk_buf_start);
+ else
+ cifs_buf_release(psrch_inf->ntwrk_buf_start);
+
+ psrch_inf->srch_entries_start = response_data;
+ psrch_inf->ntwrk_buf_start = (char *)pSMB;
+ psrch_inf->smallBuf = false;
+ psrch_inf->endOfSearch = !!parms->EndofSearch;
+ psrch_inf->entries_in_buffer = le16_to_cpu(parms->SearchCount);
+ psrch_inf->index_of_last_entry += psrch_inf->entries_in_buffer;
+ lnoff = le16_to_cpu(parms->LastNameOffset);
+ if (CIFSMaxBufSize < lnoff) {
+ cifs_dbg(VFS, "ignoring corrupt resume name\n");
+ psrch_inf->last_entry = NULL;
+ } else {
+ psrch_inf->last_entry =
+ psrch_inf->srch_entries_start + lnoff;
+ }
+ /* BB fixme add unlock here */
+
+ /*
+ * BB: On error, should we leave previous search buf
+ * (and count and last entry fields) intact or free the previous one?
+ *
+ * Note: On -EAGAIN error only caller can retry on handle based calls
+ * since file handle passed in no longer valid.
+ */
+ return 0;
}
int
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 9d16626e7a66..dab7bc876507 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -996,7 +996,6 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
*/
}
- kfree(server->origin_fullpath);
kfree(server->leaf_fullpath);
kfree(server);
@@ -1436,7 +1435,9 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
}
/* this function must be called with srv_lock held */
-static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+static int match_server(struct TCP_Server_Info *server,
+ struct smb3_fs_context *ctx,
+ bool match_super)
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
@@ -1467,36 +1468,38 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
(struct sockaddr *)&server->srcaddr))
return 0;
/*
- * - Match for an DFS tcon (@server->origin_fullpath).
- * - Match for an DFS root server connection (@server->leaf_fullpath).
- * - If none of the above and @ctx->leaf_fullpath is set, then
- * it is a new DFS connection.
- * - If 'nodfs' mount option was passed, then match only connections
- * that have no DFS referrals set
- * (e.g. can't failover to other targets).
+ * When matching cifs.ko superblocks (@match_super == true), we can't
+ * really match either @server->leaf_fullpath or @server->dstaddr
+ * directly since this @server might belong to a completely different
+ * server -- in case of domain-based DFS referrals or DFS links -- as
+ * provided earlier by mount(2) through 'source' and 'ip' options.
+ *
+ * Otherwise, match the DFS referral in @server->leaf_fullpath or the
+ * destination address in @server->dstaddr.
+ *
+ * When using 'nodfs' mount option, we avoid sharing it with DFS
+ * connections as they might failover.
*/
- if (!ctx->nodfs) {
- if (ctx->source && server->origin_fullpath) {
- if (!dfs_src_pathname_equal(ctx->source,
- server->origin_fullpath))
+ if (!match_super) {
+ if (!ctx->nodfs) {
+ if (server->leaf_fullpath) {
+ if (!ctx->leaf_fullpath ||
+ strcasecmp(server->leaf_fullpath,
+ ctx->leaf_fullpath))
+ return 0;
+ } else if (ctx->leaf_fullpath) {
return 0;
+ }
} else if (server->leaf_fullpath) {
- if (!ctx->leaf_fullpath ||
- strcasecmp(server->leaf_fullpath,
- ctx->leaf_fullpath))
- return 0;
- } else if (ctx->leaf_fullpath) {
return 0;
}
- } else if (server->origin_fullpath || server->leaf_fullpath) {
- return 0;
}
/*
* Match for a regular connection (address/hostname/port) which has no
* DFS referrals set.
*/
- if (!server->origin_fullpath && !server->leaf_fullpath &&
+ if (!server->leaf_fullpath &&
(strcasecmp(server->hostname, ctx->server_hostname) ||
!match_server_address(server, addr) ||
!match_port(server, addr)))
@@ -1532,7 +1535,8 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
*/
- if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
+ if (CIFS_SERVER_IS_CHAN(server) ||
+ !match_server(server, ctx, false)) {
spin_unlock(&server->srv_lock);
continue;
}
@@ -2320,10 +2324,16 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
if (tcon->status == TID_EXITING)
return 0;
- /* Skip UNC validation when matching DFS connections or superblocks */
- if (!server->origin_fullpath && !server->leaf_fullpath &&
- strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
+
+ if (tcon->origin_fullpath) {
+ if (!ctx->source ||
+ !dfs_src_pathname_equal(ctx->source,
+ tcon->origin_fullpath))
+ return 0;
+ } else if (!server->leaf_fullpath &&
+ strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) {
return 0;
+ }
if (tcon->seal != ctx->seal)
return 0;
if (tcon->snapshot_time != ctx->snapshot_time)
@@ -2722,7 +2732,7 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
}
static int match_prepath(struct super_block *sb,
- struct TCP_Server_Info *server,
+ struct cifs_tcon *tcon,
struct cifs_mnt_data *mnt_data)
{
struct smb3_fs_context *ctx = mnt_data->ctx;
@@ -2733,8 +2743,8 @@ static int match_prepath(struct super_block *sb,
bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
new->prepath;
- if (server->origin_fullpath &&
- dfs_src_pathname_equal(server->origin_fullpath, ctx->source))
+ if (tcon->origin_fullpath &&
+ dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source))
return 1;
if (old_set && new_set && !strcmp(new->prepath, old->prepath))
@@ -2767,8 +2777,9 @@ cifs_match_super(struct super_block *sb, void *data)
}
tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
- if (tlink == NULL) {
- /* can not match superblock if tlink were ever null */
+ if (IS_ERR_OR_NULL(tlink)) {
+ pr_warn_once("%s: skip super matching due to bad tlink(%p)\n",
+ __func__, tlink);
spin_unlock(&cifs_tcp_ses_lock);
return 0;
}
@@ -2782,10 +2793,10 @@ cifs_match_super(struct super_block *sb, void *data)
spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
spin_lock(&tcon->tc_lock);
- if (!match_server(tcp_srv, ctx) ||
+ if (!match_server(tcp_srv, ctx, true) ||
!match_session(ses, ctx) ||
!match_tcon(tcon, ctx) ||
- !match_prepath(sb, tcp_srv, mnt_data)) {
+ !match_prepath(sb, tcon, mnt_data)) {
rc = 0;
goto out;
}
@@ -2933,11 +2944,11 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
static int
generic_ip_connect(struct TCP_Server_Info *server)
{
- int rc = 0;
- __be16 sport;
- int slen, sfamily;
- struct socket *socket = server->ssocket;
struct sockaddr *saddr;
+ struct socket *socket;
+ int slen, sfamily;
+ __be16 sport;
+ int rc = 0;
saddr = (struct sockaddr *) &server->dstaddr;
@@ -2959,18 +2970,19 @@ generic_ip_connect(struct TCP_Server_Info *server)
ntohs(sport));
}
- if (socket == NULL) {
+ if (server->ssocket) {
+ socket = server->ssocket;
+ } else {
rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
- IPPROTO_TCP, &socket, 1);
+ IPPROTO_TCP, &server->ssocket, 1);
if (rc < 0) {
cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
- server->ssocket = NULL;
return rc;
}
/* BB other socket options to set KEEPALIVE, NODELAY? */
cifs_dbg(FYI, "Socket created\n");
- server->ssocket = socket;
+ socket = server->ssocket;
socket->sk->sk_allocation = GFP_NOFS;
socket->sk->sk_use_task_frag = false;
if (sfamily == AF_INET6)
diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
index 2390b2fedd6a..26d14dd0482e 100644
--- a/fs/smb/client/dfs.c
+++ b/fs/smb/client/dfs.c
@@ -54,39 +54,6 @@ out:
return rc;
}
-/*
- * cifs_build_path_to_root returns full path to root when we do not have an
- * existing connection (tcon)
- */
-static char *build_unc_path_to_root(const struct smb3_fs_context *ctx,
- const struct cifs_sb_info *cifs_sb, bool useppath)
-{
- char *full_path, *pos;
- unsigned int pplen = useppath && ctx->prepath ? strlen(ctx->prepath) + 1 : 0;
- unsigned int unc_len = strnlen(ctx->UNC, MAX_TREE_SIZE + 1);
-
- if (unc_len > MAX_TREE_SIZE)
- return ERR_PTR(-EINVAL);
-
- full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
- if (full_path == NULL)
- return ERR_PTR(-ENOMEM);
-
- memcpy(full_path, ctx->UNC, unc_len);
- pos = full_path + unc_len;
-
- if (pplen) {
- *pos = CIFS_DIR_SEP(cifs_sb);
- memcpy(pos + 1, ctx->prepath, pplen);
- pos += pplen;
- }
-
- *pos = '\0'; /* add trailing null */
- convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
- cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
- return full_path;
-}
-
static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
@@ -179,6 +146,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
struct TCP_Server_Info *server;
struct cifs_tcon *tcon;
char *origin_fullpath = NULL;
+ char sep = CIFS_DIR_SEP(cifs_sb);
int num_links = 0;
int rc;
@@ -186,7 +154,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
if (IS_ERR(ref_path))
return PTR_ERR(ref_path);
- full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+ full_path = smb3_fs_context_fullpath(ctx, sep);
if (IS_ERR(full_path)) {
rc = PTR_ERR(full_path);
full_path = NULL;
@@ -228,7 +196,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
kfree(full_path);
ref_path = full_path = NULL;
- full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+ full_path = smb3_fs_context_fullpath(ctx, sep);
if (IS_ERR(full_path)) {
rc = PTR_ERR(full_path);
full_path = NULL;
@@ -249,14 +217,12 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
server = mnt_ctx->server;
tcon = mnt_ctx->tcon;
- mutex_lock(&server->refpath_lock);
- spin_lock(&server->srv_lock);
- if (!server->origin_fullpath) {
- server->origin_fullpath = origin_fullpath;
+ spin_lock(&tcon->tc_lock);
+ if (!tcon->origin_fullpath) {
+ tcon->origin_fullpath = origin_fullpath;
origin_fullpath = NULL;
}
- spin_unlock(&server->srv_lock);
- mutex_unlock(&server->refpath_lock);
+ spin_unlock(&tcon->tc_lock);
if (list_empty(&tcon->dfs_ses_list)) {
list_replace_init(&mnt_ctx->dfs_ses_list,
@@ -279,18 +245,13 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_ses *ses;
- char *source = ctx->source;
bool nodfs = ctx->nodfs;
int rc;
*isdfs = false;
- /* Temporarily set @ctx->source to NULL as we're not matching DFS
- * superblocks yet. See cifs_match_super() and match_server().
- */
- ctx->source = NULL;
rc = get_session(mnt_ctx, NULL);
if (rc)
- goto out;
+ return rc;
ctx->dfs_root_ses = mnt_ctx->ses;
/*
@@ -303,8 +264,9 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
if (!nodfs) {
rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL);
if (rc) {
- if (rc != -ENOENT && rc != -EOPNOTSUPP && rc != -EIO)
- goto out;
+ cifs_dbg(FYI, "%s: no dfs referral for %s: %d\n",
+ __func__, ctx->UNC + 1, rc);
+ cifs_dbg(FYI, "%s: assuming non-dfs mount...\n", __func__);
nodfs = true;
}
}
@@ -312,7 +274,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
rc = cifs_mount_get_tcon(mnt_ctx);
if (!rc)
rc = cifs_is_path_remote(mnt_ctx);
- goto out;
+ return rc;
}
*isdfs = true;
@@ -328,12 +290,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
rc = __dfs_mount_share(mnt_ctx);
if (ses == ctx->dfs_root_ses)
cifs_put_smb_ses(ses);
-out:
- /*
- * Restore previous value of @ctx->source so DFS superblock can be
- * matched in cifs_match_super().
- */
- ctx->source = source;
+
return rc;
}
@@ -567,11 +524,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
int rc;
struct TCP_Server_Info *server = tcon->ses->server;
const struct smb_version_operations *ops = server->ops;
- struct super_block *sb = NULL;
- struct cifs_sb_info *cifs_sb;
struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
- char *tree;
+ struct cifs_sb_info *cifs_sb = NULL;
+ struct super_block *sb = NULL;
struct dfs_info3_param ref = {0};
+ char *tree;
/* only send once per connect */
spin_lock(&tcon->tc_lock);
@@ -603,19 +560,18 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
goto out;
}
- sb = cifs_get_tcp_super(server);
- if (IS_ERR(sb)) {
- rc = PTR_ERR(sb);
- cifs_dbg(VFS, "%s: could not find superblock: %d\n", __func__, rc);
- goto out;
- }
-
- cifs_sb = CIFS_SB(sb);
+ sb = cifs_get_dfs_tcon_super(tcon);
+ if (!IS_ERR(sb))
+ cifs_sb = CIFS_SB(sb);
- /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
- if (!server->leaf_fullpath ||
+ /*
+ * Tree connect to the last share in @tcon->tree_name if no DFS
+ * superblock or cached DFS referral was found.
+ */
+ if (!cifs_sb || !server->leaf_fullpath ||
dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {
- rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
+ rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon,
+ cifs_sb ? cifs_sb->local_nls : nlsc);
goto out;
}
diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h
index 1c90df5ecfbd..98e9d2aca6a7 100644
--- a/fs/smb/client/dfs.h
+++ b/fs/smb/client/dfs.h
@@ -39,16 +39,15 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
- struct TCP_Server_Info *server = tcon->ses->server;
size_t len;
char *s;
- spin_lock(&server->srv_lock);
- if (unlikely(!server->origin_fullpath)) {
- spin_unlock(&server->srv_lock);
+ spin_lock(&tcon->tc_lock);
+ if (unlikely(!tcon->origin_fullpath)) {
+ spin_unlock(&tcon->tc_lock);
return ERR_PTR(-EREMOTE);
}
- spin_unlock(&server->srv_lock);
+ spin_unlock(&tcon->tc_lock);
s = dentry_path_raw(dentry, page, PATH_MAX);
if (IS_ERR(s))
@@ -57,16 +56,16 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
if (!s[1])
s++;
- spin_lock(&server->srv_lock);
- len = strlen(server->origin_fullpath);
+ spin_lock(&tcon->tc_lock);
+ len = strlen(tcon->origin_fullpath);
if (s < (char *)page + len) {
- spin_unlock(&server->srv_lock);
+ spin_unlock(&tcon->tc_lock);
return ERR_PTR(-ENAMETOOLONG);
}
s -= len;
- memcpy(s, server->origin_fullpath, len);
- spin_unlock(&server->srv_lock);
+ memcpy(s, tcon->origin_fullpath, len);
+ spin_unlock(&tcon->tc_lock);
convert_delimiter(s, '/');
return s;
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 1513b2709889..33adf43a01f1 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -1248,18 +1248,20 @@ static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
if (!cifs_sb || !cifs_sb->master_tlink)
return -EINVAL;
tcon = cifs_sb_master_tcon(cifs_sb);
- server = tcon->ses->server;
- if (!server->origin_fullpath) {
+ spin_lock(&tcon->tc_lock);
+ if (!tcon->origin_fullpath) {
+ spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
return 0;
}
+ spin_unlock(&tcon->tc_lock);
+
/*
* After reconnecting to a different server, unique ids won't match anymore, so we disable
* serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index f30f6ddc4b81..879bc8e6555c 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -4936,20 +4936,19 @@ oplock_break_ack:
_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
/*
- * releasing stale oplock after recent reconnect of smb session using
- * a now incorrect file handle is not a data integrity issue but do
- * not bother sending an oplock release if session to server still is
- * disconnected since oplock already released by the server
+ * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
+ * an acknowledgment to be sent when the file has already been closed.
+ * Check for a null server, since this can race with kill_sb calling tree disconnect.
*/
- if (!oplock_break_cancelled) {
- /* check for server null since can race with kill_sb calling tree disconnect */
- if (tcon->ses && tcon->ses->server) {
- rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
- volatile_fid, net_fid, cinode);
- cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
- } else
- pr_warn_once("lease break not sent for unmounted share\n");
- }
+ spin_lock(&cinode->open_file_lock);
+ if (tcon->ses && tcon->ses->server && !oplock_break_cancelled &&
+ !list_empty(&cinode->openFileList)) {
+ spin_unlock(&cinode->open_file_lock);
+ rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+ volatile_fid, net_fid, cinode);
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ } else
+ spin_unlock(&cinode->open_file_lock);
cifs_done_oplock_break(cinode);
}
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 1bda75609b64..4946a0c59600 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -441,14 +441,17 @@ out:
* but there are some bugs that prevent rename from working if there are
* multiple delimiters.
*
- * Returns a sanitized duplicate of @path. @gfp indicates the GFP_* flags
- * for kstrdup.
+ * Return a sanitized duplicate of @path, NULL for empty prefix paths, or
+ * an ERR_PTR on failure.
+ *
+ * @gfp indicates the GFP_* flags for kstrdup.
* The caller is responsible for freeing the original.
*/
#define IS_DELIM(c) ((c) == '/' || (c) == '\\')
char *cifs_sanitize_prepath(char *prepath, gfp_t gfp)
{
char *cursor1 = prepath, *cursor2 = prepath;
+ char *s;
/* skip all prepended delimiters */
while (IS_DELIM(*cursor1))
@@ -469,8 +472,39 @@ char *cifs_sanitize_prepath(char *prepath, gfp_t gfp)
if (IS_DELIM(*(cursor2 - 1)))
cursor2--;
- *(cursor2) = '\0';
- return kstrdup(prepath, gfp);
+ *cursor2 = '\0';
+ if (!*prepath)
+ return NULL;
+ s = kstrdup(prepath, gfp);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
+ return s;
+}
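For reference, a quick sketch of the sanitizer's contract after this change (inputs illustrative): "//a//b/" collapses to "a/b", a prepath consisting only of delimiters now yields NULL rather than a duplicated empty string, and a failed kstrdup() is reported as an ERR_PTR.

    char *p = cifs_sanitize_prepath(prepath, GFP_KERNEL);

    if (IS_ERR(p))
            return PTR_ERR(p);      /* kstrdup() failed: -ENOMEM */
    if (!p)
            return 0;               /* empty prefix path, e.g. "///" */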
+
+/*
+ * Return full path based on the values of @ctx->{UNC,prepath}.
+ *
+ * It is assumed that both values were already parsed by smb3_parse_devname().
+ */
+char *smb3_fs_context_fullpath(const struct smb3_fs_context *ctx, char dirsep)
+{
+ size_t ulen, plen;
+ char *s;
+
+ ulen = strlen(ctx->UNC);
+ plen = ctx->prepath ? strlen(ctx->prepath) + 1 : 0;
+
+ s = kmalloc(ulen + plen + 1, GFP_KERNEL);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
+ memcpy(s, ctx->UNC, ulen);
+ if (plen) {
+ s[ulen] = dirsep;
+ memcpy(s + ulen + 1, ctx->prepath, plen);
+ }
+ s[ulen + plen] = '\0';
+ convert_delimiter(s, dirsep);
+ return s;
}
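A minimal usage sketch of the new helper, matching the two dfs.c call sites converted above (values illustrative):

    /* e.g. ctx->UNC "\\srv\share" + ctx->prepath "a/b" with dirsep '\\'
     * yields "\\srv\share\a\b"; allocation failure returns
     * ERR_PTR(-ENOMEM). */
    char *full_path = smb3_fs_context_fullpath(ctx, CIFS_DIR_SEP(cifs_sb));

    if (IS_ERR(full_path))
            return PTR_ERR(full_path);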
/*
@@ -484,6 +518,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
char *pos;
const char *delims = "/\\";
size_t len;
+ int rc;
if (unlikely(!devname || !*devname)) {
cifs_dbg(VFS, "Device name not specified\n");
@@ -511,6 +546,8 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
/* now go until next delimiter or end of string */
len = strcspn(pos, delims);
+ if (!len)
+ return -EINVAL;
/* move "pos" up to delimiter or NULL */
pos += len;
@@ -533,8 +570,11 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
return 0;
ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL);
- if (!ctx->prepath)
- return -ENOMEM;
+ if (IS_ERR(ctx->prepath)) {
+ rc = PTR_ERR(ctx->prepath);
+ ctx->prepath = NULL;
+ return rc;
+ }
return 0;
}
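One consequence of the new zero-length check above, with an illustrative device name:

    /* "//srv//" previously slipped through with an empty share name;
     * the strcspn() guard now rejects it up front. */
    rc = smb3_parse_devname("//srv//", ctx);
    /* rc == -EINVAL */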
@@ -1146,12 +1186,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
cifs_errorf(fc, "Unknown error parsing devname\n");
goto cifs_parse_mount_err;
}
- ctx->source = kstrdup(param->string, GFP_KERNEL);
- if (ctx->source == NULL) {
+ ctx->source = smb3_fs_context_fullpath(ctx, '/');
+ if (IS_ERR(ctx->source)) {
+ ctx->source = NULL;
cifs_errorf(fc, "OOM when copying UNC string\n");
goto cifs_parse_mount_err;
}
- fc->source = kstrdup(param->string, GFP_KERNEL);
+ fc->source = kstrdup(ctx->source, GFP_KERNEL);
if (fc->source == NULL) {
cifs_errorf(fc, "OOM when copying UNC string\n");
goto cifs_parse_mount_err;
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 1087ac6104a9..c3eeae07e139 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -2344,8 +2344,8 @@ cifs_invalidate_mapping(struct inode *inode)
if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
rc = invalidate_inode_pages2(inode->i_mapping);
if (rc)
- cifs_dbg(VFS, "%s: Could not invalidate inode %p\n",
- __func__, inode);
+ cifs_dbg(VFS, "%s: invalidate inode %p failed with rc %d\n",
+ __func__, inode, rc);
}
return rc;
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index cd914be905b2..70dbfe6584f9 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -156,6 +156,7 @@ tconInfoFree(struct cifs_tcon *tcon)
#ifdef CONFIG_CIFS_DFS_UPCALL
dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
#endif
+ kfree(tcon->origin_fullpath);
kfree(tcon);
}
@@ -1106,20 +1107,25 @@ struct super_cb_data {
struct super_block *sb;
};
-static void tcp_super_cb(struct super_block *sb, void *arg)
+static void tcon_super_cb(struct super_block *sb, void *arg)
{
struct super_cb_data *sd = arg;
- struct TCP_Server_Info *server = sd->data;
struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
+ struct cifs_tcon *t1 = sd->data, *t2;
if (sd->sb)
return;
cifs_sb = CIFS_SB(sb);
- tcon = cifs_sb_master_tcon(cifs_sb);
- if (tcon->ses->server == server)
+ t2 = cifs_sb_master_tcon(cifs_sb);
+
+ spin_lock(&t2->tc_lock);
+ if (t1->ses == t2->ses &&
+ t1->ses->server == t2->ses->server &&
+ t2->origin_fullpath &&
+ dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
sd->sb = sb;
+ spin_unlock(&t2->tc_lock);
}
static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
@@ -1145,6 +1151,7 @@ static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void
return sd.sb;
}
}
+ pr_warn_once("%s: could not find dfs superblock\n", __func__);
return ERR_PTR(-EINVAL);
}
@@ -1154,9 +1161,15 @@ static void __cifs_put_super(struct super_block *sb)
cifs_sb_deactive(sb);
}
-struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
+struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon)
{
- return __cifs_get_super(tcp_super_cb, server);
+ spin_lock(&tcon->tc_lock);
+ if (!tcon->origin_fullpath) {
+ spin_unlock(&tcon->tc_lock);
+ return ERR_PTR(-ENOENT);
+ }
+ spin_unlock(&tcon->tc_lock);
+ return __cifs_get_super(tcon_super_cb, tcon);
}
void cifs_put_tcp_super(struct super_block *sb)
@@ -1198,16 +1211,21 @@ int match_target_ip(struct TCP_Server_Info *server,
int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
{
+ int rc;
+
kfree(cifs_sb->prepath);
+ cifs_sb->prepath = NULL;
if (prefix && *prefix) {
cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
- if (!cifs_sb->prepath)
- return -ENOMEM;
-
- convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
- } else
- cifs_sb->prepath = NULL;
+ if (IS_ERR(cifs_sb->prepath)) {
+ rc = PTR_ERR(cifs_sb->prepath);
+ cifs_sb->prepath = NULL;
+ return rc;
+ }
+ if (cifs_sb->prepath)
+ convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
+ }
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
return 0;
@@ -1238,9 +1256,16 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
*/
if (strlen(full_path) < 2 || !cifs_sb ||
(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
- !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
+ !is_tcon_dfs(tcon))
return 0;
+ spin_lock(&tcon->tc_lock);
+ if (!tcon->origin_fullpath) {
+ spin_unlock(&tcon->tc_lock);
+ return 0;
+ }
+ spin_unlock(&tcon->tc_lock);
+
/*
* Slow path - tcon is DFS and @full_path has prefix path, so attempt
* to get a referral to figure out whether it is an DFS link.
@@ -1264,7 +1289,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
/*
* XXX: we are not using dfs_cache_find() here because we might
- * end filling all the DFS cache and thus potentially
+ * end up filling all the DFS cache and thus potentially
* removing cached DFS targets that the client would eventually
* need during failover.
*/
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 163a03298430..8e696fbd72fa 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -398,9 +398,6 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
rsp_iov);
finished:
- if (cfile)
- cifsFileInfo_put(cfile);
-
SMB2_open_free(&rqst[0]);
if (rc == -EREMCHG) {
pr_warn_once("server share %s deleted\n", tcon->tree_name);
@@ -529,6 +526,9 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
break;
}
+ if (cfile)
+ cifsFileInfo_put(cfile);
+
if (rc && err_iov && err_buftype) {
memcpy(err_iov, rsp_iov, 3 * sizeof(*err_iov));
memcpy(err_buftype, resp_buftype, 3 * sizeof(*err_buftype));
@@ -609,9 +609,6 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
if (islink)
rc = -EREMOTE;
}
- if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
- rc = -EOPNOTSUPP;
}
out:
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 5639d8c48570..87abce010974 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -109,7 +109,11 @@ smb2_add_credits(struct TCP_Server_Info *server,
server->credits--;
server->oplock_credits++;
}
- }
+ } else if ((server->in_flight > 0) && (server->oplock_credits > 3) &&
+ ((optype & CIFS_OP_MASK) == CIFS_OBREAK_OP))
+ /* if we now have too many oplock credits, rebalance so we don't starve normal ops */
+ change_conf(server);
+
scredits = *val;
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
@@ -211,6 +215,16 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
spin_lock(&server->req_lock);
while (1) {
+ spin_unlock(&server->req_lock);
+
+ spin_lock(&server->srv_lock);
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&server->srv_lock);
+ return -ENOENT;
+ }
+ spin_unlock(&server->srv_lock);
+
+ spin_lock(&server->req_lock);
if (server->credits <= 0) {
spin_unlock(&server->req_lock);
cifs_num_waiters_inc(server);
@@ -221,15 +235,6 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
return rc;
spin_lock(&server->req_lock);
} else {
- spin_unlock(&server->req_lock);
- spin_lock(&server->srv_lock);
- if (server->tcpStatus == CifsExiting) {
- spin_unlock(&server->srv_lock);
- return -ENOENT;
- }
- spin_unlock(&server->srv_lock);
-
- spin_lock(&server->req_lock);
scredits = server->credits;
/* can deadlock with reopen */
if (scredits <= 8) {
@@ -4409,6 +4414,8 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
}
spin_unlock(&cifs_tcp_ses_lock);
+ trace_smb3_ses_not_found(ses_id);
+
return -EAGAIN;
}
/*
@@ -4439,8 +4446,8 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
if (rc) {
- cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
- enc ? "en" : "de");
+ cifs_server_dbg(FYI, "%s: Could not get %scryption key. sid: 0x%llx\n", __func__,
+ enc ? "en" : "de", le64_to_cpu(tr_hdr->SessionId));
return rc;
}
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 17fe212ab895..e04766fe6f80 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -3797,6 +3797,12 @@ void smb2_reconnect_server(struct work_struct *work)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
+ continue;
+ }
+ spin_unlock(&ses->ses_lock);
tcon_selected = false;
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 790acf65a092..c6db898dab7c 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -92,7 +92,8 @@ int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
if (ses->Suid == ses_id)
goto found;
}
- cifs_server_dbg(VFS, "%s: Could not find session 0x%llx\n",
+ trace_smb3_ses_not_found(ses_id);
+ cifs_server_dbg(FYI, "%s: Could not find session 0x%llx\n",
__func__, ses_id);
rc = -ENOENT;
goto out;
@@ -153,7 +154,14 @@ smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
if (ses->Suid != ses_id)
continue;
+
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
+ continue;
+ }
++ses->ses_count;
+ spin_unlock(&ses->ses_lock);
return ses;
}
@@ -557,7 +565,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
rc = smb2_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
if (unlikely(rc)) {
- cifs_server_dbg(VFS, "%s: Could not get signing key\n", __func__);
+ cifs_server_dbg(FYI, "%s: Could not get signing key\n", __func__);
return rc;
}
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index d3053bd8ae73..e671bd16f00c 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -1003,6 +1003,26 @@ DEFINE_EVENT(smb3_reconnect_class, smb3_##name, \
DEFINE_SMB3_RECONNECT_EVENT(reconnect);
DEFINE_SMB3_RECONNECT_EVENT(partial_send_reconnect);
+DECLARE_EVENT_CLASS(smb3_ses_class,
+ TP_PROTO(__u64 sesid),
+ TP_ARGS(sesid),
+ TP_STRUCT__entry(
+ __field(__u64, sesid)
+ ),
+ TP_fast_assign(
+ __entry->sesid = sesid;
+ ),
+ TP_printk("sid=0x%llx",
+ __entry->sesid)
+)
+
+#define DEFINE_SMB3_SES_EVENT(name) \
+DEFINE_EVENT(smb3_ses_class, smb3_##name, \
+ TP_PROTO(__u64 sesid), \
+ TP_ARGS(sesid))
+
+DEFINE_SMB3_SES_EVENT(ses_not_found);
+
DECLARE_EVENT_CLASS(smb3_credit_class,
TP_PROTO(__u64 currmid,
__u64 conn_id,
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 0474d0bba0a2..f280502a2aee 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -522,6 +522,16 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
}
while (1) {
+ spin_unlock(&server->req_lock);
+
+ spin_lock(&server->srv_lock);
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&server->srv_lock);
+ return -ENOENT;
+ }
+ spin_unlock(&server->srv_lock);
+
+ spin_lock(&server->req_lock);
if (*credits < num_credits) {
scredits = *credits;
spin_unlock(&server->req_lock);
@@ -547,15 +557,6 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
return -ERESTARTSYS;
spin_lock(&server->req_lock);
} else {
- spin_unlock(&server->req_lock);
-
- spin_lock(&server->srv_lock);
- if (server->tcpStatus == CifsExiting) {
- spin_unlock(&server->srv_lock);
- return -ENOENT;
- }
- spin_unlock(&server->srv_lock);
-
/*
* For normal commands, reserve the last MAX_COMPOUND
* credits to compound requests.
@@ -569,7 +570,6 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
* for servers that are slow to hand out credits on
* new sessions.
*/
- spin_lock(&server->req_lock);
if (!optype && num_credits == 1 &&
server->in_flight > 2 * MAX_COMPOUND &&
*credits <= MAX_COMPOUND) {
diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
index f07a05f37651..408cddf2f094 100644
--- a/fs/smb/server/mgmt/tree_connect.c
+++ b/fs/smb/server/mgmt/tree_connect.c
@@ -120,17 +120,6 @@ struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
return tcon;
}
-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
- unsigned int id)
-{
- struct ksmbd_tree_connect *tc;
-
- tc = ksmbd_tree_conn_lookup(sess, id);
- if (tc)
- return tc->share_conf;
- return NULL;
-}
-
int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
{
int ret = 0;
diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
index 700df36cf3e3..562d647ad9fa 100644
--- a/fs/smb/server/mgmt/tree_connect.h
+++ b/fs/smb/server/mgmt/tree_connect.h
@@ -53,9 +53,6 @@ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
unsigned int id);
-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
- unsigned int id);
-
int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess);
#endif /* __TREE_CONNECT_MANAGEMENT_H__ */
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index da1787c68ba0..cf8822103f50 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -543,7 +543,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
if (le32_to_cpu(hdr->NextCommand) > 0)
sz = large_sz;
- work->response_buf = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
+ work->response_buf = kvzalloc(sz, GFP_KERNEL);
if (!work->response_buf)
return -ENOMEM;
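This kvzalloc() conversion, and the matching ones later in the server code, are behaviour-preserving: the two spellings request the same zeroed allocation.

    buf = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);    /* old form */
    buf = kvzalloc(sz, GFP_KERNEL);                 /* new, equivalent */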
@@ -1322,9 +1322,8 @@ static int decode_negotiation_token(struct ksmbd_conn *conn,
static int ntlm_negotiate(struct ksmbd_work *work,
struct negotiate_message *negblob,
- size_t negblob_len)
+ size_t negblob_len, struct smb2_sess_setup_rsp *rsp)
{
- struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct challenge_message *chgblob;
unsigned char *spnego_blob = NULL;
u16 spnego_blob_len;
@@ -1429,10 +1428,10 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
return user;
}
-static int ntlm_authenticate(struct ksmbd_work *work)
+static int ntlm_authenticate(struct ksmbd_work *work,
+ struct smb2_sess_setup_req *req,
+ struct smb2_sess_setup_rsp *rsp)
{
- struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
- struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess = work->sess;
struct channel *chann = NULL;
@@ -1566,10 +1565,10 @@ binding_session:
}
#ifdef CONFIG_SMB_SERVER_KERBEROS5
-static int krb5_authenticate(struct ksmbd_work *work)
+static int krb5_authenticate(struct ksmbd_work *work,
+ struct smb2_sess_setup_req *req,
+ struct smb2_sess_setup_rsp *rsp)
{
- struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
- struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess = work->sess;
char *in_blob, *out_blob;
@@ -1647,7 +1646,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
return 0;
}
#else
-static int krb5_authenticate(struct ksmbd_work *work)
+static int krb5_authenticate(struct ksmbd_work *work,
+ struct smb2_sess_setup_req *req,
+ struct smb2_sess_setup_rsp *rsp)
{
return -EOPNOTSUPP;
}
@@ -1656,8 +1657,8 @@ static int krb5_authenticate(struct ksmbd_work *work)
int smb2_sess_setup(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
- struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_sess_setup_req *req;
+ struct smb2_sess_setup_rsp *rsp;
struct ksmbd_session *sess;
struct negotiate_message *negblob;
unsigned int negblob_len, negblob_off;
@@ -1665,6 +1666,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
ksmbd_debug(SMB, "Received request for session setup\n");
+ WORK_BUFFERS(work, req, rsp);
+
rsp->StructureSize = cpu_to_le16(9);
rsp->SessionFlags = 0;
rsp->SecurityBufferOffset = cpu_to_le16(72);
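The WORK_BUFFERS() conversions repeated through this file make each handler resolve its request/response headers correctly for compounded requests. Roughly, based on the existing __wbuf() helper in this file (a sketch, not the exact macro expansion):

    if (work->next_smb2_rcv_hdr_off) {
            req = ksmbd_req_buf_next(work);
            rsp = ksmbd_resp_buf_next(work);
    } else {
            req = smb2_get_msg(work->request_buf);
            rsp = smb2_get_msg(work->response_buf);
    }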
@@ -1786,7 +1789,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
if (conn->preferred_auth_mech &
(KSMBD_AUTH_KRB5 | KSMBD_AUTH_MSKRB5)) {
- rc = krb5_authenticate(work);
+ rc = krb5_authenticate(work, req, rsp);
if (rc) {
rc = -EINVAL;
goto out_err;
@@ -1800,7 +1803,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
sess->Preauth_HashValue = NULL;
} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
if (negblob->MessageType == NtLmNegotiate) {
- rc = ntlm_negotiate(work, negblob, negblob_len);
+ rc = ntlm_negotiate(work, negblob, negblob_len, rsp);
if (rc)
goto out_err;
rsp->hdr.Status =
@@ -1813,7 +1816,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
le16_to_cpu(rsp->SecurityBufferLength) - 1);
} else if (negblob->MessageType == NtLmAuthenticate) {
- rc = ntlm_authenticate(work);
+ rc = ntlm_authenticate(work, req, rsp);
if (rc)
goto out_err;
@@ -1911,14 +1914,16 @@ out_err:
int smb2_tree_connect(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_tree_connect_req *req = smb2_get_msg(work->request_buf);
- struct smb2_tree_connect_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_tree_connect_req *req;
+ struct smb2_tree_connect_rsp *rsp;
struct ksmbd_session *sess = work->sess;
char *treename = NULL, *name = NULL;
struct ksmbd_tree_conn_status status;
struct ksmbd_share_config *share;
int rc = -EINVAL;
+ WORK_BUFFERS(work, req, rsp);
+
treename = smb_strndup_from_utf16(req->Buffer,
le16_to_cpu(req->PathLength), true,
conn->local_nls);
@@ -2087,19 +2092,19 @@ static int smb2_create_open_flags(bool file_present, __le32 access,
*/
int smb2_tree_disconnect(struct ksmbd_work *work)
{
- struct smb2_tree_disconnect_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_tree_disconnect_rsp *rsp;
+ struct smb2_tree_disconnect_req *req;
struct ksmbd_session *sess = work->sess;
struct ksmbd_tree_connect *tcon = work->tcon;
+ WORK_BUFFERS(work, req, rsp);
+
rsp->StructureSize = cpu_to_le16(4);
inc_rfc1001_len(work->response_buf, 4);
ksmbd_debug(SMB, "request\n");
if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
- struct smb2_tree_disconnect_req *req =
- smb2_get_msg(work->request_buf);
-
ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
@@ -2122,10 +2127,14 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
int smb2_session_logoff(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_logoff_req *req;
+ struct smb2_logoff_rsp *rsp;
struct ksmbd_session *sess;
- struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
- u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+ u64 sess_id;
+
+ WORK_BUFFERS(work, req, rsp);
+
+ sess_id = le64_to_cpu(req->hdr.SessionId);
rsp->StructureSize = cpu_to_le16(4);
inc_rfc1001_len(work->response_buf, 4);
@@ -2165,12 +2174,14 @@ int smb2_session_logoff(struct ksmbd_work *work)
*/
static noinline int create_smb2_pipe(struct ksmbd_work *work)
{
- struct smb2_create_rsp *rsp = smb2_get_msg(work->response_buf);
- struct smb2_create_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_create_rsp *rsp;
+ struct smb2_create_req *req;
int id;
int err;
char *name;
+ WORK_BUFFERS(work, req, rsp);
+
name = smb_strndup_from_utf16(req->Buffer, le16_to_cpu(req->NameLength),
1, work->conn->local_nls);
if (IS_ERR(name)) {
@@ -2872,11 +2883,9 @@ int smb2_open(struct ksmbd_work *work)
if (!file_present) {
daccess = cpu_to_le32(GENERIC_ALL_FLAGS);
} else {
- rc = ksmbd_vfs_query_maximal_access(idmap,
+ ksmbd_vfs_query_maximal_access(idmap,
path.dentry,
&daccess);
- if (rc)
- goto err_out;
already_permitted = true;
}
maximal_access = daccess;
@@ -5305,8 +5314,10 @@ int smb2_query_info(struct ksmbd_work *work)
static noinline int smb2_close_pipe(struct ksmbd_work *work)
{
u64 id;
- struct smb2_close_req *req = smb2_get_msg(work->request_buf);
- struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_close_req *req;
+ struct smb2_close_rsp *rsp;
+
+ WORK_BUFFERS(work, req, rsp);
id = req->VolatileFileId;
ksmbd_session_rpc_close(work->sess, id);
@@ -5448,6 +5459,9 @@ int smb2_echo(struct ksmbd_work *work)
{
struct smb2_echo_rsp *rsp = smb2_get_msg(work->response_buf);
+ if (work->next_smb2_rcv_hdr_off)
+ rsp = ksmbd_resp_buf_next(work);
+
rsp->StructureSize = cpu_to_le16(4);
rsp->Reserved = 0;
inc_rfc1001_len(work->response_buf, 4);
@@ -6082,8 +6096,10 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
int nbytes = 0, err;
u64 id;
struct ksmbd_rpc_command *rpc_resp;
- struct smb2_read_req *req = smb2_get_msg(work->request_buf);
- struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_read_req *req;
+ struct smb2_read_rsp *rsp;
+
+ WORK_BUFFERS(work, req, rsp);
id = req->VolatileFileId;
@@ -6096,7 +6112,7 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
}
work->aux_payload_buf =
- kvmalloc(rpc_resp->payload_sz, GFP_KERNEL | __GFP_ZERO);
+ kvmalloc(rpc_resp->payload_sz, GFP_KERNEL);
if (!work->aux_payload_buf) {
err = -ENOMEM;
goto out;
@@ -6248,7 +6264,7 @@ int smb2_read(struct ksmbd_work *work)
ksmbd_debug(SMB, "filename %pD, offset %lld, len %zu\n",
fp->filp, offset, length);
- work->aux_payload_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
+ work->aux_payload_buf = kvzalloc(length, GFP_KERNEL);
if (!work->aux_payload_buf) {
err = -ENOMEM;
goto out;
@@ -6331,14 +6347,16 @@ out:
*/
static noinline int smb2_write_pipe(struct ksmbd_work *work)
{
- struct smb2_write_req *req = smb2_get_msg(work->request_buf);
- struct smb2_write_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_write_req *req;
+ struct smb2_write_rsp *rsp;
struct ksmbd_rpc_command *rpc_resp;
u64 id = 0;
int err = 0, ret = 0;
char *data_buf;
size_t length;
+ WORK_BUFFERS(work, req, rsp);
+
length = le32_to_cpu(req->Length);
id = req->VolatileFileId;
@@ -6397,7 +6415,7 @@ static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
int ret;
ssize_t nbytes;
- data_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
+ data_buf = kvzalloc(length, GFP_KERNEL);
if (!data_buf)
return -ENOMEM;
@@ -6607,6 +6625,9 @@ int smb2_cancel(struct ksmbd_work *work)
struct ksmbd_work *iter;
struct list_head *command_list;
+ if (work->next_smb2_rcv_hdr_off)
+ hdr = ksmbd_resp_buf_next(work);
+
ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
hdr->MessageId, hdr->Flags);
@@ -6766,8 +6787,8 @@ static inline bool lock_defer_pending(struct file_lock *fl)
*/
int smb2_lock(struct ksmbd_work *work)
{
- struct smb2_lock_req *req = smb2_get_msg(work->request_buf);
- struct smb2_lock_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_lock_req *req;
+ struct smb2_lock_rsp *rsp;
struct smb2_lock_element *lock_ele;
struct ksmbd_file *fp = NULL;
struct file_lock *flock = NULL;
@@ -6784,6 +6805,8 @@ int smb2_lock(struct ksmbd_work *work)
LIST_HEAD(rollback_list);
int prior_lock = 0;
+ WORK_BUFFERS(work, req, rsp);
+
ksmbd_debug(SMB, "Received lock request\n");
fp = ksmbd_lookup_fd_slow(work, req->VolatileFileId, req->PersistentFileId);
if (!fp) {
@@ -7897,8 +7920,8 @@ out:
*/
static void smb20_oplock_break_ack(struct ksmbd_work *work)
{
- struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
- struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_oplock_break *req;
+ struct smb2_oplock_break *rsp;
struct ksmbd_file *fp;
struct oplock_info *opinfo = NULL;
__le32 err = 0;
@@ -7907,6 +7930,8 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
char req_oplevel = 0, rsp_oplevel = 0;
unsigned int oplock_change_type;
+ WORK_BUFFERS(work, req, rsp);
+
volatile_id = req->VolatileFid;
persistent_id = req->PersistentFid;
req_oplevel = req->OplockLevel;
@@ -8041,8 +8066,8 @@ static int check_lease_state(struct lease *lease, __le32 req_state)
static void smb21_lease_break_ack(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_lease_ack *req = smb2_get_msg(work->request_buf);
- struct smb2_lease_ack *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_lease_ack *req;
+ struct smb2_lease_ack *rsp;
struct oplock_info *opinfo;
__le32 err = 0;
int ret = 0;
@@ -8050,6 +8075,8 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
__le32 lease_state;
struct lease *lease;
+ WORK_BUFFERS(work, req, rsp);
+
ksmbd_debug(OPLOCK, "smb21 lease break, lease state(0x%x)\n",
le32_to_cpu(req->LeaseState));
opinfo = lookup_lease_in_table(conn, req->LeaseKey);
@@ -8175,8 +8202,10 @@ err_out:
*/
int smb2_oplock_break(struct ksmbd_work *work)
{
- struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
- struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_oplock_break *req;
+ struct smb2_oplock_break *rsp;
+
+ WORK_BUFFERS(work, req, rsp);
switch (le16_to_cpu(req->StructureSize)) {
case OP_BREAK_STRUCT_SIZE_20:
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index 569e5eecdf3d..ef20f63e55e6 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -266,7 +266,7 @@ static int ksmbd_negotiate_smb_dialect(void *buf)
if (smb2_neg_size > smb_buf_length)
goto err_out;
- if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+ if (struct_size(req, Dialects, le16_to_cpu(req->DialectCount)) >
smb_buf_length)
goto err_out;
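struct_size() hardens this bounds check against integer overflow. Conceptually (a sketch, not the macro's literal expansion):

    /* struct_size(req, Dialects, n) computes
     * sizeof(*req) + n * sizeof(req->Dialects[0]), saturating to
     * SIZE_MAX on overflow, so a huge DialectCount can no longer wrap
     * the sum below smb_buf_length. */
    size_t need = struct_size(req, Dialects, le16_to_cpu(req->DialectCount));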
@@ -359,8 +359,8 @@ static int smb1_check_user_session(struct ksmbd_work *work)
*/
static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
{
- work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
- GFP_KERNEL | __GFP_ZERO);
+ work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
+ GFP_KERNEL);
work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
if (!work->response_buf) {
@@ -536,7 +536,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
out[baselen + 3] = PERIOD;
if (dot_present)
- memcpy(&out[baselen + 4], extension, 4);
+ memcpy(out + baselen + 4, extension, 4);
else
out[baselen + 4] = '\0';
smbConvertToUTF16((__le16 *)shortname, out, PATH_MAX,
diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
index 6b0d5f1fe85c..aeca0f46068f 100644
--- a/fs/smb/server/smb_common.h
+++ b/fs/smb/server/smb_common.h
@@ -200,7 +200,7 @@ struct smb_hdr {
struct smb_negotiate_req {
struct smb_hdr hdr; /* wct = 0 */
__le16 ByteCount;
- unsigned char DialectsArray[1];
+ unsigned char DialectsArray[];
} __packed;
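With a true flexible array member, sizeof(struct smb_negotiate_req) no longer counts a phantom one-byte element, and sizing can reuse the same overflow-safe helper as the hunk above (the byte_count variable is hypothetical):

    size_t need = struct_size(req, DialectsArray, byte_count);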
struct smb_negotiate_rsp {
diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
index ad919a4239d0..e5e438bf5499 100644
--- a/fs/smb/server/smbacl.c
+++ b/fs/smb/server/smbacl.c
@@ -97,7 +97,7 @@ int compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid)
/* compare all of the subauth values if any */
num_sat = ctsid->num_subauth;
num_saw = cwsid->num_subauth;
- num_subauth = num_sat < num_saw ? num_sat : num_saw;
+ num_subauth = min(num_sat, num_saw);
if (num_subauth) {
for (i = 0; i < num_subauth; ++i) {
if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index 40c721f9227e..b49d47bdafc9 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -229,7 +229,7 @@ static struct ksmbd_ipc_msg *ipc_msg_alloc(size_t sz)
struct ksmbd_ipc_msg *msg;
size_t msg_sz = sz + sizeof(struct ksmbd_ipc_msg);
- msg = kvmalloc(msg_sz, GFP_KERNEL | __GFP_ZERO);
+ msg = kvzalloc(msg_sz, GFP_KERNEL);
if (msg)
msg->sz = sz;
return msg;
@@ -268,7 +268,7 @@ static int handle_response(int type, void *payload, size_t sz)
entry->type + 1, type);
}
- entry->response = kvmalloc(sz, GFP_KERNEL | __GFP_ZERO);
+ entry->response = kvzalloc(sz, GFP_KERNEL);
if (!entry->response) {
ret = -ENOMEM;
break;
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 81489fdedd8e..e35914457350 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -121,11 +121,9 @@ err_out:
return -ENOENT;
}
-int ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
+void ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
struct dentry *dentry, __le32 *daccess)
{
- int ret = 0;
-
*daccess = cpu_to_le32(FILE_READ_ATTRIBUTES | READ_CONTROL);
if (!inode_permission(idmap, d_inode(dentry), MAY_OPEN | MAY_WRITE))
@@ -142,8 +140,6 @@ int ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
if (!inode_permission(idmap, d_inode(dentry->d_parent), MAY_EXEC | MAY_WRITE))
*daccess |= FILE_DELETE_LE;
-
- return ret;
}
/**
@@ -440,7 +436,7 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
}
if (v_len < size) {
- wbuf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ wbuf = kvzalloc(size, GFP_KERNEL);
if (!wbuf) {
err = -ENOMEM;
goto out;
@@ -857,7 +853,7 @@ ssize_t ksmbd_vfs_listxattr(struct dentry *dentry, char **list)
if (size <= 0)
return size;
- vlist = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
+ vlist = kvzalloc(size, GFP_KERNEL);
if (!vlist)
return -ENOMEM;
@@ -1207,7 +1203,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, path);
if (!err)
- return err;
+ return 0;
if (caseless) {
char *filepath;
diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
index 8c0931d4d531..80039312c255 100644
--- a/fs/smb/server/vfs.h
+++ b/fs/smb/server/vfs.h
@@ -72,7 +72,7 @@ struct ksmbd_kstat {
};
int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child);
-int ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
+void ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
struct dentry *dentry, __le32 *daccess);
int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode);
int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode);
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index f35fd653e4e5..670508f1dca1 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -374,6 +374,42 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
}
/**
+ * crypto_akcipher_sync_encrypt() - Invoke public key encrypt operation
+ *
+ * Function invokes the specific public key encrypt operation for a given
+ * public key algorithm
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
+ * crypto_akcipher_sync_decrypt() - Invoke public key decrypt operation
+ *
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: Output length on success; error code in case of error
+ */
+int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
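A hedged sketch of driving the new synchronous helpers; the algorithm name "rsa" and the buffer lengths are illustrative, and error handling is abbreviated:

    struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
    int ret;

    if (IS_ERR(tfm))
            return PTR_ERR(tfm);
    ret = crypto_akcipher_set_pub_key(tfm, key, keylen);
    if (!ret)
            ret = crypto_akcipher_sync_encrypt(tfm, msg, msg_len,
                                               ct, ct_len);
    crypto_free_akcipher(tfm);
    return ret;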
+/**
* crypto_akcipher_sign() - Invoke public key sign operation
*
* Function invokes the specific public key sign operation for a given
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 016d5a302b84..6156161b181f 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -56,7 +56,6 @@ struct sk_buff;
struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
unsigned int (*extsize)(struct crypto_alg *alg);
- int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
int (*init_tfm)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index ae133e98d813..2038764b30c2 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -78,7 +78,7 @@ struct crypto_engine {
/*
* struct crypto_engine_op - crypto hardware engine operations
- * @prepare__request: do some prepare if need before handle the current request
+ * @prepare_request: do some preparation if needed before handling the current request
* @unprepare_request: undo any work done by prepare_request()
* @do_one_request: do encryption for current request
*/
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index e69542d86a2b..f7c2a22cd776 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -260,6 +260,7 @@ struct crypto_ahash {
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+ unsigned int statesize;
unsigned int reqsize;
struct crypto_tfm base;
};
@@ -400,7 +401,7 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
*/
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
- return crypto_hash_alg_common(tfm)->statesize;
+ return tfm->statesize;
}
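The state size is now a per-tfm property rather than a fixed algorithm constant, so a driver can override it from its init path via the helper added to internal/hash.h later in this diff, along the lines of (struct name hypothetical):

    crypto_ahash_set_statesize(tfm, sizeof(struct my_export_state));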
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h
index a9174ba90250..5030f6d2df31 100644
--- a/include/crypto/internal/cipher.h
+++ b/include/crypto/internal/cipher.h
@@ -176,6 +176,8 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src);
+struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher);
+
struct crypto_cipher_spawn {
struct crypto_spawn base;
};
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 37edf3f4e8af..cf65676e45f4 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -149,6 +149,18 @@ static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
halg);
}
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+ return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+ halg);
+}
+
+static inline void crypto_ahash_set_statesize(struct crypto_ahash *tfm,
+ unsigned int size)
+{
+ tfm->statesize = size;
+}
+
static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
unsigned int reqsize)
{
diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h
new file mode 100644
index 000000000000..97cb26ef8115
--- /dev/null
+++ b/include/crypto/internal/sig.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <[email protected]>
+ */
+#ifndef _CRYPTO_INTERNAL_SIG_H
+#define _CRYPTO_INTERNAL_SIG_H
+
+#include <crypto/algapi.h>
+#include <crypto/sig.h>
+
+static inline void *crypto_sig_ctx(struct crypto_sig *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 653992a6e941..8fadd561c50e 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -48,8 +48,6 @@ struct public_key_signature {
const char *pkey_algo;
const char *hash_algo;
const char *encoding;
- const void *data;
- unsigned int data_size;
};
extern void public_key_signature_free(struct public_key_signature *sig);
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
index 2838f529f31e..b9e9281d76c9 100644
--- a/include/crypto/sha2.h
+++ b/include/crypto/sha2.h
@@ -128,7 +128,7 @@ static inline void sha224_init(struct sha256_state *sctx)
sctx->state[7] = SHA224_H7;
sctx->count = 0;
}
-void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
+/* Simply use sha256_update as it is equivalent to sha224_update. */
void sha224_final(struct sha256_state *sctx, u8 *out);
#endif /* _CRYPTO_SHA2_H */
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
index 76173c613058..ab904d82236f 100644
--- a/include/crypto/sha256_base.h
+++ b/include/crypto/sha256_base.h
@@ -8,13 +8,12 @@
#ifndef _CRYPTO_SHA256_BASE_H
#define _CRYPTO_SHA256_BASE_H
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
#include <linux/string.h>
-
-#include <asm/unaligned.h>
+#include <linux/types.h>
typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
int blocks);
@@ -35,12 +34,11 @@ static inline int sha256_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha256_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
+static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx->count += len;
@@ -73,11 +71,20 @@ static inline int sha256_base_do_update(struct shash_desc *desc,
return 0;
}
-static inline int sha256_base_do_finalize(struct shash_desc *desc,
- sha256_block_fn *block_fn)
+static inline int sha256_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
{
- const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_do_update(sctx, data, len, block_fn);
+}
+
+static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
+ sha256_block_fn *block_fn)
+{
+ const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
@@ -96,10 +103,17 @@ static inline int sha256_base_do_finalize(struct shash_desc *desc,
return 0;
}
-static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+static inline int sha256_base_do_finalize(struct shash_desc *desc,
+ sha256_block_fn *block_fn)
{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_do_finalize(sctx, block_fn);
+}
+
+static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
+ unsigned int digest_size)
+{
__be32 *digest = (__be32 *)out;
int i;
@@ -110,4 +124,12 @@ static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
return 0;
}
+static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_finish(sctx, out, digest_size);
+}
+
#endif /* _CRYPTO_SHA256_BASE_H */
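The new lib_-prefixed variants let code holding a bare struct sha256_state, with no shash_desc, reuse the same buffering and padding logic. A sketch, where my_sha256_block() stands in for an arch-specific block function and sctx is assumed to already hold the SHA-256 IV:

    static void my_digest(struct sha256_state *sctx, const u8 *data,
                          unsigned int len, u8 *out)
    {
            lib_sha256_base_do_update(sctx, data, len, my_sha256_block);
            lib_sha256_base_do_finalize(sctx, my_sha256_block);
            lib_sha256_base_finish(sctx, out, SHA256_DIGEST_SIZE);
    }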
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
new file mode 100644
index 000000000000..641b4714c448
--- /dev/null
+++ b/include/crypto/sig.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <[email protected]>
+ */
+#ifndef _CRYPTO_SIG_H
+#define _CRYPTO_SIG_H
+
+#include <linux/crypto.h>
+
+/**
+ * struct crypto_sig - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_sig {
+ struct crypto_tfm base;
+};
+
+/**
+ * DOC: Generic Public Key Signature API
+ *
+ * The Public Key Signature API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_SIG (listed as type "sig" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_sig() - allocate signature tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * signing algorithm e.g. "ecdsa"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for a public key signature algorithm. The returned struct
+ * crypto_sig is the handle that is required for any subsequent
+ * API invocation for signature operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_sig() - free signature tfm handle
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_sig(struct crypto_sig *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_sig_tfm(tfm));
+}
+
+/**
+ * crypto_sig_maxsize() - Get len for output buffer
+ *
+ * Function returns the dest buffer size required for a given key.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, you will end up
+ * in a NULL dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+int crypto_sig_maxsize(struct crypto_sig *tfm);
+
+/**
+ * crypto_sig_sign() - Invoke signing operation
+ *
+ * Function invokes the specific signing operation for a given algorithm
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
+ * crypto_sig_verify() - Invoke signature verification
+ *
+ * Function invokes the specific signature verification operation
+ * for a given algorithm.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @digest: digest
+ * @dlen: digest length
+ *
+ * Return: zero on verification success; error code in case of error.
+ */
+int crypto_sig_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen);
+
+/**
+ * crypto_sig_set_pubkey() - Invoke set public key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded public key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+
+/**
+ * crypto_sig_set_privkey() - Invoke set private key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded private key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_set_privkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+#endif
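A hedged end-to-end sketch of the new interface; the algorithm name "pkcs1pad(rsa,sha256)" and the lengths are illustrative, and which names resolve depends on kernel configuration:

    struct crypto_sig *tfm = crypto_alloc_sig("pkcs1pad(rsa,sha256)", 0, 0);
    int ret;

    if (IS_ERR(tfm))
            return PTR_ERR(tfm);
    ret = crypto_sig_set_pubkey(tfm, key, keylen);
    if (!ret)
            ret = crypto_sig_verify(tfm, sig, sig_len, digest, digest_len);
    crypto_free_sig(tfm);
    return ret;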
diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h
index af452556dcd4..04a92c1013c8 100644
--- a/include/crypto/sm2.h
+++ b/include/crypto/sm2.h
@@ -11,15 +11,18 @@
#ifndef _CRYPTO_SM2_H
#define _CRYPTO_SM2_H
-#include <crypto/sm3.h>
-#include <crypto/akcipher.h>
+struct shash_desc;
-/* The default user id as specified in GM/T 0009-2012 */
-#define SM2_DEFAULT_USERID "1234567812345678"
-#define SM2_DEFAULT_USERID_LEN 16
-
-extern int sm2_compute_z_digest(struct crypto_akcipher *tfm,
- const unsigned char *id, size_t id_len,
- unsigned char dgst[SM3_DIGEST_SIZE]);
+#if IS_REACHABLE(CONFIG_CRYPTO_SM2)
+int sm2_compute_z_digest(struct shash_desc *desc,
+ const void *key, unsigned int keylen, void *dgst);
+#else
+static inline int sm2_compute_z_digest(struct shash_desc *desc,
+ const void *key, unsigned int keylen,
+ void *dgst)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* _CRYPTO_SM2_H */
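For orientation, a sketch of the new calling convention: the caller now supplies an sm3 shash descriptor instead of an akcipher handle. The sm3_tfm name is illustrative and the descriptor setup is abbreviated:

    SHASH_DESC_ON_STACK(desc, sm3_tfm);
    u8 z[SM3_DIGEST_SIZE];
    int ret;

    desc->tfm = sm3_tfm;    /* a previously allocated "sm3" shash */
    ret = sm2_compute_z_digest(desc, key, keylen, z);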
diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h
index c47dc5405f79..516a3f51179e 100644
--- a/include/keys/asymmetric-parser.h
+++ b/include/keys/asymmetric-parser.h
@@ -10,6 +10,8 @@
#ifndef _KEYS_ASYMMETRIC_PARSER_H
#define _KEYS_ASYMMETRIC_PARSER_H
+struct key_preparsed_payload;
+
/*
* Key data parser. Called during key instantiation.
*/
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index fa310ac1db59..31f6fee0c36c 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -25,11 +25,12 @@
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006
+#define CRYPTO_ALG_TYPE_SIG 0x00000007
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
-#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_HASH 0x0000000e
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 18d83a613635..ab088c662e88 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1056,6 +1056,7 @@ struct efivar_operations {
efi_set_variable_t *set_variable;
efi_set_variable_t *set_variable_nonblocking;
efi_query_variable_store_t *query_variable_store;
+ efi_query_variable_info_t *query_variable_info;
};
struct efivars {
@@ -1063,6 +1064,12 @@ struct efivars {
const struct efivar_operations *ops;
};
+#ifdef CONFIG_X86
+u64 __attribute_const__ efivar_reserved_space(void);
+#else
+static inline u64 efivar_reserved_space(void) { return 0; }
+#endif
+
/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
@@ -1101,6 +1108,10 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size, void *data);
+efi_status_t efivar_query_variable_info(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size);
+
#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER)
extern bool efi_capsule_pending(int *reset_type);
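
The new efivar_query_variable_info() wrapper mirrors the firmware's
QueryVariableInfo() service; a hedged usage sketch, with the attribute set
and error mapping chosen for illustration:

	u64 storage_space, remaining_space, max_variable_size;
	efi_status_t status;

	status = efivar_query_variable_info(EFI_VARIABLE_NON_VOLATILE |
					    EFI_VARIABLE_BOOTSERVICE_ACCESS |
					    EFI_VARIABLE_RUNTIME_ACCESS,
					    &storage_space, &remaining_space,
					    &max_variable_size);
	if (status != EFI_SUCCESS)
		return -ENODEV;
	/* on x86, keep efivar_reserved_space() bytes of headroom free */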
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 39aa409e84d5..703ba8203da3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2323,6 +2323,9 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
+struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
+ unsigned long address, struct pt_regs *regs);
+
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
@@ -2334,8 +2337,6 @@ void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
-struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
- unsigned long address, struct pt_regs *regs);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
@@ -3176,7 +3177,7 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
- bool downgrade);
+ bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
@@ -3184,7 +3185,7 @@ extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in,
#ifdef CONFIG_MMU
extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct list_head *uf, bool downgrade);
+ struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index 08967b3f19c8..3c2ad5fae17f 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -835,7 +835,7 @@ struct iw_encode_ext {
* individual keys */
__u16 alg; /* IW_ENCODE_ALG_* */
__u16 key_len;
- __u8 key[0];
+ __u8 key[];
};
/* SIOCSIWMLME data */
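
Replacing the zero-length array with a C99 flexible array member lets the
compiler and the fortify machinery reason about the trailing storage. A
kernel-side allocation sketch (key_len is illustrative):

	struct iw_encode_ext *ext;

	/* sizeof(*ext) plus key_len trailing bytes, overflow-checked */
	ext = kzalloc(struct_size(ext, key, key_len), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;
	ext->key_len = key_len;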
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index e9fd83a02228..e54c3d60a904 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -15,7 +15,7 @@
#include "transition.h"
#define MAX_STACK_ENTRIES 100
-DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);
+static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);
#define STACK_ERR_BUF_SIZE 128
diff --git a/kernel/pid.c b/kernel/pid.c
index 8bce3aebc949..6a1d23a11026 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -657,7 +657,7 @@ void __init pid_idr_init(void)
idr_init(&init_pid_ns.idr);
init_pid_ns.pid_cachep = kmem_cache_create("pid",
- struct_size((struct pid *)NULL, numbers, 1),
+ struct_size_t(struct pid, numbers, 1),
__alignof__(struct pid),
SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT,
NULL);
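
struct_size_t() is the type-based variant of struct_size(): it computes the
same overflow-checked size of a structure with a trailing flexible array,
but takes the type directly instead of a (possibly NULL) pointer:

	/* both evaluate to sizeof(struct pid) plus one 'numbers' element,
	 * saturating at SIZE_MAX on overflow */
	size_t old_way = struct_size((struct pid *)NULL, numbers, 1);
	size_t new_way = struct_size_t(struct pid, numbers, 1);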
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 70a929784a5d..0bf44afe04dd 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -48,7 +48,7 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
return kc;
snprintf(name, sizeof(name), "pid_%u", level + 1);
- len = struct_size((struct pid *)NULL, numbers, level + 1);
+ len = struct_size_t(struct pid, numbers, level + 1);
mutex_lock(&pid_caches_mutex);
/* Name collision forces to do allocation under mutex. */
if (!*pkc)
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 72a4b0b1df28..3ac1ef8677db 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -11,12 +11,11 @@
* Copyright (c) 2014 Red Hat Inc.
*/
-#include <linux/bitops.h>
-#include <linux/export.h>
+#include <asm/unaligned.h>
+#include <crypto/sha256_base.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <crypto/sha2.h>
-#include <asm/unaligned.h>
static const u32 SHA256_K[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
@@ -119,80 +118,40 @@ static void sha256_transform(u32 *state, const u8 *input, u32 *W)
state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}
-void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+static void sha256_transform_blocks(struct sha256_state *sctx,
+ const u8 *input, int blocks)
{
- unsigned int partial, done;
- const u8 *src;
u32 W[64];
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
-
- if ((partial + len) > 63) {
- if (partial) {
- done = -partial;
- memcpy(sctx->buf + partial, data, done + 64);
- src = sctx->buf;
- }
+ do {
+ sha256_transform(sctx->state, input, W);
+ input += SHA256_BLOCK_SIZE;
+ } while (--blocks);
- do {
- sha256_transform(sctx->state, src, W);
- done += 64;
- src = data + done;
- } while (done + 63 < len);
-
- memzero_explicit(W, sizeof(W));
-
- partial = 0;
- }
- memcpy(sctx->buf + partial, src, len - done);
+ memzero_explicit(W, sizeof(W));
}
-EXPORT_SYMBOL(sha256_update);
-void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
- sha256_update(sctx, data, len);
+ lib_sha256_base_do_update(sctx, data, len, sha256_transform_blocks);
}
-EXPORT_SYMBOL(sha224_update);
+EXPORT_SYMBOL(sha256_update);
-static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
+static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_size)
{
- __be32 *dst = (__be32 *)out;
- __be64 bits;
- unsigned int index, pad_len;
- int i;
- static const u8 padding[64] = { 0x80, };
-
- /* Save number of bits */
- bits = cpu_to_be64(sctx->count << 3);
-
- /* Pad out to 56 mod 64. */
- index = sctx->count & 0x3f;
- pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
- sha256_update(sctx, padding, pad_len);
-
- /* Append length (before padding) */
- sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
-
- /* Store state in digest */
- for (i = 0; i < digest_words; i++)
- put_unaligned_be32(sctx->state[i], &dst[i]);
-
- /* Zeroize sensitive information. */
- memzero_explicit(sctx, sizeof(*sctx));
+ lib_sha256_base_do_finalize(sctx, sha256_transform_blocks);
+ lib_sha256_base_finish(sctx, out, digest_size);
}
void sha256_final(struct sha256_state *sctx, u8 *out)
{
- __sha256_final(sctx, out, 8);
+ __sha256_final(sctx, out, 32);
}
EXPORT_SYMBOL(sha256_final);
void sha224_final(struct sha256_state *sctx, u8 *out)
{
- __sha256_final(sctx, out, 7);
+ __sha256_final(sctx, out, 28);
}
EXPORT_SYMBOL(sha224_final);
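
The refactor only swaps the open-coded buffering and padding for the shared
sha256_base helpers; the exported interface is unchanged, so existing users
keep working as-is (data/len are illustrative):

	struct sha256_state sctx;
	u8 digest[SHA256_DIGEST_SIZE];

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);	/* buffers partial blocks */
	sha256_final(&sctx, digest);		/* pads, emits, wipes sctx */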
diff --git a/mm/mmap.c b/mm/mmap.c
index 3e5793ebbaae..51e70fa98450 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -193,8 +193,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
struct mm_struct *mm = current->mm;
struct vm_area_struct *brkvma, *next = NULL;
unsigned long min_brk;
- bool populate;
- bool downgraded = false;
+ bool populate = false;
LIST_HEAD(uf);
struct vma_iterator vmi;
@@ -236,13 +235,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto success;
}
- /*
- * Always allow shrinking brk.
- * do_vma_munmap() may downgrade mmap_lock to read.
- */
+ /* Always allow shrinking brk. */
if (brk <= mm->brk) {
- int ret;
-
/* Search one past newbrk */
vma_iter_init(&vmi, mm, newbrk);
brkvma = vma_find(&vmi, oldbrk);
@@ -250,19 +244,14 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out; /* mapping intersects with an existing non-brk vma. */
/*
* mm->brk must be protected by write mmap_lock.
- * do_vma_munmap() may downgrade the lock, so update it
+ * do_vma_munmap() will drop the lock on success, so update it
* before calling do_vma_munmap().
*/
mm->brk = brk;
- ret = do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true);
- if (ret == 1) {
- downgraded = true;
- goto success;
- } else if (!ret)
- goto success;
-
- mm->brk = origbrk;
- goto out;
+ if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
+ goto out;
+
+ goto success_unlocked;
}
if (check_brk_limits(oldbrk, newbrk - oldbrk))
@@ -283,19 +272,19 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out;
mm->brk = brk;
+ if (mm->def_flags & VM_LOCKED)
+ populate = true;
success:
- populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
- if (downgraded)
- mmap_read_unlock(mm);
- else
- mmap_write_unlock(mm);
+ mmap_write_unlock(mm);
+success_unlocked:
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
return brk;
out:
+ mm->brk = origbrk;
mmap_write_unlock(mm);
return origbrk;
}
@@ -2428,14 +2417,16 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
* @start: The aligned start address to munmap.
* @end: The aligned end address to munmap.
* @uf: The userfaultfd list_head
- * @downgrade: Set to true to attempt a write downgrade of the mmap_lock
+ * @unlock: Set to true to drop the mmap_lock. Unlocking only happens on
+ * success.
*
- * If @downgrade is true, check return code for potential release of the lock.
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
*/
static int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
- unsigned long end, struct list_head *uf, bool downgrade)
+ unsigned long end, struct list_head *uf, bool unlock)
{
struct vm_area_struct *prev, *next = NULL;
struct maple_tree mt_detach;
@@ -2551,33 +2542,24 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
/* Point of no return */
mm->locked_vm -= locked_vm;
mm->map_count -= count;
- /*
- * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
- * VM_GROWSUP VMA. Such VMAs can change their size under
- * down_read(mmap_lock) and collide with the VMA we are about to unmap.
- */
- if (downgrade) {
- if (next && (next->vm_flags & VM_GROWSDOWN))
- downgrade = false;
- else if (prev && (prev->vm_flags & VM_GROWSUP))
- downgrade = false;
- else
- mmap_write_downgrade(mm);
- }
+ if (unlock)
+ mmap_write_downgrade(mm);
/*
* We can free page tables without write-locking mmap_lock because VMAs
* were isolated before we downgraded mmap_lock.
*/
- unmap_region(mm, &mt_detach, vma, prev, next, start, end, !downgrade);
+ unmap_region(mm, &mt_detach, vma, prev, next, start, end, !unlock);
/* Statistics and freeing VMAs */
mas_set(&mas_detach, start);
remove_mt(mm, &mas_detach);
__mt_destroy(&mt_detach);
+ if (unlock)
+ mmap_read_unlock(mm);
validate_mm(mm);
- return downgrade ? 1 : 0;
+ return 0;
clear_tree_failed:
userfaultfd_error:
@@ -2600,18 +2582,18 @@ map_count_exceeded:
* @start: The start address to munmap
* @len: The length of the range to munmap
* @uf: The userfaultfd list_head
- * @downgrade: set to true if the user wants to attempt to write_downgrade the
- * mmap_lock
+ * @unlock: set to true if the user wants to drop the mmap_lock on success
*
* This function takes a @vmi that is either pointing to the previous VMA or set
* to MA_START and sets it up to remove the mapping(s). The @len will be
* aligned and any arch_unmap work will be performed.
*
- * Returns: -EINVAL on failure, 1 on success and unlock, 0 otherwise.
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
*/
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
- bool downgrade)
+ bool unlock)
{
unsigned long end;
struct vm_area_struct *vma;
@@ -2628,10 +2610,13 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
/* Find the first overlapping VMA */
vma = vma_find(vmi, end);
- if (!vma)
+ if (!vma) {
+ if (unlock)
+ mmap_write_unlock(mm);
return 0;
+ }
- return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
+ return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}
/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
@@ -2639,6 +2624,8 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
* @start: The start address to munmap
* @len: The length to be munmapped.
* @uf: The userfaultfd list_head
+ *
+ * Return: 0 on success, error otherwise.
*/
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
@@ -2899,7 +2886,7 @@ unacct_error:
return error;
}
-static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
+static int __vm_munmap(unsigned long start, size_t len, bool unlock)
{
int ret;
struct mm_struct *mm = current->mm;
@@ -2909,16 +2896,8 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = do_vmi_munmap(&vmi, mm, start, len, &uf, downgrade);
- /*
- * Returning 1 indicates mmap_lock is downgraded.
- * But 1 is not legal return value of vm_munmap() and munmap(), reset
- * it to 0 before return.
- */
- if (ret == 1) {
- mmap_read_unlock(mm);
- ret = 0;
- } else
+ ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
+ if (ret || !unlock)
mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
@@ -3028,21 +3007,23 @@ out:
* @start: the start of the address to unmap
* @end: The end of the address to unmap
* @uf: The userfaultfd list_head
- * @downgrade: Attempt to downgrade or not
+ * @unlock: Drop the lock on success
*
- * Returns: 0 on success and not downgraded, 1 on success and downgraded.
* Unmaps a VMA mapping when the vma iterator is already in position.
* Does not handle alignment.
+ *
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
*/
int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf, bool downgrade)
+ unsigned long start, unsigned long end, struct list_head *uf,
+ bool unlock)
{
struct mm_struct *mm = vma->vm_mm;
int ret;
arch_unmap(mm, start, end);
- ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
+ ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
validate_mm(mm);
return ret;
}
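
The calling convention after this change is the one __vm_munmap() above
already demonstrates: only unlock on failure, because a successful call
with unlock == true has dropped the lock itself. In sketch form:

	mmap_write_lock(mm);
	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, true);
	if (ret)
		mmap_write_unlock(mm);	/* error: lock still held */
	/* success: do_vmi_munmap() already released the lock */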
diff --git a/mm/mremap.c b/mm/mremap.c
index fe6b722ae633..11e06e4ab33b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -715,7 +715,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
}
vma_iter_init(&vmi, mm, old_addr);
- if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
+	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) {
/* OOM: unable to split vma, just get accounts right */
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
vm_acct_memory(old_len >> PAGE_SHIFT);
@@ -913,7 +913,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
bool locked = false;
- bool downgraded = false;
struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
LIST_HEAD(uf_unmap_early);
LIST_HEAD(uf_unmap);
@@ -999,24 +998,23 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
* do_vmi_munmap does all the needed commit accounting, and
- * downgrades mmap_lock to read if so directed.
+ * unlocks the mmap_lock if so directed.
*/
if (old_len >= new_len) {
- int retval;
VMA_ITERATOR(vmi, mm, addr + new_len);
- retval = do_vmi_munmap(&vmi, mm, addr + new_len,
- old_len - new_len, &uf_unmap, true);
- /* Returning 1 indicates mmap_lock is downgraded to read. */
- if (retval == 1) {
- downgraded = true;
- } else if (retval < 0 && old_len != new_len) {
- ret = retval;
+ if (old_len == new_len) {
+ ret = addr;
goto out;
}
+ ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
+ &uf_unmap, true);
+ if (ret)
+ goto out;
+
ret = addr;
- goto out;
+ goto out_unlocked;
}
/*
@@ -1101,12 +1099,10 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
out:
if (offset_in_page(ret))
locked = false;
- if (downgraded)
- mmap_read_unlock(current->mm);
- else
- mmap_write_unlock(current->mm);
+ mmap_write_unlock(current->mm);
if (locked && new_len > old_len)
mm_populate(new_addr + old_len, new_len - old_len);
+out_unlocked:
userfaultfd_unmap_complete(mm, &uf_unmap_early);
mremap_userfaultfd_complete(&uf, addr, ret, old_len);
userfaultfd_unmap_complete(mm, &uf_unmap);
diff --git a/mm/nommu.c b/mm/nommu.c
index 37d0b03143f1..c072a660ec2c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -631,6 +631,22 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
EXPORT_SYMBOL(find_vma);
/*
+ * At least xtensa ends up having protection faults even with no
+ * MMU.. No stack expansion, at least.
+ */
+struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
+ unsigned long addr, struct pt_regs *regs)
+{
+ struct vm_area_struct *vma;
+
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (!vma)
+ mmap_read_unlock(mm);
+ return vma;
+}
+
+/*
* expand a stack to a given address
* - not supported under NOMMU conditions
*/
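
For reference, the helper follows the same convention as the MMU version:
a NULL return means the mmap lock has already been released. A hedged
sketch of the arch fault-handler pattern it serves (the bad-area handling
is arch-specific and illustrative):

	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		return;			/* lock already dropped */

	/* ... handle_mm_fault(vma, address, flags, regs); ... */
	mmap_read_unlock(mm);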