Diffstat (limited to 'lib/crypto')
-rw-r--r--  lib/crypto/Kconfig             |   3
-rw-r--r--  lib/crypto/Makefile            |   3
-rw-r--r--  lib/crypto/blake2s-selftest.c  |  41
-rw-r--r--  lib/crypto/blake2s.c           |  37
-rw-r--r--  lib/crypto/sha1.c              | 140
5 files changed, 217 insertions(+), 7 deletions(-)
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 2082af43d51f..9ff549f63540 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -121,6 +121,9 @@ config CRYPTO_LIB_CHACHA20POLY1305
 	select CRYPTO_LIB_POLY1305
 	select CRYPTO_ALGAPI
 
+config CRYPTO_LIB_SHA1
+	tristate
+
 config CRYPTO_LIB_SHA256
 	tristate
 
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 26be2bbe09c5..919cbb2c220d 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -34,6 +34,9 @@ libpoly1305-y					:= poly1305-donna32.o
 libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128)	:= poly1305-donna64.o
 libpoly1305-y					+= poly1305.o
 
+obj-$(CONFIG_CRYPTO_LIB_SHA1)			+= libsha1.o
+libsha1-y					:= sha1.o
+
 obj-$(CONFIG_CRYPTO_LIB_SHA256)			+= libsha256.o
 libsha256-y					:= sha256.o
 
diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
index 409e4b728770..7d77dea15587 100644
--- a/lib/crypto/blake2s-selftest.c
+++ b/lib/crypto/blake2s-selftest.c
@@ -4,6 +4,8 @@
  */
 
 #include <crypto/internal/blake2s.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
 #include <linux/string.h>
 
 /*
@@ -587,5 +589,44 @@ bool __init blake2s_selftest(void)
 		}
 	}
 
+	for (i = 0; i < 32; ++i) {
+		enum { TEST_ALIGNMENT = 16 };
+		u8 unaligned_block[BLAKE2S_BLOCK_SIZE + TEST_ALIGNMENT - 1]
+					__aligned(TEST_ALIGNMENT);
+		u8 blocks[BLAKE2S_BLOCK_SIZE * 2];
+		struct blake2s_state state1, state2;
+
+		get_random_bytes(blocks, sizeof(blocks));
+		get_random_bytes(&state, sizeof(state));
+
+#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \
+    defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
+		memcpy(&state1, &state, sizeof(state1));
+		memcpy(&state2, &state, sizeof(state2));
+		blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE);
+		blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE);
+		if (memcmp(&state1, &state2, sizeof(state1))) {
+			pr_err("blake2s random compress self-test %d: FAIL\n",
+			       i + 1);
+			success = false;
+		}
+#endif
+
+		memcpy(&state1, &state, sizeof(state1));
+		blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE);
+		for (l = 1; l < TEST_ALIGNMENT; ++l) {
+			memcpy(unaligned_block + l, blocks,
+			       BLAKE2S_BLOCK_SIZE);
+			memcpy(&state2, &state, sizeof(state2));
+			blake2s_compress(&state2, unaligned_block + l, 1,
+					 BLAKE2S_BLOCK_SIZE);
+			if (memcmp(&state1, &state2, sizeof(state1))) {
+				pr_err("blake2s random compress align %d self-test %d: FAIL\n",
+				       l, i + 1);
+				success = false;
+			}
+		}
+	}
+
 	return success;
 }
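The hunk above adds two randomized checks: it cross-checks the arch-optimized blake2s_compress() against blake2s_compress_generic() on identical random states, and it feeds the same block to the compression function at every possible misalignment, to catch implementations that silently assume aligned input. Below is a minimal userspace sketch of that second technique, for illustration only; toy_compress() is a made-up stand-in for blake2s_compress(), and the over-aligned scratch buffer plus offset sweep is the point:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGNMENT  16
#define BLOCK_SIZE 64

static void toy_compress(uint32_t state[8], const uint8_t *block)
{
	/* Any byte-at-a-time mix works here; it makes no alignment
	 * assumptions, so results must not depend on the pointer. */
	for (int i = 0; i < BLOCK_SIZE; i++)
		state[i % 8] = state[i % 8] * 33 + block[i];
}

int main(void)
{
	/* ALIGNMENT-1 bytes of slack lets us place the block at
	 * every offset 1..ALIGNMENT-1 past an aligned boundary. */
	_Alignas(ALIGNMENT) uint8_t padded[BLOCK_SIZE + ALIGNMENT - 1];
	uint8_t block[BLOCK_SIZE];
	uint32_t ref[8] = { 0 }, got[8];

	for (int i = 0; i < BLOCK_SIZE; i++)
		block[i] = (uint8_t)(i * 251);	/* arbitrary test pattern */
	toy_compress(ref, block);		/* aligned reference run */

	for (int off = 1; off < ALIGNMENT; off++) {
		memcpy(padded + off, block, BLOCK_SIZE);
		memset(got, 0, sizeof(got));
		toy_compress(got, padded + off);	/* misaligned run */
		if (memcmp(ref, got, sizeof(ref))) {
			printf("align %d: FAIL\n", off);
			return 1;
		}
	}
	printf("all alignments OK\n");
	return 0;
}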
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index c71c09621c09..98e688c6d891 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -16,16 +16,44 @@
 #include <linux/init.h>
 #include <linux/bug.h>
 
+static inline void blake2s_set_lastblock(struct blake2s_state *state)
+{
+	state->f[0] = -1;
+}
+
 void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
 {
-	__blake2s_update(state, in, inlen, false);
+	const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+
+	if (unlikely(!inlen))
+		return;
+	if (inlen > fill) {
+		memcpy(state->buf + state->buflen, in, fill);
+		blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+		state->buflen = 0;
+		in += fill;
+		inlen -= fill;
+	}
+	if (inlen > BLAKE2S_BLOCK_SIZE) {
+		const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+		blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+		in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+		inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+	}
+	memcpy(state->buf + state->buflen, in, inlen);
+	state->buflen += inlen;
 }
 EXPORT_SYMBOL(blake2s_update);
 
 void blake2s_final(struct blake2s_state *state, u8 *out)
 {
 	WARN_ON(IS_ENABLED(DEBUG) && !out);
-	__blake2s_final(state, out, false);
+	blake2s_set_lastblock(state);
+	memset(state->buf + state->buflen, 0,
+	       BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+	blake2s_compress(state, state->buf, 1, state->buflen);
+	cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+	memcpy(out, state->h, state->outlen);
 	memzero_explicit(state, sizeof(*state));
 }
 EXPORT_SYMBOL(blake2s_final);
@@ -38,12 +66,7 @@ static int __init blake2s_mod_init(void)
 	return 0;
 }
 
-static void __exit blake2s_mod_exit(void)
-{
-}
-
 module_init(blake2s_mod_init);
-module_exit(blake2s_mod_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("BLAKE2s hash function");
 MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>");
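Note the invariant the rewritten blake2s_update() maintains: both comparisons are strict (inlen > fill, inlen > BLAKE2S_BLOCK_SIZE) and only nblocks - 1 full blocks are compressed, so the final block of the message always stays buffered in state->buf. That is what lets blake2s_final() mark it via blake2s_set_lastblock(), zero-pad it, and run the last compression. For context, here is a hypothetical kernel-side caller of this library API; blake2s_demo() is an illustration only, not part of this diff:

#include <crypto/blake2s.h>

static void blake2s_demo(const u8 *msg, size_t len,
			 u8 out[BLAKE2S_HASH_SIZE])
{
	struct blake2s_state state;

	blake2s_init(&state, BLAKE2S_HASH_SIZE);
	/* Arbitrary split: any chunking across update calls works,
	 * because update keeps the trailing block in state->buf. */
	blake2s_update(&state, msg, len / 2);
	blake2s_update(&state, msg + len / 2, len - len / 2);
	blake2s_final(&state, out);	/* writes 32 bytes, zeroes state */
}

However the caller chunks the input across blake2s_update() calls, the resulting digest is identical.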
diff --git a/lib/crypto/sha1.c b/lib/crypto/sha1.c
new file mode 100644
index 000000000000..1aebe7be9401
--- /dev/null
+++ b/lib/crypto/sha1.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SHA1 routine optimized to do word accesses rather than byte accesses,
+ * and to avoid unnecessary copies into the context array.
+ *
+ * This was based on the git SHA1 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <crypto/sha1.h>
+#include <asm/unaligned.h>
+
+/*
+ * If you have 32 registers or more, the compiler can (and should)
+ * try to change the array[] accesses into registers. However, on
+ * machines with less than ~25 registers, that won't really work,
+ * and at least gcc will make an unholy mess of it.
+ *
+ * So to avoid that mess which just slows things down, we force
+ * the stores to memory to actually happen (we might be better off
+ * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as
+ * suggested by Artur Skawina - that will also make gcc unable to
+ * try to do the silly "optimize away loads" part because it won't
+ * see what the value will be).
+ *
+ * Ben Herrenschmidt reports that on PPC, the C version comes close
+ * to the optimized asm with this (ie on PPC you don't want that
+ * 'volatile', since there are lots of registers).
+ *
+ * On ARM we get the best code generation by forcing a full memory barrier
+ * between each SHA_ROUND, otherwise gcc happily get wild with spilling and
+ * the stack frame size simply explode and performance goes down the drain.
+ */
+
+#ifdef CONFIG_X86
+  #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
+#elif defined(CONFIG_ARM)
+  #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
+#else
+  #define setW(x, val) (W(x) = (val))
+#endif
+
+/* This "rolls" over the 512-bit array */
+#define W(x) (array[(x)&15])
+
+/*
+ * Where do we get the source from? The first 16 iterations get it from
+ * the input data, the next mix it from the 512-bit array.
+ */
+#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
+#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
+
+#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
+	__u32 TEMP = input(t); setW(t, TEMP); \
+	E += TEMP + rol32(A,5) + (fn) + (constant); \
+	B = ror32(B, 2); \
+	TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
+
+#define T_0_15(t, A, B, C, D, E)  SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
+#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
+#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) ,  0xca62c1d6, A, B, C, D, E )
+
+/**
+ * sha1_transform - single block SHA1 transform (deprecated)
+ *
+ * @digest: 160 bit digest to update
+ * @data:   512 bits of data to hash
+ * @array:  16 words of workspace (see note)
+ *
+ * This function executes SHA-1's internal compression function.  It updates the
+ * 160-bit internal state (@digest) with a single 512-bit data block (@data).
+ *
+ * Don't use this function.  SHA-1 is no longer considered secure.  And even if
+ * you do have to use SHA-1, this isn't the correct way to hash something with
+ * SHA-1 as this doesn't handle padding and finalization.
+ *
+ * Note: If the hash is security sensitive, the caller should be sure
+ * to clear the workspace. This is left to the caller to avoid
+ * unnecessary clears between chained hashing operations.
+ */
+void sha1_transform(__u32 *digest, const char *data, __u32 *array)
+{
+	__u32 A, B, C, D, E;
+	unsigned int i = 0;
+
+	A = digest[0];
+	B = digest[1];
+	C = digest[2];
+	D = digest[3];
+	E = digest[4];
+
+	/* Round 1 - iterations 0-16 take their input from 'data' */
+	for (; i < 16; ++i)
+		T_0_15(i, A, B, C, D, E);
+
+	/* Round 1 - tail. Input from 512-bit mixing array */
+	for (; i < 20; ++i)
+		T_16_19(i, A, B, C, D, E);
+
+	/* Round 2 */
+	for (; i < 40; ++i)
+		T_20_39(i, A, B, C, D, E);
+
+	/* Round 3 */
+	for (; i < 60; ++i)
+		T_40_59(i, A, B, C, D, E);
+
+	/* Round 4 */
+	for (; i < 80; ++i)
+		T_60_79(i, A, B, C, D, E);
+
+	digest[0] += A;
+	digest[1] += B;
+	digest[2] += C;
+	digest[3] += D;
+	digest[4] += E;
+}
+EXPORT_SYMBOL(sha1_transform);
+
+/**
+ * sha1_init - initialize the vectors for a SHA1 digest
+ * @buf: vector to initialize
+ */
+void sha1_init(__u32 *buf)
+{
+	buf[0] = 0x67452301;
+	buf[1] = 0xefcdab89;
+	buf[2] = 0x98badcfe;
+	buf[3] = 0x10325476;
+	buf[4] = 0xc3d2e1f0;
+}
+EXPORT_SYMBOL(sha1_init);
+
+MODULE_LICENSE("GPL");
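As the kerneldoc above stresses, sha1_transform() only runs the compression function; padding and length encoding are left to the caller. A minimal sketch of that finalization follows, for the simple case of a message short enough (at most 55 bytes) to fit in a single padded block. sha1_one_block() is a hypothetical helper written for illustration, not part of this diff:

#include <crypto/sha1.h>
#include <linux/bug.h>
#include <linux/string.h>
#include <asm/unaligned.h>

static void sha1_one_block(const u8 *msg, size_t len,
			   u8 out[SHA1_DIGEST_SIZE])
{
	u32 digest[SHA1_DIGEST_SIZE / 4];
	u32 workspace[SHA1_WORKSPACE_WORDS];
	u8 block[SHA1_BLOCK_SIZE] = { 0 };
	int i;

	/* One block holds len bytes + 0x80 byte + 8 length bytes. */
	if (WARN_ON(len > SHA1_BLOCK_SIZE - 9))
		return;	/* longer messages need multi-block chaining */

	memcpy(block, msg, len);
	block[len] = 0x80;	/* mandatory 1 bit, then zero padding */
	/* message length in *bits*, big endian, in the last 8 bytes */
	put_unaligned_be64((u64)len * 8, block + SHA1_BLOCK_SIZE - 8);

	sha1_init(digest);
	sha1_transform(digest, (const char *)block, workspace);

	/* state words are native-endian; the digest is their BE form */
	for (i = 0; i < SHA1_DIGEST_SIZE / 4; i++)
		put_unaligned_be32(digest[i], out + 4 * i);

	/* per the note above, clear workspace if the hash is sensitive */
	memzero_explicit(workspace, sizeof(workspace));
}

As a quick sanity check, hashing "abc" (len = 3) this way should produce the FIPS 180 test vector a9993e364706816aba3e25717850c26c9cd0d89d.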