Diffstat (limited to 'include/crypto')
| -rw-r--r-- | include/crypto/acompress.h | 269 |
| -rw-r--r-- | include/crypto/aead.h | 50 |
| -rw-r--r-- | include/crypto/cbc.h | 146 |
| -rw-r--r-- | include/crypto/cryptd.h | 13 |
| -rw-r--r-- | include/crypto/dh.h | 58 |
| -rw-r--r-- | include/crypto/ecdh.h | 58 |
| -rw-r--r-- | include/crypto/engine.h | 6 |
| -rw-r--r-- | include/crypto/gf128mul.h | 15 |
| -rw-r--r-- | include/crypto/hash.h | 2 |
| -rw-r--r-- | include/crypto/internal/acompress.h | 81 |
| -rw-r--r-- | include/crypto/internal/scompress.h | 136 |
| -rw-r--r-- | include/crypto/internal/simd.h | 17 |
| -rw-r--r-- | include/crypto/internal/skcipher.h | 65 |
| -rw-r--r-- | include/crypto/kpp.h | 15 |
| -rw-r--r-- | include/crypto/skcipher.h | 4 |
| -rw-r--r-- | include/crypto/xts.h | 26 |
16 files changed, 895 insertions, 66 deletions
| diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h new file mode 100644 index 000000000000..e328b52425a8 --- /dev/null +++ b/include/crypto/acompress.h @@ -0,0 +1,269 @@ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li <[email protected]> + *          Giovanni Cabiddu <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_ACOMP_H +#define _CRYPTO_ACOMP_H +#include <linux/crypto.h> + +#define CRYPTO_ACOMP_ALLOC_OUTPUT	0x00000001 + +/** + * struct acomp_req - asynchronous (de)compression request + * + * @base:	Common attributes for asynchronous crypto requests + * @src:	Source Data + * @dst:	Destination data + * @slen:	Size of the input buffer + * @dlen:	Size of the output buffer and number of bytes produced + * @flags:	Internal flags + * @__ctx:	Start of private context data + */ +struct acomp_req { +	struct crypto_async_request base; +	struct scatterlist *src; +	struct scatterlist *dst; +	unsigned int slen; +	unsigned int dlen; +	u32 flags; +	void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +/** + * struct crypto_acomp - user-instantiated objects which encapsulate + * algorithms and core processing logic + * + * @compress:		Function performs a compress operation + * @decompress:		Function performs a de-compress operation + * @dst_free:		Frees destination buffer if allocated inside the + *			algorithm + * @reqsize:		Context size for (de)compression requests + * @base:		Common crypto API algorithm data structure + */ +struct crypto_acomp { +	int (*compress)(struct acomp_req *req); +	int (*decompress)(struct acomp_req *req); +	void (*dst_free)(struct scatterlist *dst); +	unsigned int reqsize; +	struct crypto_tfm base; +}; + +/** + * struct acomp_alg - asynchronous compression algorithm + * + * @compress:	Function performs a compress operation + * @decompress:	Function performs a de-compress operation + * @dst_free:	Frees destination buffer if allocated inside the algorithm + * @init:	Initialize the cryptographic transformation object. + *		This function is used to initialize the cryptographic + *		transformation object. This function is called only once at + *		the instantiation time, right after the transformation context + *		was allocated. In case the cryptographic hardware has some + *		special requirements which need to be handled by software, this + *		function shall check for the precise requirement of the + *		transformation and put any software fallbacks in place. + * @exit:	Deinitialize the cryptographic transformation object. This is a + *		counterpart to @init, used to remove various changes set in + *		@init. 
+ * + * @reqsize:	Context size for (de)compression requests + * @base:	Common crypto API algorithm data structure + */ +struct acomp_alg { +	int (*compress)(struct acomp_req *req); +	int (*decompress)(struct acomp_req *req); +	void (*dst_free)(struct scatterlist *dst); +	int (*init)(struct crypto_acomp *tfm); +	void (*exit)(struct crypto_acomp *tfm); +	unsigned int reqsize; +	struct crypto_alg base; +}; + +/** + * DOC: Asynchronous Compression API + * + * The Asynchronous Compression API is used with the algorithms of type + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto) + */ + +/** + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle + * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the + *		compression algorithm e.g. "deflate" + * @type:	specifies the type of the algorithm + * @mask:	specifies the mask for the algorithm + * + * Allocate a handle for a compression algorithm. The returned struct + * crypto_acomp is the handle that is required for any subsequent + * API invocation for the compression operations. + * + * Return:	allocated handle in case of success; IS_ERR() is true in case + *		of an error, PTR_ERR() returns the error code. + */ +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, +					u32 mask); + +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) +{ +	return &tfm->base; +} + +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) +{ +	return container_of(alg, struct acomp_alg, base); +} + +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm) +{ +	return container_of(tfm, struct crypto_acomp, base); +} + +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) +{ +	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg); +} + +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm) +{ +	return tfm->reqsize; +} + +static inline void acomp_request_set_tfm(struct acomp_req *req, +					 struct crypto_acomp *tfm) +{ +	req->base.tfm = crypto_acomp_tfm(tfm); +} + +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) +{ +	return __crypto_acomp_tfm(req->base.tfm); +} + +/** + * crypto_free_acomp() -- free ACOMPRESS tfm handle + * + * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + */ +static inline void crypto_free_acomp(struct crypto_acomp *tfm) +{ +	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm)); +} + +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) +{ +	type &= ~CRYPTO_ALG_TYPE_MASK; +	type |= CRYPTO_ALG_TYPE_ACOMPRESS; +	mask |= CRYPTO_ALG_TYPE_MASK; + +	return crypto_has_alg(alg_name, type, mask); +} + +/** + * acomp_request_alloc() -- allocates asynchronous (de)compression request + * + * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * Return:	allocated handle in case of success or NULL in case of an error + */ +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); + +/** + * acomp_request_free() -- zeroize and free asynchronous (de)compression + *			   request as well as the output buffer if allocated + *			   inside the algorithm + * + * @req:	request to free + */ +void acomp_request_free(struct acomp_req *req); + +/** + * acomp_request_set_callback() -- Sets an asynchronous callback + * + * Callback will be called when an asynchronous operation on a given + * request is finished. 
+ * + * @req:	request that the callback will be set for + * @flgs:	specify for instance if the operation may backlog + * @cmpl:	callback which will be called + * @data:	private data used by the caller + */ +static inline void acomp_request_set_callback(struct acomp_req *req, +					      u32 flgs, +					      crypto_completion_t cmpl, +					      void *data) +{ +	req->base.complete = cmpl; +	req->base.data = data; +	req->base.flags = flgs; +} + +/** + * acomp_request_set_params() -- Sets request parameters + * + * Sets parameters required by an acomp operation + * + * @req:	asynchronous compress request + * @src:	pointer to input buffer scatterlist + * @dst:	pointer to output buffer scatterlist. If this is NULL, the + *		acomp layer will allocate the output memory + * @slen:	size of the input buffer + * @dlen:	size of the output buffer. If dst is NULL, this can be used by + *		the user to specify the maximum amount of memory to allocate + */ +static inline void acomp_request_set_params(struct acomp_req *req, +					    struct scatterlist *src, +					    struct scatterlist *dst, +					    unsigned int slen, +					    unsigned int dlen) +{ +	req->src = src; +	req->dst = dst; +	req->slen = slen; +	req->dlen = dlen; + +	if (!req->dst) +		req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; +} + +/** + * crypto_acomp_compress() -- Invoke asynchronous compress operation + * + * Function invokes the asynchronous compress operation + * + * @req:	asynchronous compress request + * + * Return:	zero on success; error code in case of error + */ +static inline int crypto_acomp_compress(struct acomp_req *req) +{ +	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + +	return tfm->compress(req); +} + +/** + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation + * + * Function invokes the asynchronous decompress operation + * + * @req:	asynchronous decompress request + * + * Return:	zero on success; error code in case of error + */ +static inline int crypto_acomp_decompress(struct acomp_req *req) +{ +	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + +	return tfm->decompress(req); +} + +#endif diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 12f84327ca36..03b97629442c 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -55,14 +55,14 @@   * The scatter list pointing to the input data must contain:   *   * * for RFC4106 ciphers, the concatenation of - * associated authentication data || IV || plaintext or ciphertext. Note, the - * same IV (buffer) is also set with the aead_request_set_crypt call. Note, - * the API call of aead_request_set_ad must provide the length of the AAD and - * the IV. The API call of aead_request_set_crypt only points to the size of - * the input plaintext or ciphertext. + *   associated authentication data || IV || plaintext or ciphertext. Note, the + *   same IV (buffer) is also set with the aead_request_set_crypt call. Note, + *   the API call of aead_request_set_ad must provide the length of the AAD and + *   the IV. The API call of aead_request_set_crypt only points to the size of + *   the input plaintext or ciphertext.   *   * * for "normal" AEAD ciphers, the concatenation of - * associated authentication data || plaintext or ciphertext. + *   associated authentication data || plaintext or ciphertext.   
*   * It is important to note that if multiple scatter gather list entries form   * the input data mentioned above, the first entry must not point to a NULL @@ -452,7 +452,7 @@ static inline void aead_request_free(struct aead_request *req)   * completes   *   * The callback function is registered with the aead_request handle and - * must comply with the following template + * must comply with the following template::   *   *	void callback_function(struct crypto_async_request *req, int error)   */ @@ -483,30 +483,18 @@ static inline void aead_request_set_callback(struct aead_request *req,   * destination is the ciphertext. For a decryption operation, the use is   * reversed - the source is the ciphertext and the destination is the plaintext.   * - * For both src/dst the layout is associated data, plain/cipher text, - * authentication tag. - * - * The content of the AD in the destination buffer after processing - * will either be untouched, or it will contain a copy of the AD - * from the source buffer.  In order to ensure that it always has - * a copy of the AD, the user must copy the AD over either before - * or after processing.  Of course this is not relevant if the user - * is doing in-place processing where src == dst. - * - * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, - *		  the caller must concatenate the ciphertext followed by the - *		  authentication tag and provide the entire data stream to the - *		  decryption operation (i.e. the data length used for the - *		  initialization of the scatterlist and the data length for the - *		  decryption operation is identical). For encryption, however, - *		  the authentication tag is created while encrypting the data. - *		  The destination buffer must hold sufficient space for the - *		  ciphertext and the authentication tag while the encryption - *		  invocation must only point to the plaintext data size. The - *		  following code snippet illustrates the memory usage - *		  buffer = kmalloc(ptbuflen + (enc ? authsize : 0)); - *		  sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0)); - *		  aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv); + * The memory structure for a cipher operation has the following structure: + * + * - AEAD encryption input:  assoc data || plaintext + * - AEAD encryption output: assoc data || ciphertext || auth tag + * - AEAD decryption input:  assoc data || ciphertext || auth tag + * - AEAD decryption output: assoc data || plaintext + * + * Even though the kernel requires the presence of the AAD buffer, the + * kernel does not fill the AAD buffer in the output case. If the caller + * wants to have that data buffer filled, the caller must either copy the + * AAD to the destination buffer or use an in-place cipher operation + * (i.e. the same memory location for input and output).   */  static inline void aead_request_set_crypt(struct aead_request *req,  					  struct scatterlist *src, diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h new file mode 100644 index 000000000000..f5b8bfc22e6d --- /dev/null +++ b/include/crypto/cbc.h @@ -0,0 +1,146 @@ +/* + * CBC: Cipher Block Chaining mode + * + * Copyright (c) 2016 Herbert Xu <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#ifndef _CRYPTO_CBC_H +#define _CRYPTO_CBC_H + +#include <crypto/internal/skcipher.h> +#include <linux/string.h> +#include <linux/types.h> + +static inline int crypto_cbc_encrypt_segment( +	struct skcipher_walk *walk, struct crypto_skcipher *tfm, +	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ +	unsigned int bsize = crypto_skcipher_blocksize(tfm); +	unsigned int nbytes = walk->nbytes; +	u8 *src = walk->src.virt.addr; +	u8 *dst = walk->dst.virt.addr; +	u8 *iv = walk->iv; + +	do { +		crypto_xor(iv, src, bsize); +		fn(tfm, iv, dst); +		memcpy(iv, dst, bsize); + +		src += bsize; +		dst += bsize; +	} while ((nbytes -= bsize) >= bsize); + +	return nbytes; +} + +static inline int crypto_cbc_encrypt_inplace( +	struct skcipher_walk *walk, struct crypto_skcipher *tfm, +	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ +	unsigned int bsize = crypto_skcipher_blocksize(tfm); +	unsigned int nbytes = walk->nbytes; +	u8 *src = walk->src.virt.addr; +	u8 *iv = walk->iv; + +	do { +		crypto_xor(src, iv, bsize); +		fn(tfm, src, src); +		iv = src; + +		src += bsize; +	} while ((nbytes -= bsize) >= bsize); + +	memcpy(walk->iv, iv, bsize); + +	return nbytes; +} + +static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req, +					  void (*fn)(struct crypto_skcipher *, +						     const u8 *, u8 *)) +{ +	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); +	struct skcipher_walk walk; +	int err; + +	err = skcipher_walk_virt(&walk, req, false); + +	while (walk.nbytes) { +		if (walk.src.virt.addr == walk.dst.virt.addr) +			err = crypto_cbc_encrypt_inplace(&walk, tfm, fn); +		else +			err = crypto_cbc_encrypt_segment(&walk, tfm, fn); +		err = skcipher_walk_done(&walk, err); +	} + +	return err; +} + +static inline int crypto_cbc_decrypt_segment( +	struct skcipher_walk *walk, struct crypto_skcipher *tfm, +	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ +	unsigned int bsize = crypto_skcipher_blocksize(tfm); +	unsigned int nbytes = walk->nbytes; +	u8 *src = walk->src.virt.addr; +	u8 *dst = walk->dst.virt.addr; +	u8 *iv = walk->iv; + +	do { +		fn(tfm, src, dst); +		crypto_xor(dst, iv, bsize); +		iv = src; + +		src += bsize; +		dst += bsize; +	} while ((nbytes -= bsize) >= bsize); + +	memcpy(walk->iv, iv, bsize); + +	return nbytes; +} + +static inline int crypto_cbc_decrypt_inplace( +	struct skcipher_walk *walk, struct crypto_skcipher *tfm, +	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ +	unsigned int bsize = crypto_skcipher_blocksize(tfm); +	unsigned int nbytes = walk->nbytes; +	u8 *src = walk->src.virt.addr; +	u8 last_iv[bsize]; + +	/* Start of the last block. 
*/ +	src += nbytes - (nbytes & (bsize - 1)) - bsize; +	memcpy(last_iv, src, bsize); + +	for (;;) { +		fn(tfm, src, src); +		if ((nbytes -= bsize) < bsize) +			break; +		crypto_xor(src, src - bsize, bsize); +		src -= bsize; +	} + +	crypto_xor(src, walk->iv, bsize); +	memcpy(walk->iv, last_iv, bsize); + +	return nbytes; +} + +static inline int crypto_cbc_decrypt_blocks( +	struct skcipher_walk *walk, struct crypto_skcipher *tfm, +	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *)) +{ +	if (walk->src.virt.addr == walk->dst.virt.addr) +		return crypto_cbc_decrypt_inplace(walk, tfm, fn); +	else +		return crypto_cbc_decrypt_segment(walk, tfm, fn); +} + +#endif	/* _CRYPTO_CBC_H */ diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index bc792d5a9e88..94418cbf9013 100644 --- a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h @@ -12,10 +12,10 @@  #ifndef _CRYPTO_CRYPT_H  #define _CRYPTO_CRYPT_H -#include <linux/crypto.h>  #include <linux/kernel.h>  #include <crypto/aead.h>  #include <crypto/hash.h> +#include <crypto/skcipher.h>  struct cryptd_ablkcipher {  	struct crypto_ablkcipher base; @@ -34,6 +34,17 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);  bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);  void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); +struct cryptd_skcipher { +	struct crypto_skcipher base; +}; + +struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, +					      u32 type, u32 mask); +struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); +/* Must be called without moving CPUs. */ +bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm); +void cryptd_free_skcipher(struct cryptd_skcipher *tfm); +  struct cryptd_ahash {  	struct crypto_ahash base;  }; diff --git a/include/crypto/dh.h b/include/crypto/dh.h index 5102a8f282e6..6b424ad3482e 100644 --- a/include/crypto/dh.h +++ b/include/crypto/dh.h @@ -13,6 +13,27 @@  #ifndef _CRYPTO_DH_  #define _CRYPTO_DH_ +/** + * DOC: DH Helper Functions + * + * To use DH with the KPP cipher API, the following data structure and + * functions should be used. + * + * To use DH with KPP, the following functions should be used to operate on + * a DH private key. The packet private key can be set with the KPP API + * function call crypto_kpp_set_secret. + */ + +/** + * struct dh - define a DH private key + * + * @key:	Private DH key + * @p:		Diffie-Hellman parameter P + * @g:		Diffie-Hellman generator G + * @key_size:	Size of the private DH key + * @p_size:	Size of DH parameter P + * @g_size:	Size of DH generator G + */  struct dh {  	void *key;  	void *p; @@ -22,8 +43,45 @@ struct dh {  	unsigned int g_size;  }; +/** + * crypto_dh_key_len() - Obtain the size of the private DH key + * @params:	private DH key + * + * This function returns the packet DH key size. A caller can use that + * with the provided DH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */  int crypto_dh_key_len(const struct dh *params); + +/** + * crypto_dh_encode_key() - encode the private key + * @buf:	Buffer allocated by the caller to hold the packet DH + *		private key. The buffer should be at least crypto_dh_key_len + *		bytes in size. + * @len:	Length of the packet private key buffer + * @params:	Buffer with the caller-specified private key + * + * The DH implementations operate on a packet representation of the private + * key. 
+ * + * Return:	-EINVAL if buffer has insufficient size, 0 on success + */  int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params); + +/** + * crypto_dh_decode_key() - decode a private key + * @buf:	Buffer holding a packet key that should be decoded + * @len:	Length of the packet private key buffer + * @params:	Buffer allocated by the caller that is filled with the + *		unpacked DH private key. + * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory. + * + * Return:	-EINVAL if buffer has insufficient size, 0 on success + */  int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);  #endif diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h index 84bad548d194..03a64f62ba7a 100644 --- a/include/crypto/ecdh.h +++ b/include/crypto/ecdh.h @@ -13,18 +13,76 @@  #ifndef _CRYPTO_ECDH_  #define _CRYPTO_ECDH_ +/** + * DOC: ECDH Helper Functions + * + * To use ECDH with the KPP cipher API, the following data structure and + * functions should be used. + * + * The ECC curves known to the ECDH implementation are specified in this + * header file. + * + * To use ECDH with KPP, the following functions should be used to operate on + * an ECDH private key. The packet private key can be set with the KPP API + * function call crypto_kpp_set_secret. + */ +  /* Curves IDs */  #define ECC_CURVE_NIST_P192	0x0001  #define ECC_CURVE_NIST_P256	0x0002 +/** + * struct ecdh - define an ECDH private key + * + * @curve_id:	ECC curve the key is based on. + * @key:	Private ECDH key + * @key_size:	Size of the private ECDH key + */  struct ecdh {  	unsigned short curve_id;  	char *key;  	unsigned short key_size;  }; +/** + * crypto_ecdh_key_len() - Obtain the size of the private ECDH key + * @params:	private ECDH key + * + * This function returns the packet ECDH key size. A caller can use that + * with the provided ECDH private key reference to obtain the required + * memory size to hold a packet key. + * + * Return: size of the key in bytes + */  int crypto_ecdh_key_len(const struct ecdh *params); + +/** + * crypto_ecdh_encode_key() - encode the private key + * @buf:	Buffer allocated by the caller to hold the packet ECDH + *		private key. The buffer should be at least crypto_ecdh_key_len + *		bytes in size. + * @len:	Length of the packet private key buffer + * @p:		Buffer with the caller-specified private key + * + * The ECDH implementations operate on a packet representation of the private + * key. + * + * Return:	-EINVAL if buffer has insufficient size, 0 on success + */  int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p); + +/** + * crypto_ecdh_decode_key() - decode a private key + * @buf:	Buffer holding a packet key that should be decoded + * @len:	Length of the packet private key buffer + * @p:		Buffer allocated by the caller that is filled with the + *		unpacked ECDH private key. + * + * The unpacking obtains the private key by pointing @p to the correct location + * in @buf. Thus, both pointers refer to the same memory. 
+ * + * Return:	-EINVAL if buffer has insufficient size, 0 on success + */  int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p);  #endif diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 04eb5c77addd..1bf600fc99f7 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -43,8 +43,7 @@   * @prepare_hash_request: do some prepare if need before handle the current request   * @unprepare_hash_request: undo any work done by prepare_hash_request()   * @hash_one_request: do hash for current request - * @kworker: thread struct for request pump - * @kworker_task: pointer to task for request pump kworker thread + * @kworker: kthread worker struct for request pump   * @pump_requests: work struct for scheduling work to the request pump   * @priv_data: the engine private data   * @cur_req: the current request which is on processing @@ -78,8 +77,7 @@ struct crypto_engine {  	int (*hash_one_request)(struct crypto_engine *engine,  				struct ahash_request *req); -	struct kthread_worker           kworker; -	struct task_struct              *kworker_task; +	struct kthread_worker           *kworker;  	struct kthread_work             pump_requests;  	void				*priv_data; diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h index da2530e34b26..592d47e565a8 100644 --- a/include/crypto/gf128mul.h +++ b/include/crypto/gf128mul.h @@ -177,24 +177,23 @@ void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);  static inline void gf128mul_free_4k(struct gf128mul_4k *t)  { -	kfree(t); +	kzfree(t);  } -/* 64k table optimization, implemented for lle and bbe */ +/* 64k table optimization, implemented for bbe */  struct gf128mul_64k {  	struct gf128mul_4k *t[16];  }; -/* first initialize with the constant factor with which you - * want to multiply and then call gf128_64k_lle with the other - * factor in the first argument, the table in the second and a - * scratch register in the third. Afterwards *a = *r. */ -struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g); +/* First initialize with the constant factor with which you + * want to multiply and then call gf128mul_64k_bbe with the other + * factor in the first argument, and the table in the second. + * Afterwards, the result is stored in *a. + */  struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);  void gf128mul_free_64k(struct gf128mul_64k *t); -void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t);  void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);  #endif /* _CRYPTO_GF128MUL_H */ diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 26605888a199..216a2b876147 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -605,7 +605,7 @@ static inline struct ahash_request *ahash_request_cast(   * the cipher operation completes.   
*   * The callback function is registered with the &ahash_request handle and - * must comply with the following template + * must comply with the following template::   *   *	void callback_function(struct crypto_async_request *req, int error)   */ diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h new file mode 100644 index 000000000000..1de2b5af12d7 --- /dev/null +++ b/include/crypto/internal/acompress.h @@ -0,0 +1,81 @@ +/* + * Asynchronous Compression operations + * + * Copyright (c) 2016, Intel Corporation + * Authors: Weigang Li <[email protected]> + *          Giovanni Cabiddu <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_ACOMP_INT_H +#define _CRYPTO_ACOMP_INT_H +#include <crypto/acompress.h> + +/* + * Transform internal helpers. + */ +static inline void *acomp_request_ctx(struct acomp_req *req) +{ +	return req->__ctx; +} + +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm) +{ +	return tfm->base.__crt_ctx; +} + +static inline void acomp_request_complete(struct acomp_req *req, +					  int err) +{ +	req->base.complete(&req->base, err); +} + +static inline const char *acomp_alg_name(struct crypto_acomp *tfm) +{ +	return crypto_acomp_tfm(tfm)->__crt_alg->cra_name; +} + +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) +{ +	struct acomp_req *req; + +	req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); +	if (likely(req)) +		acomp_request_set_tfm(req, tfm); +	return req; +} + +static inline void __acomp_request_free(struct acomp_req *req) +{ +	kzfree(req); +} + +/** + * crypto_register_acomp() -- Register asynchronous compression algorithm + * + * Function registers an implementation of an asynchronous + * compression algorithm + * + * @alg:	algorithm definition + * + * Return:	zero on success; error code in case of error + */ +int crypto_register_acomp(struct acomp_alg *alg); + +/** + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm + * + * Function unregisters an implementation of an asynchronous + * compression algorithm + * + * @alg:	algorithm definition + * + * Return:	zero on success; error code in case of error + */ +int crypto_unregister_acomp(struct acomp_alg *alg); + +#endif diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h new file mode 100644 index 000000000000..3fda3c5655a0 --- /dev/null +++ b/include/crypto/internal/scompress.h @@ -0,0 +1,136 @@ +/* + * Synchronous Compression operations + * + * Copyright 2015 LG Electronics Inc. + * Copyright (c) 2016, Intel Corporation + * Author: Giovanni Cabiddu <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ +#ifndef _CRYPTO_SCOMP_INT_H +#define _CRYPTO_SCOMP_INT_H +#include <linux/crypto.h> + +#define SCOMP_SCRATCH_SIZE	131072 + +struct crypto_scomp { +	struct crypto_tfm base; +}; + +/** + * struct scomp_alg - synchronous compression algorithm + * + * @alloc_ctx:	Function allocates algorithm specific context + * @free_ctx:	Function frees context allocated with alloc_ctx + * @compress:	Function performs a compress operation + * @decompress:	Function performs a de-compress operation + * @init:	Initialize the cryptographic transformation object. + *		This function is used to initialize the cryptographic + *		transformation object. This function is called only once at + *		the instantiation time, right after the transformation context + *		was allocated. In case the cryptographic hardware has some + *		special requirements which need to be handled by software, this + *		function shall check for the precise requirement of the + *		transformation and put any software fallbacks in place. + * @exit:	Deinitialize the cryptographic transformation object. This is a + *		counterpart to @init, used to remove various changes set in + *		@init. + * @base:	Common crypto API algorithm data structure + */ +struct scomp_alg { +	void *(*alloc_ctx)(struct crypto_scomp *tfm); +	void (*free_ctx)(struct crypto_scomp *tfm, void *ctx); +	int (*compress)(struct crypto_scomp *tfm, const u8 *src, +			unsigned int slen, u8 *dst, unsigned int *dlen, +			void *ctx); +	int (*decompress)(struct crypto_scomp *tfm, const u8 *src, +			  unsigned int slen, u8 *dst, unsigned int *dlen, +			  void *ctx); +	struct crypto_alg base; +}; + +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg) +{ +	return container_of(alg, struct scomp_alg, base); +} + +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm) +{ +	return container_of(tfm, struct crypto_scomp, base); +} + +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm) +{ +	return &tfm->base; +} + +static inline void crypto_free_scomp(struct crypto_scomp *tfm) +{ +	crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm)); +} + +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) +{ +	return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); +} + +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) +{ +	return crypto_scomp_alg(tfm)->alloc_ctx(tfm); +} + +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, +					 void *ctx) +{ +	return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); +} + +static inline int crypto_scomp_compress(struct crypto_scomp *tfm, +					const u8 *src, unsigned int slen, +					u8 *dst, unsigned int *dlen, void *ctx) +{ +	return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx); +} + +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm, +					  const u8 *src, unsigned int slen, +					  u8 *dst, unsigned int *dlen, +					  void *ctx) +{ +	return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen, +						 ctx); +} + +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); +void crypto_acomp_scomp_free_ctx(struct acomp_req *req); + +/** + * crypto_register_scomp() -- Register synchronous compression algorithm + * + * Function registers an implementation of a synchronous + * compression algorithm + * + * @alg:	algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_scomp(struct scomp_alg 
*alg); + +/** + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm + * + * Function unregisters an implementation of a synchronous + * compression algorithm + * + * @alg:	algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_unregister_scomp(struct scomp_alg *alg); + +#endif diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h new file mode 100644 index 000000000000..429509968f68 --- /dev/null +++ b/include/crypto/internal/simd.h @@ -0,0 +1,17 @@ +/* + * Shared crypto simd helpers + */ + +#ifndef _CRYPTO_INTERNAL_SIMD_H +#define _CRYPTO_INTERNAL_SIMD_H + +struct simd_skcipher_alg; + +struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, +						      const char *drvname, +						      const char *basename); +struct simd_skcipher_alg *simd_skcipher_create(const char *algname, +					       const char *basename); +void simd_skcipher_free(struct simd_skcipher_alg *alg); + +#endif /* _CRYPTO_INTERNAL_SIMD_H */ diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index a21a95e1a375..8735979ed341 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -15,8 +15,10 @@  #include <crypto/algapi.h>  #include <crypto/skcipher.h> +#include <linux/list.h>  #include <linux/types.h> +struct aead_request;  struct rtattr;  struct skcipher_instance { @@ -34,6 +36,40 @@ struct crypto_skcipher_spawn {  	struct crypto_spawn base;  }; +struct skcipher_walk { +	union { +		struct { +			struct page *page; +			unsigned long offset; +		} phys; + +		struct { +			u8 *page; +			void *addr; +		} virt; +	} src, dst; + +	struct scatter_walk in; +	unsigned int nbytes; + +	struct scatter_walk out; +	unsigned int total; + +	struct list_head buffers; + +	u8 *page; +	u8 *buffer; +	u8 *oiv; +	void *iv; + +	unsigned int ivsize; + +	int flags; +	unsigned int blocksize; +	unsigned int chunksize; +	unsigned int alignmask; +}; +  extern const struct crypto_type crypto_givcipher_type;  static inline struct crypto_instance *skcipher_crypto_instance( @@ -68,14 +104,6 @@ static inline void crypto_set_skcipher_spawn(  int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,  			 u32 type, u32 mask); -static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn, -					const char *name, u32 type, u32 mask) -{ -	return crypto_grab_skcipher(spawn, name, type, mask); -} - -struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); -  static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)  {  	crypto_drop_spawn(&spawn->base); @@ -99,12 +127,6 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher(  	return crypto_spawn_tfm2(&spawn->base);  } -static inline struct crypto_skcipher *crypto_spawn_skcipher2( -	struct crypto_skcipher_spawn *spawn) -{ -	return crypto_spawn_skcipher(spawn); -} -  static inline void crypto_skcipher_set_reqsize(  	struct crypto_skcipher *skcipher, unsigned int reqsize)  { @@ -118,6 +140,21 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);  int skcipher_register_instance(struct crypto_template *tmpl,  			       struct skcipher_instance *inst); +int skcipher_walk_done(struct skcipher_walk *walk, int err); +int skcipher_walk_virt(struct skcipher_walk *walk, +		       struct skcipher_request *req, +		       bool atomic); +void skcipher_walk_atomise(struct skcipher_walk *walk); +int skcipher_walk_async(struct skcipher_walk *walk, +			
struct skcipher_request *req); +int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, +		       bool atomic); +int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, +			       struct aead_request *req, bool atomic); +int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, +			       struct aead_request *req, bool atomic); +void skcipher_walk_complete(struct skcipher_walk *walk, int err); +  static inline void ablkcipher_request_complete(struct ablkcipher_request *req,  					       int err)  { diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index 30791f75c180..4307a2f2365f 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -71,7 +71,7 @@ struct crypto_kpp {   *   * @reqsize:		Request context size required by algorithm   *			implementation - * @base		Common crypto API algorithm data structure + * @base:		Common crypto API algorithm data structure   */  struct kpp_alg {  	int (*set_secret)(struct crypto_kpp *tfm, void *buffer, @@ -89,7 +89,7 @@ struct kpp_alg {  };  /** - * DOC: Generic Key-agreement Protocol Primitevs API + * DOC: Generic Key-agreement Protocol Primitives API   *   * The KPP API is used with the algorithm type   * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto) @@ -264,6 +264,12 @@ struct kpp_secret {   * Function invokes the specific kpp operation for a given alg.   *   * @tfm:	tfm handle + * @buffer:	Buffer holding the packet representation of the private + *		key. The structure of the packet key depends on the particular + *		KPP implementation. Packing and unpacking helpers are provided + *		for ECDH and DH (see the respective header files for those + *		implementations). + * @len:	Length of the packet private key buffer.   *   * Return: zero on success; error code in case of error   */ @@ -279,7 +285,10 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer,   * crypto_kpp_generate_public_key() - Invoke kpp operation   *   * Function invokes the specific kpp operation for generating the public part - * for a given kpp algorithm + * for a given kpp algorithm. + * + * To generate a private key, the caller should use a random number generator. + * The output of the requested length serves as the private key.   *   * @req:	kpp key request   * diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index cc4d98a7892e..750b14f1ada4 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -516,7 +516,7 @@ static inline void skcipher_request_zero(struct skcipher_request *req)   * skcipher_request_set_callback() - set asynchronous callback function   * @req: request handle   * @flags: specify zero or an ORing of the flags - *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and   *	   increase the wait queue beyond the initial maximum size;   *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep   * @compl: callback function pointer to be registered with the request handle @@ -533,7 +533,7 @@ static inline void skcipher_request_zero(struct skcipher_request *req)   * cipher operation completes.   
*   * The callback function is registered with the skcipher_request handle and - * must comply with the following template + * must comply with the following template::   *   *	void callback_function(struct crypto_async_request *req, int error)   */ diff --git a/include/crypto/xts.h b/include/crypto/xts.h index ede6b97b24cc..77b630672b2c 100644 --- a/include/crypto/xts.h +++ b/include/crypto/xts.h @@ -2,8 +2,7 @@  #define _CRYPTO_XTS_H  #include <crypto/b128ops.h> -#include <linux/crypto.h> -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h>  #include <linux/fips.h>  struct scatterlist; @@ -51,4 +50,27 @@ static inline int xts_check_key(struct crypto_tfm *tfm,  	return 0;  } +static inline int xts_verify_key(struct crypto_skcipher *tfm, +				 const u8 *key, unsigned int keylen) +{ +	/* +	 * key consists of keys of equal size concatenated, therefore +	 * the length must be even. +	 */ +	if (keylen % 2) { +		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); +		return -EINVAL; +	} + +	/* ensure that the AES and tweak key are not identical */ +	if ((fips_enabled || crypto_skcipher_get_flags(tfm) & +			     CRYPTO_TFM_REQ_WEAK_KEY) && +	    !crypto_memneq(key, key + (keylen / 2), keylen / 2)) { +		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); +		return -EINVAL; +	} + +	return 0; +} +  #endif  /* _CRYPTO_XTS_H */
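The following usage sketches are editorial additions, not part of the diff above. First, the new acomp interface (include/crypto/acompress.h) is scatterlist-based and callback-driven; the sketch below shows one way a caller might drive it. The "deflate" algorithm name, the flat in/out buffers and the omission of a completion callback are assumptions made for brevity.

#include <crypto/acompress.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_deflate_compress(const void *in, unsigned int inlen,
				    void *out, unsigned int outlen)
{
	struct scatterlist src, dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);	/* assumed available */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&src, in, inlen);
	sg_init_one(&dst, out, outlen);
	acomp_request_set_params(req, &src, &dst, inlen, outlen);

	/*
	 * No completion callback here: this assumes a synchronous backend.
	 * A real asynchronous user must register a callback and wait when
	 * the return value is -EINPROGRESS or -EBUSY.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_acomp_compress(req);
	/* on success, req->dlen holds the number of bytes produced */

	acomp_request_free(req);
out_free_tfm:
	crypto_free_acomp(tfm);
	return err;
}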
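The aead.h documentation change above describes the in/out memory layout (assoc data || plaintext on input, assoc data || ciphertext || auth tag on output). A minimal, hypothetical in-place encryption sketch following that layout; the buffer handling, GFP flags and the NULL callback (i.e. an assumed synchronous implementation) are illustrative assumptions, not part of the patch.

#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_aead_encrypt(struct crypto_aead *tfm, u8 *iv,
				const u8 *assoc, unsigned int assoclen,
				const u8 *pt, unsigned int ptlen)
{
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct aead_request *req;
	struct scatterlist sg;
	u8 *buf;
	int err;

	/* input layout: assoc data || plaintext, plus room for the auth tag */
	buf = kmalloc(assoclen + ptlen + authsize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, assoc, assoclen);
	memcpy(buf + assoclen, pt, ptlen);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		kfree(buf);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, assoclen + ptlen + authsize);
	aead_request_set_ad(req, assoclen);
	/* cryptlen covers only the plaintext; the tag is appended on output */
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	/* NULL callback: assumes a synchronous implementation for brevity */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_aead_encrypt(req);
	/* on success, buf now holds assoc data || ciphertext || auth tag */

	aead_request_free(req);
	kfree(buf);
	return err;
}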
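The new crypto/cbc.h turns the CBC chaining loops into inline template helpers that a cipher driver can reuse by supplying its own single-block encrypt routine. A hypothetical sketch; example_encrypt_one_block() stands in for a real driver primitive.

#include <crypto/cbc.h>
#include <crypto/internal/skcipher.h>

/* placeholder for a driver's single-block encrypt primitive */
static void example_encrypt_one_block(struct crypto_skcipher *tfm,
				      const u8 *src, u8 *dst)
{
	/* e.g. run the hardware or assembler block function on tfm's key */
}

static int example_cbc_encrypt(struct skcipher_request *req)
{
	/* walks req and chains the blocks through the helper above */
	return crypto_cbc_encrypt_walk(req, example_encrypt_one_block);
}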
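The dh.h/ecdh.h helpers pack a private key into the flat ("packet") representation expected by crypto_kpp_set_secret(). A sketch of that flow for DH, assuming the caller has already filled struct dh (per the kpp.h note, the private key itself is typically just random bytes of the required length); the ECDH helpers are used the same way.

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/slab.h>

static int example_dh_set_secret(struct crypto_kpp *tfm, struct dh *params)
{
	unsigned int len = crypto_dh_key_len(params);
	char *packed;
	int err;

	packed = kmalloc(len, GFP_KERNEL);
	if (!packed)
		return -ENOMEM;

	err = crypto_dh_encode_key(packed, len, params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, packed, len);

	kzfree(packed);	/* the packed copy holds key material */
	return err;
}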
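For the synchronous side (include/crypto/internal/scompress.h), a driver fills a struct scomp_alg and registers it with crypto_register_scomp(). The skeleton below is a sketch only: the "example" names, the workspace size and the stubbed (de)compress callbacks are all made up, and a real driver would implement them and update *dlen.

#include <crypto/internal/scompress.h>
#include <linux/module.h>
#include <linux/slab.h>

#define EXAMPLE_SCOMP_CTX_SIZE	4096	/* hypothetical workspace size */

static void *example_scomp_alloc_ctx(struct crypto_scomp *tfm)
{
	return kzalloc(EXAMPLE_SCOMP_CTX_SIZE, GFP_KERNEL);
}

static void example_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
	kfree(ctx);
}

static int example_scomp_compress(struct crypto_scomp *tfm, const u8 *src,
				  unsigned int slen, u8 *dst,
				  unsigned int *dlen, void *ctx)
{
	/* real compression code goes here; must update *dlen on success */
	return -EOPNOTSUPP;
}

static int example_scomp_decompress(struct crypto_scomp *tfm, const u8 *src,
				    unsigned int slen, u8 *dst,
				    unsigned int *dlen, void *ctx)
{
	return -EOPNOTSUPP;
}

static struct scomp_alg example_scomp = {
	.alloc_ctx	= example_scomp_alloc_ctx,
	.free_ctx	= example_scomp_free_ctx,
	.compress	= example_scomp_compress,
	.decompress	= example_scomp_decompress,
	.base		= {
		.cra_name	= "example",
		.cra_driver_name = "example-generic",
		.cra_module	= THIS_MODULE,
	},
};

static int __init example_scomp_mod_init(void)
{
	return crypto_register_scomp(&example_scomp);
}

static void __exit example_scomp_mod_exit(void)
{
	crypto_unregister_scomp(&example_scomp);
}

module_init(example_scomp_mod_init);
module_exit(example_scomp_mod_exit);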
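Finally, xts_verify_key() is meant to be called from an skcipher setkey handler before the two key halves are programmed. A hypothetical sketch; example_xts_program_keys() is a placeholder for the driver's own key-scheduling code.

#include <crypto/xts.h>
#include <crypto/skcipher.h>

/* placeholder for the driver's own key-programming routine */
static int example_xts_program_keys(struct crypto_skcipher *tfm,
				    const u8 *data_key, const u8 *tweak_key,
				    unsigned int half_keylen)
{
	return 0;
}

static int example_xts_setkey(struct crypto_skcipher *tfm,
			      const u8 *key, unsigned int keylen)
{
	int err;

	/* rejects odd lengths and (in FIPS mode) identical key halves */
	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	return example_xts_program_keys(tfm, key, key + keylen / 2,
					keylen / 2);
}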