| field | value | date |
|---|---|---|
| author | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 |
| committer | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 |
| commit | 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e (patch) | |
| tree | d57f3a63479a07b4e0cece029886e76e04feb984 /drivers/crypto/bcm/cipher.c | |
| parent | 5dc63e56a9cf8df0b59c234a505a1653f1bdf885 (diff) | |
| parent | 53bea86b5712c7491bb3dae12e271666df0a308c (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.4 merge window.
Diffstat (limited to 'drivers/crypto/bcm/cipher.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/crypto/bcm/cipher.c | 102 |

1 file changed, 36 insertions, 66 deletions
```diff
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c8c799428fe0..70b911baab26 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1614,7 +1614,7 @@ static void finish_req(struct iproc_reqctx_s *rctx, int err)
         spu_chunk_cleanup(rctx);
 
         if (areq)
-                areq->complete(areq, err);
+                crypto_request_complete(areq, err);
 }
 
 /**
@@ -2570,66 +2570,29 @@ static int aead_need_fallback(struct aead_request *req)
                 return payload_len > ctx->max_payload;
 }
 
-static void aead_complete(struct crypto_async_request *areq, int err)
-{
-        struct aead_request *req =
-            container_of(areq, struct aead_request, base);
-        struct iproc_reqctx_s *rctx = aead_request_ctx(req);
-        struct crypto_aead *aead = crypto_aead_reqtfm(req);
-
-        flow_log("%s() err:%d\n", __func__, err);
-
-        areq->tfm = crypto_aead_tfm(aead);
-
-        areq->complete = rctx->old_complete;
-        areq->data = rctx->old_data;
-
-        areq->complete(areq, err);
-}
-
 static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
 {
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct crypto_tfm *tfm = crypto_aead_tfm(aead);
         struct iproc_reqctx_s *rctx = aead_request_ctx(req);
         struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
-        int err;
-        u32 req_flags;
+        struct aead_request *subreq;
 
         flow_log("%s() enc:%u\n", __func__, is_encrypt);
 
-        if (ctx->fallback_cipher) {
-                /* Store the cipher tfm and then use the fallback tfm */
-                rctx->old_tfm = tfm;
-                aead_request_set_tfm(req, ctx->fallback_cipher);
-                /*
-                 * Save the callback and chain ourselves in, so we can restore
-                 * the tfm
-                 */
-                rctx->old_complete = req->base.complete;
-                rctx->old_data = req->base.data;
-                req_flags = aead_request_flags(req);
-                aead_request_set_callback(req, req_flags, aead_complete, req);
-                err = is_encrypt ? crypto_aead_encrypt(req) :
-                    crypto_aead_decrypt(req);
-
-                if (err == 0) {
-                        /*
-                         * fallback was synchronous (did not return
-                         * -EINPROGRESS). So restore request state here.
-                         */
-                        aead_request_set_callback(req, req_flags,
-                                                  rctx->old_complete, req);
-                        req->base.data = rctx->old_data;
-                        aead_request_set_tfm(req, aead);
-                        flow_log("%s() fallback completed successfully\n\n",
-                                 __func__);
-                }
-        } else {
-                err = -EINVAL;
-        }
+        if (!ctx->fallback_cipher)
+                return -EINVAL;
 
-        return err;
+        subreq = &rctx->req;
+        aead_request_set_tfm(subreq, ctx->fallback_cipher);
+        aead_request_set_callback(subreq, aead_request_flags(req),
+                                  req->base.complete, req->base.data);
+        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+                               req->iv);
+        aead_request_set_ad(subreq, req->assoclen);
+
+        return is_encrypt ? crypto_aead_encrypt(subreq) :
+                            crypto_aead_decrypt(subreq);
 }
 
 static int aead_enqueue(struct aead_request *req, bool is_encrypt)
@@ -4243,6 +4206,7 @@ static int ahash_cra_init(struct crypto_tfm *tfm)
 
 static int aead_cra_init(struct crypto_aead *aead)
 {
+        unsigned int reqsize = sizeof(struct iproc_reqctx_s);
         struct crypto_tfm *tfm = crypto_aead_tfm(aead);
         struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
         struct crypto_alg *alg = tfm->__crt_alg;
@@ -4254,7 +4218,6 @@ static int aead_cra_init(struct crypto_aead *aead)
 
         flow_log("%s()\n", __func__);
 
-        crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
         ctx->is_esp = false;
         ctx->salt_len = 0;
         ctx->salt_offset = 0;
@@ -4263,22 +4226,29 @@ static int aead_cra_init(struct crypto_aead *aead)
         get_random_bytes(ctx->iv, MAX_IV_SIZE);
         flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
 
-        if (!err) {
-                if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
-                        flow_log("%s() creating fallback cipher\n", __func__);
-
-                        ctx->fallback_cipher =
-                            crypto_alloc_aead(alg->cra_name, 0,
-                                              CRYPTO_ALG_ASYNC |
-                                              CRYPTO_ALG_NEED_FALLBACK);
-                        if (IS_ERR(ctx->fallback_cipher)) {
-                                pr_err("%s() Error: failed to allocate fallback for %s\n",
-                                       __func__, alg->cra_name);
-                                return PTR_ERR(ctx->fallback_cipher);
-                        }
-                }
+        if (err)
+                goto out;
+
+        if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK))
+                goto reqsize;
+
+        flow_log("%s() creating fallback cipher\n", __func__);
+
+        ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0,
+                                                 CRYPTO_ALG_ASYNC |
+                                                 CRYPTO_ALG_NEED_FALLBACK);
+        if (IS_ERR(ctx->fallback_cipher)) {
+                pr_err("%s() Error: failed to allocate fallback for %s\n",
+                       __func__, alg->cra_name);
+                return PTR_ERR(ctx->fallback_cipher);
         }
 
+        reqsize += crypto_aead_reqsize(ctx->fallback_cipher);
+
+reqsize:
+        crypto_aead_set_reqsize(aead, reqsize);
+
+out:
         return err;
 }
 
```
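For context on the second hunk: instead of temporarily rewriting the caller's request (swapping its tfm and completion callback, then restoring them in the removed `aead_complete()`), the driver now keeps a `struct aead_request` subrequest inside its own request context (`rctx->req`) and reserves room for the fallback's context by adding `crypto_aead_reqsize()` in `aead_cra_init()`. The sketch below shows that subrequest-fallback idiom in isolation; it is a minimal, hedged example, and the names `my_reqctx`, `my_aead_set_reqsize`, and `my_aead_do_fallback` are illustrative, not taken from cipher.c.

```c
/*
 * Minimal sketch of the subrequest-fallback idiom used above
 * (illustrative names, not the actual cipher.c code).
 */
#include <linux/errno.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>

struct my_reqctx {
        /* ... driver-specific per-request state ... */
        struct aead_request fallback_req;       /* must be last: fallback ctx follows */
};

/* Reserve room for our context plus the fallback's own request context. */
static void my_aead_set_reqsize(struct crypto_aead *aead,
                                struct crypto_aead *fallback)
{
        crypto_aead_set_reqsize(aead, sizeof(struct my_reqctx) +
                                      crypto_aead_reqsize(fallback));
}

/*
 * Mirror the original request onto the embedded subrequest and hand it to
 * the fallback tfm; the caller's completion runs directly when it finishes.
 */
static int my_aead_do_fallback(struct aead_request *req,
                               struct crypto_aead *fallback, bool enc)
{
        struct my_reqctx *rctx = aead_request_ctx(req);
        struct aead_request *subreq = &rctx->fallback_req;

        if (!fallback)
                return -EINVAL;

        aead_request_set_tfm(subreq, fallback);
        aead_request_set_callback(subreq, aead_request_flags(req),
                                  req->base.complete, req->base.data);
        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                               req->iv);
        aead_request_set_ad(subreq, req->assoclen);

        return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);
}
```

Keeping the subrequest as the last member of the context matters: `crypto_aead_set_reqsize()` reserves one contiguous area, and the fallback's own request context lives directly behind the embedded `struct aead_request`, which is why the fallback's reqsize is added on top of the driver's.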