Diffstat (limited to 'recipes-extended/qat/files/qat17_4.7.0-00006-Switch-to-skcipher-API.patch')
-rw-r--r-- | recipes-extended/qat/files/qat17_4.7.0-00006-Switch-to-skcipher-API.patch | 1161
1 file changed, 0 insertions(+), 1161 deletions(-)
diff --git a/recipes-extended/qat/files/qat17_4.7.0-00006-Switch-to-skcipher-API.patch b/recipes-extended/qat/files/qat17_4.7.0-00006-Switch-to-skcipher-API.patch
deleted file mode 100644
index 96e949c..0000000
--- a/recipes-extended/qat/files/qat17_4.7.0-00006-Switch-to-skcipher-API.patch
+++ /dev/null
@@ -1,1161 +0,0 @@
-From b19449e3c11ffd477a3db60f21e14930ed07f251 Mon Sep 17 00:00:00 2001
-From: Yongxin Liu <yongxin.liu@windriver.com>
-Date: Wed, 15 Jan 2020 13:50:38 +0000
-Subject: [PATCH] qat: Switch to skcipher API
-
-The patch is derived from mainline kernel commit 7fe948a52287
-("crypto: qat - switch to skcipher API").
-
-Upstream-Status: Inappropriate [Code released in tarball form only]
-
-Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
----
- .../drivers/crypto/qat/qat_common/qat_algs.c  | 676 ++++++++++--------
- .../crypto/qat/qat_common/qat_crypto.h        |   6 +-
- 2 files changed, 394 insertions(+), 288 deletions(-)
-
-diff --git a/quickassist/qat/drivers/crypto/qat/qat_common/qat_algs.c b/quickassist/qat/drivers/crypto/qat/qat_common/qat_algs.c
-index c4edb3c..35bca76 100644
---- a/quickassist/qat/drivers/crypto/qat/qat_common/qat_algs.c
-+++ b/quickassist/qat/drivers/crypto/qat/qat_common/qat_algs.c
-@@ -44,14 +44,15 @@
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
--#ifndef QAT_AEAD_OLD_SUPPORTED
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/crypto.h>
- #include <crypto/internal/aead.h>
-+#include <crypto/internal/skcipher.h>
- #include <crypto/aes.h>
- #include <crypto/sha.h>
- #include <crypto/hash.h>
-+#include <crypto/hmac.h>
- #include <crypto/algapi.h>
- #include <crypto/authenc.h>
- #include <linux/dma-mapping.h>
-@@ -113,11 +114,16 @@ struct qat_alg_aead_ctx {
- 	struct crypto_shash *hash_tfm;
- 	enum icp_qat_hw_auth_algo qat_hash_alg;
- 	struct qat_crypto_instance *inst;
--	char ipad[SHA512_BLOCK_SIZE];
-+	union {
-+		struct sha1_state sha1;
-+		struct sha256_state sha256;
-+		struct sha512_state sha512;
-+	};
-+	char ipad[SHA512_BLOCK_SIZE];	/* sufficient for SHA-1/SHA-256 as well */
- 	char opad[SHA512_BLOCK_SIZE];
- };
- 
--struct qat_alg_ablkcipher_ctx {
-+struct qat_alg_skcipher_ctx {
- 	struct icp_qat_hw_cipher_algo_blk *enc_cd;
- 	struct icp_qat_hw_cipher_algo_blk *dec_cd;
- 	dma_addr_t enc_cd_paddr;
-@@ -125,7 +131,7 @@ struct qat_alg_ablkcipher_ctx {
- 	struct icp_qat_fw_la_bulk_req enc_fw_req;
- 	struct icp_qat_fw_la_bulk_req dec_fw_req;
- 	struct qat_crypto_instance *inst;
--	struct crypto_tfm *tfm;
-+	struct crypto_skcipher *tfm;
- };
- 
- static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-@@ -149,9 +155,6 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
- 				  unsigned int auth_keylen)
- {
- 	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
--	struct sha1_state sha1;
--	struct sha256_state sha256;
--	struct sha512_state sha512;
- 	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
- 	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- 	__be32 *hash_state_out;
-@@ -160,7 +163,6 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
- 
- 	memset(ctx->ipad, 0, block_size);
- 	memset(ctx->opad, 0, block_size);
--	memset(shash, 0, sizeof(struct shash_desc));
- 	shash->tfm = ctx->hash_tfm;
- 
- 	if (auth_keylen > block_size) {
-@@ -178,8 +180,8 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
- 	for (i = 0; i < block_size; i++) {
- 		char *ipad_ptr = ctx->ipad + i;
- 		char *opad_ptr = ctx->opad + i;
--		*ipad_ptr ^= 0x36;
--		*opad_ptr ^= 0x5C;
-+		*ipad_ptr ^= HMAC_IPAD_VALUE;
-+		*opad_ptr ^= HMAC_OPAD_VALUE;
- 	}
- 
- 	if (crypto_shash_init(shash))
-@@ -193,22 +195,22 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
- 
- 	switch (ctx->qat_hash_alg) {
- 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
--		if (crypto_shash_export(shash, &sha1))
-+		if (crypto_shash_export(shash, &ctx->sha1))
- 			return -EFAULT;
- 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
--			*hash_state_out = cpu_to_be32(*(sha1.state + i));
-+			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- 		break;
- 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
--		if (crypto_shash_export(shash, &sha256))
-+		if (crypto_shash_export(shash, &ctx->sha256))
- 			return -EFAULT;
- 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
--			*hash_state_out = cpu_to_be32(*(sha256.state + i));
-+			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- 		break;
- 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
--		if (crypto_shash_export(shash, &sha512))
-+		if (crypto_shash_export(shash, &ctx->sha512))
- 			return -EFAULT;
- 		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
--			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
-+			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- 		break;
- 	default:
- 		return -EFAULT;
-@@ -229,22 +231,22 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
- 
- 	switch (ctx->qat_hash_alg) {
- 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
--		if (crypto_shash_export(shash, &sha1))
-+		if (crypto_shash_export(shash, &ctx->sha1))
- 			return -EFAULT;
- 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
--			*hash_state_out = cpu_to_be32(*(sha1.state + i));
-+			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- 		break;
- 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
--		if (crypto_shash_export(shash, &sha256))
-+		if (crypto_shash_export(shash, &ctx->sha256))
- 			return -EFAULT;
- 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
--			*hash_state_out = cpu_to_be32(*(sha256.state + i));
-+			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- 		break;
- 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
--		if (crypto_shash_export(shash, &sha512))
-+		if (crypto_shash_export(shash, &ctx->sha512))
- 			return -EFAULT;
- 		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
--			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
-+			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- 		break;
- 	default:
- 		return -EFAULT;
-@@ -254,7 +256,24 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
- 	return 0;
- }
- 
--static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
-+static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
-+{
-+	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-+					   ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-+				       ICP_QAT_FW_LA_UPDATE_STATE);
-+}
-+
-+static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
-+{
-+	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-+					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-+				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
-+}
-+
-+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
-+				    int aead)
- {
- 	header->hdr_flags =
- 		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-@@ -264,12 +283,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
- 				  QAT_COMN_PTR_TYPE_SGL);
- 	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
- 				  ICP_QAT_FW_LA_PARTIAL_NONE);
--	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
--					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-+	if (aead)
-+		qat_alg_init_hdr_no_iv_updt(header);
-+	else
-+		qat_alg_init_hdr_iv_updt(header);
- 	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- 				ICP_QAT_FW_LA_NO_PROTO);
--	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
--				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
- }
- 
- static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
-@@ -304,7 +323,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
- 		return -EFAULT;
- 
- 	/* Request setup */
--	qat_alg_init_common_hdr(header);
-+	qat_alg_init_common_hdr(header, 1);
- 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-@@ -391,7 +410,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
- 		return -EFAULT;
- 
- 	/* Request setup */
--	qat_alg_init_common_hdr(header);
-+	qat_alg_init_common_hdr(header, 1);
- 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-@@ -445,17 +464,17 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
- 	return 0;
- }
- 
--static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
--					struct icp_qat_fw_la_bulk_req *req,
--					struct icp_qat_hw_cipher_algo_blk *cd,
--					const uint8_t *key, unsigned int keylen)
-+static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
-+				      struct icp_qat_fw_la_bulk_req *req,
-+				      struct icp_qat_hw_cipher_algo_blk *cd,
-+				      const uint8_t *key, unsigned int keylen)
- {
- 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- 	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
- 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
- 
- 	memcpy(cd->aes.key, key, keylen);
--	qat_alg_init_common_hdr(header);
-+	qat_alg_init_common_hdr(header, 0);
- 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
- 	cd_pars->u.s.content_desc_params_sz =
- 		sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
-@@ -467,28 +486,28 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
- 	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
- }
- 
--static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
--					int alg, const uint8_t *key,
--					unsigned int keylen, int mode)
-+static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
-+				      int alg, const uint8_t *key,
-+				      unsigned int keylen, int mode)
- {
- 	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
- 	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
- 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- 
--	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
-+	qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
- 	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
- 	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
- }
- 
--static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
--					int alg, const uint8_t *key,
--					unsigned int keylen, int mode)
-+static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
-+				      int alg, const uint8_t *key,
-+				      unsigned int keylen, int mode)
- {
- 	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
- 	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
- 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- 
--	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
-+	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
- 	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
- 
- 	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
-@@ -548,86 +567,110 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
- 	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
- 		goto error;
- 
-+	memzero_explicit(&keys, sizeof(keys));
- 	return 0;
- bad_key:
- 	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+	memzero_explicit(&keys, sizeof(keys));
- 	return -EINVAL;
- error:
-+	memzero_explicit(&keys, sizeof(keys));
- 	return -EFAULT;
- }
- 
--static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
--					    const uint8_t *key,
--					    unsigned int keylen,
--					    int mode)
-+static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
-+					  const uint8_t *key,
-+					  unsigned int keylen,
-+					  int mode)
- {
- 	int alg;
- 
- 	if (qat_alg_validate_key(keylen, &alg, mode))
- 		goto bad_key;
- 
--	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
--	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
-+	qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
-+	qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
- 	return 0;
- bad_key:
--	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+	crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- 	return -EINVAL;
- }
- 
--static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
-+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
-+			      unsigned int keylen)
-+{
-+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-+
-+	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
-+	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
-+	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
-+	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
-+
-+	return qat_alg_aead_init_sessions(tfm, key, keylen,
-+					  ICP_QAT_HW_CIPHER_CBC_MODE);
-+}
-+
-+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
- 			       unsigned int keylen)
- {
- 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-+	struct qat_crypto_instance *inst = NULL;
-+	int node = get_current_node();
- 	struct device *dev;
-+	int ret;
- 
--	if (ctx->enc_cd) {
--		/* rekeying */
--		dev = &GET_DEV(ctx->inst->accel_dev);
--		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
--		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
--		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
--		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
--	} else {
--		/* new key */
--		int node = get_current_node();
--		struct qat_crypto_instance *inst =
--			qat_crypto_get_instance_node(node);
--		if (!inst) {
--			return -EINVAL;
--		}
--
--		dev = &GET_DEV(inst->accel_dev);
--		ctx->inst = inst;
--		ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
--						 &ctx->enc_cd_paddr,
--						 GFP_ATOMIC);
--		if (!ctx->enc_cd) {
--			return -ENOMEM;
--		}
--		ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
--						 &ctx->dec_cd_paddr,
--						 GFP_ATOMIC);
--		if (!ctx->dec_cd) {
--			goto out_free_enc;
--		}
-+	inst = qat_crypto_get_instance_node(node);
-+	if (!inst)
-+		return -EINVAL;
-+	dev = &GET_DEV(inst->accel_dev);
-+	ctx->inst = inst;
-+	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
-+					 &ctx->enc_cd_paddr,
-+					 GFP_ATOMIC);
-+	if (!ctx->enc_cd) {
-+		ret = -ENOMEM;
-+		goto out_free_inst;
- 	}
--	if (qat_alg_aead_init_sessions(tfm, key, keylen,
--				       ICP_QAT_HW_CIPHER_CBC_MODE))
-+	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
-+					 &ctx->dec_cd_paddr,
-+					 GFP_ATOMIC);
-+	if (!ctx->dec_cd) {
-+		ret = -ENOMEM;
-+		goto out_free_enc;
-+	}
-+
-+	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
-+					 ICP_QAT_HW_CIPHER_CBC_MODE);
-+	if (ret)
- 		goto out_free_all;
- 
- 	return 0;
- 
- out_free_all:
--	memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
-+	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
- 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
- 			  ctx->dec_cd, ctx->dec_cd_paddr);
- 	ctx->dec_cd = NULL;
- out_free_enc:
--	memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
-+	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
- 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
- 			  ctx->enc_cd, ctx->enc_cd_paddr);
- 	ctx->enc_cd = NULL;
--	return -ENOMEM;
-+out_free_inst:
-+	ctx->inst = NULL;
-+	qat_crypto_put_instance(inst);
-+	return ret;
-+}
-+
-+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
-+			       unsigned int keylen)
-+{
-+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-+
-+	if (ctx->enc_cd)
-+		return qat_alg_aead_rekey(tfm, key, keylen);
-+	else
-+		return qat_alg_aead_newkey(tfm, key, keylen);
- }
- 
- static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
-@@ -675,8 +718,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- 	dma_addr_t blp;
- 	dma_addr_t bloutp = 0;
- 	struct scatterlist *sg;
--	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
--			((1 + n) * sizeof(struct qat_alg_buf));
-+	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
- 
- 	if (unlikely(!n))
- 		return -EINVAL;
-@@ -688,7 +730,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- 
- 	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
- 	if (unlikely(dma_mapping_error(dev, blp)))
--		goto err;
-+		goto err_in;
- 
- 	for_each_sg(sgl, sg, n, i) {
- 		int y = sg_nctr;
-@@ -701,7 +743,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- 					      DMA_BIDIRECTIONAL);
- 		bufl->bufers[y].len = sg->length;
- 		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
--			goto err;
-+			goto err_in;
- 		sg_nctr++;
- 	}
- 	bufl->num_bufs = sg_nctr;
-@@ -713,16 +755,15 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- 		struct qat_alg_buf *bufers;
- 
- 		n = sg_nents(sglout);
--		sz_out = sizeof(struct qat_alg_buf_list) +
--			((1 + n) * sizeof(struct qat_alg_buf));
-+		sz_out = struct_size(buflout, bufers, n + 1);
- 		sg_nctr = 0;
- 		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
- 				       dev_to_node(&GET_DEV(inst->accel_dev)));
- 		if (unlikely(!buflout))
--			goto err;
-+			goto err_in;
- 		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
- 		if (unlikely(dma_mapping_error(dev, bloutp)))
--			goto err;
-+			goto err_out;
- 		bufers = buflout->bufers;
- 		for_each_sg(sglout, sg, n, i) {
- 			int y = sg_nctr;
-@@ -734,7 +775,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- 						sg->length,
- 						DMA_BIDIRECTIONAL);
- 			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
--				goto err;
-+				goto err_out;
- 			bufers[y].len = sg->length;
- 			sg_nctr++;
- 		}
-@@ -749,8 +790,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- 		qat_req->buf.sz_out = 0;
- 	}
- 	return 0;
--err:
--	dev_err(dev, "Failed to map buf for dma\n");
-+
-+err_out:
-+	n = sg_nents(sglout);
-+	for (i = 0; i < n; i++)
-+		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
-+			dma_unmap_single(dev, buflout->bufers[i].addr,
-+					 buflout->bufers[i].len,
-+					 DMA_BIDIRECTIONAL);
-+	if (!dma_mapping_error(dev, bloutp))
-+		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
-+	kfree(buflout);
-+
-+err_in:
-+	n = sg_nents(sgl);
- 	for (i = 0; i < n; i++)
- 		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
- 			dma_unmap_single(dev, bufl->bufers[i].addr,
-@@ -760,17 +813,8 @@ err:
- 	if (!dma_mapping_error(dev, blp))
- 		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
- 	kfree(bufl);
--	if (sgl != sglout && buflout) {
--		n = sg_nents(sglout);
--		for (i = 0; i < n; i++)
--			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
--				dma_unmap_single(dev, buflout->bufers[i].addr,
--						 buflout->bufers[i].len,
--						 DMA_BIDIRECTIONAL);
--		if (!dma_mapping_error(dev, bloutp))
--			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
--		kfree(buflout);
--	}
-+
-+	dev_err(dev, "Failed to map buf for dma\n");
- 	return -ENOMEM;
- }
- 
-@@ -789,19 +833,25 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
- 	areq->base.complete(&areq->base, res);
- }
- 
--static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
--					struct qat_crypto_request *qat_req)
-+static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
-+				      struct qat_crypto_request *qat_req)
- {
--	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
-+	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
- 	struct qat_crypto_instance *inst = ctx->inst;
--	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
-+	struct skcipher_request *sreq = qat_req->skcipher_req;
- 	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
-+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
- 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
- 
- 	qat_alg_free_bufl(inst, qat_req);
- 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
- 		res = -EINVAL;
--	areq->base.complete(&areq->base, res);
-+
-+	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
-+	dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-+			  qat_req->iv_paddr);
-+
-+	sreq->base.complete(&sreq->base, res);
- }
- 
- void qat_alg_callback(void *resp)
-@@ -823,7 +873,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
- 	struct icp_qat_fw_la_auth_req_params *auth_param;
- 	struct icp_qat_fw_la_bulk_req *msg;
- 	int digst_size = crypto_aead_authsize(aead_tfm);
--	int ret;
-+	int ret, ctr = 0;
- 
- 	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
- 	if (unlikely(ret))
-@@ -844,13 +894,14 @@ static int qat_alg_aead_dec(struct aead_request *areq)
- 	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- 	auth_param->auth_off = 0;
- 	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
--
- 	do {
- 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
--		if (ret)
--			cond_resched();
--	} while (ret == -EAGAIN);
-+	} while (ret == -EAGAIN && ctr++ < 10);
- 
-+	if (ret == -EAGAIN) {
-+		qat_alg_free_bufl(ctx->inst, qat_req);
-+		return -EBUSY;
-+	}
- 	return -EINPROGRESS;
- }
- 
-@@ -864,7 +915,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
- 	struct icp_qat_fw_la_auth_req_params *auth_param;
- 	struct icp_qat_fw_la_bulk_req *msg;
- 	uint8_t *iv = areq->iv;
--	int ret;
-+	int ret, ctr = 0;
- 
- 	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
- 	if (unlikely(ret))
-@@ -890,159 +941,230 @@ static int qat_alg_aead_enc(struct aead_request *areq)
- 
- 	do {
- 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
--		if (ret)
--			cond_resched();
--	} while (ret == -EAGAIN);
-+	} while (ret == -EAGAIN && ctr++ < 10);
- 
-+	if (ret == -EAGAIN) {
-+		qat_alg_free_bufl(ctx->inst, qat_req);
-+		return -EBUSY;
-+	}
- 	return -EINPROGRESS;
- }
- 
--static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
--				     const u8 *key, unsigned int keylen,
--				     int mode)
-+static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
-+				  const u8 *key, unsigned int keylen,
-+				  int mode)
- {
--	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
--	struct device *dev;
-+	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
-+	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
-+	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
-+	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
- 
--	if (ctx->enc_cd) {
--		/* rekeying */
--		dev = &GET_DEV(ctx->inst->accel_dev);
--		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
--		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
--		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
--		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
--	} else {
--		/* new key */
--		int node = get_current_node();
--		struct qat_crypto_instance *inst =
--			qat_crypto_get_instance_node(node);
--		if (!inst)
--			return -EINVAL;
-+	return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
-+}
-+
-+static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
-+				   const u8 *key, unsigned int keylen,
-+				   int mode)
-+{
-+	struct qat_crypto_instance *inst = NULL;
-+	struct device *dev;
-+	int node = get_current_node();
-+	int ret;
- 
--		dev = &GET_DEV(inst->accel_dev);
--		ctx->inst = inst;
--		ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
--						 &ctx->enc_cd_paddr,
--						 GFP_ATOMIC);
--		if (!ctx->enc_cd)
--			return -ENOMEM;
--		ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
--						 &ctx->dec_cd_paddr,
--						 GFP_ATOMIC);
--		if (!ctx->dec_cd)
--			goto out_free_enc;
-+	inst = qat_crypto_get_instance_node(node);
-+	if (!inst)
-+		return -EINVAL;
-+	dev = &GET_DEV(inst->accel_dev);
-+	ctx->inst = inst;
-+	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
-+					 &ctx->enc_cd_paddr,
-+					 GFP_ATOMIC);
-+	if (!ctx->enc_cd) {
-+		ret = -ENOMEM;
-+		goto out_free_instance;
-+	}
-+	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
-+					 &ctx->dec_cd_paddr,
-+					 GFP_ATOMIC);
-+	if (!ctx->dec_cd) {
-+		ret = -ENOMEM;
-+		goto out_free_enc;
- 	}
--	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
-+
-+	ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
-+	if (ret)
- 		goto out_free_all;
- 
- 	return 0;
- 
- out_free_all:
--	memzero_explicit(ctx->dec_cd, sizeof(*ctx->dec_cd));
-+	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
- 	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
- 			  ctx->dec_cd, ctx->dec_cd_paddr);
- 	ctx->dec_cd = NULL;
- out_free_enc:
--	memzero_explicit(ctx->enc_cd, sizeof(*ctx->enc_cd));
-+	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
- 	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
- 			  ctx->enc_cd, ctx->enc_cd_paddr);
- 	ctx->enc_cd = NULL;
--	return -ENOMEM;
-+out_free_instance:
-+	ctx->inst = NULL;
-+	qat_crypto_put_instance(inst);
-+	return ret;
- }
- 
--static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
--					 const u8 *key, unsigned int keylen)
-+static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
-+				   const u8 *key, unsigned int keylen,
-+				   int mode)
- {
--	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
--					 ICP_QAT_HW_CIPHER_CBC_MODE);
-+	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-+
-+	if (ctx->enc_cd)
-+		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
-+	else
-+		return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
-+}
-+
-+static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
-+				       const u8 *key, unsigned int keylen)
-+{
-+	return qat_alg_skcipher_setkey(tfm, key, keylen,
-+				       ICP_QAT_HW_CIPHER_CBC_MODE);
- }
- 
--static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
--					 const u8 *key, unsigned int keylen)
-+static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
-+				       const u8 *key, unsigned int keylen)
- {
--	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
--					 ICP_QAT_HW_CIPHER_CTR_MODE);
-+	return qat_alg_skcipher_setkey(tfm, key, keylen,
-+				       ICP_QAT_HW_CIPHER_CTR_MODE);
- }
- 
--static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
--					 const u8 *key, unsigned int keylen)
-+static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
-+				       const u8 *key, unsigned int keylen)
- {
--	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
--					 ICP_QAT_HW_CIPHER_XTS_MODE);
-+	return qat_alg_skcipher_setkey(tfm, key, keylen,
-+				       ICP_QAT_HW_CIPHER_XTS_MODE);
- }
- 
--static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
-+static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
- {
--	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
--	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
--	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
--	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
-+	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-+	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
-+	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
- 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
- 	struct icp_qat_fw_la_bulk_req *msg;
--	int ret;
-+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-+	int ret, ctr = 0;
-+
-+	if (req->cryptlen == 0)
-+		return 0;
-+
-+	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
-+					 &qat_req->iv_paddr, GFP_ATOMIC);
-+	if (!qat_req->iv)
-+		return -ENOMEM;
- 
- 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
--	if (unlikely(ret))
-+	if (unlikely(ret)) {
-+		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-+				  qat_req->iv_paddr);
- 		return ret;
-+	}
- 
- 	msg = &qat_req->req;
- 	*msg = ctx->enc_fw_req;
--	qat_req->ablkcipher_ctx = ctx;
--	qat_req->ablkcipher_req = req;
--	qat_req->cb = qat_ablkcipher_alg_callback;
-+	qat_req->skcipher_ctx = ctx;
-+	qat_req->skcipher_req = req;
-+	qat_req->cb = qat_skcipher_alg_callback;
- 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
- 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
- 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
- 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
--	cipher_param->cipher_length = req->nbytes;
-+	cipher_param->cipher_length = req->cryptlen;
- 	cipher_param->cipher_offset = 0;
--	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
--
-+	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
-+	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
- 	do {
- 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
--		if (ret)
--			cond_resched();
--	} while (ret == -EAGAIN);
-+	} while (ret == -EAGAIN && ctr++ < 10);
- 
-+	if (ret == -EAGAIN) {
-+		qat_alg_free_bufl(ctx->inst, qat_req);
-+		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-+				  qat_req->iv_paddr);
-+		return -EBUSY;
-+	}
- 	return -EINPROGRESS;
- }
- 
--static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
-+static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
-+{
-+	if (req->cryptlen % AES_BLOCK_SIZE != 0)
-+		return -EINVAL;
-+
-+	return qat_alg_skcipher_encrypt(req);
-+}
-+
-+static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
- {
--	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
--	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
--	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
--	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
-+	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-+	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
-+	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
- 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
- 	struct icp_qat_fw_la_bulk_req *msg;
--	int ret;
-+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-+	int ret, ctr = 0;
-+
-+	if (req->cryptlen == 0)
-+		return 0;
-+
-+	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
-+					 &qat_req->iv_paddr, GFP_ATOMIC);
-+	if (!qat_req->iv)
-+		return -ENOMEM;
- 
- 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
--	if (unlikely(ret))
-+	if (unlikely(ret)) {
-+		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-+				  qat_req->iv_paddr);
- 		return ret;
-+	}
- 
- 	msg = &qat_req->req;
- 	*msg = ctx->dec_fw_req;
--	qat_req->ablkcipher_ctx = ctx;
--	qat_req->ablkcipher_req = req;
--	qat_req->cb = qat_ablkcipher_alg_callback;
-+	qat_req->skcipher_ctx = ctx;
-+	qat_req->skcipher_req = req;
-+	qat_req->cb = qat_skcipher_alg_callback;
- 	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
- 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
- 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
- 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
--	cipher_param->cipher_length = req->nbytes;
-+	cipher_param->cipher_length = req->cryptlen;
- 	cipher_param->cipher_offset = 0;
--	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
--
-+	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
-+	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
- 	do {
- 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
--		if (ret)
--			cond_resched();
--	} while (ret == -EAGAIN);
-+	} while (ret == -EAGAIN && ctr++ < 10);
- 
-+	if (ret == -EAGAIN) {
-+		qat_alg_free_bufl(ctx->inst, qat_req);
-+		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-+				  qat_req->iv_paddr);
-+		return -EBUSY;
-+	}
- 	return -EINPROGRESS;
- }
- 
-+static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
-+{
-+	if (req->cryptlen % AES_BLOCK_SIZE != 0)
-+		return -EINVAL;
-+
-+	return qat_alg_skcipher_decrypt(req);
-+}
- static int qat_alg_aead_init(struct crypto_aead *tfm,
- 			     enum icp_qat_hw_auth_algo hash,
- 			     const char *hash_name)
-@@ -1085,30 +1207,30 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
- 
- 	dev = &GET_DEV(inst->accel_dev);
- 	if (ctx->enc_cd) {
--		memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
-+		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
- 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
- 				  ctx->enc_cd, ctx->enc_cd_paddr);
- 	}
- 	if (ctx->dec_cd) {
--		memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
-+		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
- 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
- 				  ctx->dec_cd, ctx->dec_cd_paddr);
- 	}
- 	qat_crypto_put_instance(inst);
- }
- 
--static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
-+static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
- {
--	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- 
--	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
-+	crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
- 	ctx->tfm = tfm;
- 	return 0;
- }
- 
--static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
-+static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
- {
--	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-+	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- 	struct qat_crypto_instance *inst = ctx->inst;
- 	struct device *dev;
- 
-@@ -1117,15 +1239,15 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
- 
- 	dev = &GET_DEV(inst->accel_dev);
- 	if (ctx->enc_cd) {
--		memzero_explicit(ctx->enc_cd,
--				 sizeof(struct icp_qat_hw_cipher_algo_blk));
-+		memset(ctx->enc_cd, 0,
-+		       sizeof(struct icp_qat_hw_cipher_algo_blk));
- 		dma_free_coherent(dev,
- 				  sizeof(struct icp_qat_hw_cipher_algo_blk),
- 				  ctx->enc_cd, ctx->enc_cd_paddr);
- 	}
- 	if (ctx->dec_cd) {
--		memzero_explicit(ctx->dec_cd,
--				 sizeof(struct icp_qat_hw_cipher_algo_blk));
-+		memset(ctx->dec_cd, 0,
-+		       sizeof(struct icp_qat_hw_cipher_algo_blk));
- 		dma_free_coherent(dev,
- 				  sizeof(struct icp_qat_hw_cipher_algo_blk),
- 				  ctx->dec_cd, ctx->dec_cd_paddr);
-@@ -1187,92 +1309,75 @@ static struct aead_alg qat_aeads[] = { {
- 	.maxauthsize = SHA512_DIGEST_SIZE,
- } };
- 
--static struct crypto_alg qat_algs[] = { {
--	.cra_name = "cbc(aes)",
--	.cra_driver_name = "qat_aes_cbc",
--	.cra_priority = 4001,
--	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
--	.cra_blocksize = AES_BLOCK_SIZE,
--	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
--	.cra_alignmask = 0,
--	.cra_type = &crypto_ablkcipher_type,
--	.cra_module = THIS_MODULE,
--	.cra_init = qat_alg_ablkcipher_init,
--	.cra_exit = qat_alg_ablkcipher_exit,
--	.cra_u = {
--		.ablkcipher = {
--			.setkey = qat_alg_ablkcipher_cbc_setkey,
--			.decrypt = qat_alg_ablkcipher_decrypt,
--			.encrypt = qat_alg_ablkcipher_encrypt,
--			.min_keysize = AES_MIN_KEY_SIZE,
--			.max_keysize = AES_MAX_KEY_SIZE,
--			.ivsize = AES_BLOCK_SIZE,
--		},
--	},
-+static struct skcipher_alg qat_skciphers[] = { {
-+	.base.cra_name = "cbc(aes)",
-+	.base.cra_driver_name = "qat_aes_cbc",
-+	.base.cra_priority = 4001,
-+	.base.cra_flags = CRYPTO_ALG_ASYNC,
-+	.base.cra_blocksize = AES_BLOCK_SIZE,
-+	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
-+	.base.cra_alignmask = 0,
-+	.base.cra_module = THIS_MODULE,
-+
-+	.init = qat_alg_skcipher_init_tfm,
-+	.exit = qat_alg_skcipher_exit_tfm,
-+	.setkey = qat_alg_skcipher_cbc_setkey,
-+	.decrypt = qat_alg_skcipher_blk_decrypt,
-+	.encrypt = qat_alg_skcipher_blk_encrypt,
-+	.min_keysize = AES_MIN_KEY_SIZE,
-+	.max_keysize = AES_MAX_KEY_SIZE,
-+	.ivsize = AES_BLOCK_SIZE,
- }, {
--	.cra_name = "ctr(aes)",
--	.cra_driver_name = "qat_aes_ctr",
--	.cra_priority = 4001,
--	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
--	.cra_blocksize = AES_BLOCK_SIZE,
--	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
--	.cra_alignmask = 0,
--	.cra_type = &crypto_ablkcipher_type,
--	.cra_module = THIS_MODULE,
--	.cra_init = qat_alg_ablkcipher_init,
--	.cra_exit = qat_alg_ablkcipher_exit,
--	.cra_u = {
--		.ablkcipher = {
--			.setkey = qat_alg_ablkcipher_ctr_setkey,
--			.decrypt = qat_alg_ablkcipher_decrypt,
--			.encrypt = qat_alg_ablkcipher_encrypt,
--			.min_keysize = AES_MIN_KEY_SIZE,
--			.max_keysize = AES_MAX_KEY_SIZE,
--			.ivsize = AES_BLOCK_SIZE,
--		},
--	},
-+	.base.cra_name = "ctr(aes)",
-+	.base.cra_driver_name = "qat_aes_ctr",
-+	.base.cra_priority = 4001,
-+	.base.cra_flags = CRYPTO_ALG_ASYNC,
-+	.base.cra_blocksize = 1,
-+	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
-+	.base.cra_alignmask = 0,
-+	.base.cra_module = THIS_MODULE,
-+
-+	.init = qat_alg_skcipher_init_tfm,
-+	.exit = qat_alg_skcipher_exit_tfm,
-+	.setkey = qat_alg_skcipher_ctr_setkey,
-+	.decrypt = qat_alg_skcipher_decrypt,
-+	.encrypt = qat_alg_skcipher_encrypt,
-+	.min_keysize = AES_MIN_KEY_SIZE,
-+	.max_keysize = AES_MAX_KEY_SIZE,
-+	.ivsize = AES_BLOCK_SIZE,
- }, {
--	.cra_name = "xts(aes)",
--	.cra_driver_name = "qat_aes_xts",
--	.cra_priority = 4001,
--	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
--	.cra_blocksize = AES_BLOCK_SIZE,
--	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
--	.cra_alignmask = 0,
--	.cra_type = &crypto_ablkcipher_type,
--	.cra_module = THIS_MODULE,
--	.cra_init = qat_alg_ablkcipher_init,
--	.cra_exit = qat_alg_ablkcipher_exit,
--	.cra_u = {
--		.ablkcipher = {
--			.setkey = qat_alg_ablkcipher_xts_setkey,
--			.decrypt = qat_alg_ablkcipher_decrypt,
--			.encrypt = qat_alg_ablkcipher_encrypt,
--			.min_keysize = 2 * AES_MIN_KEY_SIZE,
--			.max_keysize = 2 * AES_MAX_KEY_SIZE,
--			.ivsize = AES_BLOCK_SIZE,
--		},
--	},
-+	.base.cra_name = "xts(aes)",
-+	.base.cra_driver_name = "qat_aes_xts",
-+	.base.cra_priority = 4001,
-+	.base.cra_flags = CRYPTO_ALG_ASYNC,
-+	.base.cra_blocksize = AES_BLOCK_SIZE,
-+	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
-+	.base.cra_alignmask = 0,
-+	.base.cra_module = THIS_MODULE,
-+
-+	.init = qat_alg_skcipher_init_tfm,
-+	.exit = qat_alg_skcipher_exit_tfm,
-+	.setkey = qat_alg_skcipher_xts_setkey,
-+	.decrypt = qat_alg_skcipher_blk_decrypt,
-+	.encrypt = qat_alg_skcipher_blk_encrypt,
-+	.min_keysize = 2 * AES_MIN_KEY_SIZE,
-+	.max_keysize = 2 * AES_MAX_KEY_SIZE,
-+	.ivsize = AES_BLOCK_SIZE,
- } };
- 
- int qat_algs_register(void)
- {
--	int ret = 0, i;
-+	int ret = 0;
- 
- 	mutex_lock(&algs_lock);
- 	if (++active_devs != 1)
- 		goto unlock;
- 
--	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
--		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
--
--	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
-+	ret = crypto_register_skciphers(qat_skciphers,
-+					ARRAY_SIZE(qat_skciphers));
- 	if (ret)
- 		goto unlock;
- 
--	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
--		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
--
- 	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
- 	if (ret)
- 		goto unreg_algs;
-@@ -1282,7 +1387,7 @@ unlock:
- 	return ret;
- 
- unreg_algs:
--	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
-+	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
- 	goto unlock;
- }
- 
-@@ -1293,9 +1398,8 @@ void qat_algs_unregister(void)
- 		goto unlock;
- 
- 	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
--	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
-+	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
- 
- unlock:
- 	mutex_unlock(&algs_lock);
- }
--#endif
-diff --git a/quickassist/qat/drivers/crypto/qat/qat_common/qat_crypto.h b/quickassist/qat/drivers/crypto/qat/qat_common/qat_crypto.h
-index dc0273f..300bb91 100644
---- a/quickassist/qat/drivers/crypto/qat/qat_common/qat_crypto.h
-+++ b/quickassist/qat/drivers/crypto/qat/qat_common/qat_crypto.h
-@@ -79,15 +79,17 @@ struct qat_crypto_request {
- 	struct icp_qat_fw_la_bulk_req req;
- 	union {
- 		struct qat_alg_aead_ctx *aead_ctx;
--		struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
-+		struct qat_alg_skcipher_ctx *skcipher_ctx;
- 	};
- 	union {
- 		struct aead_request *aead_req;
--		struct ablkcipher_request *ablkcipher_req;
-+		struct skcipher_request *skcipher_req;
- 	};
- 	struct qat_crypto_request_buffs buf;
- 	void (*cb)(struct icp_qat_fw_la_resp *resp,
- 		   struct qat_crypto_request *req);
-+	void *iv;
-+	dma_addr_t iv_paddr;
- };
- 
- #endif
--- 
-2.24.1
-