Diffstat (limited to 'drivers/crypto/qat/qat_common/qat_algs.c')
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c  211
1 file changed, 125 insertions(+), 86 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index e14d3dd291f0..72753b84dc95 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
@@ -55,6 +11,7 @@
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
+#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
@@ -78,15 +35,15 @@ static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
struct qat_alg_buf {
- uint32_t len;
- uint32_t resrvd;
- uint64_t addr;
+ u32 len;
+ u32 resrvd;
+ u64 addr;
} __packed;
struct qat_alg_buf_list {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
struct qat_alg_buf bufers[];
} __packed __aligned(64);
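
For readers skimming the hunk above: qat_alg_buf_list ends in a flexible array member (bufers[] is the upstream spelling), so its allocation size depends on how many scatterlist entries are being mapped. A minimal sketch of sizing such a structure with the kernel's struct_size() helper; alloc_buf_list() is a hypothetical name, not a function in this driver:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate a buffer list with room for n entries.
 * struct_size() computes sizeof(*bl) + n * sizeof(bl->bufers[0]) with
 * overflow checking. */
static struct qat_alg_buf_list *alloc_buf_list(u32 n, gfp_t flags)
{
	struct qat_alg_buf_list *bl;

	bl = kzalloc(struct_size(bl, bufers, n), flags);
	if (bl)
		bl->num_bufs = n;
	return bl;
}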
@@ -131,7 +88,8 @@ struct qat_alg_skcipher_ctx {
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
- struct crypto_skcipher *tfm;
+ struct crypto_skcipher *ftfm;
+ bool fallback;
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -151,7 +109,7 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_aead_ctx *ctx,
- const uint8_t *auth_key,
+ const u8 *auth_key,
unsigned int auth_keylen)
{
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
@@ -467,7 +425,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
struct icp_qat_fw_la_bulk_req *req,
struct icp_qat_hw_cipher_algo_blk *cd,
- const uint8_t *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
@@ -487,7 +445,7 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
}
static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
- int alg, const uint8_t *key,
+ int alg, const u8 *key,
unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
@@ -500,7 +458,7 @@ static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
}
static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
- int alg, const uint8_t *key,
+ int alg, const u8 *key,
unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
@@ -578,7 +536,7 @@ error:
}
static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
- const uint8_t *key,
+ const u8 *key,
unsigned int keylen,
int mode)
{
@@ -592,7 +550,7 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
return 0;
}
-static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -606,7 +564,7 @@ static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
ICP_QAT_HW_CIPHER_CBC_MODE);
}
-static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -658,7 +616,7 @@ out_free_inst:
return ret;
}
-static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -820,7 +778,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
struct qat_crypto_instance *inst = ctx->inst;
struct aead_request *areq = qat_req->aead_req;
- uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
qat_alg_free_bufl(inst, qat_req);
@@ -835,7 +793,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct qat_crypto_instance *inst = ctx->inst;
struct skcipher_request *sreq = qat_req->skcipher_req;
- uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
@@ -880,18 +838,18 @@ static int qat_alg_aead_dec(struct aead_request *areq)
qat_req->aead_ctx = ctx;
qat_req->aead_req = areq;
qat_req->cb = qat_aead_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = areq->cryptlen - digst_size;
cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
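
Every submit path in this file uses the same bounded retry around adf_send_message(), which returns -EAGAIN while the transport ring is full; the -EAGAIN branch is truncated in the context shown here, but in the upstream file it unwinds the buffer mappings and fails with -EBUSY. The pattern in isolation, as a reading aid reconstructed from the surrounding context rather than part of the patch hunk:

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		/* Ring stayed full after 11 attempts: release the
		 * DMA-mapped buffer list and report busy. */
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;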
@@ -910,7 +868,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
- uint8_t *iv = areq->iv;
+ u8 *iv = areq->iv;
int ret, ctr = 0;
ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
@@ -922,11 +880,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
qat_req->aead_ctx = ctx;
qat_req->aead_req = areq;
qat_req->cb = qat_aead_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
cipher_param->cipher_length = areq->cryptlen;
@@ -936,7 +894,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
auth_param->auth_len = areq->assoclen + areq->cryptlen;
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -1038,6 +996,25 @@ static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ if (keylen >> 1 == AES_KEYSIZE_192) {
+ ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
+ if (ret)
+ return ret;
+
+ ctx->fallback = true;
+
+ return 0;
+ }
+
+ ctx->fallback = false;
+
return qat_alg_skcipher_setkey(tfm, key, keylen,
ICP_QAT_HW_CIPHER_XTS_MODE);
}
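
An XTS key is two AES keys of equal length concatenated, hence the keylen >> 1 comparison: a 2 x 192-bit key is programmed into the software fallback tfm instead, since the QAT accelerators do not support AES-192 in XTS mode. A sketch of code that would exercise the new fallback path; try_xts_192() is illustrative only, not part of the patch:

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/random.h>

/* A 2 x 192-bit key makes qat_alg_skcipher_xts_setkey() program
 * ctx->ftfm and set ctx->fallback = true; any other valid XTS key
 * length keeps requests on the accelerator. */
static int try_xts_192(void)
{
	struct crypto_skcipher *tfm;
	u8 key[2 * AES_KEYSIZE_192];
	int ret;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	get_random_bytes(key, sizeof(key));
	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	crypto_free_skcipher(tfm);
	return ret;
}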
@@ -1073,7 +1050,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
qat_req->skcipher_ctx = ctx;
qat_req->skcipher_req = req;
qat_req->cb = qat_skcipher_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
@@ -1082,7 +1059,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -1102,6 +1079,24 @@ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
return qat_alg_skcipher_encrypt(req);
}
+static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_skcipher_encrypt(nreq);
+ }
+
+ return qat_alg_skcipher_encrypt(req);
+}
+
static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
@@ -1133,7 +1128,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
qat_req->skcipher_ctx = ctx;
qat_req->skcipher_req = req;
qat_req->cb = qat_skcipher_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
@@ -1142,7 +1137,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -1161,6 +1156,25 @@ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
return qat_alg_skcipher_decrypt(req);
}
+
+static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_skcipher_decrypt(nreq);
+ }
+
+ return qat_alg_skcipher_decrypt(req);
+}
+
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
const char *hash_name)
@@ -1217,10 +1231,25 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+ return 0;
+}
+
+static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
+{
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int reqsize;
+
+ ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->ftfm))
+ return PTR_ERR(ctx->ftfm);
+
+ reqsize = max(sizeof(struct qat_crypto_request),
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(ctx->ftfm));
+ crypto_skcipher_set_reqsize(tfm, reqsize);
- crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
- ctx->tfm = tfm;
return 0;
}
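
The max() above is what makes the memcpy()-based dispatch in qat_alg_skcipher_xts_encrypt()/qat_alg_skcipher_xts_decrypt() safe: skcipher_request_ctx() must be able to hold either the native QAT request or a nested fallback request. As a layout sketch:

/*
 * Per-request context provisioned by qat_alg_skcipher_init_xts_tfm():
 *
 *   native path:    [ struct qat_crypto_request                     ]
 *   fallback path:  [ struct skcipher_request | ftfm's own reqsize  ]
 *
 * reqsize is the max of the two, so the nreq that the XTS handlers
 * memcpy() into skcipher_request_ctx(req) always fits.
 */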
@@ -1251,13 +1280,22 @@ static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
qat_crypto_put_instance(inst);
}
+static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
+{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->ftfm)
+ crypto_free_skcipher(ctx->ftfm);
+
+ qat_alg_skcipher_exit_tfm(tfm);
+}
static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
@@ -1274,7 +1312,7 @@ static struct aead_alg qat_aeads[] = { {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha256",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
@@ -1291,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha512",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
@@ -1309,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "qat_aes_cbc",
.base.cra_priority = 4001,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
@@ -1327,7 +1365,7 @@ static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "qat_aes_ctr",
.base.cra_priority = 4001,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
@@ -1345,17 +1383,18 @@ static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "qat_aes_xts",
.base.cra_priority = 4001,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
- .init = qat_alg_skcipher_init_tfm,
- .exit = qat_alg_skcipher_exit_tfm,
+ .init = qat_alg_skcipher_init_xts_tfm,
+ .exit = qat_alg_skcipher_exit_xts_tfm,
.setkey = qat_alg_skcipher_xts_setkey,
- .decrypt = qat_alg_skcipher_blk_decrypt,
- .encrypt = qat_alg_skcipher_blk_encrypt,
+ .decrypt = qat_alg_skcipher_xts_decrypt,
+ .encrypt = qat_alg_skcipher_xts_encrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
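
CRYPTO_ALG_ALLOCATES_MEMORY, newly set on every algorithm here, advertises that the driver may allocate memory while processing a request. Users that cannot tolerate datapath allocations exclude such implementations by passing the flag in the mask argument of the allocation call; a minimal sketch, assuming the standard crypto API lookup semantics:

#include <crypto/skcipher.h>

static struct crypto_skcipher *xts_without_datapath_alloc(void)
{
	/* With the bit clear in the type argument and set in the mask,
	 * only implementations that do not set
	 * CRYPTO_ALG_ALLOCATES_MEMORY match, so qat_aes_xts is skipped. */
	return crypto_alloc_skcipher("xts(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}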