Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c    5
-rw-r--r--  drivers/crypto/caam/Kconfig                           1
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c                     81
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c                          2
-rw-r--r--  drivers/crypto/ccree/cc_pm.c                          6
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c              48
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_io.c              12
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c           16
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c                        2
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c                8
-rw-r--r--  drivers/crypto/omap-sham.c                            3
-rw-r--r--  drivers/crypto/picoxcell_crypto.c                     9
-rw-r--r--  drivers/crypto/stm32/Kconfig                          1
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c                   15
14 files changed, 156 insertions(+), 53 deletions(-)
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index b957061424a1..8f3d6d31da52 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -120,7 +120,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
/* Be sure all data is written before enabling the task */
wmb();
- v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
+ /* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored
+ * on older SoCs, we have no reason to complicate things.
+ */
+ v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8);
writel(v, ce->base + CE_TLR);
mutex_unlock(&ce->mlock);
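
The sun8i-ce hunk above is an endianness fix: t_common_ctl is a little-endian descriptor field, so it must go through le32_to_cpu() before being merged with the start bit and written to the native-endian register. A minimal sketch of that pattern; the helper name ce_start_task and the register argument are illustrative, not the driver's:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative helper (not part of the driver): merge the start bit with the
 * low 7 bits of a little-endian descriptor field, then write the result to
 * the task-launch register. Bits 8..14 only matter on the H6 SoC.
 */
static void ce_start_task(void __iomem *tlr_reg, __le32 t_common_ctl)
{
	u32 v = 1 | ((le32_to_cpu(t_common_ctl) & 0x7F) << 8);

	writel(v, tlr_reg);
}
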
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index bc35aa0ec07a..d7f2840cf0a9 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -114,6 +114,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_DES
+ select CRYPTO_XTS
help
Selecting this will use CAAM Queue Interface (QI) for sending
& receiving crypto jobs to/from CAAM. This gives better performance
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 315d53499ce8..829d41a1e5da 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -18,6 +18,8 @@
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
+#include <crypto/xts.h>
+#include <asm/unaligned.h>
/*
* crypto alg
@@ -67,6 +69,12 @@ struct caam_ctx {
struct device *qidev;
spinlock_t lock; /* Protects multiple init of driver context */
struct caam_drv_ctx *drv_ctx[NUM_OP];
+ bool xts_key_fallback;
+ struct crypto_skcipher *fallback;
+};
+
+struct caam_skcipher_req_ctx {
+ struct skcipher_request fallback_req;
};
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -726,12 +734,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
int ret = 0;
+ int err;
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ err = xts_verify_key(skcipher, key, keylen);
+ if (err) {
dev_dbg(jrdev, "key size mismatch\n");
- return -EINVAL;
+ return err;
}
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
+ ctx->xts_key_fallback = true;
+
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ if (err)
+ return err;
+
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
@@ -1373,6 +1390,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return edesc;
}
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
@@ -1383,6 +1408,22 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
if (!req->cryptlen)
return 0;
+ if (ctx->fallback && (xts_skcipher_ivsize(req) ||
+ ctx->xts_key_fallback)) {
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
+ }
+
if (unlikely(caam_congested))
return -EAGAIN;
@@ -1507,6 +1548,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam-qi",
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = xts_skcipher_setkey,
@@ -2440,9 +2482,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+ int ret = 0;
+
+ if (alg_aai == OP_ALG_AAI_XTS) {
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
+ tfm_name, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ ctx->fallback = fallback;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(fallback));
+ }
+
+ ret = caam_init_common(ctx, &caam_alg->caam, false);
+ if (ret && ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
- false);
+ return ret;
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2468,7 +2533,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_skcipher_ctx(tfm));
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+ caam_exit_common(ctx);
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2502,7 +2571,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
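
The caamalg_qi changes above follow the crypto API's standard skcipher fallback pattern: allocate a software implementation of the same algorithm with CRYPTO_ALG_NEED_FALLBACK at init time, reserve room for its request inside the driver's request context, and hand any request the hardware cannot process (here, unusual XTS key sizes or a non-zero second IV half) to the fallback. A minimal sketch of that pattern, assuming illustrative names (my_ctx, my_req_ctx, my_init, my_crypt) rather than the CAAM ones:

#include <crypto/skcipher.h>
#include <linux/err.h>

struct my_ctx {
	struct crypto_skcipher *fallback;
};

struct my_req_ctx {
	struct skcipher_request fallback_req;	/* must be last */
};

static int my_init(struct crypto_skcipher *tfm)
{
	struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
	const char *name = crypto_tfm_alg_name(&tfm->base);

	/* Ask the crypto API for a (usually software) fallback of the same alg. */
	ctx->fallback = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* Reserve space for the fallback's request in our request context. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_req_ctx) +
				    crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static int my_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct my_req_ctx *rctx = skcipher_request_ctx(req);

	/* Mirror the original request onto the fallback transform. */
	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
			 crypto_skcipher_decrypt(&rctx->fallback_req);
}

The matching exit path frees the fallback with crypto_free_skcipher(), as caam_cra_exit() does above.
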
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 64112c736810..7234b95241e9 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1746,7 +1746,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
break;
default:
ret = -EINVAL;
- goto e_ctx;
+ goto e_data;
}
} else {
/* Stash the context */
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d39e1664fc7e..3c65bf070c90 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -65,8 +65,12 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_get(struct device *dev)
{
int rc = pm_runtime_get_sync(dev);
+ if (rc < 0) {
+ pm_runtime_put_noidle(dev);
+ return rc;
+ }
- return (rc == 1 ? 0 : rc);
+ return 0;
}
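
The ccree hunk applies the usual pm_runtime_get_sync() error handling: the call increments the usage count even when it fails, so the count has to be dropped with pm_runtime_put_noidle() before propagating the error, while a return value of 1 (device already active) still counts as success. A sketch of that pattern; the helper name my_pm_get is illustrative:

#include <linux/pm_runtime.h>

static int my_pm_get(struct device *dev)
{
	int rc = pm_runtime_get_sync(dev);

	if (rc < 0) {
		/* The usage count was bumped even on failure; drop it. */
		pm_runtime_put_noidle(dev);
		return rc;
	}
	/* rc of 0 or 1 both mean the device is now active. */
	return 0;
}

Later kernels also provide pm_runtime_resume_and_get(), which wraps exactly this sequence.
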
void cc_pm_put_suspend(struct device *dev)
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 54093115eb95..62fbc7df022b 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -92,11 +92,13 @@ static void chtls_sock_release(struct kref *ref)
static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
struct sock *sk)
{
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
struct net_device *ndev = cdev->ports[0];
#if IS_ENABLED(CONFIG_IPV6)
struct net_device *temp;
int addr_type;
#endif
+ int i;
switch (sk->sk_family) {
case PF_INET:
@@ -127,8 +129,12 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
return NULL;
if (is_vlan_dev(ndev))
- return vlan_dev_real_dev(ndev);
- return ndev;
+ ndev = vlan_dev_real_dev(ndev);
+
+ for_each_port(adap, i)
+ if (cdev->ports[i] == ndev)
+ return ndev;
+ return NULL;
}
static void assign_rxopt(struct sock *sk, unsigned int opt)
@@ -477,7 +483,6 @@ void chtls_destroy_sock(struct sock *sk)
chtls_purge_write_queue(sk);
free_tls_keyid(sk);
kref_put(&csk->kref, chtls_sock_release);
- csk->cdev = NULL;
if (sk->sk_family == AF_INET)
sk->sk_prot = &tcp_prot;
#if IS_ENABLED(CONFIG_IPV6)
@@ -736,14 +741,13 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET6) {
- struct chtls_sock *csk;
+ struct net_device *ndev = chtls_find_netdev(cdev, sk);
int addr_type = 0;
- csk = rcu_dereference_sk_user_data(sk);
addr_type = ipv6_addr_type((const struct in6_addr *)
&sk->sk_v6_rcv_saddr);
if (addr_type != IPV6_ADDR_ANY)
- cxgb4_clip_release(csk->egress_dev, (const u32 *)
+ cxgb4_clip_release(ndev, (const u32 *)
&sk->sk_v6_rcv_saddr, 1);
}
#endif
@@ -768,14 +772,13 @@ static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
@@ -792,15 +795,13 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
-
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static void chtls_purge_wr_queue(struct sock *sk)
@@ -1156,6 +1157,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
ndev = n->dev;
if (!ndev)
goto free_dst;
+ if (is_vlan_dev(ndev))
+ ndev = vlan_dev_real_dev(ndev);
+
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@@ -1506,7 +1510,6 @@ static void add_to_reap_list(struct sock *sk)
struct chtls_sock *csk = sk->sk_user_data;
local_bh_disable();
- bh_lock_sock(sk);
release_tcp_port(sk); /* release the port immediately */
spin_lock(&reap_list_lock);
@@ -1515,7 +1518,6 @@ static void add_to_reap_list(struct sock *sk)
if (!csk->passive_reap_next)
schedule_work(&reap_task);
spin_unlock(&reap_list_lock);
- bh_unlock_sock(sk);
local_bh_enable();
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 2e9acae1cba3..188d871f6b8c 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -902,9 +902,9 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
return 0;
}
-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{
- return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
+ return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
}
static int csk_wait_memory(struct chtls_dev *cdev,
@@ -1240,6 +1240,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
copied = 0;
csk = rcu_dereference_sk_user_data(sk);
cdev = csk->cdev;
+ lock_sock(sk);
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
err = sk_stream_wait_connect(sk, &timeo);
@@ -1584,6 +1585,7 @@ skip_copy:
tp->urg_data = 0;
if ((avail + offset) >= skb->len) {
+ struct sk_buff *next_skb;
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
@@ -1594,8 +1596,10 @@ skip_copy:
chtls_free_skb(sk, skb);
buffers_freed++;
hws->copied_seq = 0;
- if (copied >= target &&
- !skb_peek(&sk->sk_receive_queue))
+ next_skb = skb_peek(&sk->sk_receive_queue);
+ if (copied >= target && !next_skb)
+ break;
+ if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR)
break;
}
} while (len > 0);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 64614a9bdf21..047826f18bd3 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -332,11 +332,14 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
ret = sec_alloc_pbuf_resource(dev, res);
if (ret) {
dev_err(dev, "fail to alloc pbuf dma resource!\n");
- goto alloc_fail;
+ goto alloc_pbuf_fail;
}
}
return 0;
+alloc_pbuf_fail:
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
sec_free_civ_resource(dev, res);
@@ -447,8 +450,10 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
- if (!ctx->qp_ctx)
- return -ENOMEM;
+ if (!ctx->qp_ctx) {
+ ret = -ENOMEM;
+ goto err_destroy_qps;
+ }
for (i = 0; i < sec->ctx_q_num; i++) {
ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
@@ -457,12 +462,15 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
}
return 0;
+
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
- sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
+err_destroy_qps:
+ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
+
return ret;
}
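
The sec_crypto hunk straightens out the error-unwind order in sec_ctx_base_init(): each failure jumps to a label that releases only what was already set up, and resources are released in reverse order of acquisition. A generic sketch of that goto-unwind shape, using purely illustrative names (my_ctx, my_ctx_init, setup_one, teardown_one are placeholders, not driver APIs):

#include <linux/slab.h>

struct my_ctx {			/* illustrative context */
	int *qps;
	int *qp_ctx;
};

static int my_ctx_init(struct my_ctx *ctx, int nq)
{
	int i, ret;

	ctx->qps = kcalloc(nq, sizeof(*ctx->qps), GFP_KERNEL);		/* step 1 */
	if (!ctx->qps)
		return -ENOMEM;

	ctx->qp_ctx = kcalloc(nq, sizeof(*ctx->qp_ctx), GFP_KERNEL);	/* step 2 */
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_free_qps;		/* undo step 1 only */
	}

	for (i = 0; i < nq; i++) {
		ret = setup_one(ctx, i);	/* hypothetical per-queue setup */
		if (ret)
			goto err_teardown;	/* undo queues 0..i-1, then 2, then 1 */
	}

	return 0;

err_teardown:
	while (--i >= 0)
		teardown_one(ctx, i);		/* hypothetical per-queue teardown */
	kfree(ctx->qp_ctx);
err_free_qps:
	kfree(ctx->qps);
	return ret;
}
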
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index ad73fc946682..3be6e0db0f9f 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -528,7 +528,7 @@ static void release_ixp_crypto(struct device *dev)
if (crypt_virt) {
dma_free_coherent(dev,
- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
+ NPE_QLEN * sizeof(struct crypt_ctl),
crypt_virt, crypt_phys);
}
}
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index 7e3ad085b5bd..efce3a83b35a 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
{
struct mtk_ring **ring = cryp->ring;
- int i, err = ENOMEM;
+ int i;
for (i = 0; i < MTK_RING_MAX; i++) {
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
@@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
return 0;
err_cleanup:
- for (; i--; ) {
+ do {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->res_base, ring[i]->res_dma);
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->cmd_base, ring[i]->cmd_dma);
kfree(ring[i]);
- }
- return err;
+ } while (i--);
+ return -ENOMEM;
}
static int mtk_crypto_probe(struct platform_device *pdev)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 82691a057d2a..bc956dfb34de 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -456,6 +456,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
u32 val, mask;
+ if (likely(ctx->digcnt))
+ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
+
/*
* Setting ALGO_CONST only for the first iteration and
* CLOSE_HASH only for the last one. Note that flags mode bits
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 7384e91c8b32..0d32b641a7f9 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1666,11 +1666,6 @@ static int spacc_probe(struct platform_device *pdev)
goto err_clk_put;
}
- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (ret)
- goto err_clk_disable;
-
-
/*
* Use an IRQ threshold of 50% as a default. This seems to be a
* reasonable trade off of latency against throughput but can be
@@ -1678,6 +1673,10 @@ static int spacc_probe(struct platform_device *pdev)
*/
engine->stat_irq_thresh = (engine->fifo_sz / 2);
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
+
/*
* Configure the interrupts. We only use the STAT_CNT interrupt as we
* only submit a new packet for processing when we complete another in
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 4ef3eb11361c..4a4c3284ae1f 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -3,6 +3,7 @@ config CRYPTO_DEV_STM32_CRC
tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
+ select CRC32
help
This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 3ba41148c2a4..2c13f5214d2c 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -6,6 +6,7 @@
#include <linux/bitrev.h>
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -147,7 +148,6 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
- unsigned long flags;
crc = stm32_crc_get_next_crc();
if (!crc)
@@ -155,7 +155,15 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
pm_runtime_get_sync(crc->dev);
- spin_lock_irqsave(&crc->lock, flags);
+ if (!spin_trylock(&crc->lock)) {
+ /* Hardware is busy, calculate crc32 by software */
+ if (mctx->poly == CRC32_POLY_LE)
+ ctx->partial = crc32_le(ctx->partial, d8, length);
+ else
+ ctx->partial = __crc32c_le(ctx->partial, d8, length);
+
+ goto pm_out;
+ }
/*
* Restore previously calculated CRC for this context as init value
@@ -195,8 +203,9 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
- spin_unlock_irqrestore(&crc->lock, flags);
+ spin_unlock(&crc->lock);
+pm_out:
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
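
The stm32-crc32 change replaces spin_lock_irqsave() with spin_trylock(): when the single hardware CRC unit is already in use, the update is done with the kernel's software CRC32 helpers instead of spinning with interrupts disabled. A minimal sketch of that try-hardware-else-software shape; crc_update and its arguments are illustrative, only crc32_le()/__crc32c_le() and the locking calls are real kernel APIs:

#include <linux/crc32.h>
#include <linux/spinlock.h>

static u32 crc_update(spinlock_t *hw_lock, u32 partial, bool ieee_poly,
		      const u8 *d8, size_t length)
{
	if (!spin_trylock(hw_lock)) {
		/* Hardware busy: compute this chunk in software, no spinning. */
		return ieee_poly ? crc32_le(partial, d8, length)
				 : __crc32c_le(partial, d8, length);
	}

	/* ... program the CRC peripheral and read back the result here ... */

	spin_unlock(hw_lock);
	return partial;	/* placeholder: would be the value read from the peripheral */
}

The trade-off is that contended updates take the slower software path, but burst_update() no longer blocks in atomic context waiting for the peripheral.
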