Diffstat (limited to 'drivers/crypto')
 drivers/crypto/amcc/crypto4xx_core.c         |  10
 drivers/crypto/caam/caamalg.c                |   3
 drivers/crypto/caam/caamalg_qi2.c            |   3
 drivers/crypto/caam/caampkc.c                |   4
 drivers/crypto/cavium/cpt/cptpf_main.c       |   6
 drivers/crypto/ccp/ccp-dmaengine.c           |  17
 drivers/crypto/ccp/ccp-ops.c                 |   5
 drivers/crypto/ccree/cc_debugfs.c            |   2
 drivers/crypto/ccree/cc_driver.c             |  11
 drivers/crypto/ccree/cc_hash.c               |  49
 drivers/crypto/ccree/cc_hash.h               |   2
 drivers/crypto/chelsio/chtls/chtls_cm.c      |   8
 drivers/crypto/hisilicon/sec/sec_algs.c      |  14
 drivers/crypto/hisilicon/sec/sec_drv.h       |   2
 drivers/crypto/img-hash.c                    |   8
 drivers/crypto/inside-secure/safexcel.c      |  52
 drivers/crypto/inside-secure/safexcel.h      |   3
 drivers/crypto/marvell/cipher.c              |   3
 drivers/crypto/n2_core.c                     |   6
 drivers/crypto/nx/Makefile                   |   2
 drivers/crypto/nx/nx.h                       |   4
 drivers/crypto/omap-sham.c                   |   2
 drivers/crypto/qat/qat_common/adf_aer.c      |  23
 drivers/crypto/qcom-rng.c                    |   1
 drivers/crypto/sahara.c                      | 127
 drivers/crypto/stm32/stm32-crc32.c           |   6
 drivers/crypto/stm32/stm32-hash.c            |   9
 drivers/crypto/virtio/Kconfig                |   1
 drivers/crypto/virtio/virtio_crypto_common.h |   2
 drivers/crypto/virtio/virtio_crypto_core.c   |  26
 30 files changed, 228 insertions(+), 183 deletions(-)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 230e8902c727..b58f21dd3fed 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -521,7 +521,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
{
struct skcipher_request *req;
struct scatterlist *dst;
- dma_addr_t addr;
req = skcipher_request_cast(pd_uinfo->async_req);
@@ -530,8 +529,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
req->cryptlen, req->dst);
} else {
dst = pd_uinfo->dest_va;
- addr = dma_map_page(dev->core_dev->device, sg_page(dst),
- dst->offset, dst->length, DMA_FROM_DEVICE);
+ dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
+ DMA_FROM_DEVICE);
}
if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
@@ -556,10 +555,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct ahash_request *ahash_req;
ahash_req = ahash_request_cast(pd_uinfo->async_req);
- ctx = crypto_tfm_ctx(ahash_req->base.tfm);
+ ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
- crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
- crypto_tfm_ctx(ahash_req->base.tfm));
+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index fdd994ee55e2..2db88213f13e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -553,7 +553,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
return -EINVAL;
}
- ctx->cdata.key_virt = key;
+ memcpy(ctx->key, key, keylen);
+ ctx->cdata.key_virt = ctx->key;
ctx->cdata.keylen = keylen - saltlen;
return chachapoly_set_sh_desc(aead);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 28692d068176..b8d277bfbd32 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -639,7 +639,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
return -EINVAL;
}
- ctx->cdata.key_virt = key;
+ memcpy(ctx->key, key, keylen);
+ ctx->cdata.key_virt = ctx->key;
ctx->cdata.keylen = keylen - saltlen;
return chachapoly_set_sh_desc(aead);
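[Editor's note] Both caam chachapoly_setkey() hunks above fix the same lifetime bug: the shared descriptor kept a pointer to the caller's key buffer, which may be gone by the time the descriptor runs. A minimal user-space sketch of the pattern, not the CAAM API — my_ctx, my_setkey and MY_MAX_KEYLEN are hypothetical names:

/*
 * Copy key material into context-owned storage instead of caching the
 * caller's pointer, so later descriptor builds see a stable buffer.
 */
#include <errno.h>
#include <string.h>

#define MY_MAX_KEYLEN 64

struct my_ctx {
	unsigned char key[MY_MAX_KEYLEN];
	unsigned int keylen;
	const unsigned char *key_virt;	/* what the descriptor points at */
};

static int my_setkey(struct my_ctx *ctx, const unsigned char *key,
		     unsigned int keylen)
{
	if (keylen > MY_MAX_KEYLEN)
		return -EINVAL;
	memcpy(ctx->key, key, keylen);	/* ctx now owns a stable copy */
	ctx->key_virt = ctx->key;	/* never the caller's buffer */
	ctx->keylen = keylen;
	return 0;
}

int main(void)
{
	struct my_ctx ctx;
	const unsigned char key[16] = { 1, 2, 3 };

	return my_setkey(&ctx, key, sizeof(key)) ? 1 : 0;
}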
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 30e3f41ed872..e0bba20c13cb 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -225,7 +225,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
if (len && *buff)
break;
- sg_miter_next(&miter);
+ if (!sg_miter_next(&miter))
+ break;
+
buff = miter.addr;
len = miter.length;
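[Editor's note] The caampkc fix checks sg_miter_next()'s return value so the leading-zero scan stops when the scatterlist runs out instead of reusing a stale mapping. A self-contained sketch of that loop shape, with a hypothetical next_chunk() iterator standing in for sg_miter:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for sg_miter: walks an array of buffers. */
struct chunk { const unsigned char *addr; size_t len; };
struct chunk_iter { const struct chunk *c; size_t n, i; };

static bool next_chunk(struct chunk_iter *it, const unsigned char **addr,
		       size_t *len)
{
	if (it->i >= it->n)
		return false;	/* exhausted: caller must stop */
	*addr = it->c[it->i].addr;
	*len = it->c[it->i].len;
	it->i++;
	return true;
}

/* Count leading zero bytes across chunks; the iterator's return value is
 * checked before addr/len are touched again, mirroring the fixed loop. */
static size_t count_leading_zeros(struct chunk_iter *it)
{
	const unsigned char *buff;
	size_t len, zeros = 0;

	if (!next_chunk(it, &buff, &len))
		return 0;
	for (;;) {
		while (len && !*buff) { zeros++; buff++; len--; }
		if (len && *buff)
			break;			/* first non-zero byte found */
		if (!next_chunk(it, &buff, &len))
			break;			/* no more data, no stale reuse */
	}
	return zeros;
}

int main(void)
{
	const unsigned char a[] = { 0, 0, 0 }, b[] = { 0, 5, 6 };
	const struct chunk cs[] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct chunk_iter it = { cs, 2, 0 };

	printf("%zu leading zeros\n", count_leading_zeros(&it));	/* 4 */
	return 0;
}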
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 781949027451..d9362199423f 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -254,6 +254,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
const struct firmware *fw_entry;
struct device *dev = &cpt->pdev->dev;
struct ucode_header *ucode;
+ unsigned int code_length;
struct microcode *mcode;
int j, ret = 0;
@@ -264,11 +265,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ucode = (struct ucode_header *)fw_entry->data;
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
- mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size) {
+ code_length = ntohl(ucode->code_length);
+ if (code_length == 0 || code_length >= INT_MAX / 2) {
ret = -EINVAL;
goto fw_release;
}
+ mcode->code_size = code_length * 2;
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
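[Editor's note] The cptpf change guards an attacker-controllable firmware header field: ntohl(ucode->code_length) * 2 can overflow a signed int, so the value is bounded before doubling. A runnable sketch of the guard, assuming the same INT_MAX / 2 bound:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Validate an untrusted 32-bit length before doubling it. */
static int compute_code_size(uint32_t code_length, unsigned int *out)
{
	if (code_length == 0 || code_length >= INT_MAX / 2)
		return -1;		/* reject: zero, or doubling overflows */
	*out = code_length * 2;
	return 0;
}

int main(void)
{
	unsigned int size;

	printf("%d\n", compute_code_size(0x1000, &size));	/* ok: 0 */
	printf("%d\n", compute_code_size(0, &size));		/* rejected */
	printf("%d\n", compute_code_size(0x7fffffff, &size));	/* rejected */
	return 0;
}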
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index b3eea329f840..e416456b2b8a 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -642,11 +642,27 @@ static void ccp_dma_release(struct ccp_device *ccp)
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
+
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
}
+static void ccp_dma_release_channels(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_chan *dma_chan;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+ }
+}
+
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@@ -767,6 +783,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
+ ccp_dma_release_channels(ccp);
dma_async_device_unregister(dma_dev);
ccp_dma_release(ccp);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index e826c4b6b3af..4865eb047866 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -178,8 +178,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
wa->dma.address = dma_map_single(wa->dev, wa->address, len,
dir);
- if (dma_mapping_error(wa->dev, wa->dma.address))
+ if (dma_mapping_error(wa->dev, wa->dma.address)) {
+ kfree(wa->address);
+ wa->address = NULL;
return -ENOMEM;
+ }
wa->dma.length = len;
}
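[Editor's note] Here the work-area buffer is freed as soon as dma_map_single() reports failure, and the pointer is cleared so the generic teardown path cannot free it twice. A user-space analogue, with try_map() as a fake stand-in for the mapping step:

#include <stdlib.h>

struct workarea {
	void *address;
	size_t length;
};

static int try_map(struct workarea *wa)
{
	return wa->length > 4096;	/* pretend big mappings fail */
}

static int init_workarea(struct workarea *wa, size_t len)
{
	wa->address = calloc(1, len);
	if (!wa->address)
		return -1;
	wa->length = len;

	if (try_map(wa)) {
		free(wa->address);
		wa->address = NULL;	/* teardown sees NULL, not a freed ptr */
		return -1;
	}
	return 0;
}

static void fini_workarea(struct workarea *wa)
{
	free(wa->address);		/* safe: free(NULL) is a no-op */
	wa->address = NULL;
}

int main(void)
{
	struct workarea wa = { 0 };

	if (init_workarea(&wa, 8192))	/* fails via the fake mapping error */
		fini_workarea(&wa);	/* still safe: no double free */
	return 0;
}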
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 566999738698..47077dd77f5d 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -59,7 +59,7 @@ void __init cc_debugfs_global_init(void)
cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
}
-void __exit cc_debugfs_global_fini(void)
+void cc_debugfs_global_fini(void)
{
debugfs_remove(cc_debugfs_dir);
}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8b8eee513c27..3d59fef1fbee 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -653,10 +653,17 @@ static struct platform_driver ccree_driver = {
static int __init ccree_init(void)
{
- cc_hash_global_init();
+ int rc;
+
cc_debugfs_global_init();
- return platform_driver_register(&ccree_driver);
+ rc = platform_driver_register(&ccree_driver);
+ if (rc) {
+ cc_debugfs_global_fini();
+ return rc;
+ }
+
+ return 0;
}
module_init(ccree_init);
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index bc71bdf44a9f..9f67df0a4921 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -39,12 +39,19 @@ static const u32 cc_sha256_init[] = {
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static u64 cc_sha384_init[] = {
- SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
- SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static u64 cc_sha512_init[] = {
- SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
- SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+
+/*
+ * Due to the way the HW works, every double word in the SHA384 and SHA512
+ * larval hashes must be stored in hi/lo order
+ */
+#define hilo(x) upper_32_bits(x), lower_32_bits(x)
+static const u32 cc_sha384_init[] = {
+ hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
+ hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
+static const u32 cc_sha512_init[] = {
+ hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
+ hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
+
static const u32 cc_sm3_init[] = {
SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
@@ -1948,8 +1955,8 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
}
if (large_sha_supported) {
- cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
- (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
+ cc_set_sram_desc(cc_sha384_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_sha384_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
@@ -1957,8 +1964,8 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
sram_buff_ofs += sizeof(cc_sha384_init);
larval_seq_len = 0;
- cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
- (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
+ cc_set_sram_desc(cc_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_sha512_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
@@ -1969,28 +1976,6 @@ init_digest_const_err:
return rc;
}
-static void __init cc_swap_dwords(u32 *buf, unsigned long size)
-{
- int i;
- u32 tmp;
-
- for (i = 0; i < size; i += 2) {
- tmp = buf[i];
- buf[i] = buf[i + 1];
- buf[i + 1] = tmp;
- }
-}
-
-/*
- * Due to the way the HW works we need to swap every
- * double word in the SHA384 and SHA512 larval hashes
- */
-void __init cc_hash_global_init(void)
-{
- cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
- cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
-}
-
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle;
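[Editor's note] The cc_hash rework replaces a one-shot runtime dword swap in an __init hook with compile-time construction: hilo() expands each 64-bit larval constant into its high 32 bits followed by its low 32 bits, so the arrays can be const. A runnable sketch of the macro, with upper_32_bits()/lower_32_bits() re-implemented for user space and only two of the H constants shown:

#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((n) >> 32))
#define lower_32_bits(n) ((uint32_t)(n))
#define hilo(x) upper_32_bits(x), lower_32_bits(x)	/* hi word first */

#define H7 0x5be0cd19137e2179ULL	/* SHA512_H7, for illustration */
#define H6 0x1f83d9abfb41bd6bULL	/* SHA512_H6 */

/* const and correctly ordered at build time; no init-time swap needed */
static const uint32_t larval[] = { hilo(H7), hilo(H6) };

int main(void)
{
	for (size_t i = 0; i < sizeof(larval) / sizeof(larval[0]); i++)
		printf("%08x\n", larval[i]);	/* 5be0cd19 137e2179 ... */
	return 0;
}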
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 0d6dc61484d7..3dbd0abefea0 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -104,6 +104,4 @@ cc_digest_len_addr(void *drvdata, u32 mode);
*/
cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
-void cc_hash_global_init(void);
-
#endif /*__CC_HASH_H__*/
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 82b76df43ae5..40054731f800 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1103,8 +1103,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- sock_net(newsk)->
- ipv4.sysctl_tcp_window_scaling,
+ READ_ONCE(sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1235,7 +1235,7 @@ static void chtls_pass_accept_request(struct sock *sk,
chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
ip_dsfield = ipv4_get_dsfield(iph);
if (req->tcpopt.wsf <= 14 &&
- sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
@@ -2092,7 +2092,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
if (tp->snd_una != snd_una) {
tp->snd_una = snd_una;
- tp->rcv_tstamp = tcp_time_stamp(tp);
+ tp->rcv_tstamp = tcp_jiffies32;
if (tp->snd_una == tp->snd_nxt &&
!csk_flag_nochk(csk, CSK_TX_FAILOVER))
csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
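[Editor's note] The chtls hunks wrap sysctl reads in READ_ONCE() because the sysctl writer can change the value concurrently; the annotation forces a single, non-torn load the compiler can neither cache nor repeat. A minimal sketch of the idea — the kernel's real macro in include/asm-generic/rwonce.h is more involved:

#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static int sysctl_tcp_window_scaling = 1;	/* written by another thread */

static void handle_request(void)
{
	/* one snapshot, used consistently for the whole decision */
	int wscale_ok = READ_ONCE(sysctl_tcp_window_scaling);

	printf("window scaling %s\n", wscale_ok ? "on" : "off");
}

int main(void)
{
	handle_request();
	return 0;
}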
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 4ad4ffd90cee..2402941a7f2f 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -449,7 +449,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
*/
}
- mutex_lock(&ctx->queue->queuelock);
+ spin_lock_bh(&ctx->queue->queuelock);
/* Put the IV in place for chained cases */
switch (ctx->cipher_alg) {
case SEC_C_AES_CBC_128:
@@ -509,7 +509,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
list_del(&backlog_req->backlog_head);
}
}
- mutex_unlock(&ctx->queue->queuelock);
+ spin_unlock_bh(&ctx->queue->queuelock);
mutex_lock(&sec_req->lock);
list_del(&sec_req_el->head);
@@ -798,7 +798,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
*/
/* Grab a big lock for a long time to avoid concurrency issues */
- mutex_lock(&queue->queuelock);
+ spin_lock_bh(&queue->queuelock);
/*
* Can go on to queue if we have space in either:
@@ -814,15 +814,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
ret = -EBUSY;
if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
list_add_tail(&sec_req->backlog_head, &ctx->backlog);
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
goto out;
}
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
goto err_free_elements;
}
ret = sec_send_request(sec_req, queue);
- mutex_unlock(&queue->queuelock);
+ spin_unlock_bh(&queue->queuelock);
if (ret)
goto err_free_elements;
@@ -881,7 +881,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
if (IS_ERR(ctx->queue))
return PTR_ERR(ctx->queue);
- mutex_init(&ctx->queue->queuelock);
+ spin_lock_init(&ctx->queue->queuelock);
ctx->queue->havesoftqueue = false;
return 0;
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 4d9063a8b10b..0bf4d7c3856c 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -347,7 +347,7 @@ struct sec_queue {
DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
bool havesoftqueue;
- struct mutex queuelock;
+ spinlock_t queuelock;
void *shadow[SEC_QUEUE_LEN];
};
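[Editor's note] The sec conversion from a mutex to a spinlock (taken with the _bh variants) reflects that the completion callback runs in atomic context, where a sleeping lock is illegal. A tiny user-space analogue, with a pthread spinlock playing the role of spin_lock_bh():

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t queuelock;
static int queue_depth;

static void enqueue_one(void)
{
	pthread_spin_lock(&queuelock);	/* spins, never sleeps */
	queue_depth++;
	pthread_spin_unlock(&queuelock);
}

int main(void)
{
	pthread_spin_init(&queuelock, PTHREAD_PROCESS_PRIVATE);
	enqueue_one();
	printf("depth %d\n", queue_depth);
	pthread_spin_destroy(&queuelock);
	return 0;
}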
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index fe4cc8babe1c..17cc44f14e5c 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -356,12 +356,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
static void img_hash_dma_task(unsigned long d)
{
struct img_hash_dev *hdev = (struct img_hash_dev *)d;
- struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+ struct img_hash_request_ctx *ctx;
u8 *addr;
size_t nbytes, bleft, wsend, len, tbc;
struct scatterlist tsg;
- if (!hdev->req || !ctx->sg)
+ if (!hdev->req)
+ return;
+
+ ctx = ahash_request_ctx(hdev->req);
+ if (!ctx->sg)
return;
addr = sg_virt(ctx->sg);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4d9d97c59ee3..04638e183351 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1090,11 +1090,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
static int safexcel_request_ring_irq(void *pdev, int irqid,
int is_pci_dev,
+ int ring_id,
irq_handler_t handler,
irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
- int ret, irq;
+ int ret, irq, cpu;
struct device *dev;
if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
@@ -1132,6 +1133,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
return ret;
}
+ /* Set affinity */
+ cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
return irq;
}
@@ -1462,19 +1467,23 @@ static int safexcel_probe_generic(void *pdev,
&priv->ring[i].rdr);
if (ret) {
dev_err(dev, "Failed to initialize rings\n");
- return ret;
+ goto err_cleanup_rings;
}
priv->ring[i].rdr_req = devm_kcalloc(dev,
EIP197_DEFAULT_RING_SIZE,
sizeof(*priv->ring[i].rdr_req),
GFP_KERNEL);
- if (!priv->ring[i].rdr_req)
- return -ENOMEM;
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
- if (!ring_irq)
- return -ENOMEM;
+ if (!ring_irq) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
ring_irq->priv = priv;
ring_irq->ring = i;
@@ -1482,14 +1491,17 @@ static int safexcel_probe_generic(void *pdev,
irq = safexcel_request_ring_irq(pdev,
EIP197_IRQ_NUMBER(i, is_pci_dev),
is_pci_dev,
+ i,
safexcel_irq_ring,
safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
- return irq;
+ ret = irq;
+ goto err_cleanup_rings;
}
+ priv->ring[i].irq = irq;
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
INIT_WORK(&priv->ring[i].work_data.work,
@@ -1498,8 +1510,10 @@ static int safexcel_probe_generic(void *pdev,
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue =
create_singlethread_workqueue(wq_name);
- if (!priv->ring[i].workqueue)
- return -ENOMEM;
+ if (!priv->ring[i].workqueue) {
+ ret = -ENOMEM;
+ goto err_cleanup_rings;
+ }
priv->ring[i].requests = 0;
priv->ring[i].busy = false;
@@ -1516,16 +1530,26 @@ static int safexcel_probe_generic(void *pdev,
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "HW init failed (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
ret = safexcel_register_algorithms(priv);
if (ret) {
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
- return ret;
+ goto err_cleanup_rings;
}
return 0;
+
+err_cleanup_rings:
+ for (i = 0; i < priv->config.rings; i++) {
+ if (priv->ring[i].irq)
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
+ if (priv->ring[i].workqueue)
+ destroy_workqueue(priv->ring[i].workqueue);
+ }
+
+ return ret;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
@@ -1627,8 +1651,10 @@ static int safexcel_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
- for (i = 0; i < priv->config.rings; i++)
+ for (i = 0; i < priv->config.rings; i++) {
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
destroy_workqueue(priv->ring[i].workqueue);
+ }
return 0;
}
@@ -1658,6 +1684,8 @@ static const struct of_device_id safexcel_of_match_table[] = {
{},
};
+MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
+
static struct platform_driver crypto_safexcel = {
.probe = safexcel_probe,
.remove = safexcel_remove,
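[Editor's note] Every safexcel probe failure now funnels into one err_cleanup_rings label that walks all rings and releases only what was actually created (a non-zero IRQ hint, a non-NULL workqueue). A compact sketch of this goto-unwind shape, with malloc() standing in for the ring resources:

#include <stdio.h>
#include <stdlib.h>

#define NRINGS 4

struct ring { void *workqueue; int irq; };

static int setup_all(struct ring rings[NRINGS])
{
	int i, ret;

	for (i = 0; i < NRINGS; i++) {
		rings[i].workqueue = malloc(64);
		if (!rings[i].workqueue) {
			ret = -1;
			goto err_cleanup_rings;
		}
		rings[i].irq = 100 + i;		/* pretend request_irq() */
	}
	return 0;

err_cleanup_rings:
	for (i = 0; i < NRINGS; i++) {		/* walk *all* slots ... */
		if (rings[i].irq)
			rings[i].irq = 0;	/* ... undo only what exists */
		free(rings[i].workqueue);	/* NULL-safe */
		rings[i].workqueue = NULL;
	}
	return ret;
}

int main(void)
{
	struct ring rings[NRINGS] = { 0 };

	printf("setup: %d\n", setup_all(rings));
	return 0;
}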
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 930cc48a6f85..6a4d7f09bca9 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -640,6 +640,9 @@ struct safexcel_ring {
*/
struct crypto_async_request *req;
struct crypto_async_request *backlog;
+
+ /* irq of this ring */
+ int irq;
};
/* EIP integration context flags */
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 84ceddfee76b..c7d433d1cd99 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -287,7 +287,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
err = verify_skcipher_des3_key(cipher, key);
@@ -610,7 +610,6 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
.decrypt = mv_cesa_ecb_des3_ede_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "mv-ecb-des3-ede",
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index dc15b06e96ab..80127344d82b 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1278,6 +1278,7 @@ struct n2_hash_tmpl {
const u32 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
+ u8 statesize;
u8 block_size;
u8 auth_type;
u8 hmac_type;
@@ -1309,6 +1310,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_MD5,
.hw_op_hashsz = MD5_DIGEST_SIZE,
.digest_size = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
.block_size = MD5_HMAC_BLOCK_SIZE },
{ .name = "sha1",
.hash_zero = sha1_zero_message_hash,
@@ -1317,6 +1319,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_SHA1,
.hw_op_hashsz = SHA1_DIGEST_SIZE,
.digest_size = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
.block_size = SHA1_BLOCK_SIZE },
{ .name = "sha256",
.hash_zero = sha256_zero_message_hash,
@@ -1325,6 +1328,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_SHA256,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
.block_size = SHA256_BLOCK_SIZE },
{ .name = "sha224",
.hash_zero = sha224_zero_message_hash,
@@ -1333,6 +1337,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_RESERVED,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
.block_size = SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
@@ -1474,6 +1479,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
halg = &ahash->halg;
halg->digestsize = tmpl->digest_size;
+ halg->statesize = tmpl->statesize;
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index 015155da59c2..76139865d7fa 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
nx-crypto-objs := nx.o \
- nx_debugfs.o \
nx-aes-cbc.o \
nx-aes-ecb.o \
nx-aes-gcm.o \
@@ -11,6 +10,7 @@ nx-crypto-objs := nx.o \
nx-sha256.o \
nx-sha512.o
+nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 7ecca168f8c4..5c77aba450cf 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -169,8 +169,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
void nx_debugfs_init(struct nx_crypto_driver *);
void nx_debugfs_fini(struct nx_crypto_driver *);
#else
-#define NX_DEBUGFS_INIT(drv) (0)
-#define NX_DEBUGFS_FINI(drv) (0)
+#define NX_DEBUGFS_INIT(drv) do {} while (0)
+#define NX_DEBUGFS_FINI(drv) do {} while (0)
#endif
#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
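[Editor's note] The nx.h stubs become do {} while (0) because a bare (0) used as a statement draws "statement with no effect" warnings, and an empty expansion misbehaves as the body of an un-braced if/else. A small demonstration of the idiom:

#include <stdio.h>

/* A no-op stub that is a true single statement in every position. */
#define NX_DEBUGFS_INIT(drv) do {} while (0)

int main(void)
{
	int have_debugfs = 0;

	if (have_debugfs)
		NX_DEBUGFS_INIT(drv);	/* un-braced if body: still correct */
	else
		printf("debugfs disabled\n");
	return 0;
}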
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index f8a146554b1f..dbab9e38223e 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2141,7 +2141,7 @@ static int omap_sham_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_irq_safe(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get sync: %d\n", err);
goto err_pm;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index f5e960d23a7a..a2989f0188ca 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -139,18 +139,28 @@ static void adf_device_reset_worker(struct work_struct *work)
if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
/* The device hanged and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
- kfree(reset_data);
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+ completion_done(&reset_data->compl))
+ kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n");
return;
}
adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- /* The dev is back alive. Notify the caller if in sync mode */
- if (reset_data->mode == ADF_DEV_RESET_SYNC)
- complete(&reset_data->compl);
- else
+ /*
+ * The dev is back alive. Notify the caller if in sync mode
+ *
+ * If the device restart takes more time than expected,
+ * the schedule_reset() function can time out and exit. This can be
+ * detected by calling the completion_done() function. In that case
+ * the reset_data structure needs to be freed here.
+ */
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+ completion_done(&reset_data->compl))
kfree(reset_data);
+ else
+ complete(&reset_data->compl);
}
static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
@@ -183,8 +193,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n");
ret = -EFAULT;
+ } else {
+ kfree(reset_data);
}
- kfree(reset_data);
return ret;
}
return 0;
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 3a633a0c40fd..6cc4fd005fe0 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -64,6 +64,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
+ break;
}
} while (currsize < max);
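[Editor's note] The qcom-rng fix adds a break after the partial tail copy: on that path currsize no longer advances, so without the break the while (currsize < max) test would spin forever. A runnable sketch of the corrected loop, with read_word() as a fake FIFO register:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read_word(void) { return 0xdeadbeef; }	/* fake RNG register */

static void rng_read(uint8_t *data, size_t max)
{
	size_t currsize = 0;

	do {
		uint32_t val = read_word();

		if (max - currsize >= sizeof(val)) {
			memcpy(data + currsize, &val, sizeof(val));
			currsize += sizeof(val);
		} else {
			/* copy only the remaining bytes, then stop: currsize
			 * no longer advances, so the loop must not re-test */
			memcpy(data + currsize, &val, max - currsize);
			break;
		}
	} while (currsize < max);
}

int main(void)
{
	uint8_t buf[6];

	rng_read(buf, sizeof(buf));	/* terminates despite the odd size */
	printf("%02x %02x\n", buf[0], buf[5]);
	return 0;
}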
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8ac8ec6decd5..19186617eafc 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -43,7 +43,6 @@
#define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
-#define FLAGS_NEW_KEY BIT(3)
#define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
@@ -141,8 +140,6 @@ struct sahara_hw_link {
};
struct sahara_ctx {
- unsigned long flags;
-
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
@@ -445,27 +442,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
int ret;
int i, j;
int idx = 0;
+ u32 len;
- /* Copy new key if necessary */
- if (ctx->flags & FLAGS_NEW_KEY) {
- memcpy(dev->key_base, ctx->key, ctx->keylen);
- ctx->flags &= ~FLAGS_NEW_KEY;
+ memcpy(dev->key_base, ctx->key, ctx->keylen);
- if (dev->flags & FLAGS_CBC) {
- dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
- dev->hw_desc[idx]->p1 = dev->iv_phys_base;
- } else {
- dev->hw_desc[idx]->len1 = 0;
- dev->hw_desc[idx]->p1 = 0;
- }
- dev->hw_desc[idx]->len2 = ctx->keylen;
- dev->hw_desc[idx]->p2 = dev->key_phys_base;
- dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+ if (dev->flags & FLAGS_CBC) {
+ dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+ dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+ } else {
+ dev->hw_desc[idx]->len1 = 0;
+ dev->hw_desc[idx]->p1 = 0;
+ }
+ dev->hw_desc[idx]->len2 = ctx->keylen;
+ dev->hw_desc[idx]->p2 = dev->key_phys_base;
+ dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+ dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
- dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+ idx++;
- idx++;
- }
dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
if (dev->nb_in_sg < 0) {
@@ -487,24 +481,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
DMA_TO_DEVICE);
if (ret != dev->nb_in_sg) {
dev_err(dev->device, "couldn't map in sg\n");
- goto unmap_in;
+ return -EINVAL;
}
+
ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
if (ret != dev->nb_out_sg) {
dev_err(dev->device, "couldn't map out sg\n");
- goto unmap_out;
+ goto unmap_in;
}
/* Create input links */
dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
sg = dev->in_sg;
+ len = dev->total;
for (i = 0; i < dev->nb_in_sg; i++) {
- dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg - 1)) {
dev->hw_link[i]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg);
}
@@ -513,12 +510,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
/* Create output links */
dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
sg = dev->out_sg;
+ len = dev->total;
for (j = i; j < dev->nb_out_sg + i; j++) {
- dev->hw_link[j]->len = sg->length;
+ dev->hw_link[j]->len = min(len, sg->length);
dev->hw_link[j]->p = sg->dma_address;
if (j == (dev->nb_out_sg + i - 1)) {
dev->hw_link[j]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
sg = sg_next(sg);
}
@@ -537,9 +536,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
return 0;
-unmap_out:
- dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_FROM_DEVICE);
unmap_in:
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
@@ -584,16 +580,17 @@ static int sahara_aes_process(struct ablkcipher_request *req)
timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!timeout) {
- dev_err(dev->device, "AES timeout\n");
- return -ETIMEDOUT;
- }
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
+ if (!timeout) {
+ dev_err(dev->device, "AES timeout\n");
+ return -ETIMEDOUT;
+ }
+
return 0;
}
@@ -608,7 +605,6 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
/* SAHARA only supports 128bit keys */
if (keylen == AES_KEYSIZE_128) {
memcpy(ctx->key, key, keylen);
- ctx->flags |= FLAGS_NEW_KEY;
return 0;
}
@@ -796,6 +792,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
int start)
{
struct scatterlist *sg;
+ unsigned int len;
unsigned int i;
int ret;
@@ -817,12 +814,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
if (!ret)
return -EFAULT;
+ len = rctx->total;
for (i = start; i < dev->nb_in_sg + start; i++) {
- dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg + start - 1)) {
dev->hw_link[i]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg);
}
@@ -903,24 +902,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
return 0;
}
-static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
-{
- if (!sg || !sg->length)
- return nbytes;
-
- while (nbytes && sg) {
- if (nbytes <= sg->length) {
- sg->length = nbytes;
- sg_mark_end(sg);
- break;
- }
- nbytes -= sg->length;
- sg = sg_next(sg);
- }
-
- return nbytes;
-}
-
static int sahara_sha_prepare_request(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -957,36 +938,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
hash_later, 0);
}
- /* nbytes should now be multiple of blocksize */
- req->nbytes = req->nbytes - hash_later;
-
- sahara_walk_and_recalc(req->src, req->nbytes);
-
+ rctx->total = len - hash_later;
/* have data from previous operation and current */
if (rctx->buf_cnt && req->nbytes) {
sg_init_table(rctx->in_sg_chain, 2);
sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
-
sg_chain(rctx->in_sg_chain, 2, req->src);
-
- rctx->total = req->nbytes + rctx->buf_cnt;
rctx->in_sg = rctx->in_sg_chain;
-
- req->src = rctx->in_sg_chain;
/* only data from previous operation */
} else if (rctx->buf_cnt) {
- if (req->src)
- rctx->in_sg = req->src;
- else
- rctx->in_sg = rctx->in_sg_chain;
- /* buf was copied into rembuf above */
+ rctx->in_sg = rctx->in_sg_chain;
sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
- rctx->total = rctx->buf_cnt;
/* no data from previous operation */
} else {
rctx->in_sg = req->src;
- rctx->total = req->nbytes;
- req->src = rctx->in_sg;
}
/* on next call, we only have the remaining data in the buffer */
@@ -1007,7 +972,10 @@ static int sahara_sha_process(struct ahash_request *req)
return ret;
if (rctx->first) {
- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ if (ret)
+ return ret;
+
dev->hw_desc[0]->next = 0;
rctx->first = 0;
} else {
@@ -1015,7 +983,10 @@ static int sahara_sha_process(struct ahash_request *req)
sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
dev->hw_desc[0]->next = dev->hw_phys_desc[1];
- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ if (ret)
+ return ret;
+
dev->hw_desc[1]->next = 0;
}
@@ -1028,18 +999,19 @@ static int sahara_sha_process(struct ahash_request *req)
timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!timeout) {
- dev_err(dev->device, "SHA timeout\n");
- return -ETIMEDOUT;
- }
if (rctx->sg_in_idx)
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
+ if (!timeout) {
+ dev_err(dev->device, "SHA timeout\n");
+ return -ETIMEDOUT;
+ }
+
memcpy(rctx->context, dev->context_base, rctx->context_size);
- if (req->result)
+ if (req->result && rctx->last)
memcpy(req->result, rctx->context, rctx->digest_size);
return 0;
@@ -1183,8 +1155,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct sahara_sha_reqctx) +
- SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+ sizeof(struct sahara_sha_reqctx));
return 0;
}
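[Editor's note] The recurring min(len, sg->length) in the sahara hunks clamps every DMA link to the bytes still owed, so a scatterlist entry longer than the request cannot make the engine overrun dev->total. A sketch of the bounded walk:

#include <stddef.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct seg { size_t length; };

static void build_links(const struct seg *sg, int nents, size_t total)
{
	size_t len = total;	/* bytes still owed to the request */
	int i;

	for (i = 0; i < nents; i++) {
		size_t link_len = MIN(len, sg[i].length);

		printf("link %d: %zu bytes\n", i, link_len);
		len -= link_len;
	}
}

int main(void)
{
	/* request is 100 bytes, but the last sg entry is 64 bytes long */
	const struct seg sg[] = { { 64 }, { 64 } };

	build_links(sg, 2, 100);	/* links are 64 then 36 -- not 64 */
	return 0;
}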
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index fb640e0ea614..b66e06818afc 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -98,7 +98,7 @@ static struct stm32_crc *stm32_crc_get_next_crc(void)
struct stm32_crc *crc;
spin_lock_bh(&crc_list.lock);
- crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
+ crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list);
if (crc)
list_move_tail(&crc->list, &crc_list.dev_list);
spin_unlock_bh(&crc_list.lock);
@@ -332,8 +332,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
struct stm32_crc *crc = platform_get_drvdata(pdev);
int ret = pm_runtime_get_sync(crc->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(crc->dev);
return ret;
+ }
spin_lock(&crc_list.lock);
list_del(&crc->list);
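[Editor's note] list_first_entry() on an empty list hands back a garbage pointer computed from the head itself, which is why the stm32-crc32 lookup switches to list_first_entry_or_null() and tests the result. A self-contained sketch using simplified versions of the kernel macros:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_empty(h) ((h)->next == (h))
#define list_first_entry_or_null(h, type, member) \
	(list_empty(h) ? NULL : container_of((h)->next, type, member))

struct crc_dev { int id; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct crc_dev *crc;

	crc = list_first_entry_or_null(&head, struct crc_dev, list);
	printf("%s\n", crc ? "found" : "no device");	/* "no device" */
	return 0;
}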
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index dcce15b55809..393d0cae10f1 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -562,9 +562,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
}
for_each_sg(rctx->sg, tsg, rctx->nents, i) {
+ sg[0] = *tsg;
len = sg->length;
- sg[0] = *tsg;
if (sg_is_last(sg)) {
if (hdev->dma_mode == 1) {
len = (ALIGN(sg->length, 16) - 16);
@@ -1553,9 +1553,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (!hdev)
return -ENODEV;
- ret = pm_runtime_resume_and_get(hdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(hdev->dev);
stm32_hash_unregister_algs(hdev);
@@ -1571,7 +1569,8 @@ static int stm32_hash_remove(struct platform_device *pdev)
pm_runtime_disable(hdev->dev);
pm_runtime_put_noidle(hdev->dev);
- clk_disable_unprepare(hdev->clk);
+ if (ret >= 0)
+ clk_disable_unprepare(hdev->clk);
return 0;
}
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index 01b625e4e5ad..6d3deb025b2a 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -5,7 +5,6 @@ config CRYPTO_DEV_VIRTIO
select CRYPTO_AEAD
select CRYPTO_BLKCIPHER
select CRYPTO_ENGINE
- default m
help
This driver provides support for virtio crypto device. If you
choose 'M' here, this module will be called virtio_crypto.
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 1c6e00da5a29..947a6e01d93f 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -10,6 +10,7 @@
#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
@@ -27,6 +28,7 @@ struct data_queue {
char name[32];
struct crypto_engine *engine;
+ struct tasklet_struct done_task;
};
struct virtio_crypto {
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index c8a962c62663..469da86c4084 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -22,27 +22,28 @@ virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
}
}
-static void virtcrypto_dataq_callback(struct virtqueue *vq)
+static void virtcrypto_done_task(unsigned long data)
{
- struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct data_queue *data_vq = (struct data_queue *)data;
+ struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
- unsigned long flags;
unsigned int len;
- unsigned int qid = vq->index;
- spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
- spin_unlock_irqrestore(
- &vcrypto->data_vq[qid].lock, flags);
if (vc_req->alg_cb)
vc_req->alg_cb(vc_req, len);
- spin_lock_irqsave(
- &vcrypto->data_vq[qid].lock, flags);
}
} while (!virtqueue_enable_cb(vq));
- spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct data_queue *dq = &vcrypto->data_vq[vq->index];
+
+ tasklet_schedule(&dq->done_task);
}
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
@@ -99,6 +100,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
ret = -ENOMEM;
goto err_engine;
}
+ tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
+ (unsigned long)&vi->data_vq[i]);
}
kfree(names);
@@ -431,11 +434,14 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
static void virtcrypto_remove(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
+ int i;
dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
+ for (i = 0; i < vcrypto->max_data_queues; i++)
+ tasklet_kill(&vcrypto->data_vq[i].done_task);
vdev->config->reset(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
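[Editor's note] The virtio_crypto rework splits completion handling in two: the virtqueue callback only schedules a per-queue tasklet, and the tasklet drains the completed buffers later (remove() kills it after stopping the device). A user-space-shaped sketch of the top-half/bottom-half split, with the schedule/run helpers standing in for tasklet_schedule() and the softirq machinery:

#include <stdbool.h>
#include <stdio.h>

struct data_queue {
	bool bh_pending;
	int completed;		/* stand-in for buffers in the virtqueue */
};

static void done_task(struct data_queue *dq)		/* bottom half */
{
	while (dq->completed > 0) {	/* drain all pending completions */
		dq->completed--;
		printf("handled one completion\n");
	}
}

static void dataq_callback(struct data_queue *dq)	/* top half */
{
	dq->bh_pending = true;	/* no real work in interrupt context */
}

static void run_pending_bh(struct data_queue *dq)
{
	if (dq->bh_pending) {
		dq->bh_pending = false;
		done_task(dq);
	}
}

int main(void)
{
	struct data_queue dq = { .bh_pending = false, .completed = 3 };

	dataq_callback(&dq);	/* "IRQ" fires */
	run_pending_bh(&dq);	/* later, outside the IRQ path */
	return 0;
}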