Diffstat (limited to 'crypto')
 crypto/ahash.c             | 18
 crypto/algapi.c            | 12
 crypto/async_tx/async_pq.c |  5
 crypto/cryptd.c            |  3
 crypto/mcryptd.c           | 23
 5 files changed, 41 insertions(+), 20 deletions(-)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f9caf0f74199..6978ad86e516 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -91,13 +91,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 
 	if (nbytes && walk->offset & alignmask && !err) {
 		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		walk->data += walk->offset;
-
 		nbytes = min(nbytes,
 			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
 		walk->entrylen -= nbytes;
 
-		return nbytes;
+		if (nbytes) {
+			walk->data += walk->offset;
+			return nbytes;
+		}
 	}
 
 	if (walk->flags & CRYPTO_ALG_ASYNC)
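When the aligned offset lands exactly on a page boundary, the min() above clamps nbytes to 0. The old code returned that 0 anyway, which callers of crypto_hash_walk_done() take to mean the whole walk has finished, so an unaligned entry with no aligned data left in the page terminated the hash walk early. The fix advances walk->data and returns only when bytes actually remain; otherwise it falls through and the walk continues on the next page. A minimal userspace sketch of the arithmetic, with ALIGN() and PAGE_SIZE redefined locally for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int alignmask = 15;	/* 16-byte alignment requirement */
	unsigned int offset = 4090;	/* unaligned, near the page end */
	unsigned int nbytes = 100;	/* bytes still owed to the caller */

	offset = ALIGN(offset, alignmask + 1);	/* rounds up to 4096 */
	if (nbytes > PAGE_SIZE - offset)
		nbytes = PAGE_SIZE - offset;	/* clamps to 0 here */

	if (nbytes)
		printf("hash %u more aligned bytes in this page\n", nbytes);
	else
		printf("page exhausted; keep walking, do not return 0\n");
	return 0;
}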
@@ -637,5 +638,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(ahash_attr_alg);
 
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+{
+	struct crypto_alg *alg = &halg->base;
+
+	if (alg->cra_type != &crypto_ahash_type)
+		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
+
+	return __crypto_ahash_alg(alg)->setkey != NULL;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
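The new helper dispatches on cra_type and downcasts the common base struct to the full algorithm type before inspecting its ->setkey pointer. A userspace sketch of that guarded, container_of()-style downcast; the struct layouts and the ahash_type tag below are simplified stand-ins for the kernel's definitions:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct crypto_alg { const void *cra_type; };

struct ahash_alg {
	int (*setkey)(void);
	struct crypto_alg base;
};

static const int ahash_type;		/* its address acts as a type tag */

static int dummy_setkey(void) { return 0; }

static int alg_has_setkey(struct crypto_alg *alg)
{
	/* downcast only after checking the tag, as the helper does */
	if (alg->cra_type != &ahash_type)
		return 0;	/* the kernel would try the shash case here */
	return container_of(alg, struct ahash_alg, base)->setkey != NULL;
}

int main(void)
{
	struct ahash_alg sha256 = { NULL, { &ahash_type } };
	struct ahash_alg hmac = { dummy_setkey, { &ahash_type } };

	printf("sha256 keyed: %d\n", alg_has_setkey(&sha256.base));
	printf("hmac keyed:   %d\n", alg_has_setkey(&hmac.base));
	return 0;
}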
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 1ac76fdd249d..6c0847ba2889 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -160,6 +160,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
 
 			spawn->alg = NULL;
 			spawns = &inst->alg.cra_users;
+
+			/*
+			 * We may encounter an unregistered instance here, since
+			 * an instance's spawns are set up prior to the instance
+			 * being registered. An unregistered instance will have
+			 * NULL ->cra_users.next, since ->cra_users isn't
+			 * properly initialized until registration. But an
+			 * unregistered instance cannot have any users, so treat
+			 * it the same as ->cra_users being empty.
+			 */
+			if (spawns->next == NULL)
+				break;
 		}
 	} while ((spawns = crypto_more_spawns(alg, &stack, &top,
 					      &secondary_spawns)));
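The ->next == NULL test works because an unregistered instance is freshly zeroed memory, so its cra_users.next is still NULL, while registration runs INIT_LIST_HEAD() and leaves an empty list pointing at itself. A userspace sketch of that distinction, with list_head reduced to the two pointers it has in include/linux/list.h:

#include <stdio.h>
#include <string.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

int main(void)
{
	struct list_head unregistered, registered;

	memset(&unregistered, 0, sizeof(unregistered)); /* as if kzalloc'ed */
	INIT_LIST_HEAD(&registered);			/* as on registration */

	if (unregistered.next == NULL)
		printf("unregistered: cannot have users, break out\n");
	if (registered.next == &registered)
		printf("registered: properly initialized empty list\n");
	return 0;
}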
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 5d355e0c2633..f3c4f0cd62dd 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
 	dma_addr_t dma_dest[2];
 	int src_off = 0;
 
-	if (submit->flags & ASYNC_TX_FENCE)
-		dma_flags |= DMA_PREP_FENCE;
-
 	while (src_cnt > 0) {
 		submit->flags = flags_orig;
 		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
 			if (cb_fn_orig)
 				dma_flags |= DMA_PREP_INTERRUPT;
 		}
+		if (submit->flags & ASYNC_TX_FENCE)
+			dma_flags |= DMA_PREP_FENCE;
 
 		/* Drivers force forward progress in case they can not provide
 		 * a descriptor
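submit->flags is rewritten on every pass through the loop, and intermediate descriptors get ASYNC_TX_FENCE added so the next chunk is ordered behind them. Translating the flag into DMA_PREP_FENCE once, before the loop, therefore consulted a stale value and missed those intermediate fences; the fix re-evaluates it per descriptor. A sketch of the pattern, using illustrative flag values rather than the kernel's constants:

#include <stdio.h>

#define ASYNC_TX_FENCE	(1u << 0)	/* illustrative values, not */
#define DMA_PREP_FENCE	(1u << 1)	/* the kernel's constants   */

int main(void)
{
	unsigned int flags_orig = 0;	/* caller did not ask for a fence */
	int chunks = 3;

	for (int i = 0; i < chunks; i++) {
		unsigned int submit_flags = flags_orig;
		unsigned int dma_flags = 0;

		if (i < chunks - 1)	/* intermediate chunk: order the */
			submit_flags |= ASYNC_TX_FENCE;	/* next one after it */

		/* the fix: consult the current flags, per descriptor */
		if (submit_flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		printf("descriptor %d fenced: %d\n",
		       i, !!(dma_flags & DMA_PREP_FENCE));
	}
	return 0;
}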
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 66c9e8262572..4e76f1db3e68 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -645,7 +645,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 	inst->alg.finup = cryptd_hash_finup_enqueue;
 	inst->alg.export = cryptd_hash_export;
 	inst->alg.import = cryptd_hash_import;
-	inst->alg.setkey = cryptd_hash_setkey;
+	if (crypto_shash_alg_has_setkey(salg))
+		inst->alg.setkey = cryptd_hash_setkey;
 	inst->alg.digest = cryptd_hash_digest_enqueue;
 
 	err = ahash_register_instance(tmpl, inst);
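cryptd now installs a setkey only when the underlying shash actually has one, so an unkeyed hash stays unkeyed through the cryptd wrapper rather than gaining a setkey that the base algorithm cannot use. A userspace analog of that pass-through; the wrapper and forwarding function below are hypothetical, not the kernel API:

#include <stdio.h>
#include <stddef.h>

struct hash_alg {
	const char *name;
	int (*setkey)(const void *key, size_t keylen);
};

static int forward_setkey(const void *key, size_t keylen)
{
	(void)key; (void)keylen;
	return 0;	/* would hand the key to the inner algorithm */
}

/* build a cryptd()-style wrapper instance around an inner algorithm */
static void wrap(struct hash_alg *inst, const struct hash_alg *inner)
{
	inst->name = inner->name;
	/* the fix: expose setkey only if the inner algorithm has one */
	inst->setkey = inner->setkey ? forward_setkey : NULL;
}

int main(void)
{
	const struct hash_alg sha256 = { "sha256", NULL };
	const struct hash_alg hmac = { "hmac(sha256)", forward_setkey };
	struct hash_alg w1, w2;

	wrap(&w1, &sha256);
	wrap(&w2, &hmac);
	printf("%-12s wrapper keyed: %d\n", w1.name, w1.setkey != NULL);
	printf("%-12s wrapper keyed: %d\n", w2.name, w2.setkey != NULL);
	return 0;
}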
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index cfb68a889ef6..cbe0135839df 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -80,6 +80,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
@@ -103,15 +104,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
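The enqueue path now serializes against the worker with the per-queue q_lock instead of relying on get_cpu()/put_cpu() to keep the two apart, and smp_processor_id() survives only to aim queue_work_on() at the CPU that owns the queue. A userspace sketch of the producer side, with a pthread mutex standing in for the spinlock and the crypto queue reduced to a fixed array:

#include <pthread.h>
#include <stdio.h>

struct cpu_queue {
	pthread_mutex_t q_lock;		/* plays the role of q_lock */
	int reqs[16];
	int qlen;
};

static int enqueue_request(struct cpu_queue *q, int req)
{
	int err = 0;

	pthread_mutex_lock(&q->q_lock);	/* was: get_cpu()/preempt tricks */
	if (q->qlen < 16)
		q->reqs[q->qlen++] = req;
	else
		err = -1;		/* queue full */
	pthread_mutex_unlock(&q->q_lock);
	/* kernel: queue_work_on(cpu, ...) kicks that CPU's worker here */
	return err;
}

int main(void)
{
	struct cpu_queue q = { PTHREAD_MUTEX_INITIALIZER, {0}, 0 };

	for (int i = 0; i < 4; i++)
		enqueue_request(&q, i);
	printf("queued %d requests\n", q.qlen);
	return 0;
}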
@@ -164,16 +166,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -188,7 +185,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
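The worker drains the same queue under the same lock, holding it only across the dequeue so the request itself runs unlocked, and when a batch ends with work left over, queue_work_on(smp_processor_id(), ...) requeues the worker on the CPU that owns the queue instead of letting it land elsewhere. The matching consumer-side sketch (same toy queue as above; dequeue order is simplified):

#include <pthread.h>
#include <stdio.h>

#define BATCH 4				/* stands in for MCRYPTD_BATCH */

struct cpu_queue {
	pthread_mutex_t q_lock;
	int reqs[16];
	int qlen;
};

static void queue_worker(struct cpu_queue *q)
{
	int i = 0;

	while (i < BATCH) {
		int req = -1;

		pthread_mutex_lock(&q->q_lock);		/* spin_lock_bh() */
		if (q->qlen)
			req = q->reqs[--q->qlen];
		pthread_mutex_unlock(&q->q_lock);	/* spin_unlock_bh() */

		if (req < 0)
			return;				/* queue empty */
		printf("processing request %d unlocked\n", req);
		++i;
	}
	/* kernel: if requests remain, requeue on this same CPU */
}

int main(void)
{
	struct cpu_queue q = { PTHREAD_MUTEX_INITIALIZER,
			       { 10, 11, 12, 13, 14, 15 }, 6 };

	queue_worker(&q);
	printf("%d requests left for the next batch\n", q.qlen);
	return 0;
}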