Diffstat (limited to 'crypto')
-rw-r--r--  crypto/algapi.c     |  4
-rw-r--r--  crypto/api.c        |  6
-rw-r--r--  crypto/cryptd.c     | 19
-rw-r--r--  crypto/internal.h   |  4
-rw-r--r--  crypto/scompress.c  |  6
5 files changed, 20 insertions, 19 deletions
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 603d2d637209..10e92ba5f7a4 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -723,13 +723,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
int crypto_register_notifier(struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&crypto_chain, nb);
+ return srcu_notifier_chain_register(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_register_notifier);
int crypto_unregister_notifier(struct notifier_block *nb)
{
- return blocking_notifier_chain_unregister(&crypto_chain, nb);
+ return srcu_notifier_chain_unregister(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
diff --git a/crypto/api.c b/crypto/api.c
index 99bd438fa4a4..c502344021a0 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);
-BLOCKING_NOTIFIER_HEAD(crypto_chain);
+SRCU_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
@@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v)
{
int ok;
- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
+ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
if (ok == NOTIFY_DONE) {
request_module("cryptomgr");
- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
+ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
}
return ok;
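
Note: the hunks above (and the crypto/internal.h hunks further down) convert the crypto notifier chain from a blocking notifier, whose call-chain path takes an rw_semaphore, to an SRCU notifier, whose call-chain path runs inside an SRCU read-side critical section; callbacks may still sleep in both variants. The following is a minimal sketch of the srcu_notifier_* API the conversion relies on, not code from this patch; demo_chain, demo_event and demo_nb are hypothetical names, only the notifier calls themselves are real kernel APIs from <linux/notifier.h>.

/*
 * Minimal sketch of the SRCU notifier chain API (hypothetical names).
 */
#include <linux/notifier.h>

SRCU_NOTIFIER_HEAD(demo_chain);

static int demo_event(struct notifier_block *nb, unsigned long val, void *v)
{
	/* Runs in an SRCU read-side section; sleeping is still allowed. */
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_event,
};

static void demo(void)
{
	srcu_notifier_chain_register(&demo_chain, &demo_nb);
	srcu_notifier_call_chain(&demo_chain, 0, NULL);
	srcu_notifier_chain_unregister(&demo_chain, &demo_nb);
}
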
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 4cc1871646a8..948ae4287486 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -37,6 +37,7 @@
struct cryptd_cpu_queue {
struct crypto_queue queue;
struct work_struct work;
+ spinlock_t qlock;
};
struct cryptd_queue {
@@ -115,6 +116,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+ spin_lock_init(&cpu_queue->qlock);
}
return 0;
}
@@ -139,8 +141,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
atomic_t *refcnt;
bool may_backlog;
- cpu = get_cpu();
- cpu_queue = this_cpu_ptr(queue->cpu_queue);
+ cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+ spin_lock_bh(&cpu_queue->qlock);
+ cpu = smp_processor_id();
+
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
@@ -157,7 +161,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
atomic_inc(refcnt);
out_put_cpu:
- put_cpu();
+ spin_unlock_bh(&cpu_queue->qlock);
return err;
}
@@ -173,16 +177,11 @@ static void cryptd_queue_worker(struct work_struct *work)
cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
/*
* Only handle one request at a time to avoid hogging crypto workqueue.
- * preempt_disable/enable is used to prevent being preempted by
- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
- * cryptd_enqueue_request() being accessed from software interrupts.
*/
- local_bh_disable();
- preempt_disable();
+ spin_lock_bh(&cpu_queue->qlock);
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
- preempt_enable();
- local_bh_enable();
+ spin_unlock_bh(&cpu_queue->qlock);
if (!req)
return;
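
Note: the cryptd hunks above replace the get_cpu()/put_cpu() and local_bh_disable()/preempt_disable() sections with a per-CPU spinlock (qlock) taken via spin_lock_bh(), so the enqueue path and the worker serialize on the lock rather than on disabled preemption; raw_cpu_ptr() is sufficient because exclusion now comes from the lock, not from staying pinned to the CPU. Below is a sketch of that pattern with hypothetical names (demo_cpu_queue, demo_queues, ...), not code from this patch.

/*
 * Sketch of the per-CPU queue + spinlock pattern (hypothetical names).
 */
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct demo_cpu_queue {
	unsigned int	pending;	/* stand-in for struct crypto_queue */
	spinlock_t	qlock;
};

static DEFINE_PER_CPU(struct demo_cpu_queue, demo_queues);

static int __init demo_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu_ptr(&demo_queues, cpu)->qlock);
	return 0;
}

static void demo_enqueue(void)
{
	/*
	 * Any CPU's queue will do: the lock, not disabled preemption,
	 * provides the exclusion, so raw_cpu_ptr() is enough.
	 */
	struct demo_cpu_queue *q = raw_cpu_ptr(&demo_queues);

	spin_lock_bh(&q->qlock);
	q->pending++;
	spin_unlock_bh(&q->qlock);
}
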
diff --git a/crypto/internal.h b/crypto/internal.h
index 6262ec0435b4..e480c949b48f 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -47,7 +47,7 @@ struct crypto_larval {
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
-extern struct blocking_notifier_head crypto_chain;
+extern struct srcu_notifier_head crypto_chain;
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
@@ -142,7 +142,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
static inline void crypto_notify(unsigned long val, void *v)
{
- blocking_notifier_call_chain(&crypto_chain, val, v);
+ srcu_notifier_call_chain(&crypto_chain, val, v);
}
#endif /* _CRYPTO_INTERNAL_H */
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 2075e2c4e7df..c6b4e265c6bf 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -24,6 +24,7 @@
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
+#include <linux/locallock.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
@@ -34,6 +35,7 @@ static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
+static DEFINE_LOCAL_IRQ_LOCK(scomp_scratches_lock);
#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
@@ -193,7 +195,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
void **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req);
- const int cpu = get_cpu();
+ const int cpu = local_lock_cpu(scomp_scratches_lock);
u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
int ret;
@@ -228,7 +230,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
1);
}
out:
- put_cpu();
+ local_unlock_cpu(scomp_scratches_lock);
return ret;
}
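
Note: the scompress hunks switch the per-CPU scratch buffers from get_cpu()/put_cpu() protection to a local lock. <linux/locallock.h> and the DEFINE_LOCAL_IRQ_LOCK()/local_lock_cpu()/local_unlock_cpu() helpers are only available with the PREEMPT_RT patch set applied (mainline does not carry this header); on an RT kernel local_lock_cpu() acquires a per-CPU sleeping lock and returns the CPU id, while a non-RT build of the same tree falls back to get_cpu()/put_cpu(). The sketch below shows the pattern under that assumption; all other names are hypothetical.

/*
 * Sketch of the locallock pattern (PREEMPT_RT only, hypothetical names).
 */
#include <linux/locallock.h>
#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u8 *, demo_scratch);
static DEFINE_LOCAL_IRQ_LOCK(demo_scratch_lock);

static void demo_use_scratch(void)
{
	/*
	 * On RT this takes a per-CPU sleeping lock and reports the CPU;
	 * without RT it degrades to plain get_cpu().
	 */
	int cpu = local_lock_cpu(demo_scratch_lock);
	u8 *buf = per_cpu(demo_scratch, cpu);

	if (buf) {
		/* The buffer is exclusively ours until the unlock below. */
	}

	local_unlock_cpu(demo_scratch_lock);
}
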