Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io-wq.c      19
-rw-r--r--  io_uring/io-wq.h       3
-rw-r--r--  io_uring/io_uring.c   37
-rw-r--r--  io_uring/io_uring.h    1
-rw-r--r--  io_uring/sqpoll.c     17
-rw-r--r--  io_uring/sqpoll.h      1
6 files changed, 61 insertions, 17 deletions
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 399e9a15c38d..2c03bc881edf 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -174,6 +174,16 @@ static void io_worker_ref_put(struct io_wq *wq)
complete(&wq->worker_done);
}
+bool io_wq_worker_stopped(void)
+{
+ struct io_worker *worker = current->worker_private;
+
+ if (WARN_ON_ONCE(!io_wq_current_is_worker()))
+ return true;
+
+ return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
+}
+
static void io_worker_cancel_cb(struct io_worker *worker)
{
struct io_wq_acct *acct = io_wq_get_acct(worker);
@@ -1285,13 +1295,16 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
return __io_wq_cpu_online(wq, cpu, false);
}
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
{
+ if (!tctx || !tctx->io_wq)
+ return -EINVAL;
+
rcu_read_lock();
if (mask)
- cpumask_copy(wq->cpu_mask, mask);
+ cpumask_copy(tctx->io_wq->cpu_mask, mask);
else
- cpumask_copy(wq->cpu_mask, cpu_possible_mask);
+ cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
rcu_read_unlock();
return 0;
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index 31228426d192..2b2a6406dd8e 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -50,8 +50,9 @@ void io_wq_put_and_exit(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
+bool io_wq_worker_stopped(void);
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a57bdf336ca8..d3b36197087a 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -231,7 +231,6 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res)
static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
- kasan_poison_object_data(req_cachep, req);
}
static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
@@ -1690,6 +1689,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
break;
nr_events += ret;
ret = 0;
+
+ if (task_sigpending(current))
+ return -EINTR;
} while (nr_events < min && !need_resched());
return ret;
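
The io_iopoll_check() hunk above makes the IOPOLL reap loop return -EINTR when a
signal is pending instead of spinning until the requested number of events has
arrived. A minimal userspace sketch of how a completion loop might tolerate that,
assuming a ring created with IORING_SETUP_IOPOLL and liburing's io_uring_wait_cqe();
the helper name below is illustrative and not part of this patch:

#include <errno.h>
#include <liburing.h>

/*
 * Reap one completion, retrying if the kernel-side poll loop was
 * interrupted by a pending signal (surfaced as -EINTR by this change).
 */
static int wait_one_cqe(struct io_uring *ring, struct io_uring_cqe **cqe)
{
        int ret;

        do {
                ret = io_uring_wait_cqe(ring, cqe);
        } while (ret == -EINTR);

        return ret;
}
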
@@ -2048,6 +2050,8 @@ fail:
if (!needs_poll) {
if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
break;
+ if (io_wq_worker_stopped())
+ break;
cond_resched();
continue;
}
@@ -2468,7 +2472,9 @@ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
}
/* drop invalid entries */
+ spin_lock(&ctx->completion_lock);
ctx->cq_extra--;
+ spin_unlock(&ctx->completion_lock);
WRITE_ONCE(ctx->rings->sq_dropped,
READ_ONCE(ctx->rings->sq_dropped) + 1);
return false;
@@ -4173,16 +4179,28 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
return 0;
}
+static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
+ cpumask_var_t new_mask)
+{
+ int ret;
+
+ if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+ ret = io_wq_cpu_affinity(current->io_uring, new_mask);
+ } else {
+ mutex_unlock(&ctx->uring_lock);
+ ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
+ mutex_lock(&ctx->uring_lock);
+ }
+
+ return ret;
+}
+
static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
void __user *arg, unsigned len)
{
- struct io_uring_task *tctx = current->io_uring;
cpumask_var_t new_mask;
int ret;
- if (!tctx || !tctx->io_wq)
- return -EINVAL;
-
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
return -ENOMEM;
@@ -4203,19 +4221,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
return -EFAULT;
}
- ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
+ ret = __io_register_iowq_aff(ctx, new_mask);
free_cpumask_var(new_mask);
return ret;
}
static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
- struct io_uring_task *tctx = current->io_uring;
-
- if (!tctx || !tctx->io_wq)
- return -EINVAL;
-
- return io_wq_cpu_affinity(tctx->io_wq, NULL);
+ return __io_register_iowq_aff(ctx, NULL);
}
static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
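
The __io_register_iowq_aff() helper above is what IORING_REGISTER_IOWQ_AFF now
funnels through: plain rings still target the invoking task's io-wq, while SQPOLL
rings are redirected to io_sqpoll_wq_cpu_affinity(). A minimal userspace sketch of
exercising the opcode via liburing's io_uring_register_iowq_aff() and
io_uring_unregister_iowq_aff() wrappers; the function and variable names below are
illustrative:

#include <sched.h>
#include <liburing.h>

/*
 * Pin the ring's io-wq workers to CPU 0, then drop the restriction
 * again. Both calls return 0 on success or a negative errno.
 */
static int pin_iowq_to_cpu0(struct io_uring *ring)
{
        cpu_set_t mask;
        int ret;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);

        ret = io_uring_register_iowq_aff(ring, sizeof(mask), &mask);
        if (ret)
                return ret;

        /* ... submit work; async punts now run on CPU 0 ... */

        return io_uring_unregister_iowq_aff(ring);
}
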
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 259bf798a390..97cfb3f2f06d 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -361,7 +361,6 @@ static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
struct io_kiocb *req;
req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
- kasan_unpoison_object_data(req_cachep, req);
wq_stack_extract(&ctx->submit_state.free_list);
return req;
}
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 5e329e3cd470..bd6c2c7959a5 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -421,3 +421,20 @@ err:
io_sq_thread_finish(ctx);
return ret;
}
+
+__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
+ cpumask_var_t mask)
+{
+ struct io_sq_data *sqd = ctx->sq_data;
+ int ret = -EINVAL;
+
+ if (sqd) {
+ io_sq_thread_park(sqd);
+ /* Don't set affinity for a dying thread */
+ if (sqd->thread)
+ ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
+ io_sq_thread_unpark(sqd);
+ }
+
+ return ret;
+}
diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h
index e1b8d508d22d..8df37e8c9149 100644
--- a/io_uring/sqpoll.h
+++ b/io_uring/sqpoll.h
@@ -27,3 +27,4 @@ void io_sq_thread_park(struct io_sq_data *sqd);
void io_sq_thread_unpark(struct io_sq_data *sqd);
void io_put_sq_data(struct io_sq_data *sqd);
void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
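
With io_sqpoll_wq_cpu_affinity() added above, the same registration also takes
effect on rings created with IORING_SETUP_SQPOLL: the mask is applied to the SQPOLL
thread's io-wq while that thread is parked, rather than to the submitting task's.
A sketch of that setup, again leaning on liburing helpers; the entry count and idle
timeout below are arbitrary:

#include <string.h>
#include <sched.h>
#include <liburing.h>

/*
 * Create an SQPOLL ring and pin its io-wq workers to CPU 1. With this
 * series the affinity reaches the SQPOLL thread's workqueue.
 */
static int setup_sqpoll_pinned(struct io_uring *ring)
{
        struct io_uring_params p;
        cpu_set_t mask;
        int ret;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_SQPOLL;
        p.sq_thread_idle = 1000;        /* ms before the SQ thread idles */

        ret = io_uring_queue_init_params(8, ring, &p);
        if (ret)
                return ret;

        CPU_ZERO(&mask);
        CPU_SET(1, &mask);
        return io_uring_register_iowq_aff(ring, sizeof(mask), &mask);
}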