Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  65
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a46de2cfc28e..8276c3c42894 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -479,6 +479,7 @@ enum {
REQ_F_COMP_LOCKED_BIT,
REQ_F_NEED_CLEANUP_BIT,
REQ_F_OVERFLOW_BIT,
+ REQ_F_NO_FILE_TABLE_BIT,
};
enum {
@@ -521,6 +522,8 @@ enum {
REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
/* in overflow list */
REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT),
+ /* doesn't need file table for this request */
+ REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
};
/*
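The new flag follows the file's two-enum pattern: the first enum names bit indices, the second turns each index into a mask via BIT(). A minimal userspace sketch of that pattern, with BIT() defined locally and the flag set trimmed down to three entries:

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* bit indices */
enum { REQ_F_NEED_CLEANUP_BIT, REQ_F_OVERFLOW_BIT, REQ_F_NO_FILE_TABLE_BIT };

/* masks derived from the indices */
enum {
    REQ_F_NEED_CLEANUP  = BIT(REQ_F_NEED_CLEANUP_BIT),
    REQ_F_OVERFLOW      = BIT(REQ_F_OVERFLOW_BIT),
    REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
};

int main(void)
{
    unsigned long flags = 0;

    flags |= REQ_F_NO_FILE_TABLE;                     /* io_statx() sets it */
    printf("%d\n", !!(flags & REQ_F_NO_FILE_TABLE));  /* io_grab_files() tests it */
    return 0;
}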
@@ -693,8 +696,6 @@ static const struct io_op_def io_op_defs[] = {
.needs_file = 1,
},
[IORING_OP_OPENAT] = {
- .needs_file = 1,
- .fd_non_neg = 1,
.file_table = 1,
.needs_fs = 1,
},
@@ -708,9 +709,8 @@ static const struct io_op_def io_op_defs[] = {
},
[IORING_OP_STATX] = {
.needs_mm = 1,
- .needs_file = 1,
- .fd_non_neg = 1,
.needs_fs = 1,
+ .file_table = 1,
},
[IORING_OP_READ] = {
.needs_mm = 1,
@@ -739,8 +739,6 @@ static const struct io_op_def io_op_defs[] = {
.unbound_nonreg_file = 1,
},
[IORING_OP_OPENAT2] = {
- .needs_file = 1,
- .fd_non_neg = 1,
.file_table = 1,
.needs_fs = 1,
},
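openat, openat2, and statx resolve a path instead of operating on an already-registered file, so they drop needs_file/fd_non_neg and instead take file_table (access to current->files) plus needs_fs (for path resolution). A hedged sketch of how a per-opcode definition table like this is laid out and consulted; the field names mirror the diff, everything else is illustrative:

#include <stdio.h>

struct op_def {
    unsigned needs_file : 1;   /* operates on a resolved file */
    unsigned file_table : 1;   /* needs current->files, e.g. to install an fd */
    unsigned needs_fs   : 1;   /* needs current->fs to walk a path */
};

enum { OP_READV, OP_OPENAT, OP_STATX, OP_NR };

static const struct op_def op_defs[OP_NR] = {
    [OP_READV]  = { .needs_file = 1 },
    /* path-based ops: no pre-resolved file, matching the change above */
    [OP_OPENAT] = { .file_table = 1, .needs_fs = 1 },
    [OP_STATX]  = { .file_table = 1, .needs_fs = 1 },
};

int main(void)
{
    const struct op_def *def = &op_defs[OP_OPENAT];

    printf("openat: needs_file=%d file_table=%d needs_fs=%d\n",
           def->needs_file, def->file_table, def->needs_fs);
    return 0;
}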
@@ -824,6 +822,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
goto err;
ctx->flags = p->flags;
+ init_waitqueue_head(&ctx->sqo_wait);
init_waitqueue_head(&ctx->cq_wait);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
init_completion(&ctx->completions[0]);
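Initializing sqo_wait in io_ring_ctx_alloc() rather than in io_sq_offload_start() (the matching removal is further down) guarantees the waitqueue is valid for the whole lifetime of the ctx, so teardown paths such as the wq_has_sleeper() loop in io_ring_ctx_wait_and_kill() can touch it even when SQPOLL setup failed or never ran. A userspace sketch of that init-in-the-allocator shape, with a pthread condvar standing in for the kernel waitqueue; all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ring_ctx {
    pthread_cond_t  sqo_wait;   /* stand-in for ctx->sqo_wait */
    pthread_mutex_t lock;
};

static struct ring_ctx *ctx_alloc(void)
{
    struct ring_ctx *ctx = calloc(1, sizeof(*ctx));

    if (!ctx)
        return NULL;
    /* previously done in the (optional) SQPOLL setup path; doing it
     * here means every later user sees an initialized waitqueue */
    pthread_cond_init(&ctx->sqo_wait, NULL);
    pthread_mutex_init(&ctx->lock, NULL);
    return ctx;
}

int main(void)
{
    struct ring_ctx *ctx = ctx_alloc();

    if (!ctx)
        return 1;
    /* teardown may poke the waitqueue unconditionally */
    pthread_cond_broadcast(&ctx->sqo_wait);
    pthread_cond_destroy(&ctx->sqo_wait);
    pthread_mutex_destroy(&ctx->lock);
    free(ctx);
    puts("ok");
    return 0;
}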
@@ -2843,8 +2842,12 @@ static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
struct kstat stat;
int ret;
- if (force_nonblock)
+ if (force_nonblock) {
+ /* only need file table for an actual valid fd */
+ if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
+ req->flags |= REQ_F_NO_FILE_TABLE;
return -EAGAIN;
+ }
if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
return -EINVAL;
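When dfd is AT_FDCWD (or -1, where the lookup fails regardless), the statx is a pure path walk that never consults the submitter's file table, so the punted request can skip io_grab_files() via the new flag. For comparison, the same AT_FDCWD semantics as seen from userspace with statx(2) (glibc 2.28 or later):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/stat.h>
#include <stdio.h>

int main(void)
{
    struct statx stx;

    /* AT_FDCWD: resolve "." relative to the cwd, no dirfd needed */
    if (statx(AT_FDCWD, ".", 0, STATX_MODE, &stx) != 0) {
        perror("statx");
        return 1;
    }
    printf("mode=%o\n", stx.stx_mode);
    return 0;
}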
@@ -4256,15 +4259,16 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int ret;
/* Still need defer if there is pending req in defer list. */
- if (!req_need_defer(req) && list_empty(&ctx->defer_list))
+ if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
return 0;
- if (!req->io && io_alloc_async_ctx(req))
- return -EAGAIN;
-
- ret = io_req_defer_prep(req, sqe);
- if (ret < 0)
- return ret;
+ if (!req->io) {
+ if (io_alloc_async_ctx(req))
+ return -EAGAIN;
+ ret = io_req_defer_prep(req, sqe);
+ if (ret < 0)
+ return ret;
+ }
spin_lock_irq(&ctx->completion_lock);
if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
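The restructure makes io_req_defer() allocate the async context and run io_req_defer_prep() only when req->io is not already set, since preparing a request twice is not safe (the same guard is added to the IOSQE_ASYNC submit path further down). A generic, runnable sketch of the prep-once shape; types and error values are illustrative stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct request {
    void *io;        /* async context, NULL until first allocated */
    int   preps;     /* counts prep calls, to show it runs once */
};

static int defer_prep(struct request *req)
{
    req->preps++;    /* a second call here would redo non-idempotent work */
    return 0;
}

static int req_defer(struct request *req)
{
    if (!req->io) {                 /* only prep on first allocation */
        req->io = malloc(64);
        if (!req->io)
            return -1;              /* stands in for -EAGAIN */
        if (defer_prep(req) < 0)
            return -2;
    }
    return 0;
}

int main(void)
{
    struct request req = { 0 };

    req_defer(&req);
    req_defer(&req);                /* second call: no re-prep */
    printf("prepped %d time(s)\n", req.preps);   /* 1 */
    free(req.io);
    return 0;
}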
@@ -4632,7 +4636,7 @@ static int io_grab_files(struct io_kiocb *req)
int ret = -EBADF;
struct io_ring_ctx *ctx = req->ctx;
- if (req->work.files)
+ if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
return 0;
if (!ctx->ring_file)
return -EBADF;
@@ -4819,9 +4823,15 @@ fail_req:
io_double_put_req(req);
}
} else if (req->flags & REQ_F_FORCE_ASYNC) {
- ret = io_req_defer_prep(req, sqe);
- if (unlikely(ret < 0))
- goto fail_req;
+ if (!req->io) {
+ ret = -EAGAIN;
+ if (io_alloc_async_ctx(req))
+ goto fail_req;
+ ret = io_req_defer_prep(req, sqe);
+ if (unlikely(ret < 0))
+ goto fail_req;
+ }
+
/*
* Never try inline submit if IOSQE_ASYNC is set, go straight
* to async execution.
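This branch handles SQEs submitted with IOSQE_ASYNC, which now gets the same allocate-then-prep-once guard as io_req_defer() above before the request is punted. A minimal liburing program that exercises the flag, assuming liburing is installed (link with -luring):

#include <liburing.h>
#include <stdio.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;

    if (io_uring_queue_init(8, &ring, 0) < 0)
        return 1;

    sqe = io_uring_get_sqe(&ring);
    if (!sqe)
        return 1;
    io_uring_prep_nop(sqe);
    sqe->flags |= IOSQE_ASYNC;          /* force punt to async context */
    io_uring_submit(&ring);

    if (io_uring_wait_cqe(&ring, &cqe) == 0) {
        printf("nop res=%d\n", cqe->res);
        io_uring_cqe_seen(&ring, cqe);
    }
    io_uring_queue_exit(&ring);
    return 0;
}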
@@ -5214,6 +5224,7 @@ static int io_sq_thread(void *data)
finish_wait(&ctx->sqo_wait, &wait);
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+ ret = 0;
continue;
}
finish_wait(&ctx->sqo_wait, &wait);
@@ -6002,7 +6013,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
{
int ret;
- init_waitqueue_head(&ctx->sqo_wait);
mmgrab(current->mm);
ctx->sqo_mm = current->mm;
@@ -6244,8 +6254,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
ret = 0;
if (!pages || nr_pages > got_pages) {
- kfree(vmas);
- kfree(pages);
+ kvfree(vmas);
+ kvfree(pages);
pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL);
vmas = kvmalloc_array(nr_pages,
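pages and vmas come from kvmalloc_array(), which may fall back to vmalloc() for larger allocations; freeing vmalloc memory with plain kfree() is invalid, so the matching release routine is kvfree(), which checks the address and picks vfree() or kfree(). A userspace toy that only makes the allocator/free pairing visible; the tagging is purely illustrative:

#include <stdio.h>
#include <stdlib.h>

struct kvbuf {
    void *p;
    int   vmalloc_backed;   /* which allocator "owns" this memory */
};

static struct kvbuf kvmalloc_array_sim(size_t n, size_t size)
{
    /* pretend large requests fell back to vmalloc, as the kernel may */
    struct kvbuf b = { calloc(n, size), n * size > 4096 };
    return b;
}

static void kvfree_sim(struct kvbuf b)
{
    /* the real kvfree() inspects the address; kfree() would not */
    printf("freeing via %s\n", b.vmalloc_backed ? "vfree" : "kfree");
    free(b.p);
}

int main(void)
{
    struct kvbuf vmas = kvmalloc_array_sim(2048, sizeof(void *));

    kvfree_sim(vmas);   /* kfree() here would corrupt vmalloc memory */
    return 0;
}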
@@ -6449,7 +6459,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
* it could cause shutdown to hang.
*/
while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
- cpu_relax();
+ cond_resched();
io_kill_timeouts(ctx);
io_poll_remove_all(ctx);
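The shutdown loop waits for the sqo thread to reach its sleep on sqo_wait; spinning with cpu_relax() never schedules, which can keep that thread from running at all on a single CPU, while cond_resched() yields the processor. A userspace analog of the yielding wait, with sched_yield() in place of cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int sleeping;

static void *sqo_thread(void *arg)
{
    (void)arg;
    atomic_store(&sleeping, 1);   /* stands in for sleeping on sqo_wait */
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, sqo_thread, NULL);
    while (!atomic_load(&sleeping))
        sched_yield();            /* a raw spin here could starve the thread */
    pthread_join(t, NULL);
    puts("sqo thread reached its wait point");
    return 0;
}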
@@ -6478,11 +6488,9 @@ static int io_uring_release(struct inode *inode, struct file *file)
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
- struct io_kiocb *req;
- DEFINE_WAIT(wait);
-
while (!list_empty_careful(&ctx->inflight_list)) {
- struct io_kiocb *cancel_req = NULL;
+ struct io_kiocb *cancel_req = NULL, *req;
+ DEFINE_WAIT(wait);
spin_lock_irq(&ctx->inflight_lock);
list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
@@ -6521,7 +6529,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
* all we had, then we're done with this request.
*/
if (refcount_sub_and_test(2, &cancel_req->refs)) {
- io_put_req(cancel_req);
+ io_free_req(cancel_req);
+ finish_wait(&ctx->inflight_wait, &wait);
continue;
}
}
@@ -6529,8 +6538,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
io_put_req(cancel_req);
schedule();
+ finish_wait(&ctx->inflight_wait, &wait);
}
- finish_wait(&ctx->inflight_wait, &wait);
}
static int io_uring_flush(struct file *file, void *data)
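The cancel loop above now declares its wait entry per iteration and balances every prepare_to_wait() on inflight_wait with a finish_wait() on each way out of the body, including the early continue once the request is freed. A toy sketch that only makes that pairing rule visible; nothing here is kernel API:

#include <assert.h>
#include <stdio.h>

static int prepared;   /* outstanding wait entries; must end at 0 */

static void prepare_to_wait(void) { prepared++; }
static void finish_wait(void)     { prepared--; }

static void cancel_files(int inflight)
{
    while (inflight--) {
        prepare_to_wait();
        if (inflight % 2) {   /* request already gone: early path */
            finish_wait();    /* the fix adds this balancing call */
            continue;
        }
        /* schedule() would block here until a completion wakes us */
        finish_wait();        /* ...and this one after waking */
    }
    assert(prepared == 0);    /* no dangling wait entry on return */
}

int main(void)
{
    cancel_files(4);
    puts("balanced");
    return 0;
}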