author    Pavel Begunkov <asml.silence@gmail.com>  2019-10-27 23:15:41 +0300
committer Jens Axboe <axboe@kernel.dk>             2019-10-29 10:24:23 -0600
commit    95a1b3ff9a3e4ea2f26c4e802067d58831f415db (patch)
tree      6aa8d6d48b9faf66f33487baeeb259c4c7ea2d35
parent    fa4562280889ad372dfb1413833a8b8675721b17 (diff)
io_uring: Fix mm_fault with READ/WRITE_FIXED
Commit fb5ccc98782f ("io_uring: Fix broken links with offloading")
introduced a potential performance regression by unconditionally taking
mm even for READ/WRITE_FIXED operations. Bring back the logic that
handles this: mm-faulted requests will go through the generic submission
path, thus honoring links and drains, but will fail further on the
req->has_user check.

Fixes: fb5ccc98782f ("io_uring: Fix broken links with offloading")
Cc: stable@vger.kernel.org # v5.4
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
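For context on the failure path the message refers to: a request submitted
while no mm could be attached keeps s->has_user == false, and any opcode that
must touch user memory then completes with -EFAULT instead of being dropped
from the link/drain bookkeeping. A minimal sketch of that gate, assuming a
simplified helper (io_copy_user_sketch() and its arguments are illustrative,
not the exact v5.4 code):

/*
 * Illustrative sketch only -- not verbatim fs/io_uring.c.  If the
 * submission path failed to grab ctx->sqo_mm, s->has_user stays false
 * and the user pointer must not be dereferenced, so the request
 * completes with -EFAULT while still passing through the normal
 * link/drain accounting in io_submit_sqe().
 */
static int io_copy_user_sketch(const struct sqe_submit *s,
			       void __user *ubuf, void *dst, size_t len)
{
	if (!s->has_user)
		return -EFAULT;		/* no mm attached to this thread */

	if (copy_from_user(dst, ubuf, len))
		return -EFAULT;		/* ordinary fault on a bad pointer */

	return 0;
}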
-rw-r--r--   fs/io_uring.c   41
1 file changed, 17 insertions, 24 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 545efd89a1f9..f9eff8f62ddb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2726,13 +2726,14 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
-			  bool has_user, bool mm_fault)
+			  struct mm_struct **mm)
 {
 	struct io_submit_state state, *statep = NULL;
 	struct io_kiocb *link = NULL;
 	struct io_kiocb *shadow_req = NULL;
 	bool prev_was_link = false;
 	int i, submitted = 0;
+	bool mm_fault = false;
 
 	if (nr > IO_PLUG_THRESHOLD) {
 		io_submit_state_start(&state, ctx, nr);
@@ -2745,6 +2746,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		if (!io_get_sqring(ctx, &s))
 			break;
 
+		if (io_sqe_needs_user(s.sqe) && !*mm) {
+			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
+			if (!mm_fault) {
+				use_mm(ctx->sqo_mm);
+				*mm = ctx->sqo_mm;
+			}
+		}
+
 		/*
 		 * If previous wasn't linked and we have a linked command,
 		 * that's the end of the chain. Submit the previous link.
@@ -2768,17 +2777,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		}
 
 out:
-		if (unlikely(mm_fault)) {
-			io_cqring_add_event(ctx, s.sqe->user_data,
-						-EFAULT);
-		} else {
-			s.has_user = has_user;
-			s.in_async = true;
-			s.needs_fixed_file = true;
-			trace_io_uring_submit_sqe(ctx, true, true);
-			io_submit_sqe(ctx, &s, statep, &link);
-			submitted++;
-		}
+		s.has_user = *mm != NULL;
+		s.in_async = true;
+		s.needs_fixed_file = true;
+		trace_io_uring_submit_sqe(ctx, true, true);
+		io_submit_sqe(ctx, &s, statep, &link);
+		submitted++;
 	}
 
 	if (link)
@@ -2805,7 +2809,6 @@ static int io_sq_thread(void *data)
 
 	timeout = inflight = 0;
 	while (!kthread_should_park()) {
-		bool mm_fault = false;
 		unsigned int to_submit;
 
 		if (inflight) {
@@ -2890,18 +2893,8 @@ static int io_sq_thread(void *data)
 			ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
 		}
 
-		/* Unless all new commands are FIXED regions, grab mm */
-		if (!cur_mm) {
-			mm_fault = !mmget_not_zero(ctx->sqo_mm);
-			if (!mm_fault) {
-				use_mm(ctx->sqo_mm);
-				cur_mm = ctx->sqo_mm;
-			}
-		}
-
 		to_submit = min(to_submit, ctx->sq_entries);
-		inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
-						mm_fault);
+		inflight += io_submit_sqes(ctx, to_submit, &cur_mm);
 
 		/* Commit SQ ring head once we've consumed all SQEs */
 		io_commit_sqring(ctx);
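On the caller side, cur_mm is now owned by the SQPOLL thread and handed down
by reference, so io_submit_sqes() attaches ctx->sqo_mm lazily and the thread
releases it once on exit. A simplified sketch of that lifecycle, assuming the
surrounding loop structure and the io_sqring_entries() helper (both are
assumptions here, not the verbatim v5.4 io_sq_thread()):

/*
 * Simplified, illustrative sketch of the SQPOLL thread after this
 * patch -- not the exact kernel code.  The thread no longer grabs
 * ctx->sqo_mm up front; io_submit_sqes() attaches it on demand via
 * &cur_mm, and the thread drops it once when it stops.
 */
static int io_sq_thread_sketch(void *data)
{
	struct io_ring_ctx *ctx = data;
	struct mm_struct *cur_mm = NULL;
	unsigned int to_submit;

	while (!kthread_should_park()) {
		to_submit = io_sqring_entries(ctx);	/* assumed helper */
		if (!to_submit)
			continue;	/* real code idles or waits here */

		to_submit = min(to_submit, ctx->sq_entries);
		io_submit_sqes(ctx, to_submit, &cur_mm);

		/* Commit SQ ring head once we've consumed all SQEs */
		io_commit_sqring(ctx);
	}

	if (cur_mm) {
		/* drop the mm only if some SQE made us attach it */
		unuse_mm(cur_mm);
		mmput(cur_mm);
	}
	return 0;
}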