Commit 4f52875

Merge tag 'io_uring-6.5-2023-07-03' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

 "The fix for the msghdr->msg_inq assigned value being wrong, using -1
  instead of -1U for the signed type.

  Also a fix for ensuring when we're trying to run task_work on an
  exiting task, that we wait for it. This is not really a correctness
  thing as the work is being canceled, but it does help with ensuring
  file descriptors are closed when the task has exited."

* tag 'io_uring-6.5-2023-07-03' of git://git.kernel.dk/linux:
  io_uring: flush offloaded and delayed task_work on exit
  io_uring: remove io_fallback_tw() forward declaration
  io_uring/net: use proper value for msg_inq
2 parents 69c9f23 + dfbe556 commit 4f52875

2 files changed (+36, -21 lines)

io_uring/io_uring.c (+32 -17)

@@ -149,7 +149,6 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 static void io_queue_sqe(struct io_kiocb *req);
 static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
-static __cold void io_fallback_tw(struct io_uring_task *tctx);

 struct kmem_cache *req_cachep;

@@ -1238,6 +1237,34 @@ static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
 	return cmpxchg(&head->first, old, new);
 }

+static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+{
+	struct llist_node *node = llist_del_all(&tctx->task_list);
+	struct io_ring_ctx *last_ctx = NULL;
+	struct io_kiocb *req;
+
+	while (node) {
+		req = container_of(node, struct io_kiocb, io_task_work.node);
+		node = node->next;
+		if (sync && last_ctx != req->ctx) {
+			if (last_ctx) {
+				flush_delayed_work(&last_ctx->fallback_work);
+				percpu_ref_put(&last_ctx->refs);
+			}
+			last_ctx = req->ctx;
+			percpu_ref_get(&last_ctx->refs);
+		}
+		if (llist_add(&req->io_task_work.node,
+			      &req->ctx->fallback_llist))
+			schedule_delayed_work(&req->ctx->fallback_work, 1);
+	}
+
+	if (last_ctx) {
+		flush_delayed_work(&last_ctx->fallback_work);
+		percpu_ref_put(&last_ctx->refs);
+	}
+}
+
 void tctx_task_work(struct callback_head *cb)
 {
 	struct io_tw_state ts = {};
@@ -1250,7 +1277,7 @@ void tctx_task_work(struct callback_head *cb)
 	unsigned int count = 0;

 	if (unlikely(current->flags & PF_EXITING)) {
-		io_fallback_tw(tctx);
+		io_fallback_tw(tctx, true);
 		return;
 	}

@@ -1279,20 +1306,6 @@ void tctx_task_work(struct callback_head *cb)
 	trace_io_uring_task_work_run(tctx, count, loops);
 }

-static __cold void io_fallback_tw(struct io_uring_task *tctx)
-{
-	struct llist_node *node = llist_del_all(&tctx->task_list);
-	struct io_kiocb *req;
-
-	while (node) {
-		req = container_of(node, struct io_kiocb, io_task_work.node);
-		node = node->next;
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
-	}
-}
-
 static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1359,7 +1372,7 @@ static void io_req_normal_work_add(struct io_kiocb *req)
 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
 		return;

-	io_fallback_tw(tctx);
+	io_fallback_tw(tctx, false);
 }

 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
@@ -3109,6 +3122,8 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	if (ctx->rings)
 		io_kill_timeouts(ctx, NULL, true);

+	flush_delayed_work(&ctx->fallback_work);
+
 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
 	/*
 	 * Use system_unbound_wq to avoid spawning tons of event kworkers
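The shape of the new io_fallback_tw() above is easier to see outside the kernel: when sync is set, each time the walk crosses into a different ring context the previously visited context's deferred work is flushed before moving on, and the last context is flushed after the loop (the kernel version additionally pins each context with a percpu reference while it waits). Below is a minimal userspace sketch of that pattern only; struct ctx, struct work_item, queue_deferred() and flush_ctx() are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ctx {
	int id;
};

struct work_item {
	struct ctx *ctx;
	struct work_item *next;
};

/* Stand-in for queueing onto a per-ctx fallback workqueue. */
static void queue_deferred(struct work_item *item)
{
	printf("queue deferred work on ctx %d\n", item->ctx->id);
}

/* Stand-in for flush_delayed_work(&ctx->fallback_work). */
static void flush_ctx(struct ctx *ctx)
{
	printf("flush deferred work for ctx %d\n", ctx->id);
}

/*
 * Mirrors the structure of io_fallback_tw(tctx, sync): queue every item,
 * flush the previous context whenever the walk moves to a new one, and
 * flush the final context after the loop.
 */
static void fallback_all(struct work_item *node, bool sync)
{
	struct ctx *last_ctx = NULL;

	while (node) {
		struct work_item *item = node;

		node = node->next;
		if (sync && last_ctx != item->ctx) {
			if (last_ctx)
				flush_ctx(last_ctx);
			last_ctx = item->ctx;
		}
		queue_deferred(item);
	}

	if (last_ctx)
		flush_ctx(last_ctx);
}

int main(void)
{
	struct ctx a = { .id = 1 }, b = { .id = 2 };
	struct work_item w3 = { .ctx = &b, .next = NULL };
	struct work_item w2 = { .ctx = &a, .next = &w3 };
	struct work_item w1 = { .ctx = &a, .next = &w2 };

	/* sync == true: wait for each context's deferred work in turn. */
	fallback_all(&w1, true);
	return 0;
}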

io_uring/net.c (+4 -4)

@@ -631,7 +631,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	unsigned int cflags;

 	cflags = io_put_kbuf(req, issue_flags);
-	if (msg->msg_inq && msg->msg_inq != -1U)
+	if (msg->msg_inq && msg->msg_inq != -1)
 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
@@ -646,7 +646,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		io_recv_prep_retry(req);
 		/* Known not-empty or unknown state, retry */
 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
-		    msg->msg_inq == -1U)
+		    msg->msg_inq == -1)
 			return false;
 		if (issue_flags & IO_URING_F_MULTISHOT)
 			*ret = IOU_ISSUE_SKIP_COMPLETE;
@@ -805,7 +805,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 		flags |= MSG_DONTWAIT;

 	kmsg->msg.msg_get_inq = 1;
-	kmsg->msg.msg_inq = -1U;
+	kmsg->msg.msg_inq = -1;
 	if (req->flags & REQ_F_APOLL_MULTISHOT) {
 		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
 					   &mshot_finished);
@@ -903,7 +903,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(ret))
 		goto out_free;

-	msg.msg_inq = -1U;
+	msg.msg_inq = -1;
 	msg.msg_flags = 0;

 	flags = sr->msg_flags;
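On the application side, msg_inq is what drives IORING_CQE_F_SOCK_NONEMPTY: the field is a signed int, so -1 (rather than -1U) is the natural "value unknown" sentinel, and only a real non-zero byte count sets the flag. The following is a hedged sketch of how a program might consume that flag with standard liburing calls; it is not part of this patch, and the function name and trimmed error handling are illustrative.

/*
 * Illustrative liburing consumer of IORING_CQE_F_SOCK_NONEMPTY (not part
 * of this change). Submits one recv and checks whether the kernel reported
 * more queued socket data, which is derived from msghdr->msg_inq.
 */
#include <liburing.h>
#include <stdio.h>

static int recv_once(struct io_uring *ring, int sockfd, void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;	/* submission queue full */

	io_uring_prep_recv(sqe, sockfd, buf, len, 0);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;

	/* Set when the socket is known to still have data queued. */
	if (cqe->flags & IORING_CQE_F_SOCK_NONEMPTY)
		fprintf(stderr, "socket still has data queued, recv again\n");

	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}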
