
Commit dd54fcc

Merge tag 'io_uring-6.13-20242901' of git://git.kernel.dk/linux
Pull more io_uring updates from Jens Axboe:

 - Remove a leftover struct from when the cqwait registered waiting was
   transitioned to regions.

 - Fix for an issue introduced in this merge window, where nop->fd might
   be used uninitialized. Ensure it's always set.

 - Add capping of the task_work run in local task_work mode, to prevent
   bursty and long chains from adding too much latency.

 - Work around xa_store() leaving ->head non-NULL if it encounters an
   allocation error during storing. Just a debug trigger, and can go away
   once xa_store() behaves in a more expected way for this condition. Not
   a major thing as it basically requires fault injection to trigger it.

 - Fix a few mapping corner cases

 - Fix KCSAN complaint on reading the table size post unlock. Again not a
   "real" issue, but it's easy to silence by just keeping the reading
   inside the lock that protects it.

* tag 'io_uring-6.13-20242901' of git://git.kernel.dk/linux:
  io_uring/tctx: work around xa_store() allocation error issue
  io_uring: fix corner case forgetting to vunmap
  io_uring: fix task_work cap overshooting
  io_uring: check for overflows in io_pin_pages
  io_uring/nop: ensure nop->fd is always initialized
  io_uring: limit local tw done
  io_uring: add io_local_work_pending()
  io_uring/region: return negative -E2BIG in io_create_region()
  io_uring: protect register tracing
  io_uring: remove io_uring_cqwait_reg_arg
2 parents 133577c + 7eb75ce commit dd54fcc

8 files changed: +87 -47 lines changed

include/linux/io_uring_types.h (+1)

@@ -336,6 +336,7 @@ struct io_ring_ctx {
 	 */
 	struct {
 		struct llist_head	work_llist;
+		struct llist_head	retry_llist;
 		unsigned long		check_cq;
 		atomic_t		cq_wait_nr;
 		atomic_t		cq_timeouts;

include/uapi/linux/io_uring.h (-14)

@@ -873,20 +873,6 @@ enum {
 	IORING_REG_WAIT_TS		= (1U << 0),
 };
 
-/*
- * Argument for IORING_REGISTER_CQWAIT_REG, registering a region of
- * struct io_uring_reg_wait that can be indexed when io_uring_enter(2) is
- * called rather than pass in a wait argument structure separately.
- */
-struct io_uring_cqwait_reg_arg {
-	__u32		flags;
-	__u32		struct_size;
-	__u32		nr_entries;
-	__u32		pad;
-	__u64		user_addr;
-	__u64		pad2[3];
-};
-
 /*
  * Argument for io_uring_enter(2) with
  * IORING_GETEVENTS | IORING_ENTER_EXT_ARG_REG set, where the actual argument

io_uring/io_uring.c (+50 -25)

@@ -121,6 +121,7 @@
 
 #define IO_COMPL_BATCH			32
 #define IO_REQ_ALLOC_BATCH		8
+#define IO_LOCAL_TW_DEFAULT_MAX		20
 
 struct io_defer_entry {
 	struct list_head	list;
@@ -1255,12 +1256,14 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 	struct llist_node *node = llist_del_all(&ctx->work_llist);
 
 	__io_fallback_tw(node, false);
+	node = llist_del_all(&ctx->retry_llist);
+	__io_fallback_tw(node, false);
 }
 
 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
 				       int min_events)
 {
-	if (llist_empty(&ctx->work_llist))
+	if (!io_local_work_pending(ctx))
 		return false;
 	if (events < min_events)
 		return true;
@@ -1269,8 +1272,29 @@ static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
 	return false;
 }
 
+static int __io_run_local_work_loop(struct llist_node **node,
+				    struct io_tw_state *ts,
+				    int events)
+{
+	int ret = 0;
+
+	while (*node) {
+		struct llist_node *next = (*node)->next;
+		struct io_kiocb *req = container_of(*node, struct io_kiocb,
+						    io_task_work.node);
+		INDIRECT_CALL_2(req->io_task_work.func,
+				io_poll_task_func, io_req_rw_complete,
+				req, ts);
+		*node = next;
+		if (++ret >= events)
+			break;
+	}
+
+	return ret;
+}
+
 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
-			       int min_events)
+			       int min_events, int max_events)
 {
 	struct llist_node *node;
 	unsigned int loops = 0;
@@ -1281,25 +1305,23 @@ static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
 again:
+	min_events -= ret;
+	ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, max_events);
+	if (ctx->retry_llist.first)
+		goto retry_done;
+
 	/*
 	 * llists are in reverse order, flip it back the right way before
 	 * running the pending items.
 	 */
 	node = llist_reverse_order(llist_del_all(&ctx->work_llist));
-	while (node) {
-		struct llist_node *next = node->next;
-		struct io_kiocb *req = container_of(node, struct io_kiocb,
-						    io_task_work.node);
-		INDIRECT_CALL_2(req->io_task_work.func,
-				io_poll_task_func, io_req_rw_complete,
-				req, ts);
-		ret++;
-		node = next;
-	}
+	ret += __io_run_local_work_loop(&node, ts, max_events - ret);
+	ctx->retry_llist.first = node;
 	loops++;
 
 	if (io_run_local_work_continue(ctx, ret, min_events))
 		goto again;
+retry_done:
 	io_submit_flush_completions(ctx);
 	if (io_run_local_work_continue(ctx, ret, min_events))
 		goto again;
@@ -1313,18 +1335,20 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
 {
 	struct io_tw_state ts = {};
 
-	if (llist_empty(&ctx->work_llist))
+	if (!io_local_work_pending(ctx))
 		return 0;
-	return __io_run_local_work(ctx, &ts, min_events);
+	return __io_run_local_work(ctx, &ts, min_events,
+				   max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
 }
 
-static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
+static int io_run_local_work(struct io_ring_ctx *ctx, int min_events,
+			     int max_events)
 {
 	struct io_tw_state ts = {};
 	int ret;
 
 	mutex_lock(&ctx->uring_lock);
-	ret = __io_run_local_work(ctx, &ts, min_events);
+	ret = __io_run_local_work(ctx, &ts, min_events, max_events);
 	mutex_unlock(&ctx->uring_lock);
 	return ret;
 }
@@ -2328,9 +2352,9 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 
 int io_run_task_work_sig(struct io_ring_ctx *ctx)
 {
-	if (!llist_empty(&ctx->work_llist)) {
+	if (io_local_work_pending(ctx)) {
 		__set_current_state(TASK_RUNNING);
-		if (io_run_local_work(ctx, INT_MAX) > 0)
+		if (io_run_local_work(ctx, INT_MAX, IO_LOCAL_TW_DEFAULT_MAX) > 0)
 			return 0;
 	}
 	if (io_run_task_work() > 0)
@@ -2459,7 +2483,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 {
 	if (unlikely(READ_ONCE(ctx->check_cq)))
 		return 1;
-	if (unlikely(!llist_empty(&ctx->work_llist)))
+	if (unlikely(io_local_work_pending(ctx)))
 		return 1;
 	if (unlikely(task_work_pending(current)))
 		return 1;
@@ -2493,8 +2517,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 
 	if (!io_allowed_run_tw(ctx))
 		return -EEXIST;
-	if (!llist_empty(&ctx->work_llist))
-		io_run_local_work(ctx, min_events);
+	if (io_local_work_pending(ctx))
+		io_run_local_work(ctx, min_events,
+				  max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
 	io_run_task_work();
 
 	if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
@@ -2564,8 +2589,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 	 * If we got woken because of task_work being processed, run it
 	 * now rather than let the caller do another wait loop.
 	 */
-	if (!llist_empty(&ctx->work_llist))
-		io_run_local_work(ctx, nr_wait);
+	if (io_local_work_pending(ctx))
+		io_run_local_work(ctx, nr_wait, nr_wait);
 	io_run_task_work();
 
 	/*
@@ -3077,7 +3102,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 
 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
 	    io_allowed_defer_tw_run(ctx))
-		ret |= io_run_local_work(ctx, INT_MAX) > 0;
+		ret |= io_run_local_work(ctx, INT_MAX, INT_MAX) > 0;
 	ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
 	mutex_lock(&ctx->uring_lock);
 	ret |= io_poll_remove_all(ctx, tctx, cancel_all);
@@ -3158,7 +3183,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 		io_run_task_work();
 		io_uring_drop_tctx_refs(current);
 		xa_for_each(&tctx->xa, index, node) {
-			if (!llist_empty(&node->ctx->work_llist)) {
+			if (io_local_work_pending(node->ctx)) {
 				WARN_ON_ONCE(node->ctx->submitter_task &&
 					     node->ctx->submitter_task != current);
 				goto end_wait;
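The capping above only matters for rings running in deferred task_work mode (IORING_SETUP_DEFER_TASKRUN), where completion task_work is processed when the submitting task enters the kernel. A minimal userspace sketch of setting up such a ring with liburing follows; it is not part of this commit, and assumes liburing headers are installed (the calls used are standard liburing API):

/*
 * Minimal sketch: create a ring in the mode the IO_LOCAL_TW_DEFAULT_MAX
 * cap applies to. DEFER_TASKRUN defers completion task_work until the
 * task enters the kernel (e.g. submit_and_wait), which is the path the
 * new per-run cap limits.
 */
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p = { };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	/* DEFER_TASKRUN requires SINGLE_ISSUER */
	p.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);

	/* local task_work (now capped per run by the kernel) executes here */
	io_uring_submit_and_wait(&ring, 1);
	if (!io_uring_peek_cqe(&ring, &cqe))
		io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}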

io_uring/io_uring.h (+7 -2)

@@ -347,9 +347,14 @@ static inline int io_run_task_work(void)
 	return ret;
 }
 
+static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
+{
+	return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
+}
+
 static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
 {
-	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
+	return task_work_pending(current) || io_local_work_pending(ctx);
 }
 
 static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
@@ -484,6 +489,6 @@ enum {
 static inline bool io_has_work(struct io_ring_ctx *ctx)
 {
 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
-	       !llist_empty(&ctx->work_llist);
+	       io_local_work_pending(ctx);
 }
 #endif

io_uring/memmap.c (+10 -3)

@@ -73,6 +73,8 @@ void *io_pages_map(struct page ***out_pages, unsigned short *npages,
 	ret = io_mem_alloc_compound(pages, nr_pages, size, gfp);
 	if (!IS_ERR(ret))
 		goto done;
+	if (nr_pages == 1)
+		goto fail;
 
 	ret = io_mem_alloc_single(pages, nr_pages, size, gfp);
 	if (!IS_ERR(ret)) {
@@ -81,7 +83,7 @@
 		*npages = nr_pages;
 		return ret;
 	}
-
+fail:
 	kvfree(pages);
 	*out_pages = NULL;
 	*npages = 0;
@@ -136,7 +138,12 @@ struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
 	struct page **pages;
 	int ret;
 
-	end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (check_add_overflow(uaddr, len, &end))
+		return ERR_PTR(-EOVERFLOW);
+	if (check_add_overflow(end, PAGE_SIZE - 1, &end))
+		return ERR_PTR(-EOVERFLOW);
+
+	end = end >> PAGE_SHIFT;
 	start = uaddr >> PAGE_SHIFT;
 	nr_pages = end - start;
 	if (WARN_ON_ONCE(!nr_pages))
@@ -229,7 +236,7 @@ int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
 	if (!reg->size || reg->mmap_offset || reg->id)
 		return -EINVAL;
 	if ((reg->size >> PAGE_SHIFT) > INT_MAX)
-		return E2BIG;
+		return -E2BIG;
 	if ((reg->user_addr | reg->size) & ~PAGE_MASK)
 		return -EINVAL;
 	if (check_add_overflow(reg->user_addr, reg->size, &end))
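For context on the io_pin_pages() change: the old `end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT` can wrap for adversarial uaddr/len values, producing a bogus (small or zero) page count. Below is a standalone userspace illustration of the checked-add pattern; it uses the compiler builtin that the kernel's check_add_overflow() is built on, and the helper name is made up for the example:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Hypothetical helper mirroring the kernel-side checks in io_pin_pages() */
static bool pin_range_ok(unsigned long uaddr, unsigned long len,
			 unsigned long *nr_pages)
{
	unsigned long end;

	if (__builtin_add_overflow(uaddr, len, &end))
		return false;		/* uaddr + len wrapped */
	if (__builtin_add_overflow(end, PAGE_SIZE - 1, &end))
		return false;		/* rounding up to a page wrapped */
	*nr_pages = (end >> PAGE_SHIFT) - (uaddr >> PAGE_SHIFT);
	return *nr_pages != 0;
}

int main(void)
{
	unsigned long nr;

	/* wraps around: previously produced a nonsense nr_pages */
	printf("near-top-of-address-space range: %s\n",
	       pin_range_ok(~0UL - PAGE_SIZE, 4 * PAGE_SIZE, &nr) ? "ok" : "rejected");

	/* sane three-page range */
	if (pin_range_ok(0x10000, 3 * PAGE_SIZE, &nr))
		printf("sane range: %lu pages\n", nr);
	return 0;
}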

io_uring/nop.c (+5 -1)

@@ -35,10 +35,14 @@ int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		nop->result = READ_ONCE(sqe->len);
 	else
 		nop->result = 0;
-	if (nop->flags & IORING_NOP_FIXED_FILE)
+	if (nop->flags & IORING_NOP_FILE)
 		nop->fd = READ_ONCE(sqe->fd);
+	else
+		nop->fd = -1;
 	if (nop->flags & IORING_NOP_FIXED_BUFFER)
 		nop->buffer = READ_ONCE(sqe->buf_index);
+	else
+		nop->buffer = -1;
 	return 0;
 }
 

io_uring/register.c (+2 -1)

@@ -905,9 +905,10 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 
 	mutex_lock(&ctx->uring_lock);
 	ret = __io_uring_register(ctx, opcode, arg, nr_args);
-	mutex_unlock(&ctx->uring_lock);
+
 	trace_io_uring_register(ctx, opcode, ctx->file_table.data.nr,
 				ctx->buf_table.nr, ret);
+	mutex_unlock(&ctx->uring_lock);
 	if (!use_registered_ring)
 		fput(file);
 	return ret;
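This change silences the KCSAN report by keeping the reads of ctx->file_table.data.nr and ctx->buf_table.nr for the tracepoint inside ->uring_lock, the lock that serializes updates to those tables. A hypothetical alternative, not what this commit does, would be to leave the trace call outside the lock and annotate the racy read instead, roughly:

	/* Illustration only: annotate the lockless read so KCSAN treats
	 * it as an intentional, benign race.
	 */
	trace_io_uring_register(ctx, opcode, ctx->file_table.data.nr,
				data_race(ctx->buf_table.nr), ret);

Keeping the trace under the lock was chosen here since the tracepoint is not a hot path and the locked read is trivially correct.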

io_uring/tctx.c (+12 -1)

@@ -47,8 +47,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
 void __io_uring_free(struct task_struct *tsk)
 {
 	struct io_uring_task *tctx = tsk->io_uring;
+	struct io_tctx_node *node;
+	unsigned long index;
 
-	WARN_ON_ONCE(!xa_empty(&tctx->xa));
+	/*
+	 * Fault injection forcing allocation errors in the xa_store() path
+	 * can lead to xa_empty() returning false, even though no actual
+	 * node is stored in the xarray. Until that gets sorted out, attempt
+	 * an iteration here and warn if any entries are found.
+	 */
+	xa_for_each(&tctx->xa, index, node) {
+		WARN_ON_ONCE(1);
+		break;
+	}
 	WARN_ON_ONCE(tctx->io_wq);
 	WARN_ON_ONCE(tctx->cached_refs);
 
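The workaround relies on a distinction worth spelling out: after a failed xa_store(), the xarray can retain internal nodes (so ->head is non-NULL and xa_empty() reports false) even though no entry was ever successfully stored, whereas xa_for_each() only visits entries that are actually present. A kernel-style sketch of that emptiness test, with a hypothetical helper name used purely for illustration:

/*
 * Hypothetical helper, illustration only: judge "empty" by live entries,
 * ignoring leftover internal nodes from a failed xa_store().
 */
static bool io_tctx_has_entries(struct xarray *xa)
{
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(xa, index, node)
		return true;	/* found a stored entry */
	return false;		/* no entries, even if xa_empty() says otherwise */
}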
