
Commit f679ebf

Merge tag 'io_uring-6.14-20250221' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:

 - Series fixing an issue with multishot read on pollable files that may
   return -EIOCBQUEUED from ->read_iter(). Four small patches for that,
   the first one deliberately done in such a way that it'd be easy to
   backport

 - Remove some dead constant definitions

 - Use array_index_nospec() for opcode indexing

 - Work-around for worker creation retries in the presence of signals

* tag 'io_uring-6.14-20250221' of git://git.kernel.dk/linux:
  io_uring/rw: clean up mshot forced sync mode
  io_uring/rw: move ki_complete init into prep
  io_uring/rw: don't directly use ki_complete
  io_uring/rw: forbid multishot async reads
  io_uring/rsrc: remove unused constants
  io_uring: fix spelling error in uapi io_uring.h
  io_uring: prevent opcode speculation
  io-wq: backoff when retrying worker creation
2 parents (7108b48 + 4614de7); commit f679ebf
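For context on the multishot-read series in this pull: a multishot read arms one SQE against a pollable file and keeps posting CQEs (with IORING_CQE_F_MORE set) as data arrives, picking buffers from a registered provided-buffer ring. The sketch below is illustrative only and not part of this commit; it assumes a recent liburing (io_uring_prep_read_multishot() and io_uring_setup_buf_ring() are liburing helpers, not shown in this diff), and it trims error handling and buffer recycling.

/* Illustrative multishot read from a pollable fd (e.g. a pipe).
 * Assumes liburing >= 2.6; error handling and buffer recycling omitted. */
#include <liburing.h>
#include <stdio.h>

#define BGID  0        /* provided-buffer group id (arbitrary) */
#define NBUFS 8        /* must be a power of two for a buffer ring */
#define BUFSZ 4096

static char bufs[NBUFS][BUFSZ];

int read_multishot_demo(int fd)
{
        struct io_uring ring;
        struct io_uring_buf_ring *br;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int i, more, ret;

        io_uring_queue_init(64, &ring, 0);

        /* register a buffer ring the kernel picks read buffers from */
        br = io_uring_setup_buf_ring(&ring, NBUFS, BGID, 0, &ret);
        for (i = 0; i < NBUFS; i++)
                io_uring_buf_ring_add(br, bufs[i], BUFSZ, i,
                                      io_uring_buf_ring_mask(NBUFS), i);
        io_uring_buf_ring_advance(br, NBUFS);

        /* one SQE, many completions; each CQE reports a buffer id in flags */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read_multishot(sqe, fd, 0, 0, BGID);
        io_uring_submit(&ring);

        do {
                io_uring_wait_cqe(&ring, &cqe);
                more = cqe->flags & IORING_CQE_F_MORE;
                printf("res=%d more=%d\n", cqe->res, !!more);
                io_uring_cqe_seen(&ring, cqe);
        } while (more);         /* no MORE flag: multishot terminated */

        io_uring_queue_exit(&ring);
        return 0;
}

When IORING_CQE_F_MORE is clear the multishot has terminated (error or buffer exhaustion) and would need to be re-armed.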

5 files changed: +42, -21 lines

include/uapi/linux/io_uring.h

+1 -1

@@ -380,7 +380,7 @@ enum io_uring_op {
  * result will be the number of buffers send, with
  * the starting buffer ID in cqe->flags as per
  * usual for provided buffer usage. The buffers
- * will be contigious from the starting buffer ID.
+ * will be contiguous from the starting buffer ID.
  */
 #define IORING_RECVSEND_POLL_FIRST (1U << 0)
 #define IORING_RECV_MULTISHOT (1U << 1)

io_uring/io-wq.c

+18 -5

@@ -64,7 +64,7 @@ struct io_worker {

        union {
                struct rcu_head rcu;
-               struct work_struct work;
+               struct delayed_work work;
        };
 };

@@ -770,6 +770,18 @@ static inline bool io_should_retry_thread(struct io_worker *worker, long err)
        }
 }

+static void queue_create_worker_retry(struct io_worker *worker)
+{
+       /*
+        * We only bother retrying because there's a chance that the
+        * failure to create a worker is due to some temporary condition
+        * in the forking task (e.g. outstanding signal); give the task
+        * some time to clear that condition.
+        */
+       schedule_delayed_work(&worker->work,
+                             msecs_to_jiffies(worker->init_retries * 5));
+}
+
 static void create_worker_cont(struct callback_head *cb)
 {
        struct io_worker *worker;

@@ -809,12 +821,13 @@ static void create_worker_cont(struct callback_head *cb)

        /* re-create attempts grab a new worker ref, drop the existing one */
        io_worker_release(worker);
-       schedule_work(&worker->work);
+       queue_create_worker_retry(worker);
 }

 static void io_workqueue_create(struct work_struct *work)
 {
-       struct io_worker *worker = container_of(work, struct io_worker, work);
+       struct io_worker *worker = container_of(work, struct io_worker,
+                                               work.work);
        struct io_wq_acct *acct = io_wq_get_acct(worker);

        if (!io_queue_worker_create(worker, acct, create_worker_cont))

@@ -855,8 +868,8 @@ static bool create_io_worker(struct io_wq *wq, int index)
                kfree(worker);
                goto fail;
        } else {
-               INIT_WORK(&worker->work, io_workqueue_create);
-               schedule_work(&worker->work);
+               INIT_DELAYED_WORK(&worker->work, io_workqueue_create);
+               queue_create_worker_retry(worker);
        }

        return true;
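The switch from work_struct to delayed_work above is what makes the backoff possible: each failed creation attempt reschedules itself init_retries * 5 ms into the future instead of immediately. A generic kernel-style sketch of that pattern follows; my_worker, my_try_create() and MY_RETRY_LIMIT are hypothetical names for illustration, not part of this commit.

/* Generic retry-with-linear-backoff sketch using delayed_work.
 * All my_* names are hypothetical, for illustration only. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_RETRY_LIMIT 3

struct my_worker {
        struct delayed_work work;       /* was: struct work_struct */
        int init_retries;
};

static bool my_try_create(struct my_worker *w)
{
        return false;                   /* stub: pretend creation fails */
}

static void my_create_fn(struct work_struct *work)
{
        /* note the extra .work: delayed_work embeds a work_struct */
        struct my_worker *w = container_of(work, struct my_worker, work.work);

        if (my_try_create(w))
                return;                 /* success */
        if (++w->init_retries > MY_RETRY_LIMIT)
                return;                 /* transient condition didn't clear */
        /* back off a little longer on every failed attempt */
        schedule_delayed_work(&w->work,
                              msecs_to_jiffies(w->init_retries * 5));
}

static void my_worker_start(struct my_worker *w)
{
        INIT_DELAYED_WORK(&w->work, my_create_fn);
        schedule_delayed_work(&w->work, 0);     /* first attempt right away */
}

In the patch itself the retry decision lives in io_should_retry_thread() (visible in the hunk header above), and the delay grows with worker->init_retries.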

io_uring/io_uring.c

+2

@@ -2045,6 +2045,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                req->opcode = 0;
                return io_init_fail_req(req, -EINVAL);
        }
+       opcode = array_index_nospec(opcode, IORING_OP_LAST);
+
        def = &io_issue_defs[opcode];
        if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
                /* enforce forwards compatibility on users */
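The two added lines above are the standard Spectre-v1 (bounds-check bypass) hardening: the preceding bounds check on the opcode (its failure branch is the context shown) can be speculated past, so the opcode is clamped with array_index_nospec() before it is used to index io_issue_defs[]. A generic sketch of the pattern, with a hypothetical table and lookup function not taken from this commit:

/* Generic bounds-check + array_index_nospec() pattern; my_table and
 * my_lookup() are hypothetical, for illustration only. */
#include <linux/nospec.h>
#include <linux/errno.h>

#define MY_TABLE_SIZE 64

static int my_table[MY_TABLE_SIZE];

static int my_lookup(unsigned int idx)
{
        if (idx >= MY_TABLE_SIZE)
                return -EINVAL;
        /*
         * The branch above may be predicted "in bounds" and executed
         * speculatively for an out-of-range idx; clamp it so that any
         * speculative load still stays inside my_table[].
         */
        idx = array_index_nospec(idx, MY_TABLE_SIZE);
        return my_table[idx];
}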

io_uring/rsrc.h

-6

@@ -4,12 +4,6 @@

 #include <linux/lockdep.h>

-#define IO_NODE_ALLOC_CACHE_MAX 32
-
-#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
-#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT)
-#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
-
 enum {
        IORING_RSRC_FILE = 0,
        IORING_RSRC_BUFFER = 1,

io_uring/rw.c

+21 -9

@@ -23,6 +23,9 @@
 #include "poll.h"
 #include "rw.h"

+static void io_complete_rw(struct kiocb *kiocb, long res);
+static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);
+
 struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb kiocb;

@@ -289,6 +292,11 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        rw->kiocb.dio_complete = NULL;
        rw->kiocb.ki_flags = 0;

+       if (req->ctx->flags & IORING_SETUP_IOPOLL)
+               rw->kiocb.ki_complete = io_complete_rw_iopoll;
+       else
+               rw->kiocb.ki_complete = io_complete_rw;
+
        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);

@@ -563,8 +571,10 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
        smp_store_release(&req->iopoll_completed, 1);
 }

-static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
+static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
 {
+       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
        /* IO was queued async, completion will happen later */
        if (ret == -EIOCBQUEUED)
                return;

@@ -586,8 +596,10 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
                }
        }

-       INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
-                       io_complete_rw, kiocb, ret);
+       if (req->ctx->flags & IORING_SETUP_IOPOLL)
+               io_complete_rw_iopoll(&rw->kiocb, ret);
+       else
+               io_complete_rw(&rw->kiocb, ret);
 }

 static int kiocb_done(struct io_kiocb *req, ssize_t ret,

@@ -598,7 +610,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,

        if (ret >= 0 && req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
-       if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
+       if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
                __io_complete_rw_common(req, ret);
                /*
                 * Safe to call io_end from here as we're inline

@@ -609,7 +621,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                io_req_rw_cleanup(req, issue_flags);
                return IOU_OK;
        } else {
-               io_rw_done(&rw->kiocb, ret);
+               io_rw_done(req, ret);
        }

        return IOU_ISSUE_SKIP_COMPLETE;

@@ -813,10 +825,8 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
                        return -EOPNOTSUPP;
-
                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI;
-               kiocb->ki_complete = io_complete_rw_iopoll;
                req->iopoll_completed = 0;
                if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
                        /* make sure every req only blocks once*/

@@ -826,7 +836,6 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
-               kiocb->ki_complete = io_complete_rw;
        }

        if (req->flags & REQ_F_HAS_METADATA) {

@@ -904,7 +913,8 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
        } else if (ret == -EIOCBQUEUED) {
                return IOU_ISSUE_SKIP_COMPLETE;
        } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
-                  (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
+                  (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
+                  (issue_flags & IO_URING_F_MULTISHOT)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }

@@ -977,6 +987,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
        if (!io_file_can_poll(req))
                return -EBADFD;

+       /* make it sync, multishot doesn't support async execution */
+       rw->kiocb.ki_complete = NULL;
        ret = __io_read(req, issue_flags);

        /*
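A note on the "make it sync" line in io_read_mshot() above: the VFS decides whether a kiocb may complete asynchronously by checking whether it has a completion callback, so clearing ki_complete forces ->read_iter() to complete inline instead of returning -EIOCBQUEUED, which the multishot path cannot handle. The helper that encodes this lives in include/linux/fs.h and is, from memory (verify against the tree), essentially:

/* is_sync_kiocb(), paraphrased from include/linux/fs.h: a kiocb with no
 * completion callback must be completed synchronously by ->read_iter(). */
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
        return kiocb->ki_complete == NULL;
}

The __io_read() hunk complements this by not taking the async retry path when IO_URING_F_MULTISHOT is set.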