diff --git a/fs/io-wq.c b/fs/io-wq.c
index c516912622082d03e349d720ce20061536a6f669..afd955d53db90d95c6da086aa7794304e41777fa 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -1308,7 +1308,9 @@ int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
  */
 int io_wq_max_workers(struct io_wq *wq, int *new_count)
 {
-	int i, node, prev = 0;
+	int prev[IO_WQ_ACCT_NR];
+	bool first_node = true;
+	int i, node;
 
 	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
 	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
@@ -1319,6 +1321,9 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
 	}
 
+	for (i = 0; i < IO_WQ_ACCT_NR; i++)
+		prev[i] = 0;
+
 	rcu_read_lock();
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
@@ -1327,14 +1332,19 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 		raw_spin_lock(&wqe->lock);
 		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
 			acct = &wqe->acct[i];
-			prev = max_t(int, acct->max_workers, prev);
+			if (first_node)
+				prev[i] = max_t(int, acct->max_workers, prev[i]);
 			if (new_count[i])
 				acct->max_workers = new_count[i];
-			new_count[i] = prev;
 		}
 		raw_spin_unlock(&wqe->lock);
+		first_node = false;
 	}
 	rcu_read_unlock();
+
+	for (i = 0; i < IO_WQ_ACCT_NR; i++)
+		new_count[i] = prev[i];
+
 	return 0;
 }
 
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3ecd4b51510eaaf51a826393dd60e3c88b19b284..b07196b4511c421a81ddda68a6cde64dbbf717f3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6950,10 +6950,6 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 
 	switch (io_arm_poll_handler(req)) {
 	case IO_APOLL_READY:
-		if (linked_timeout) {
-			io_queue_linked_timeout(linked_timeout);
-			linked_timeout = NULL;
-		}
 		io_req_task_queue(req);
 		break;
 	case IO_APOLL_ABORTED:
@@ -10144,7 +10140,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
 	for (i = 0; i < sq_entries; i++) {
 		unsigned int entry = i + sq_head;
 		unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
-		struct io_uring_sqe *sqe = &ctx->sq_sqes[sq_idx];
+		struct io_uring_sqe *sqe;
 
 		if (sq_idx > sq_mask)
 			continue;
@@ -10795,10 +10791,11 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
 
 	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
 
-	memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+	for (i = 0; i < ARRAY_SIZE(new_count); i++)
+		if (new_count[i])
+			ctx->iowq_limits[i] = new_count[i];
 	ctx->iowq_limits_set = true;
 
-	ret = -EINVAL;
 	if (tctx && tctx->io_wq) {
 		ret = io_wq_max_workers(tctx->io_wq, new_count);
 		if (ret)
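
For reference, a minimal userspace sketch of the semantics the io_wq_max_workers() and io_register_iowq_max_workers() changes above are meant to restore, written against liburing's io_uring_register_iowq_max_workers() wrapper (which issues IORING_REGISTER_IOWQ_MAX_WORKERS and reaches the code paths patched here): an all-zero array queries the current per-type limits without changing them, and a zero entry next to a non-zero one updates only the non-zero slot. This is an illustration under those assumptions, not part of the patch:

/*
 * Sketch only: exercises IORING_REGISTER_IOWQ_MAX_WORKERS via liburing
 * (io_uring_register_iowq_max_workers() is available in liburing >= 2.1).
 * values[0] is the bounded worker limit, values[1] the unbounded one.
 */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	unsigned int vals[2] = { 0, 0 };	/* all zeroes: query only */
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return 1;

	/* With all-zero input the kernel must leave the limits alone and
	 * report the current maxima back in vals[]. */
	ret = io_uring_register_iowq_max_workers(&ring, vals);
	if (!ret)
		printf("bounded=%u unbounded=%u\n", vals[0], vals[1]);

	/* A zero entry beside a non-zero one changes only the non-zero
	 * slot: cap bounded workers at 4, leave unbounded untouched, per
	 * the per-entry copy added in io_register_iowq_max_workers(). */
	vals[0] = 4;
	vals[1] = 0;
	ret = io_uring_register_iowq_max_workers(&ring, vals);

	io_uring_queue_exit(&ring);
	return ret ? 1 : 0;
}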