author		Jens Axboe <axboe@kernel.dk>	2020-10-22 20:19:35 -0600
committer	Jens Axboe <axboe@kernel.dk>	2020-12-12 09:17:38 -0700
commit		355fb9e2b78e78b38ec00f5cd9b05c6aceb98335 (patch)
tree		73b6aebb80e177be9c156e37e0cfa8fcba46d56b
parent		e296dc4996b8094ccde45d19090d804c4103513e (diff)
io_uring: remove 'twa_signal_ok' deadlock work-around
The TIF_NOTIFY_SIGNAL based implementation of TWA_SIGNAL is always safe to use, regardless of context, as we won't be recursing into the signal lock. So now that all archs are using that, we can drop this deadlock work-around as it's always safe to use TWA_SIGNAL.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	fs/io_uring.c	21
1 file changed, 6 insertions(+), 15 deletions(-)
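
For reference, a minimal sketch of what io_req_task_work_add() ends up looking like after this patch, assembled from the hunks below. The declaration of 'notify' and the wakeup/failure handling after task_work_add() sit outside the visible diff context, so those parts are assumptions rather than the verbatim kernel code:

static int io_req_task_work_add(struct io_kiocb *req)
{
	struct task_struct *tsk = req->task;
	struct io_ring_ctx *ctx = req->ctx;
	enum task_work_notify_mode notify;	/* assumed type; declaration not in the visible context */
	int ret;

	/*
	 * An SQPOLL kernel thread only needs a plain wakeup; for everything
	 * else TWA_SIGNAL is now always safe, since TIF_NOTIFY_SIGNAL does
	 * not recurse into the signal lock.
	 */
	notify = TWA_NONE;
	if (!(ctx->flags & IORING_SETUP_SQPOLL))
		notify = TWA_SIGNAL;

	ret = task_work_add(tsk, &req->task_work, notify);
	/* wakeup / failure handling is elided in the diff and omitted here */
	return ret;
}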
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a170488507cfff..30d88f3d42b0dd 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1995,7 +1995,7 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
return __io_req_find_next(req);
}
-static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
+static int io_req_task_work_add(struct io_kiocb *req)
{
struct task_struct *tsk = req->task;
struct io_ring_ctx *ctx = req->ctx;
@@ -2012,7 +2012,7 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
* will do the job.
*/
notify = TWA_NONE;
- if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
+ if (!(ctx->flags & IORING_SETUP_SQPOLL))
notify = TWA_SIGNAL;
ret = task_work_add(tsk, &req->task_work, notify);
@@ -2074,7 +2074,7 @@ static void io_req_task_queue(struct io_kiocb *req)
init_task_work(&req->task_work, io_req_task_submit);
percpu_ref_get(&req->ctx->refs);
- ret = io_req_task_work_add(req, true);
+ ret = io_req_task_work_add(req);
if (unlikely(ret)) {
struct task_struct *tsk;
@@ -2196,7 +2196,7 @@ static void io_free_req_deferred(struct io_kiocb *req)
int ret;
init_task_work(&req->task_work, io_put_req_deferred_cb);
- ret = io_req_task_work_add(req, true);
+ ret = io_req_task_work_add(req);
if (unlikely(ret)) {
struct task_struct *tsk;
@@ -3305,7 +3305,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
/* submit ref gets dropped, acquire a new one */
refcount_inc(&req->refs);
- ret = io_req_task_work_add(req, true);
+ ret = io_req_task_work_add(req);
if (unlikely(ret)) {
struct task_struct *tsk;
@@ -4843,7 +4843,6 @@ struct io_poll_table {
static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
__poll_t mask, task_work_func_t func)
{
- bool twa_signal_ok;
int ret;
/* for instances that support it check for an event match first: */
@@ -4859,20 +4858,12 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
percpu_ref_get(&req->ctx->refs);
/*
- * If we using the signalfd wait_queue_head for this wakeup, then
- * it's not safe to use TWA_SIGNAL as we could be recursing on the
- * tsk->sighand->siglock on doing the wakeup. Should not be needed
- * either, as the normal wakeup will suffice.
- */
- twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
-
- /*
* If this fails, then the task is exiting. When a task exits, the
* work gets canceled, so just cancel this request as well instead
* of executing it. We can't safely execute it anyway, as we may not
* have the needed state needed for it anyway.
*/
- ret = io_req_task_work_add(req, twa_signal_ok);
+ ret = io_req_task_work_add(req);
if (unlikely(ret)) {
struct task_struct *tsk;
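
With the twa_signal_ok argument gone, every call site collapses to the same shape. A condensed sketch of the pattern visible in the io_req_task_queue() hunk above; the fallback branch is truncated in the diff, so only its intent is indicated:

	init_task_work(&req->task_work, io_req_task_submit);
	percpu_ref_get(&req->ctx->refs);

	ret = io_req_task_work_add(req);	/* no twa_signal_ok argument any more */
	if (unlikely(ret)) {
		/* task is exiting: cancel the request instead of executing the work */
	}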