io-uring.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Glauber Costa <glauber@scylladb.com>
To: Jens Axboe <axboe@kernel.dk>
Cc: io-uring@vger.kernel.org, Avi Kivity <avi@scylladb.com>
Subject: Re: Kernel BUG when registering the ring
Date: Tue, 11 Feb 2020 14:23:56 -0500	[thread overview]
Message-ID: <CAD-J=zYfbtQaGy8KatprCPdzrKTg3sbHp6Vc2D8Y+mK2G08s4A@mail.gmail.com> (raw)
In-Reply-To: <059cfdbf-bcfe-5680-9b0a-45a720cf65c5@kernel.dk>

Tested-by: Glauber Costa <glauber@scylladb.com>

On Tue, Feb 11, 2020 at 1:58 PM Jens Axboe <axboe@kernel.dk> wrote:
>
> On 2/11/20 6:01 AM, Glauber Costa wrote:
> > This works.
>
> Can you try this one as well?
>
>
> diff --git a/fs/io-wq.c b/fs/io-wq.c
> index 182aa17dc2ca..2d741fb76098 100644
> --- a/fs/io-wq.c
> +++ b/fs/io-wq.c
> @@ -699,11 +699,16 @@ static int io_wq_manager(void *data)
>         /* create fixed workers */
>         refcount_set(&wq->refs, workers_to_create);
>         for_each_node(node) {
> +               if (!node_online(node))
> +                       continue;
>                 if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
>                         goto err;
>                 workers_to_create--;
>         }
>
> +       while (workers_to_create--)
> +               refcount_dec(&wq->refs);
> +
>         complete(&wq->done);
>
>         while (!kthread_should_stop()) {
> @@ -711,6 +716,9 @@ static int io_wq_manager(void *data)
>                         struct io_wqe *wqe = wq->wqes[node];
>                         bool fork_worker[2] = { false, false };
>
> +                       if (!node_online(node))
> +                               continue;
> +
>                         spin_lock_irq(&wqe->lock);
>                         if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
>                                 fork_worker[IO_WQ_ACCT_BOUND] = true;
> @@ -849,6 +857,8 @@ void io_wq_cancel_all(struct io_wq *wq)
>         for_each_node(node) {
>                 struct io_wqe *wqe = wq->wqes[node];
>
> +               if (!node_online(node))
> +                       continue;
>                 io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
>         }
>         rcu_read_unlock();
> @@ -929,6 +939,8 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
>         for_each_node(node) {
>                 struct io_wqe *wqe = wq->wqes[node];
>
> +               if (!node_online(node))
> +                       continue;
>                 ret = io_wqe_cancel_cb_work(wqe, cancel, data);
>                 if (ret != IO_WQ_CANCEL_NOTFOUND)
>                         break;
> @@ -1021,6 +1033,8 @@ enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
>         for_each_node(node) {
>                 struct io_wqe *wqe = wq->wqes[node];
>
> +               if (!node_online(node))
> +                       continue;
>                 ret = io_wqe_cancel_work(wqe, &match);
>                 if (ret != IO_WQ_CANCEL_NOTFOUND)
>                         break;
> @@ -1050,6 +1064,8 @@ enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
>         for_each_node(node) {
>                 struct io_wqe *wqe = wq->wqes[node];
>
> +               if (!node_online(node))
> +                       continue;
>                 ret = io_wqe_cancel_work(wqe, &match);
>                 if (ret != IO_WQ_CANCEL_NOTFOUND)
>                         break;
> @@ -1084,6 +1100,8 @@ void io_wq_flush(struct io_wq *wq)
>         for_each_node(node) {
>                 struct io_wqe *wqe = wq->wqes[node];
>
> +               if (!node_online(node))
> +                       continue;
>                 init_completion(&data.done);
>                 INIT_IO_WORK(&data.work, io_wq_flush_func);
>                 data.work.flags |= IO_WQ_WORK_INTERNAL;
> @@ -1115,12 +1133,15 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
>
>         for_each_node(node) {
>                 struct io_wqe *wqe;
> +               int alloc_node = node;
>
> -               wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, node);
> +               if (!node_online(alloc_node))
> +                       alloc_node = NUMA_NO_NODE;
> +               wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
>                 if (!wqe)
>                         goto err;
>                 wq->wqes[node] = wqe;
> -               wqe->node = node;
> +               wqe->node = alloc_node;
>                 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
>                 atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
>                 if (wq->user) {
> @@ -1128,7 +1149,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
>                                         task_rlimit(current, RLIMIT_NPROC);
>                 }
>                 atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
> -               wqe->node = node;
>                 wqe->wq = wq;
>                 spin_lock_init(&wqe->lock);
>                 INIT_WQ_LIST(&wqe->work_list);
> @@ -1184,8 +1204,11 @@ static void __io_wq_destroy(struct io_wq *wq)
>                 kthread_stop(wq->manager);
>
>         rcu_read_lock();
> -       for_each_node(node)
> +       for_each_node(node) {
> +               if (!node_online(node))
> +                       continue;
>                 io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
> +       }
>         rcu_read_unlock();
>
>         wait_for_completion(&wq->done);
>
> --
> Jens Axboe
>

  reply	other threads:[~2020-02-11 19:24 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-02-11  1:22 Kernel BUG when registering the ring Glauber Costa
2020-02-11  3:25 ` Jens Axboe
2020-02-11  3:45   ` Glauber Costa
2020-02-11  3:50     ` Jens Axboe
2020-02-11 13:01       ` Glauber Costa
2020-02-11 18:58         ` Jens Axboe
2020-02-11 19:23           ` Glauber Costa [this message]
2020-02-11 19:24             ` Jens Axboe
2020-02-12 22:31           ` Jann Horn
2020-02-13 15:20             ` Jens Axboe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='CAD-J=zYfbtQaGy8KatprCPdzrKTg3sbHp6Vc2D8Y+mK2G08s4A@mail.gmail.com' \
    --to=glauber@scylladb.com \
    --cc=avi@scylladb.com \
    --cc=axboe@kernel.dk \
    --cc=io-uring@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).