* [Bug 1890395] [NEW] qmp/hmp: crash if client closes socket too early
@ 2020-08-05  7:38 Raphael Pour
  2021-04-29  9:55 ` [Bug 1890395] " Thomas Huth
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Raphael Pour @ 2020-08-05  7:38 UTC (permalink / raw)
  To: qemu-devel

Public bug reported:

QEMU crashes on a QMP/HMP command if the client closes the connection
before it has read the whole response from the socket.

Reproducer:

1. Start an arbitrary VM via QEMU.
2. Send an HMP command that produces a large response, e.g. 'info mem'.
3. Close the connection before the whole response has been read (a
   client sketch follows below).
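
A minimal client sketch that can trigger this (assumptions: QEMU was
started with '-qmp unix:/tmp/qmp.sock,server,nowait', and the HMP
command is issued through QMP's human-monitor-command wrapper; the
socket path and the QMP detour are illustrative, not taken from this
report):

    /* close-early.c -- connect, request 'info mem', hang up mid-response */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    int main(void)
    {
        struct sockaddr_un addr = { .sun_family = AF_UNIX };
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        strncpy(addr.sun_path, "/tmp/qmp.sock", sizeof(addr.sun_path) - 1);
        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            perror("connect");
            return 1;
        }

        /* Negotiate QMP capabilities, then run the HMP command. */
        const char *cmds =
            "{\"execute\": \"qmp_capabilities\"}"
            "{\"execute\": \"human-monitor-command\","
            " \"arguments\": {\"command-line\": \"info mem\"}}";
        if (write(fd, cmds, strlen(cmds)) < 0) {
            perror("write");
            return 1;
        }

        /* Read only a fragment of the multi-megabyte reply, then close
         * the socket while QEMU is still flushing the rest of it. */
        char buf[128];
        (void)read(fd, buf, sizeof(buf));
        close(fd);
        return 0;
    }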


Stack Trace:

Stack trace of thread 6493:
#0  0x0000559902fd2d30 object_get_class (qemu-system-x86_64)
#1  0x0000559903071020 qio_channel_create_watch (qemu-system-x86_64)
#2  0x000055990305f437 qemu_chr_fe_add_watch (qemu-system-x86_64)
#3  0x0000559902f7340d monitor_flush_locked (qemu-system-x86_64)
#4  0x0000559902f7360e monitor_flush_locked (qemu-system-x86_64)
#5  0x0000559902f74342 qmp_send_response (qemu-system-x86_64)
#6  0x0000559902f74409 monitor_qmp_respond (qemu-system-x86_64)
#7  0x0000559902f74bc0 monitor_qmp_bh_dispatcher (qemu-system-x86_64)
#8  0x00005599030c37be aio_bh_call (qemu-system-x86_64)
#9  0x00005599030c6dd0 aio_dispatch (qemu-system-x86_64)
#10 0x00005599030c369e aio_ctx_dispatch (qemu-system-x86_64)
#11 0x00007f5b6d37f417 g_main_context_dispatch (libglib-2.0.so.0)
#12 0x00005599030c5e0a glib_pollfds_poll (qemu-system-x86_64)
#13 0x0000559902dd75df main_loop (qemu-system-x86_64)
#14 0x0000559902c59f49 main (qemu-system-x86_64)
#15 0x00007f5b6bfeab97 __libc_start_main (libc.so.6)
#16 0x0000559902c5d38a _start (qemu-system-x86_64)

#0  0x0000559902fd2d30 in object_get_class (obj=obj@entry=0x0) at ./qom/object.c:909
#1  0x0000559903071020 in qio_channel_create_watch (ioc=0x0, condition=(G_IO_OUT | G_IO_HUP)) at ./io/channel.c:281
        klass = <optimized out>
        __func__ = "qio_channel_create_watch"
        ret = <optimized out>
#2  0x000055990305f437 in qemu_chr_fe_add_watch (be=be@entry=0x559905a7f460, cond=cond@entry=(G_IO_OUT | G_IO_HUP), func=func@entry=0x559902f734b0 <monitor_unblocked>, user_data=user_data@entry=0x559905a7f460) at ./chardev/char-fe.c:367
        s = 0x5599055782c0
        src = <optimized out>
        tag = <optimized out>
        __func__ = "qemu_chr_fe_add_watch"
#3  0x0000559902f7340d in monitor_flush_locked (mon=mon@entry=0x559905a7f460) at ./monitor/monitor.c:140
        rc = 219264
        len = 3865832
        buf = 0x7f5afc00e480 "{\"return\": \"ffff9eb480000000-ffff9eb480099000 ", '0' <repeats 11 times>, "99000 -rw\\r\\nffff9eb480099000-ffff9eb48009b000 ", '0' <repeats 12 times>, "2000 -r-\\r\\nffff9eb48009b000-ffff9eb486800000 0000000006765000 -rw\\r\\nffff9eb4868000"...
#4  0x0000559902f7360e in monitor_flush_locked (mon=0x559905a7f460) at ./monitor/monitor.c:160
        i = 3865830
        c = <optimized out>
#5  0x0000559902f7360e in monitor_puts (mon=mon@entry=0x559905a7f460, str=0x7f5aa1eda010 "{\"return\": \"ffff9eb480000000-ffff9eb480099000 ", '0' <repeats 11 times>, "99000 -rw\\r\\nffff9eb480099000-ffff9eb48009b000 ", '0' <repeats 12 times>, "2000 -r-\\r\\nffff9eb48009b000-ffff9eb486800000 0000000006765000 -rw\\r\\nffff9eb4868000"...) at ./monitor/monitor.c:167
        i = 3865830
        c = <optimized out>
#6  0x0000559902f74342 in qmp_send_response (mon=0x559905a7f460, rsp=<optimized out>) at ./monitor/qmp.c:119
        data = <optimized out>
        json = 0x559906c88380
#7  0x0000559902f74409 in monitor_qmp_respond (rsp=0x559905bbf740, mon=0x559905a7f460) at ./monitor/qmp.c:132
        old_mon = <optimized out>
        rsp = 0x559905bbf740
        error = <optimized out>
#8  0x0000559902f74409 in monitor_qmp_dispatch (mon=0x559905a7f460, req=<optimized out>) at ./monitor/qmp.c:161
        old_mon = <optimized out>
        rsp = 0x559905bbf740
        error = <optimized out>
#9  0x0000559902f74bc0 in monitor_qmp_bh_dispatcher (data=<optimized out>) at ./monitor/qmp.c:234
        id = <optimized out>
        rsp = <optimized out>
        need_resume = true
        mon = 0x559905a7f460
        __PRETTY_FUNCTION__ = "monitor_qmp_bh_dispatcher"
#10 0x00005599030c37be in aio_bh_call (bh=0x559905571b40) at ./util/async.c:89
        bh = 0x559905571b40
        bhp = <optimized out>
        next = 0x5599055718f0
        ret = 1
        deleted = false
#11 0x00005599030c37be in aio_bh_poll (ctx=ctx@entry=0x5599055706f0) at ./util/async.c:117
        bh = 0x559905571b40
        bhp = <optimized out>
        next = 0x5599055718f0
        ret = 1
        deleted = false
#12 0x00005599030c6dd0 in aio_dispatch (ctx=0x5599055706f0) at ./util/aio-posix.c:459
#13 0x00005599030c369e in aio_ctx_dispatch (source=<optimized out>, callback=<optimized out>, user_data=<optimized out>) at ./util/async.c:260
        ctx = <optimized out>
#14 0x00007f5b6d37f417 in g_main_context_dispatch () at /usr/lib/x86_64-linux-gnu/libglib-2.0.so.0
#15 0x00005599030c5e0a in glib_pollfds_poll () at ./util/main-loop.c:219
        context = 0x559905652420
        pfds = <optimized out>
        context = 0x559905652420
        ret = 1
        mlpoll = {state = 0, timeout = 4294967295, pollfds = 0x559905651800}
#16 0x00005599030c5e0a in os_host_main_loop_wait (timeout=14451267) at ./util/main-loop.c:242
        context = 0x559905652420
        ret = 1
        mlpoll = {state = 0, timeout = 4294967295, pollfds = 0x559905651800}
#17 0x00005599030c5e0a in main_loop_wait (nonblocking=<optimized out>) at ./util/main-loop.c:518
        mlpoll = {state = 0, timeout = 4294967295, pollfds = 0x559905651800}
#18 0x0000559902dd75df in main_loop () at ./vl.c:1810
#19 0x0000559902c59f49 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at ./vl.c:4466
        i = <optimized out>
        snapshot = 0
        linux_boot = <optimized out>
        initrd_filename = 0x0
        kernel_filename = <optimized out>
        kernel_cmdline = <optimized out>
        boot_order = 0x55990318bc94 "cad"
        boot_once = <optimized out>
        ds = <optimized out>
        opts = <optimized out>
        icount_opts = <optimized out>
        accel_opts = 0x0
        olist = <optimized out>
        optind = 100
        optarg = 0x7ffe0ca05e74 "timestamp=on"
        loadvm = 0x0
        cpu_option = 0x7ffe0ca05d42 "SandyBridge-IBRS,-kvm_steal_time,+pcid,+ssbd,+spec-ctrl,+md-clear"
        vga_model = 0x0
        qtest_chrdev = 0x0
        qtest_log = 0x0
        incoming = 0x0
        userconfig = <optimized out>
        nographic = false
        display_remote = <optimized out>
        log_mask = <optimized out>
        log_file = 0x0
        trace_file = <optimized out>
        maxram_size = <optimized out>
        ram_slots = 0
        vmstate_dump_file = 0x0
        main_loop_err = 0x0
        err = 0x0
        list_data_dirs = false
        dirs = <optimized out>
        bdo_queue = {sqh_first = 0x0, sqh_last = 0x7ffe0ca03540}
        plugin_list = {tqh_first = 0x0, tqh_circ = {tql_next = 0x0, tql_prev = 0x7ffe0ca03550}}
        __func__ = "main"
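
Note that in frame #1 above, qio_channel_create_watch() is entered with
ioc=0x0: once the client has disconnected, the socket chardev no longer
has a QIOChannel, yet monitor_flush_locked() still requests a
G_IO_OUT|G_IO_HUP watch to flush the rest of the response. A sketch of
the kind of NULL check that would avoid the crash at the watch-creation
site (assuming the chardev's chr_add_watch callback looks like
chardev/char-socket.c's tcp_chr_add_watch() of that era, and that
s->ioc is cleared on disconnect, as the trace suggests; this is
illustrative, not the actual upstream fix):

    /* Sketch only -- not a verified upstream patch; meant as a change
     * inside chardev/char-socket.c, using QEMU's own types. */
    static GSource *tcp_chr_add_watch(Chardev *chr, GIOCondition cond)
    {
        SocketChardev *s = SOCKET_CHARDEV(chr);

        if (!s->ioc) {
            /* Client already disconnected: nothing left to watch. */
            return NULL;
        }
        return qio_channel_create_watch(s->ioc, cond);
    }

The caller, qemu_chr_fe_add_watch(), would then need to treat a NULL
GSource as "no watch added" (returning tag 0) rather than attaching it.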

** Affects: qemu
     Importance: Undecided
         Status: New


** Tags: hmp qemu qmp

-- 
You received this bug notification because you are a member of
qemu-devel-ml, which is subscribed to QEMU.
https://bugs.launchpad.net/bugs/1890395

To manage notifications about this bug go to:
https://bugs.launchpad.net/qemu/+bug/1890395/+subscriptions



* [Bug 1890395] Re: qmp/hmp: crash if client closes socket too early
  2020-08-05  7:38 [Bug 1890395] [NEW] qmp/hmp: crash if client closes socket too early Raphael Pour
@ 2021-04-29  9:55 ` Thomas Huth
  2021-05-08  5:51 ` Thomas Huth
  2021-07-08  4:17 ` Launchpad Bug Tracker
  2 siblings, 0 replies; 4+ messages in thread
From: Thomas Huth @ 2021-04-29  9:55 UTC (permalink / raw)
  To: qemu-devel

** Tags removed: qemu


* [Bug 1890395] Re: qmp/hmp: crash if client closes socket too early
  2020-08-05  7:38 [Bug 1890395] [NEW] qmp/hmp: crash if client closes socket too early Raphael Pour
  2021-04-29  9:55 ` [Bug 1890395] " Thomas Huth
@ 2021-05-08  5:51 ` Thomas Huth
  2021-07-08  4:17 ` Launchpad Bug Tracker
  2 siblings, 0 replies; 4+ messages in thread
From: Thomas Huth @ 2021-05-08  5:51 UTC (permalink / raw)
  To: qemu-devel

The QEMU project is currently moving its bug tracking to another system.
For this we need to know which bugs are still valid and which could be
closed already. Thus we are setting the bug state to "Incomplete" now.

If the bug has already been fixed in the latest upstream version of QEMU,
then please close this ticket as "Fix released".

If it is not fixed yet and you think that this bug report here is still
valid, then you have two options:

1) If you already have an account on gitlab.com, please open a new ticket
for this problem in our new tracker here:

    https://gitlab.com/qemu-project/qemu/-/issues

and then close this ticket here on Launchpad (or let it expire
automatically after 60 days). Please mention the URL of this bug ticket
on Launchpad in the new ticket on GitLab.

2) If you don't have an account on gitlab.com and don't intend to get
one, but still would like to keep this ticket open, then please switch
the state back to "New" or "Confirmed" within the next 60 days
(otherwise it will get closed as "Expired"). We will then eventually
migrate the ticket automatically to the new system (but you won't be
the reporter of the bug in the new system and thus you won't get
notified on changes anymore).

Thank you and sorry for the inconvenience.


** Changed in: qemu
       Status: New => Incomplete


* [Bug 1890395] Re: qmp/hmp: crash if client closes socket too early
  2020-08-05  7:38 [Bug 1890395] [NEW] qmp/hmp: crash if client closes socket too early Raphael Pour
  2021-04-29  9:55 ` [Bug 1890395] " Thomas Huth
  2021-05-08  5:51 ` Thomas Huth
@ 2021-07-08  4:17 ` Launchpad Bug Tracker
  2 siblings, 0 replies; 4+ messages in thread
From: Launchpad Bug Tracker @ 2021-07-08  4:17 UTC (permalink / raw)
  To: qemu-devel

[Expired for QEMU because there has been no activity for 60 days.]

** Changed in: qemu
       Status: Incomplete => Expired


end of thread

Thread overview: 4+ messages
2020-08-05  7:38 [Bug 1890395] [NEW] qmp/hmp: crash if client closes socket too early Raphael Pour
2021-04-29  9:55 ` [Bug 1890395] " Thomas Huth
2021-05-08  5:51 ` Thomas Huth
2021-07-08  4:17 ` Launchpad Bug Tracker
