From: Christian Schoenebeck <qemu_oss@crudebyte.com>
To: qemu-devel@nongnu.org
Cc: Greg Kurz <groug@kaod.org>
Subject: [PATCH v4 05/11] tests/virtio-9p: added readdir test
Date: Tue, 21 Jan 2020 01:12:00 +0100
Message-ID: <e0b4402722a877178f8fb6a8ad7b64bb20150613.1579567020.git.qemu_oss@crudebyte.com>
In-Reply-To: <cover.1579567019.git.qemu_oss@crudebyte.com>

The first readdir test simply checks the number of directory
entries returned by the 9pfs server against the number of virtual
files created on the 9pfs synth driver side. The subsequent readdir
test then also checks whether all directory entries have the
expected file names (as created on the 9pfs synth driver side),
ignoring their precise order in the result list.
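
For reference, a minimal sketch of the size arithmetic the test
relies on; the 11 byte Rreaddir header and the 24 byte fixed
per-entry overhead match the constants used by the new
v9fs_rreaddir() parser below, while the helper name and the
name_len parameter are purely illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Rreaddir reply: size[4] Rreaddir id[1] tag[2] count[4] data[count]
     * => 11 bytes of header, then up to "count" bytes of payload.
     * Each payload entry: qid[13] offset[8] type[1] name[s], where
     * name[s] is a 2 byte length followed by the name characters.
     */
    static bool entries_fit_in_one_reply(uint32_t msize, uint32_t nentries,
                                         uint32_t name_len)
    {
        uint64_t payload = (uint64_t)nentries * (13 + 8 + 1 + 2 + name_len);
        return payload <= (uint64_t)msize - 11;
    }

With P9_MAX_SIZE as msize and the short file names generated by the
synth driver, all QTEST_V9FS_SYNTH_READDIR_NFILES + 2 entries fit
into a single reply, which is why the test issues one Treaddir
request with count = msize - 11.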

Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
---
 tests/qtest/virtio-9p-test.c | 152 +++++++++++++++++++++++++++++++++++
 1 file changed, 152 insertions(+)

diff --git a/tests/qtest/virtio-9p-test.c b/tests/qtest/virtio-9p-test.c
index 06263edb53..2167322985 100644
--- a/tests/qtest/virtio-9p-test.c
+++ b/tests/qtest/virtio-9p-test.c
@@ -68,6 +68,11 @@ static void v9fs_memread(P9Req *req, void *addr, size_t len)
     req->r_off += len;
 }
 
+static void v9fs_uint8_read(P9Req *req, uint8_t *val)
+{
+    v9fs_memread(req, val, 1);
+}
+
 static void v9fs_uint16_write(P9Req *req, uint16_t val)
 {
     uint16_t le_val = cpu_to_le16(val);
@@ -101,6 +106,12 @@ static void v9fs_uint32_read(P9Req *req, uint32_t *val)
     le32_to_cpus(val);
 }
 
+static void v9fs_uint64_read(P9Req *req, uint64_t *val)
+{
+    v9fs_memread(req, val, 8);
+    le64_to_cpus(val);
+}
+
 /* len[2] string[len] */
 static uint16_t v9fs_string_size(const char *string)
 {
@@ -191,6 +202,7 @@ static const char *rmessage_name(uint8_t id)
         id == P9_RLOPEN ? "RLOPEN" :
         id == P9_RWRITE ? "RWRITE" :
         id == P9_RFLUSH ? "RFLUSH" :
+        id == P9_RREADDIR ? "READDIR" :
         "<unknown>";
 }
 
@@ -348,6 +360,82 @@ static void v9fs_rwalk(P9Req *req, uint16_t *nwqid, v9fs_qid **wqid)
     v9fs_req_free(req);
 }
 
+/* size[4] Treaddir tag[2] fid[4] offset[8] count[4] */
+static P9Req *v9fs_treaddir(QVirtio9P *v9p, uint32_t fid, uint64_t offset,
+                            uint32_t count, uint16_t tag)
+{
+    P9Req *req;
+
+    req = v9fs_req_init(v9p, 4 + 8 + 4, P9_TREADDIR, tag);
+    v9fs_uint32_write(req, fid);
+    v9fs_uint64_write(req, offset);
+    v9fs_uint32_write(req, count);
+    v9fs_req_send(req);
+    return req;
+}
+
+struct V9fsDirent {
+    v9fs_qid qid;
+    uint64_t offset;
+    uint8_t type;
+    char *name;
+    struct V9fsDirent *next;
+};
+
+/* size[4] Rreaddir tag[2] count[4] data[count] */
+static void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries,
+                          struct V9fsDirent **entries)
+{
+    uint32_t local_count;
+    struct V9fsDirent *e = NULL;
+    uint16_t slen;
+    uint32_t n = 0;
+
+    v9fs_req_recv(req, P9_RREADDIR);
+    v9fs_uint32_read(req, &local_count);
+
+    if (count) {
+        *count = local_count;
+    }
+
+    for (int32_t togo = (int32_t)local_count;
+         togo >= 13 + 8 + 1 + 2;
+         togo -= 13 + 8 + 1 + 2 + slen, ++n)
+    {
+        if (!e) {
+            e = g_malloc(sizeof(struct V9fsDirent));
+            if (entries) {
+                *entries = e;
+            }
+        } else {
+            e = e->next = g_malloc(sizeof(struct V9fsDirent));
+        }
+        e->next = NULL;
+        /* qid[13] offset[8] type[1] name[s] */
+        v9fs_memread(req, &e->qid, 13);
+        v9fs_uint64_read(req, &e->offset);
+        v9fs_uint8_read(req, &e->type);
+        v9fs_string_read(req, &slen, &e->name);
+    }
+
+    if (nentries) {
+        *nentries = n;
+    }
+
+    v9fs_req_free(req);
+}
+
+static void v9fs_free_dirents(struct V9fsDirent *e)
+{
+    struct V9fsDirent *next = NULL;
+
+    for (; e; e = next) {
+        next = e->next;
+        g_free(e->name);
+        g_free(e);
+    }
+}
+
 /* size[4] Tlopen tag[2] fid[4] flags[4] */
 static P9Req *v9fs_tlopen(QVirtio9P *v9p, uint32_t fid, uint32_t flags,
                           uint16_t tag)
@@ -480,6 +568,69 @@ static void fs_walk(void *obj, void *data, QGuestAllocator *t_alloc)
     g_free(wqid);
 }
 
+static bool fs_dirents_contain_name(struct V9fsDirent *e, const char* name)
+{
+    for (; e; e = e->next) {
+        if (!strcmp(e->name, name)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void fs_readdir(void *obj, void *data, QGuestAllocator *t_alloc)
+{
+    QVirtio9P *v9p = obj;
+    alloc = t_alloc;
+    char *const wnames[] = { g_strdup(QTEST_V9FS_SYNTH_READDIR_DIR) };
+    uint16_t nqid;
+    v9fs_qid qid;
+    uint32_t count, nentries;
+    struct V9fsDirent *entries = NULL;
+    P9Req *req;
+
+    fs_attach(v9p, NULL, t_alloc);
+    req = v9fs_twalk(v9p, 0, 1, 1, wnames, 0);
+    v9fs_req_wait_for_reply(req, NULL);
+    v9fs_rwalk(req, &nqid, NULL);
+    g_assert_cmpint(nqid, ==, 1);
+
+    req = v9fs_tlopen(v9p, 1, O_DIRECTORY, 0);
+    v9fs_req_wait_for_reply(req, NULL);
+    v9fs_rlopen(req, &qid, NULL);
+
+    /*
+     * submit count = msize - 11, because 11 is the header size of Rreaddir
+     */
+    req = v9fs_treaddir(v9p, 1, 0, P9_MAX_SIZE - 11, 0);
+    v9fs_req_wait_for_reply(req, NULL);
+    v9fs_rreaddir(req, &count, &nentries, &entries);
+
+    /*
+     * Assuming msize (P9_MAX_SIZE) is large enough so we can retrieve all
+     * dir entries with only one readdir request.
+     */
+    g_assert_cmpint(
+        nentries, ==,
+        QTEST_V9FS_SYNTH_READDIR_NFILES + 2 /* "." and ".." */
+    );
+
+    /*
+     * Check all file names exist in returned entries, ignore their order
+     * though.
+     */
+    g_assert_cmpint(fs_dirents_contain_name(entries, "."), ==, true);
+    g_assert_cmpint(fs_dirents_contain_name(entries, ".."), ==, true);
+    for (int i = 0; i < QTEST_V9FS_SYNTH_READDIR_NFILES; ++i) {
+        char *name = g_strdup_printf(QTEST_V9FS_SYNTH_READDIR_FILE, i);
+        g_assert_cmpint(fs_dirents_contain_name(entries, name), ==, true);
+        g_free(name);
+    }
+
+    v9fs_free_dirents(entries);
+    g_free(wnames[0]);
+}
+
 static void fs_walk_no_slash(void *obj, void *data, QGuestAllocator *t_alloc)
 {
     QVirtio9P *v9p = obj;
@@ -658,6 +809,7 @@ static void register_virtio_9p_test(void)
                  NULL);
     qos_add_test("fs/flush/ignored", "virtio-9p", fs_flush_ignored,
                  NULL);
+    qos_add_test("fs/readdir/basic", "virtio-9p", fs_readdir, NULL);
 }
 
 libqos_init(register_virtio_9p_test);
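
As a side note, not part of this patch: the same helpers can also
drive a directory listing split over several Treaddir requests by
resuming from the offset of the last entry received, roughly along
these lines (hypothetical loop, fid and count values made up; the
follow-up "splitted readdir" test later in this series covers that
case properly):

    uint32_t count, nentries, total = 0;
    uint64_t offset = 0;
    struct V9fsDirent *entries, *e;
    P9Req *req;

    do {
        entries = NULL;
        req = v9fs_treaddir(v9p, 1, offset, 128 /* deliberately small */, 0);
        v9fs_req_wait_for_reply(req, NULL);
        v9fs_rreaddir(req, &count, &nentries, &entries);
        total += nentries;
        /* 9p2000.L: the next request resumes at the last entry's offset */
        for (e = entries; e; e = e->next) {
            offset = e->offset;
        }
        v9fs_free_dirents(entries);
    } while (nentries > 0);
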
-- 
2.20.1




Thread overview: 39+ messages
2020-01-21  0:36 [PATCH v4 00/11] 9pfs: readdir optimization Christian Schoenebeck
2020-01-20 22:29 ` [PATCH v4 01/11] tests/virtio-9p: add terminating null in v9fs_string_read() Christian Schoenebeck
2020-01-20 22:47 ` [PATCH v4 02/11] 9pfs: require msize >= 4096 Christian Schoenebeck
2020-01-20 23:50 ` [PATCH v4 03/11] 9pfs: validate count sent by client with T_readdir Christian Schoenebeck
2020-01-22 14:11   ` Greg Kurz
2020-01-22 14:26     ` Christian Schoenebeck
2020-01-21  0:01 ` [PATCH v4 04/11] hw/9pfs/9p-synth: added directory for readdir test Christian Schoenebeck
2020-01-21  0:12 ` Christian Schoenebeck [this message]
2020-01-22 19:56   ` [PATCH v4 05/11] tests/virtio-9p: added " Greg Kurz
2020-01-21  0:16 ` [PATCH v4 06/11] tests/virtio-9p: added splitted " Christian Schoenebeck
2020-01-22 21:14   ` Eric Blake
2020-01-22 21:29     ` Christian Schoenebeck
2020-01-23  6:59       ` Greg Kurz
2020-01-22 21:19   ` Greg Kurz
2020-01-22 22:36     ` Christian Schoenebeck
2020-01-23 10:30       ` Greg Kurz
2020-01-23 13:07         ` Christian Schoenebeck
2020-01-21  0:17 ` [PATCH v4 07/11] tests/virtio-9p: failing " Christian Schoenebeck
2020-01-22 22:59   ` Greg Kurz
2020-01-23 11:36     ` Christian Schoenebeck
2020-01-23 12:08       ` Greg Kurz
2020-01-21  0:23 ` [PATCH v4 08/11] 9pfs: readdir benchmark Christian Schoenebeck
2020-01-23 10:34   ` Greg Kurz
2020-01-23 13:20     ` Christian Schoenebeck
2020-01-21  0:26 ` [PATCH v4 09/11] hw/9pfs/9p-synth: avoid n-square issue in synth_readdir() Christian Schoenebeck
2020-01-23 11:13   ` Greg Kurz
2020-01-23 12:40     ` Christian Schoenebeck
2020-01-21  0:30 ` [PATCH v4 10/11] 9pfs: T_readdir latency optimization Christian Schoenebeck
2020-01-23 11:33   ` Greg Kurz
2020-01-23 12:57     ` Christian Schoenebeck
2020-03-09 14:09   ` Christian Schoenebeck
2020-03-09 15:42     ` Greg Kurz
2020-03-10 15:10       ` Christian Schoenebeck
2020-03-10 18:33         ` Greg Kurz
2020-03-11  1:18           ` Christian Schoenebeck
     [not found]             ` <20200311171408.3b3a2dfa@bahia.home>
2020-03-11 19:54               ` Christian Schoenebeck
2020-03-17 14:14                 ` Greg Kurz
2020-03-17 16:09                   ` Christian Schoenebeck
2020-01-21  0:32 ` [PATCH v4 11/11] hw/9pfs/9p.c: benchmark time on T_readdir request Christian Schoenebeck
