qemu-devel.nongnu.org archive mirror
From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>
Subject: [PATCH v6 51/51] hw/xen: Add basic ring handling to xenstore
Date: Tue, 10 Jan 2023 12:20:42 +0000
Message-ID: <20230110122042.1562155-52-dwmw2@infradead.org>
In-Reply-To: <20230110122042.1562155-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

Extract requests from the ring and return an ENOSYS error response to
each of them. This is enough to allow older Linux guests to boot, as
they need *something* back but don't much care what it is.

In the first instance we're likely to wire this up over a UNIX socket to
an actual xenstored implementation, but in the fullness of time it would
be nice to have a fully single-tenant "virtual" xenstore within qemu.
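
For reference, each message on the ring is framed by the standard
xsd_sockmsg header from Xen's public io/xs_wire.h (imported earlier
in this series), followed by ->len bytes of payload:

    struct xsd_sockmsg {
        uint32_t type;   /* XS_??? */
        uint32_t req_id; /* Request identifier, echoed in the response. */
        uint32_t tx_id;  /* Transaction id (0 if not in a transaction). */
        uint32_t len;    /* Length of data following this header. */
        /* Generally followed by nul-terminated string(s). */
    };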

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/i386/kvm/xen_xenstore.c | 223 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 220 insertions(+), 3 deletions(-)

diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index bb3346f4e3..6369e29f59 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -188,18 +188,235 @@ uint16_t xen_xenstore_get_port(void)
     return s->guest_port;
 }
 
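+/* True once a complete request (header plus payload) has been read. */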
+static bool req_pending(XenXenstoreState *s)
+{
+    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+    return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
+}
+
+static void reset_req(XenXenstoreState *s)
+{
+    memset(s->req_data, 0, sizeof(s->req_data));
+    s->req_offset = 0;
+}
+
+static void reset_rsp(XenXenstoreState *s)
+{
+    s->rsp_pending = false;
+
+    memset(s->rsp_data, 0, sizeof(s->rsp_data));
+    s->rsp_offset = 0;
+}
+
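+/*
+ * Placeholder request handling: every request currently gets an
+ * XS_ERROR "ENOSYS" response, which is enough to keep guests booting.
+ */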
+static void process_req(XenXenstoreState *s)
+{
+    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    const char enosys[] = "ENOSYS";
+
+    assert(req_pending(s));
+    assert(!s->rsp_pending);
+
+    rsp->type = XS_ERROR;
+    rsp->req_id = req->req_id;
+    rsp->tx_id = req->tx_id;
+    rsp->len = sizeof(enosys);
+    memcpy((void *)&rsp[1], enosys, sizeof(enosys));
+
+    s->rsp_pending = true;
+    reset_req(s);
+}
+
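+/* Copy up to @len request bytes off the shared ring, handling wrap. */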
+static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
+                                   unsigned int len)
+{
+    if (!len) {
+        return 0;
+    }
+
+    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
+    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
+    unsigned int copied = 0;
+
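+    /* Read the indexes before the ring contents they describe. */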
+    smp_mb();
+
+    while (len) {
+        unsigned int avail = prod - cons;
+        unsigned int offset = MASK_XENSTORE_IDX(cons);
+        unsigned int copylen = avail;
+
+        if (avail > XENSTORE_RING_SIZE) {
+            error_report("XenStore ring handling error");
+            s->fatal_error = true;
+            break;
+        } else if (avail == 0) {
+            break;
+        }
+
+        if (copylen > len) {
+            copylen = len;
+        }
+        if (copylen > XENSTORE_RING_SIZE - offset) {
+            copylen = XENSTORE_RING_SIZE - offset;
+        }
+
+        memcpy(ptr, &s->xs->req[offset], copylen);
+        copied += copylen;
+
+        ptr += copylen;
+        len -= copylen;
+
+        cons += copylen;
+    }
+
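+    /* Finish reading the data before handing the slots back to the guest. */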
+    smp_mb();
+
+    qatomic_set(&s->xs->req_cons, cons);
+
+    return copied;
+}
+
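+/* Copy up to @len response bytes onto the shared ring, handling wrap. */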
+static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
+                                 unsigned int len)
+{
+    if (!len) {
+        return 0;
+    }
+
+    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
+    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
+    unsigned int copied = 0;
+
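+    /* Read the consumer index before writing into the slots it has freed. */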
+    smp_mb();
+
+    while (len) {
+        unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
+        unsigned int offset = MASK_XENSTORE_IDX(prod);
+        unsigned int copylen = len;
+
+        if (avail > XENSTORE_RING_SIZE) {
+            error_report("XenStore ring handling error");
+            s->fatal_error = true;
+            break;
+        } else if (avail == 0) {
+            break;
+        }
+
+        if (copylen > avail) {
+            copylen = avail;
+        }
+        if (copylen > XENSTORE_RING_SIZE - offset) {
+            copylen = XENSTORE_RING_SIZE - offset;
+        }
+
+        memcpy(&s->xs->rsp[offset], ptr, copylen);
+        copied += copylen;
+
+        ptr += copylen;
+        len -= copylen;
+
+        prod += copylen;
+    }
+
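+    /* Make the response data visible before updating the producer index. */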
+    smp_mb();
+
+    qatomic_set(&s->xs->rsp_prod, prod);
+
+    return copied;
+}
+
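+/* Pull the next request (header, then payload) off the request ring. */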
+static unsigned int get_req(XenXenstoreState *s)
+{
+    unsigned int copied = 0;
+
+    if (s->fatal_error) {
+        return 0;
+    }
+
+    assert(!req_pending(s));
+
+    if (s->req_offset < XENSTORE_HEADER_SIZE) {
+        void *ptr = s->req_data + s->req_offset;
+        unsigned int len = XENSTORE_HEADER_SIZE;
+        unsigned int copylen = copy_from_ring(s, ptr, len);
+
+        copied += copylen;
+        s->req_offset += copylen;
+    }
+
+    if (s->req_offset >= XENSTORE_HEADER_SIZE) {
+        struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+        if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
+            error_report("Illegal XenStore request");
+            s->fatal_error = true;
+            return 0;
+        }
+
+        void *ptr = s->req_data + s->req_offset;
+        unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
+        unsigned int copylen = copy_from_ring(s, ptr, len);
+
+        copied += copylen;
+        s->req_offset += copylen;
+    }
+
+    return copied;
+}
+
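+/* Push as much of the pending response onto the ring as will fit. */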
+static unsigned int put_rsp(XenXenstoreState *s)
+{
+    if (s->fatal_error) {
+        return 0;
+    }
+
+    assert(s->rsp_pending);
+
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);
+
+    void *ptr = s->rsp_data + s->rsp_offset;
+    unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
+    unsigned int copylen = copy_to_ring(s, ptr, len);
+
+    s->rsp_offset += copylen;
+
+    /* Have we produced a complete response? */
+    if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
+        reset_rsp(s);
+    }
+
+    return copylen;
+}
+
 static void xen_xenstore_event(void *opaque)
 {
     XenXenstoreState *s = opaque;
     evtchn_port_t port = xen_be_evtchn_pending(s->eh);
+    unsigned int copied_to, copied_from;
+    bool processed, notify = false;
+
     if (port != s->be_port) {
         return;
     }
-    printf("xenstore event\n");
+
     /* We know this is a no-op. */
     xen_be_evtchn_unmask(s->eh, port);
-    qemu_hexdump(stdout, "", s->xs, sizeof(*s->xs));
-    xen_be_evtchn_notify(s->eh, s->be_port);
+
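+    /*
+     * Pump the ring until no more progress can be made: drain any
+     * pending response, pull in the next request, process it, and
+     * repeat. Notify the guest if anything was consumed or produced.
+     */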
+    do {
+        copied_to = copied_from = 0;
+        processed = false;
+
+        if (s->rsp_pending) {
+            copied_to = put_rsp(s);
+        }
+
+        if (!req_pending(s)) {
+            copied_from = get_req(s);
+        }
+
+        if (req_pending(s) && !s->rsp_pending) {
+            process_req(s);
+            processed = true;
+        }
+
+        notify |= copied_to || copied_from;
+    } while (copied_to || copied_from || processed);
+
+    if (notify) {
+        xen_be_evtchn_notify(s->eh, s->be_port);
+    }
 }
 
 static void alloc_guest_port(XenXenstoreState *s)
-- 
2.35.3



Thread overview: 94+ messages
2023-01-10 12:19 [PATCH v6 00/51] Xen support under KVM David Woodhouse
2023-01-10 12:19 ` [PATCH v6 01/51] include: import Xen public headers to include/standard-headers/ David Woodhouse
2023-01-10 12:19 ` [PATCH v6 02/51] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2023-01-10 12:19 ` [PATCH v6 03/51] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2023-01-10 12:19 ` [PATCH v6 04/51] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2023-01-10 12:19 ` [PATCH v6 05/51] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2023-01-10 12:19 ` [PATCH v6 06/51] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2023-01-10 12:19 ` [PATCH v6 07/51] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2023-01-10 12:19 ` [PATCH v6 08/51] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2023-01-16 16:20   ` Paul Durrant
2023-01-16 17:56     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 09/51] i386/xen: handle guest hypercalls David Woodhouse
2023-01-16 16:24   ` Paul Durrant
2023-01-16 17:57     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 10/51] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2023-01-10 12:20 ` [PATCH v6 11/51] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2023-01-16 16:27   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 12/51] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2023-01-16 16:36   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 13/51] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-01-16 16:57   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 14/51] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2023-01-16 17:17   ` Paul Durrant
2023-01-16 19:45     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 15/51] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2023-01-16 17:20   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 16/51] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2023-01-16 17:28   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 17/51] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2023-01-16 17:36   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 18/51] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2023-01-16 17:39   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 19/51] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2023-01-16 17:40   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 20/51] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2023-01-16 17:46   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 21/51] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2023-01-16 17:53   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 22/51] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2023-01-16 17:56   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 23/51] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2023-01-16 17:59   ` Paul Durrant
2023-01-16 19:54     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 24/51] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2023-01-10 12:20 ` [PATCH v6 25/51] i386/xen: implement HVMOP_set_param David Woodhouse
2023-01-16 18:00   ` Paul Durrant
2023-01-10 12:20 ` [PATCH v6 26/51] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2023-01-10 12:20 ` [PATCH v6 27/51] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-01-10 12:20 ` [PATCH v6 28/51] hw/xen: Implement EVTCHNOP_status David Woodhouse
2023-01-10 12:20 ` [PATCH v6 29/51] hw/xen: Implement EVTCHNOP_close David Woodhouse
2023-01-10 12:20 ` [PATCH v6 30/51] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2023-01-10 12:20 ` [PATCH v6 31/51] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2023-01-10 12:20 ` [PATCH v6 32/51] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2023-01-10 12:20 ` [PATCH v6 33/51] hw/xen: Implement EVTCHNOP_send David Woodhouse
2023-01-10 12:20 ` [PATCH v6 34/51] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2023-01-10 12:20 ` [PATCH v6 35/51] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2023-01-10 12:20 ` [PATCH v6 36/51] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2023-01-10 12:20 ` [PATCH v6 37/51] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2023-01-10 12:20 ` [PATCH v6 38/51] i386/xen: add monitor commands to test event injection David Woodhouse
2023-01-11 14:28   ` Dr. David Alan Gilbert
2023-01-11 14:57     ` David Woodhouse
2023-01-10 12:20 ` [PATCH v6 39/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2023-01-10 12:20 ` [PATCH v6 40/51] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2023-01-10 12:20 ` [PATCH v6 41/51] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2023-01-10 12:20 ` [PATCH v6 42/51] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2023-01-10 12:20 ` [PATCH v6 43/51] hw/xen: Support mapping grant frames David Woodhouse
2023-01-10 12:20 ` [PATCH v6 44/51] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2023-01-10 12:20 ` [PATCH v6 45/51] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2023-01-10 12:20 ` [PATCH v6 46/51] i386/xen: handle PV timer hypercalls David Woodhouse
2023-01-10 12:20 ` [PATCH v6 47/51] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2023-01-10 12:20 ` [PATCH v6 48/51] i386/xen: handle HVMOP_get_param David Woodhouse
2023-01-10 12:20 ` [PATCH v6 49/51] hw/xen: Add backend implementation of interdomain event channel support David Woodhouse
2023-01-10 12:20 ` [PATCH v6 50/51] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-01-10 12:20 ` David Woodhouse [this message]
2023-01-10 12:37 ` [RFC PATCH v1 00/15] Xen PV backend support for KVM/Xen guests David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 01/15] hw/xen: Add evtchn operations to allow redirection to internal emulation David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 02/15] hw/xen: Add emulated evtchn ops David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 03/15] hw/xen: Add gnttab operations to allow redirection to internal emulation David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 04/15] hw/xen: Pass grant ref to gnttab unmap David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 05/15] hw/xen: Add foreignmem operations to allow redirection to internal emulation David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 06/15] hw/xen: Add xenstore " David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 07/15] hw/xen: Move xenstore_store_pv_console_info to xen_console.c David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 08/15] hw/xen: Use XEN_PAGE_SIZE in PV backend drivers David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 09/15] hw/xen: Rename xen_common.h to xen_native.h David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 10/15] hw/xen: Build PV backend drivers for XENFV_MACHINE David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 11/15] hw/xen: Map guest XENSTORE_PFN grant in emulated Xenstore David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 12/15] hw/xen: Add backend implementation of grant table operations David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 13/15] hw/xen: Implement soft reset for emulated gnttab David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 14/15] hw/xen: Remove old version of Xen headers David Woodhouse
2023-01-10 12:37   ` [RFC PATCH v1 15/15] i386/xen: Initialize XenBus and legacy backends from pc_init1() David Woodhouse
2023-01-10 15:43   ` [RFC PATCH v1 00/15] Xen PV backend support for KVM/Xen guests Joao Martins
2023-01-10 15:47     ` Joao Martins
2023-01-10 16:52     ` David Woodhouse
2023-01-10 17:26       ` Joao Martins
