From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Paul Durrant" <paul@xen.org>,
	"Joao Martins" <joao.m.martins@oracle.com>,
	"Ankur Arora" <ankur.a.arora@oracle.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Thomas Huth" <thuth@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Juan Quintela" <quintela@redhat.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Julien Grall" <julien@xen.org>
Subject: [RFC PATCH v1 08/15] hw/xen: Use XEN_PAGE_SIZE in PV backend drivers
Date: Tue, 10 Jan 2023 12:37:47 +0000	[thread overview]
Message-ID: <20230110123754.1564465-9-dwmw2@infradead.org> (raw)
In-Reply-To: <20230110123754.1564465-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

XC_PAGE_SIZE comes from the actual Xen libraries, while XEN_PAGE_SIZE is
provided by QEMU itself in xen_backend_ops.h. For backends which may be
built for emulation mode, use the latter.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 hw/block/dataplane/xen-block.c |  8 ++++----
 hw/display/xenfb.c             | 12 ++++++------
 hw/net/xen_nic.c               | 12 ++++++------
 hw/usb/xen-usb.c               |  8 ++++----
 4 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index e55b713002..8322a1de82 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -101,9 +101,9 @@ static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
          * re-use requests, allocate the memory once here. It will be freed
          * xen_block_dataplane_destroy() when the request list is freed.
          */
-        request->buf = qemu_memalign(XC_PAGE_SIZE,
+        request->buf = qemu_memalign(XEN_PAGE_SIZE,
                                      BLKIF_MAX_SEGMENTS_PER_REQUEST *
-                                     XC_PAGE_SIZE);
+                                     XEN_PAGE_SIZE);
         dataplane->requests_total++;
         qemu_iovec_init(&request->v, 1);
     } else {
@@ -185,7 +185,7 @@ static int xen_block_parse_request(XenBlockRequest *request)
             goto err;
         }
         if (request->req.seg[i].last_sect * dataplane->sector_size >=
-            XC_PAGE_SIZE) {
+            XEN_PAGE_SIZE) {
             error_report("error: page crossing");
             goto err;
         }
@@ -740,7 +740,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
 
     dataplane->protocol = protocol;
 
-    ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
+    ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref;
     switch (dataplane->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 2c4016fcbd..0074a9b6f8 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -489,13 +489,13 @@ static int xenfb_map_fb(struct XenFB *xenfb)
     }
 
     if (xenfb->pixels) {
-        munmap(xenfb->pixels, xenfb->fbpages * XC_PAGE_SIZE);
+        munmap(xenfb->pixels, xenfb->fbpages * XEN_PAGE_SIZE);
         xenfb->pixels = NULL;
     }
 
-    xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XC_PAGE_SIZE);
+    xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XEN_PAGE_SIZE);
     n_fbdirs = xenfb->fbpages * mode / 8;
-    n_fbdirs = DIV_ROUND_UP(n_fbdirs, XC_PAGE_SIZE);
+    n_fbdirs = DIV_ROUND_UP(n_fbdirs, XEN_PAGE_SIZE);
 
     pgmfns = g_new0(xen_pfn_t, n_fbdirs);
     fbmfns = g_new0(xen_pfn_t, xenfb->fbpages);
@@ -528,8 +528,8 @@ static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim,
 {
     size_t mfn_sz = sizeof_field(struct xenfb_page, pd[0]);
     size_t pd_len = sizeof_field(struct xenfb_page, pd) / mfn_sz;
-    size_t fb_pages = pd_len * XC_PAGE_SIZE / mfn_sz;
-    size_t fb_len_max = fb_pages * XC_PAGE_SIZE;
+    size_t fb_pages = pd_len * XEN_PAGE_SIZE / mfn_sz;
+    size_t fb_len_max = fb_pages * XEN_PAGE_SIZE;
     int max_width, max_height;
 
     if (fb_len_lim > fb_len_max) {
@@ -930,7 +930,7 @@ static void fb_disconnect(struct XenLegacyDevice *xendev)
      *   instead.  This releases the guest pages and keeps qemu happy.
      */
     qemu_xen_foreignmem_unmap(fb->pixels, fb->fbpages);
-    fb->pixels = mmap(fb->pixels, fb->fbpages * XC_PAGE_SIZE,
+    fb->pixels = mmap(fb->pixels, fb->fbpages * XEN_PAGE_SIZE,
                       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON,
                       -1, 0);
     if (fb->pixels == MAP_FAILED) {
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
index 166d03787d..9bbf6599fc 100644
--- a/hw/net/xen_nic.c
+++ b/hw/net/xen_nic.c
@@ -145,7 +145,7 @@ static void net_tx_packets(struct XenNetDev *netdev)
                 continue;
             }
 
-            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
+            if ((txreq.offset + txreq.size) > XEN_PAGE_SIZE) {
                 xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
                 net_tx_error(netdev, &txreq, rc);
                 continue;
@@ -171,7 +171,7 @@ static void net_tx_packets(struct XenNetDev *netdev)
             if (txreq.flags & NETTXF_csum_blank) {
                 /* have read-only mapping -> can't fill checksum in-place */
                 if (!tmpbuf) {
-                    tmpbuf = g_malloc(XC_PAGE_SIZE);
+                    tmpbuf = g_malloc(XEN_PAGE_SIZE);
                 }
                 memcpy(tmpbuf, page + txreq.offset, txreq.size);
                 net_checksum_calculate(tmpbuf, txreq.size, CSUM_ALL);
@@ -243,9 +243,9 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
     if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
         return 0;
     }
-    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
+    if (size > XEN_PAGE_SIZE - NET_IP_ALIGN) {
         xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
-                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
+                      (unsigned long)size, XEN_PAGE_SIZE - NET_IP_ALIGN);
         return -1;
     }
 
@@ -348,8 +348,8 @@ static int net_connect(struct XenLegacyDevice *xendev)
         netdev->txs = NULL;
         return -1;
     }
-    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
-    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);
+    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XEN_PAGE_SIZE);
+    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XEN_PAGE_SIZE);
 
     xen_be_bind_evtchn(&netdev->xendev);
 
diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c
index a770a64cb4..66cb3f7c24 100644
--- a/hw/usb/xen-usb.c
+++ b/hw/usb/xen-usb.c
@@ -161,7 +161,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
 
     for (i = 0; i < nr_segs; i++) {
         if ((unsigned)usbback_req->req.seg[i].offset +
-            (unsigned)usbback_req->req.seg[i].length > XC_PAGE_SIZE) {
+            (unsigned)usbback_req->req.seg[i].length > XEN_PAGE_SIZE) {
             xen_pv_printf(xendev, 0, "segment crosses page boundary\n");
             return -EINVAL;
         }
@@ -185,7 +185,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
 
         for (i = 0; i < usbback_req->nr_buffer_segs; i++) {
             seg = usbback_req->req.seg + i;
-            addr = usbback_req->buffer + i * XC_PAGE_SIZE + seg->offset;
+            addr = usbback_req->buffer + i * XEN_PAGE_SIZE + seg->offset;
             qemu_iovec_add(&usbback_req->packet.iov, addr, seg->length);
         }
     }
@@ -902,8 +902,8 @@ static int usbback_connect(struct XenLegacyDevice *xendev)
     usbif->conn_ring_ref = conn_ring_ref;
     urb_sring = usbif->urb_sring;
     conn_sring = usbif->conn_sring;
-    BACK_RING_INIT(&usbif->urb_ring, urb_sring, XC_PAGE_SIZE);
-    BACK_RING_INIT(&usbif->conn_ring, conn_sring, XC_PAGE_SIZE);
+    BACK_RING_INIT(&usbif->urb_ring, urb_sring, XEN_PAGE_SIZE);
+    BACK_RING_INIT(&usbif->conn_ring, conn_sring, XEN_PAGE_SIZE);
 
     xen_be_bind_evtchn(xendev);
 
-- 
2.35.3


