From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
	Paul Durrant <paul@xen.org>
Subject: [Xen-devel] [PATCH v7 2/3] AMD/IOMMU: allow callers to request allocate_buffer() to skip its memset()
Date: Thu, 26 Sep 2019 16:29:20 +0200
Message-ID: <b143bc0c-3d13-2127-be5d-b459d7b53c1e@suse.com>
In-Reply-To: <ba0fd598-9102-e765-e7f5-61e91d47b124@suse.com>

The command ring buffer doesn't need clearing up front in any event. A
subsequent patch will also want to avoid clearing the device tables.

While touching the function signatures anyway, also replace undue use of
fixed-width types, and extend this to deallocate_buffer() as well.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v7: New.
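
For clarity, a minimal user-space sketch of the resulting calling
convention (illustrative only, not the Xen code itself; malloc()/free()
stand in for __alloc_amd_iommu_tables()/__free_amd_iommu_tables(), and the
page-order handling is omitted):

/*
 * Sketch of the new allocate_buffer() interface with the optional clear.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static void *allocate_buffer(unsigned long alloc_size, const char *name,
                             bool clear)
{
    void *buffer = malloc(alloc_size);

    (void)name;            /* in Xen the name is only used for error reporting */

    if ( buffer && clear )
        memset(buffer, 0, alloc_size);

    return buffer;
}

int main(void)
{
    /* Command ring: doesn't need clearing up front, so skip the memset(). */
    void *cmd = allocate_buffer(4096, "Command Buffer", false);

    /* Event/PPR logs and the device table keep requesting the clear. */
    void *log = allocate_buffer(4096, "Event Log", true);

    free(cmd);
    free(log);
    return 0;
}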

--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -994,12 +994,12 @@ static unsigned int __init dt_alloc_size
                                              IOMMU_DEV_TABLE_ENTRY_SIZE);
 }
 
-static void __init deallocate_buffer(void *buf, uint32_t sz)
+static void __init deallocate_buffer(void *buf, unsigned long sz)
 {
-    int order = 0;
     if ( buf )
     {
-        order = get_order_from_bytes(sz);
+        unsigned int order = get_order_from_bytes(sz);
+
         __free_amd_iommu_tables(buf, order);
     }
 }
@@ -1012,10 +1012,11 @@ static void __init deallocate_ring_buffe
     ring_buf->tail = 0;
 }
 
-static void * __init allocate_buffer(uint32_t alloc_size, const char *name)
+static void *__init allocate_buffer(unsigned long alloc_size,
+                                    const char *name, bool clear)
 {
-    void * buffer;
-    int order = get_order_from_bytes(alloc_size);
+    void *buffer;
+    unsigned int order = get_order_from_bytes(alloc_size);
 
     buffer = __alloc_amd_iommu_tables(order);
 
@@ -1025,13 +1026,16 @@ static void * __init allocate_buffer(uin
         return NULL;
     }
 
-    memset(buffer, 0, PAGE_SIZE * (1UL << order));
+    if ( clear )
+        memset(buffer, 0, PAGE_SIZE << order);
+
     return buffer;
 }
 
-static void * __init allocate_ring_buffer(struct ring_buffer *ring_buf,
-                                          uint32_t entry_size,
-                                          uint64_t entries, const char *name)
+static void *__init allocate_ring_buffer(struct ring_buffer *ring_buf,
+                                         unsigned int entry_size,
+                                         unsigned long entries,
+                                         const char *name, bool clear)
 {
     ring_buf->head = 0;
     ring_buf->tail = 0;
@@ -1041,7 +1045,8 @@ static void * __init allocate_ring_buffe
     ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries *
                                                              entry_size);
     ring_buf->entries = ring_buf->alloc_size / entry_size;
-    ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name);
+    ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name, clear);
+
     return ring_buf->buffer;
 }
 
@@ -1050,21 +1055,23 @@ static void * __init allocate_cmd_buffer
     /* allocate 'command buffer' in power of 2 increments of 4K */
     return allocate_ring_buffer(&iommu->cmd_buffer, sizeof(cmd_entry_t),
                                 IOMMU_CMD_BUFFER_DEFAULT_ENTRIES,
-                                "Command Buffer");
+                                "Command Buffer", false);
 }
 
 static void * __init allocate_event_log(struct amd_iommu *iommu)
 {
     /* allocate 'event log' in power of 2 increments of 4K */
     return allocate_ring_buffer(&iommu->event_log, sizeof(event_entry_t),
-                                IOMMU_EVENT_LOG_DEFAULT_ENTRIES, "Event Log");
+                                IOMMU_EVENT_LOG_DEFAULT_ENTRIES, "Event Log",
+                                true);
 }
 
 static void * __init allocate_ppr_log(struct amd_iommu *iommu)
 {
     /* allocate 'ppr log' in power of 2 increments of 4K */
     return allocate_ring_buffer(&iommu->ppr_log, sizeof(ppr_entry_t),
-                                IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log");
+                                IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log",
+                                true);
 }
 
 /*
@@ -1257,7 +1264,7 @@ static int __init amd_iommu_setup_device
     {
         /* allocate 'device table' on a 4K boundary */
         dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings) =
-            allocate_buffer(dt_alloc_size(), "Device Table");
+            allocate_buffer(dt_alloc_size(), "Device Table", true);
     }
     if ( !dt )
         return -ENOMEM;


