* [PATCH] re-order struct domain fields
@ 2015-01-19 15:42 Jan Beulich
  2015-01-19 16:00 ` Tim Deegan
  2015-01-19 16:25 ` Ian Campbell
  0 siblings, 2 replies; 4+ messages in thread
From: Jan Beulich @ 2015-01-19 15:42 UTC (permalink / raw)
  To: xen-devel; +Cc: Ian Campbell, Keir Fraser, Ian Jackson, Tim Deegan

... to reduce padding holes.

I also wonder whether having independent spin locks side by side is
really a good thing cache-line-bouncing-wise.
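
A minimal sketch of the padding effect, using invented field types on an
LP64 target rather than the actual struct domain members:

    #include <stdio.h>

    /* A 4-byte member sandwiched between pointers leaves a 4-byte
     * hole after 'count' on LP64, and 'extra' adds 4 bytes of tail
     * padding: sizeof == 32. */
    struct holey {
        void        *a;
        unsigned int count;
        void        *b;
        unsigned int extra;
    };

    /* Grouping the two 4-byte members lets them share one 8-byte
     * slot, removing both holes: sizeof == 24. */
    struct reordered {
        void        *a;
        void        *b;
        unsigned int count;
        unsigned int extra;
    };

    int main(void)
    {
        printf("holey: %zu, reordered: %zu\n",
               sizeof(struct holey), sizeof(struct reordered));
        return 0;
    }

The pahole tool from the dwarves package reports such holes directly on
a compiled object.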

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -306,6 +306,9 @@ struct domain
 {
     domid_t          domain_id;
 
+    unsigned int     max_vcpus;
+    struct vcpu    **vcpu;
+
     shared_info_t   *shared_info;     /* shared data area */
 
     spinlock_t       domain_lock;
@@ -314,13 +317,11 @@ struct domain
     struct page_list_head page_list;  /* linked list */
     struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
     unsigned int     tot_pages;       /* number of pages currently possesed */
+    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */
     unsigned int     outstanding_pages; /* pages claimed but not possessed  */
     unsigned int     max_pages;       /* maximum value for tot_pages        */
     atomic_t         shr_pages;       /* number of shared pages             */
     atomic_t         paged_pages;     /* number of paged-out pages          */
-    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */
-
-    unsigned int     max_vcpus;
 
     /* Scheduling. */
     void            *sched_priv;    /* scheduler-specific data */
@@ -347,15 +348,19 @@ struct domain
      * Interrupt to event-channel mappings and other per-guest-pirq data.
      * Protected by the domain's event-channel spinlock.
      */
-    unsigned int     nr_pirqs;
     struct radix_tree_root pirq_tree;
-
-    /* I/O capabilities (access to IRQs and memory-mapped I/O). */
-    struct rangeset *iomem_caps;
-    struct rangeset *irq_caps;
+    unsigned int     nr_pirqs;
 
     enum guest_type guest_type;
 
+    /* Is this guest dying (i.e., a zombie)? */
+    enum { DOMDYING_alive, DOMDYING_dying, DOMDYING_dead } is_dying;
+
+    /* Domain is paused by controller software? */
+    int              controller_pause_count;
+
+    int32_t          time_offset_seconds;
+
 #ifdef HAS_PASSTHROUGH
     /* Does this guest need iommu mappings (-1 meaning "being set up")? */
     s8               need_iommu;
@@ -364,16 +369,14 @@ struct domain
     bool_t           auto_node_affinity;
     /* Is this guest fully privileged (aka dom0)? */
     bool_t           is_privileged;
-    /* Which guest this guest has privileges on */
-    struct domain   *target;
-    /* Is this guest being debugged by dom0? */
-    bool_t           debugger_attached;
-    /* Is this guest dying (i.e., a zombie)? */
-    enum { DOMDYING_alive, DOMDYING_dying, DOMDYING_dead } is_dying;
-    /* Domain is paused by controller software? */
-    int              controller_pause_count;
     /* Domain's VCPUs are pinned 1:1 to physical CPUs? */
     bool_t           is_pinned;
+    /* Non-migratable and non-restoreable? */
+    bool_t           disable_migrate;
+    /* Is this guest being debugged by dom0? */
+    bool_t           debugger_attached;
+    /* Which guest this guest has privileges on */
+    struct domain   *target;
 
     /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
 #if MAX_VIRT_CPUS <= BITS_PER_LONG
@@ -382,6 +385,10 @@ struct domain
     unsigned long   *poll_mask;
 #endif
 
+    /* I/O capabilities (access to IRQs and memory-mapped I/O). */
+    struct rangeset *iomem_caps;
+    struct rangeset *irq_caps;
+
     /* Guest has shut down (inc. reason code)? */
     spinlock_t       shutdown_lock;
     bool_t           is_shutting_down; /* in process of shutting down? */
@@ -390,15 +397,12 @@ struct domain
 
     /* If this is not 0, send suspend notification here instead of
      * raising DOM_EXC */
-    int              suspend_evtchn;
+    evtchn_port_t    suspend_evtchn;
 
     atomic_t         pause_count;
-
-    unsigned long    vm_assist;
-
     atomic_t         refcnt;
 
-    struct vcpu    **vcpu;
+    unsigned long    vm_assist;
 
     /* Bitmask of CPUs which are holding onto this domain's state. */
     cpumask_var_t    domain_dirty_cpumask;
@@ -418,7 +422,6 @@ struct domain
 
     /* OProfile support. */
     struct xenoprof *xenoprof;
-    int32_t time_offset_seconds;
 
     /* Domain watchdog. */
 #define NR_DOMAIN_WATCHDOG_TIMERS 2
@@ -439,9 +442,6 @@ struct domain
 
     struct lock_profile_qhead profile_head;
 
-    /* Non-migratable and non-restoreable? */
-    bool_t disable_migrate;
-
     /* Various mem_events */
     struct mem_event_per_domain *mem_event;
 





* Re: [PATCH] re-order struct domain fields
  2015-01-19 15:42 [PATCH] re-order struct domain fields Jan Beulich
@ 2015-01-19 16:00 ` Tim Deegan
  2015-01-19 16:25 ` Ian Campbell
  1 sibling, 0 replies; 4+ messages in thread
From: Tim Deegan @ 2015-01-19 16:00 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Ian Campbell, xen-devel, Keir Fraser, Ian Jackson

At 15:42 +0000 on 19 Jan (1421678566), Jan Beulich wrote:
> ... to reduce padding holes.
> 
> I also wonder whether having independent spin locks side by side is
> really a good thing cache-line-bouncing-wise.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

> @@ -390,15 +397,12 @@ struct domain
>  
>      /* If this is not 0, send suspend notification here instead of
>       * raising DOM_EXC */
> -    int              suspend_evtchn;
> +    evtchn_port_t    suspend_evtchn;
>  

This type change looks OK, but please mention it in the cset
description.

Acked-by: Tim Deegan <tim@xen.org>

Cheers,

Tim.


* Re: [PATCH] re-order struct domain fields
  2015-01-19 15:42 [PATCH] re-order struct domain fields Jan Beulich
  2015-01-19 16:00 ` Tim Deegan
@ 2015-01-19 16:25 ` Ian Campbell
  2015-01-19 16:35   ` Jan Beulich
  1 sibling, 1 reply; 4+ messages in thread
From: Ian Campbell @ 2015-01-19 16:25 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Keir Fraser, Ian Jackson, Tim Deegan

On Mon, 2015-01-19 at 15:42 +0000, Jan Beulich wrote:
> ... to reduce padding holes.
> 
> I also wonder whether having independent spin locks side by side is
> really a good thing cache-line-bouncing-wise.

AIUI the general wisdom is to put each spinlock next to the data it
protects (I suppose on the assumption that after acquiring the lock you
will next touch that data).

Is the problem that the domain_lock doesn't actually protect anything
in this struct, since it is more of an umbrella lock, so there is
nothing to put it next to?

You could move either it or the page lock below all the page counts
stuff.
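
As a sketch of that arrangement (invented field names and a stand-in
lock type rather than Xen's spinlock_t):

    #include <stdio.h>
    #include <stddef.h>

    typedef struct { volatile int lock; } spinlock_t;  /* stand-in */

    /* Each lock sits directly above the data it guards, and the
     * umbrella lock moves below the counters, away from the other
     * lock. */
    struct dom_sketch {
        spinlock_t   page_alloc_lock;   /* guards the counters below */
        unsigned int tot_pages;
        unsigned int max_pages;
        unsigned int xenheap_pages;

        spinlock_t   domain_lock;       /* umbrella lock, now separated */
    };

    int main(void)
    {
        printf("page_alloc_lock @ %zu, domain_lock @ %zu\n",
               offsetof(struct dom_sketch, page_alloc_lock),
               offsetof(struct dom_sketch, domain_lock));
        return 0;
    }

Note that even so the two locks here end up only 16 bytes apart, which
is the caveat Jan raises below.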

Ian.


* Re: [PATCH] re-order struct domain fields
  2015-01-19 16:25 ` Ian Campbell
@ 2015-01-19 16:35   ` Jan Beulich
  0 siblings, 0 replies; 4+ messages in thread
From: Jan Beulich @ 2015-01-19 16:35 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Keir Fraser, Ian Jackson, Tim Deegan

>>> On 19.01.15 at 17:25, <Ian.Campbell@eu.citrix.com> wrote:
> On Mon, 2015-01-19 at 15:42 +0000, Jan Beulich wrote:
>> ... to reduce padding holes.
>> 
>> I also wonder whether having independent spin locks side by side is
>> really a good thing cache-line-bouncing-wise.
> 
> AIUI the general wisdom is to put each spinlock next to the data it
> protects (I suppose on the assumption that after acquiring the lock you
> will next touch that data).
> 
> Is the problem is that the domain_lock doesn't actually protect anything
> in this struct since it is a more umbrella lock, so there is nothing to
> put next to it?

Kind of. The more relevant reason I didn't move it right away was that
two locks side by side are (on x86) efficient padding-wise.

> You could move either it or the page lock below all the page counts
> stuff.

Yeah, that would apparently be best, though I'm not sure it would
guarantee that they end up on different cache lines in all cases.
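
One way to make that explicit would be an alignment attribute; a sketch
with stand-in types (nothing of the sort is in this patch):

    #include <stdio.h>
    #include <stddef.h>

    #define CACHELINE 64

    typedef struct { volatile int lock; } spinlock_t;  /* stand-in */

    /* Forcing each lock onto its own cache line guarantees no
     * bouncing between them, at the cost of padding the struct. */
    struct dom_sketch {
        spinlock_t domain_lock     __attribute__((aligned(CACHELINE)));
        spinlock_t page_alloc_lock __attribute__((aligned(CACHELINE)));
    };

    int main(void)
    {
        printf("domain_lock @ %zu, page_alloc_lock @ %zu\n",
               offsetof(struct dom_sketch, domain_lock),
               offsetof(struct dom_sketch, page_alloc_lock));
        return 0;
    }

The cost is the padding this reintroduces, which cuts against the point
of the patch.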

Jan

