From: "Jan Beulich"
Subject: [PATCH v2] x86: re-order struct arch_domain fields
Date: Tue, 10 Feb 2015 11:34:20 +0000
Message-ID: <54D9FACC020000780005E907@mail.emea.novell.com>
To: xen-devel
Cc: Andrew Cooper, Keir Fraser

... to reduce padding holes.

Signed-off-by: Jan Beulich
---
v2: Drop vtsc_usercount movement to struct pv_domain.
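
Purely for illustration (not part of the patch, and using made-up names
rather than the real fields): a byte-wide member placed between larger,
alignment-sensitive members costs a whole alignment slot, whereas placing
it next to another byte-wide member lets it reuse an existing hole. A
minimal standalone sketch, assuming the usual x86-64 ABI padding rules
(the hypervisor's bool_t is byte-wide, so it behaves like the uint8_t
used here):

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-wide flag wedged between 8-byte-aligned neighbours: the
     * compiler pads both after the flag and at the end of the struct. */
    struct before {
        uint8_t  flag;   /* 1 byte, then 7 bytes of padding  */
        void    *caps;   /* 8 bytes                          */
        uint32_t cf8;    /* 4 bytes                          */
        uint8_t  idx;    /* 1 byte, then 3 bytes of padding  */
    };                   /* sizeof == 24 on x86-64           */

    /* Same members; the flag now fills the hole next to the other byte. */
    struct after {
        void    *caps;   /* 8 bytes                          */
        uint32_t cf8;    /* 4 bytes                          */
        uint8_t  idx;    /* 1 byte                           */
        uint8_t  flag;   /* 1 byte, then 2 bytes of padding  */
    };                   /* sizeof == 16 on x86-64           */

    int main(void)
    {
        printf("before: %zu bytes, after: %zu bytes\n",
               sizeof(struct before), sizeof(struct after));
        return 0;
    }

With gcc on x86-64 this prints 24 vs. 16 bytes; moving e.g. s3_integrity
next to cmos_idx in the hunks below is after the same effect inside
struct arch_domain.
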
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -255,13 +255,16 @@ struct arch_domain
 
     unsigned int hv_compat_vstart;
 
-    bool_t s3_integrity;
+    /* Maximum physical-address bitwidth supported by this guest. */
+    unsigned int physaddr_bitsize;
 
     /* I/O-port admin-specified access capabilities. */
     struct rangeset *ioport_caps;
     uint32_t pci_cf8;
     uint8_t cmos_idx;
 
+    bool_t s3_integrity;
+
     struct list_head pdev_list;
 
     union {
@@ -275,6 +278,18 @@ struct arch_domain
      * page_alloc lock */
     int page_alloc_unlock_level;
 
+    /* Continuable domain_relinquish_resources(). */
+    enum {
+        RELMEM_not_started,
+        RELMEM_shared,
+        RELMEM_xen,
+        RELMEM_l4,
+        RELMEM_l3,
+        RELMEM_l2,
+        RELMEM_done,
+    } relmem;
+    struct page_list_head relmem_list;
+
     /* nestedhvm: translate l2 guest physical to host physical */
     struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
     mm_lock_t nested_p2m_lock;
@@ -282,27 +297,16 @@ struct arch_domain
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;
 
-    /* Maximum physical-address bitwidth supported by this guest. */
-    unsigned int physaddr_bitsize;
-
     /* Is a 32-bit PV (non-HVM) guest? */
     bool_t is_32bit_pv;
     /* Is shared-info page in 32-bit format? */
     bool_t has_32bit_shinfo;
+
     /* Domain cannot handle spurious page faults? */
     bool_t suppress_spurious_page_faults;
 
-    /* Continuable domain_relinquish_resources(). */
-    enum {
-        RELMEM_not_started,
-        RELMEM_shared,
-        RELMEM_xen,
-        RELMEM_l4,
-        RELMEM_l3,
-        RELMEM_l2,
-        RELMEM_done,
-    } relmem;
-    struct page_list_head relmem_list;
+    /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
+    bool_t auto_unmask;
 
     cpuid_input_t *cpuids;
 
@@ -329,15 +333,12 @@ struct arch_domain
     struct e820entry *e820;
     unsigned int nr_e820;
 
-    /* set auto_unmask to 1 if you want PHYSDEVOP_eoi to automatically
-     * unmask the event channel */
-    bool_t auto_unmask;
+    unsigned int psr_rmid; /* RMID assigned to the domain for CMT */
+
     /* Shared page for notifying that explicit PIRQ EOI is required. */
     unsigned long *pirq_eoi_map;
     unsigned long pirq_eoi_map_mfn;
-
-    unsigned int psr_rmid; /* RMID assigned to the domain for CMT */
-} __cacheline_aligned;
+};
 
 #define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
 
@@ -493,7 +494,7 @@ struct arch_vcpu
         unsigned long eip;
     } mem_event;
 
-} __cacheline_aligned;
+};
 
 smap_check_policy_t smap_policy_change(struct vcpu *v,
                                        smap_check_policy_t new_policy);