From: "Tian, Kevin" <kevin.tian@intel.com>
To: Jan Beulich <jbeulich@suse.com>,
	"xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Cooper, Andrew" <andrew.cooper3@citrix.com>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
	"George Dunlap" <george.dunlap@citrix.com>
Subject: RE: [PATCH 07/16] x86/P2M: p2m_{alloc,free}_ptp() and p2m_alloc_table() are HVM-only
Date: Wed, 7 Jul 2021 01:35:30 +0000	[thread overview]
Message-ID: <BN9PR11MB54330E8E2A36577B1C39217B8C1A9@BN9PR11MB5433.namprd11.prod.outlook.com> (raw)
In-Reply-To: <fcd39881-e225-d1a7-a168-9aa9fd9a7735@suse.com>

> From: Jan Beulich <jbeulich@suse.com>
> Sent: Tuesday, July 6, 2021 12:09 AM
> 
> This also includes the two p2m-related fields.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

> 
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -94,7 +94,9 @@ static int p2m_initialise(struct domain
>      int ret = 0;
> 
>      mm_rwlock_init(&p2m->lock);
> +#ifdef CONFIG_HVM
>      INIT_PAGE_LIST_HEAD(&p2m->pages);
> +#endif
> 
>      p2m->domain = d;
>      p2m->default_access = p2m_access_rwx;
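
(Not an objection, just spelling out the pattern for !HVM readers: once p2m->pages is HVM-only, every touch point needs a guard like this one. A rough sketch of p2m_initialise() with the hunk applied; the signature and trailing context are filled in from the surrounding code, so treat it as illustrative rather than verbatim:)

    static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
    {
        int ret = 0;

        mm_rwlock_init(&p2m->lock);
    #ifdef CONFIG_HVM
        /* p2m->pages only exists in HVM builds from this patch on. */
        INIT_PAGE_LIST_HEAD(&p2m->pages);
    #endif

        p2m->domain = d;
        p2m->default_access = p2m_access_rwx;

        /* ... remainder unchanged ... */

        return ret;
    }
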
> @@ -628,6 +630,7 @@ struct page_info *p2m_get_page_from_gfn(
>  }
> 
>  #ifdef CONFIG_HVM
> +
>  /* Returns: 0 for success, -errno for failure */
>  int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
>                    unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
> @@ -667,7 +670,6 @@ int p2m_set_entry(struct p2m_domain *p2m
> 
>      return rc;
>  }
> -#endif
> 
>  mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level)
>  {
> @@ -746,6 +748,8 @@ int p2m_alloc_table(struct p2m_domain *p
>      return 0;
>  }
> 
> +#endif /* CONFIG_HVM */
> +
>  /*
>   * hvm fixme: when adding support for pvh non-hardware domains, this path must
>   * cleanup any foreign p2m types (release refcnts on them).
> @@ -754,7 +758,9 @@ void p2m_teardown(struct p2m_domain *p2m
>  /* Return all the p2m pages to Xen.
>   * We know we don't have any extra mappings to these pages */
>  {
> +#ifdef CONFIG_HVM
>      struct page_info *pg;
> +#endif
>      struct domain *d;
> 
>      if (p2m == NULL)
> @@ -763,11 +769,16 @@ void p2m_teardown(struct p2m_domain *p2m
>      d = p2m->domain;
> 
>      p2m_lock(p2m);
> +
>      ASSERT(atomic_read(&d->shr_pages) == 0);
> +
> +#ifdef CONFIG_HVM
>      p2m->phys_table = pagetable_null();
> 
>      while ( (pg = page_list_remove_head(&p2m->pages)) )
>          d->arch.paging.free_page(d, pg);
> +#endif
> +
>      p2m_unlock(p2m);
>  }
> 
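
(To make the end result easier to see, here is a rough reconstruction of p2m_teardown() with the guards in place; context elided by the diff is marked with comments, so this is a sketch rather than the exact tree contents:)

    void p2m_teardown(struct p2m_domain *p2m)
    /* Return all the p2m pages to Xen.
     * We know we don't have any extra mappings to these pages */
    {
    #ifdef CONFIG_HVM
        struct page_info *pg;
    #endif
        struct domain *d;

        if (p2m == NULL)
            return;  /* early exit assumed; elided in the hunk */

        d = p2m->domain;

        p2m_lock(p2m);

        ASSERT(atomic_read(&d->shr_pages) == 0);

    #ifdef CONFIG_HVM
        /* Only HVM builds keep a root table and a page list to free. */
        p2m->phys_table = pagetable_null();

        while ( (pg = page_list_remove_head(&p2m->pages)) )
            d->arch.paging.free_page(d, pg);
    #endif

        p2m_unlock(p2m);
    }
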
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -2700,8 +2700,10 @@ int shadow_enable(struct domain *d, u32
>   out_locked:
>      paging_unlock(d);
>   out_unlocked:
> +#ifdef CONFIG_HVM
>      if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
>          p2m_teardown(p2m);
> +#endif
>      if ( rv != 0 && pg != NULL )
>      {
>          pg->count_info &= ~PGC_count_mask;
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -339,12 +339,14 @@ static uint64_t domain_pgd_maddr(struct
> 
>      ASSERT(spin_is_locked(&hd->arch.mapping_lock));
> 
> +#ifdef CONFIG_HVM
>      if ( iommu_use_hap_pt(d) )
>      {
>          pagetable_t pgt = p2m_get_pagetable(p2m_get_hostp2m(d));
> 
>          return pagetable_get_paddr(pgt);
>      }
> +#endif
> 
>      if ( !hd->arch.vtd.pgd_maddr )
>      {
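
(Same picture on the VT-d side: non-HVM builds never consider sharing the CPU page tables with the IOMMU and always fall through to the IOMMU's own tables. A sketch of the resulting flow, with the argument list and the rest of the function elided, so illustrative only:)

    static uint64_t domain_pgd_maddr(struct domain *d /* , ... */)
    {
        /* ... hd set up from d earlier in the function ... */

        ASSERT(spin_is_locked(&hd->arch.mapping_lock));

    #ifdef CONFIG_HVM
        /* Sharing page tables with EPT only makes sense for HVM guests. */
        if ( iommu_use_hap_pt(d) )
        {
            pagetable_t pgt = p2m_get_pagetable(p2m_get_hostp2m(d));

            return pagetable_get_paddr(pgt);
        }
    #endif

        /* Otherwise use (and, if necessary, build) the IOMMU's own tables. */
        if ( !hd->arch.vtd.pgd_maddr )
        {
            /* ... allocate/populate as before ... */
        }

        /* ... */
    }
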
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -202,9 +202,6 @@ struct p2m_domain {
>      /* Lock that protects updates to the p2m */
>      mm_rwlock_t           lock;
> 
> -    /* Shadow translated domain: p2m mapping */
> -    pagetable_t        phys_table;
> -
>      /*
>       * Same as a domain's dirty_cpumask but limited to
>       * this p2m and those physical cpus whose vcpu's are in
> @@ -223,9 +220,6 @@ struct p2m_domain {
>       */
>      p2m_access_t default_access;
> 
> -    /* Pages used to construct the p2m */
> -    struct page_list_head pages;
> -
>      /* Host p2m: Log-dirty ranges registered for the domain. */
>      struct rangeset   *logdirty_ranges;
> 
> @@ -233,6 +227,12 @@ struct p2m_domain {
>      bool               global_logdirty;
> 
>  #ifdef CONFIG_HVM
> +    /* Translated domain: p2m mapping */
> +    pagetable_t        phys_table;
> +
> +    /* Pages used to construct the p2m */
> +    struct page_list_head pages;
> +
>      /* Alternate p2m: count of vcpu's currently using this p2m. */
>      atomic_t           active_vcpus;
> 
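
(For clarity, the net effect on struct p2m_domain is that phys_table and pages simply move into the already existing CONFIG_HVM section, next to active_vcpus, roughly as below; elisions approximate:)

    struct p2m_domain {
        /* Lock that protects updates to the p2m */
        mm_rwlock_t           lock;

        /* ... dirty_cpumask, default_access, log-dirty fields, ... */

    #ifdef CONFIG_HVM
        /* Translated domain: p2m mapping */
        pagetable_t        phys_table;

        /* Pages used to construct the p2m */
        struct page_list_head pages;

        /* Alternate p2m: count of vcpu's currently using this p2m. */
        atomic_t           active_vcpus;

        /* ... further HVM-only members ... */
    #endif
        /* ... */
    };
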


Thread overview: 50+ messages
2021-07-05 16:03 [PATCH 00/16] x86/mm: large parts of P2M code and struct p2m_domain are HVM-only Jan Beulich
2021-07-05 16:05 ` Jan Beulich
2021-07-05 16:05 ` [PATCH 01/16] x86/P2M: rename p2m_remove_page() Jan Beulich
2022-02-04 21:54   ` George Dunlap
2022-02-07  9:20     ` Jan Beulich
2021-07-05 16:06 ` [PATCH 02/16] x86/P2M: introduce p2m_{add,remove}_page() Jan Beulich
2021-07-05 17:47   ` Paul Durrant
2021-07-06  7:05     ` Jan Beulich
2022-02-04 22:07   ` George Dunlap
2022-02-07  9:38     ` Jan Beulich
2022-02-07 15:49       ` George Dunlap
2021-07-05 16:06 ` [PATCH 03/16] x86/P2M: drop a few CONFIG_HVM Jan Beulich
2022-02-04 22:13   ` George Dunlap
2022-02-07  9:51     ` Jan Beulich
2021-07-05 16:07 ` [PATCH 04/16] x86/P2M: move map_domain_gfn() (again) Jan Beulich
2022-02-04 22:17   ` George Dunlap
2021-07-05 16:07 ` [PATCH 05/16] x86/mm: move guest_physmap_{add,remove}_page() Jan Beulich
2022-02-05 21:06   ` George Dunlap
2021-07-05 16:07 ` [PATCH 06/16] x86/mm: split set_identity_p2m_entry() into PV and HVM parts Jan Beulich
2022-02-05 21:09   ` George Dunlap
2021-07-05 16:09 ` [PATCH 07/16] x86/P2M: p2m_{alloc,free}_ptp() and p2m_alloc_table() are HVM-only Jan Beulich
2021-07-07  1:35   ` Tian, Kevin [this message]
2022-02-05 21:17   ` George Dunlap
2021-07-05 16:09 ` [PATCH 08/16] x86/P2M: PoD, altp2m, and nested-p2m " Jan Beulich
2022-02-05 21:29   ` George Dunlap
2022-02-07 10:11     ` Jan Beulich
2022-02-07 14:45       ` George Dunlap
2022-02-07 15:23         ` Jan Beulich
2021-07-05 16:10 ` [PATCH 09/16] x86/P2M: split out init/teardown functions Jan Beulich
2022-02-05 21:31   ` George Dunlap
2021-07-05 16:10 ` [PATCH 10/16] x86/P2M: p2m_get_page_from_gfn() is HVM-only Jan Beulich
2022-02-14 14:26   ` George Dunlap
2021-07-05 16:12 ` [PATCH 11/16] x86/P2M: derive a HVM-only variant from __get_gfn_type_access() Jan Beulich
2022-02-14 15:12   ` George Dunlap
2022-02-14 15:20     ` Jan Beulich
2021-07-05 16:12 ` [PATCH 12/16] x86/p2m: re-arrange {,__}put_gfn() Jan Beulich
2022-02-14 15:17   ` George Dunlap
2021-07-05 16:13 ` [PATCH 13/16] shr_pages field is MEM_SHARING-only Jan Beulich
2021-07-06 12:42   ` Tamas K Lengyel
2022-02-14 15:36   ` George Dunlap
2021-07-05 16:14 ` [PATCH 14/16] paged_pages field is MEM_PAGING-only Jan Beulich
2021-07-06 12:44   ` Tamas K Lengyel
2022-02-14 15:38   ` George Dunlap
2021-07-05 16:14 ` [PATCH 15/16] x86/P2M: p2m.c is HVM-only Jan Beulich
2022-02-14 15:39   ` George Dunlap
2021-07-05 16:15 ` [PATCH 16/16] x86/P2M: the majority for struct p2m_domain's fields are HVM-only Jan Beulich
2021-07-05 17:49   ` Paul Durrant
2022-02-14 15:51   ` George Dunlap
2022-02-14 16:07     ` Jan Beulich
2022-02-16  7:54       ` Jan Beulich
