From: "Sahita, Ravi" <ravi.sahita@intel.com>
To: George Dunlap <George.Dunlap@eu.citrix.com>,
	"White, Edmund H" <edmund.h.white@intel.com>
Cc: Wei Liu <wei.liu2@citrix.com>, Tim Deegan <tim@xen.org>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	"xen-devel@lists.xen.org" <xen-devel@lists.xen.org>,
	Jan Beulich <jbeulich@suse.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	"tlengyel@novetta.com" <tlengyel@novetta.com>,
	Daniel De Graaf <dgdegra@tycho.nsa.gov>
Subject: Re: [PATCH v5 10/15] x86/altp2m: add remaining support routines.
Date: Fri, 17 Jul 2015 21:01:22 +0000	[thread overview]
Message-ID: <DBC12B0F5509554280826E40BCDEE8BE54FD9108@ORSMSX104.amr.corp.intel.com> (raw)
In-Reply-To: <CAFLBxZbLTd+-uqMZo4DoscCj9ns0mA9U+dK-+ZV=2Ro_AMS5bg@mail.gmail.com>

>From: dunlapg@gmail.com [mailto:dunlapg@gmail.com] On Behalf Of George Dunlap
>Sent: Thursday, July 16, 2015 7:45 AM
>
>On Tue, Jul 14, 2015 at 1:14 AM, Ed White <edmund.h.white@intel.com>
>wrote:
>> Add the remaining routines required to support enabling the alternate
>> p2m functionality.
>>
>> Signed-off-by: Ed White <edmund.h.white@intel.com>
>>
>> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> ---
>>  xen/arch/x86/hvm/hvm.c           |  58 +++++-
>>  xen/arch/x86/mm/hap/Makefile     |   1 +
>>  xen/arch/x86/mm/hap/altp2m_hap.c |  98 ++++++++++
>>  xen/arch/x86/mm/p2m-ept.c        |   3 +
>>  xen/arch/x86/mm/p2m.c            | 385 +++++++++++++++++++++++++++++++++++++++
>>  xen/include/asm-x86/hvm/altp2m.h |   4 +
>>  xen/include/asm-x86/p2m.h        |  33 ++++
>>  7 files changed, 576 insertions(+), 6 deletions(-)
>>  create mode 100644 xen/arch/x86/mm/hap/altp2m_hap.c
>>
>> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
>> index 38deedc..a9f4b1b 100644
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -2802,10 +2802,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>      mfn_t mfn;
>>      struct vcpu *curr = current;
>>      struct domain *currd = curr->domain;
>> -    struct p2m_domain *p2m;
>> +    struct p2m_domain *p2m, *hostp2m;
>>      int rc, fall_through = 0, paged = 0;
>>      int sharing_enomem = 0;
>>      vm_event_request_t *req_ptr = NULL;
>> +    bool_t ap2m_active = 0;
>>
>>      /* On Nested Virtualization, walk the guest page table.
>>       * If this succeeds, all is fine.
>> @@ -2865,11 +2866,31 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>          goto out;
>>      }
>>
>> -    p2m = p2m_get_hostp2m(currd);
>> -    mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
>> +    ap2m_active = altp2m_active(currd);
>> +
>> +    /* Take a lock on the host p2m speculatively, to avoid potential
>> +     * locking order problems later and to handle unshare etc.
>> +     */
>> +    hostp2m = p2m_get_hostp2m(currd);
>> +    mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
>>                                P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
>>                                NULL);
>>
>> +    if ( ap2m_active )
>> +    {
>> +        if ( altp2m_hap_nested_page_fault(curr, gpa, gla, npfec, &p2m) == 1 )
>> +        {
>> +            /* entry was lazily copied from host -- retry */
>> +            __put_gfn(hostp2m, gfn);
>> +            rc = 1;
>> +            goto out;
>> +        }
>> +
>> +        mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
>> +    }
>> +    else
>> +        p2m = hostp2m;
>> +
>>      /* Check access permissions first, then handle faults */
>>      if ( mfn_x(mfn) != INVALID_MFN )
>>      {
>> @@ -2909,6 +2930,20 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>
>>          if ( violation )
>>          {
>> +            /* Should #VE be emulated for this fault? */
>> +            if ( p2m_is_altp2m(p2m) && !cpu_has_vmx_virt_exceptions )
>> +            {
>> +                bool_t sve;
>> +
>> +                p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, &sve);
>> +
>> +                if ( !sve && altp2m_vcpu_emulate_ve(curr) )
>> +                {
>> +                    rc = 1;
>> +                    goto out_put_gfn;
>> +                }
>> +            }
>> +
>>              if ( p2m_mem_access_check(gpa, gla, npfec, &req_ptr) )
>>              {
>>                  fall_through = 1;
>> @@ -2928,7 +2963,9 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>           (npfec.write_access &&
>>            (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
>>      {
>> -        put_gfn(currd, gfn);
>> +        __put_gfn(p2m, gfn);
>> +        if ( ap2m_active )
>> +            __put_gfn(hostp2m, gfn);
>>
>>          rc = 0;
>>          if ( unlikely(is_pvh_domain(currd)) )
>> @@ -2957,6 +2994,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>      /* Spurious fault? PoD and log-dirty also take this path. */
>>      if ( p2m_is_ram(p2mt) )
>>      {
>> +        rc = 1;
>>          /*
>>           * Page log dirty is always done with order 0. If this mfn resides in
>>           * a large page, we do not change other pages type within that large
>> @@ -2965,9 +3003,15 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>          if ( npfec.write_access )
>>          {
>>              paging_mark_dirty(currd, mfn_x(mfn));
>> +            /* If p2m is really an altp2m, unlock here to avoid lock ordering
>> +             * violation when the change below is propagated from host p2m */
>> +            if ( ap2m_active )
>> +                __put_gfn(p2m, gfn);
>>              p2m_change_type_one(currd, gfn, p2m_ram_logdirty, p2m_ram_rw);
>> +            __put_gfn(ap2m_active ? hostp2m : p2m, gfn);
>> +
>> +            goto out;
>>          }
>> -        rc = 1;
>>          goto out_put_gfn;
>>      }
>>
>> @@ -2977,7 +3021,9 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>      rc = fall_through;
>>
>>  out_put_gfn:
>> -    put_gfn(currd, gfn);
>> +    __put_gfn(p2m, gfn);
>> +    if ( ap2m_active )
>> +        __put_gfn(hostp2m, gfn);
>>  out:
>>      /* All of these are delayed until we exit, since we might
>>       * sleep on event ring wait queues, and we must not hold
>> diff --git a/xen/arch/x86/mm/hap/Makefile b/xen/arch/x86/mm/hap/Makefile
>> index 68f2bb5..216cd90 100644
>> --- a/xen/arch/x86/mm/hap/Makefile
>> +++ b/xen/arch/x86/mm/hap/Makefile
>> @@ -4,6 +4,7 @@ obj-y += guest_walk_3level.o
>>  obj-$(x86_64) += guest_walk_4level.o
>>  obj-y += nested_hap.o
>>  obj-y += nested_ept.o
>> +obj-y += altp2m_hap.o
>>
>>  guest_walk_%level.o: guest_walk.c Makefile
>>         $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
>> diff --git a/xen/arch/x86/mm/hap/altp2m_hap.c b/xen/arch/x86/mm/hap/altp2m_hap.c
>> new file mode 100644
>> index 0000000..52c7877
>> --- /dev/null
>> +++ b/xen/arch/x86/mm/hap/altp2m_hap.c
>> @@ -0,0 +1,98 @@
>> +/******************************************************************************
>> + * arch/x86/mm/hap/altp2m_hap.c
>> + *
>> + * Copyright (c) 2014 Intel Corporation.
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License as published by
>> + * the Free Software Foundation; either version 2 of the License, or
>> + * (at your option) any later version.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program; if not, write to the Free Software
>> + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
>> + */
>> +
>> +#include <asm/domain.h>
>> +#include <asm/page.h>
>> +#include <asm/paging.h>
>> +#include <asm/p2m.h>
>> +#include <asm/hap.h>
>> +#include <asm/hvm/altp2m.h>
>> +
>> +#include "private.h"
>> +
>> +/*
>> + * If the fault is for a not present entry:
>> + *     if the entry in the host p2m has a valid mfn, copy it and retry
>> + *     else indicate that outer handler should handle fault
>> + *
>> + * If the fault is for a present entry:
>> + *     indicate that outer handler should handle fault
>> + */
>> +
>> +int
>> +altp2m_hap_nested_page_fault(struct vcpu *v, paddr_t gpa,
>> +                                unsigned long gla, struct npfec npfec,
>> +                                struct p2m_domain **ap2m)
>> +{
>> +    struct p2m_domain *hp2m = p2m_get_hostp2m(v->domain);
>> +    p2m_type_t p2mt;
>> +    p2m_access_t p2ma;
>> +    unsigned int page_order;
>> +    gfn_t gfn = _gfn(paddr_to_pfn(gpa));
>> +    unsigned long mask;
>> +    mfn_t mfn;
>> +    int rv;
>> +
>> +    *ap2m = p2m_get_altp2m(v);
>> +
>> +    mfn = get_gfn_type_access(*ap2m, gfn_x(gfn), &p2mt, &p2ma,
>> +                              0, &page_order);
>> +    __put_gfn(*ap2m, gfn_x(gfn));
>> +
>> +    if ( mfn_x(mfn) != INVALID_MFN )
>> +        return 0;
>> +
>> +    mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
>> +                              P2M_ALLOC | P2M_UNSHARE, &page_order);
>> +    put_gfn(hp2m->domain, gfn_x(gfn));
>
>So the reason I said my comments weren't blockers for v3 was so that it could
>be checked in before the code freeze last Friday if that turned out to be
>possible.
>
>Please do at least give this function a name that reflects what it does (i.e., try
>to propagate changes from the host p2m to the altp2m), and change this
>put_gfn() to match the __put_gfn() above.
>
>I'd prefer it if you moved this into mm/p2m.c as well.
>
> -George

Will do
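
As a rough sketch (against the v5 hunk quoted above; the final function
name and whether it moves into mm/p2m.c are still to be settled), the
locking change you're asking for amounts to:

    -    put_gfn(hp2m->domain, gfn_x(gfn));
    +    __put_gfn(hp2m, gfn_x(gfn));

and the function will get a name that says it propagates entries from the
host p2m into the altp2m.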

Ravi

