xen-devel.lists.xenproject.org archive mirror
From: George Dunlap <dunlapg@umich.edu>
To: Yu Zhang <yu.c.zhang@linux.intel.com>
Cc: Keir Fraser <keir@xen.org>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	"xen-devel@lists.xen.org" <xen-devel@lists.xen.org>,
	Paul Durrant <paul.durrant@citrix.com>,
	"Lv, Zhiyuan" <zhiyuan.lv@intel.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: Re: [PATCH v2 1/3] x86/ioreq server: Add new functions to get/set memory types.
Date: Tue, 5 Apr 2016 14:57:38 +0100	[thread overview]
Message-ID: <CAFLBxZYHTMey-c6ivRtWr2=V_NLufYPGQR6dGcS8vY1EYPbj_w@mail.gmail.com> (raw)
In-Reply-To: <1459421618-5991-2-git-send-email-yu.c.zhang@linux.intel.com>

On Thu, Mar 31, 2016 at 11:53 AM, Yu Zhang <yu.c.zhang@linux.intel.com> wrote:
> For clarity this patch breaks the code to set/get memory types out
> of do_hvm_op() into dedicated functions: hvmop_set/get_mem_type().
> Also, for clarity, checks for whether a memory type change is allowed
> are broken out into a separate function called by hvmop_set_mem_type().
>
> There is no intentional functional change in this patch.
>
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> Cc: Keir Fraser <keir@xen.org>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: George Dunlap <george.dunlap@citrix.com>
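
(As an aside, for anyone wanting to poke at the refactored
HVMOP_set_mem_type path from dom0: below is a minimal, purely
illustrative sketch that goes through the libxc wrapper.  The helper
name, domid and gfn are made up, and it assumes the
xc_hvm_set_mem_type() prototype currently in tools/libxc.)

#include <xenctrl.h>

/*
 * Sketch only: flip one guest page to HVMMEM_mmio_write_dm and back to
 * HVMMEM_ram_rw via HVMOP_set_mem_type; both transitions are permitted
 * by hvm_allow_p2m_type_change().  The domid and gfn are hypothetical
 * values chosen for illustration.
 */
static int toggle_write_dm(domid_t domid, uint64_t gfn)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if ( !xch )
        return -1;

    rc = xc_hvm_set_mem_type(xch, domid, HVMMEM_mmio_write_dm, gfn, 1);
    if ( rc == 0 )
        rc = xc_hvm_set_mem_type(xch, domid, HVMMEM_ram_rw, gfn, 1);

    xc_interface_close(xch);
    return rc;
}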

> ---
>  xen/arch/x86/hvm/hvm.c | 284 +++++++++++++++++++++++++++----------------------
>  1 file changed, 159 insertions(+), 125 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 80d59ff..f700923 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -6547,6 +6547,61 @@ static int do_altp2m_op(
>      return rc;
>  }
>
> +static int hvmop_get_mem_type(
> +    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_mem_type_t) arg)
> +{
> +    struct xen_hvm_get_mem_type a;
> +    struct domain *d;
> +    p2m_type_t t;
> +    int rc;
> +
> +    if ( copy_from_guest(&a, arg, 1) )
> +        return -EFAULT;
> +
> +    d = rcu_lock_domain_by_any_id(a.domid);
> +    if ( d == NULL )
> +        return -ESRCH;
> +
> +    rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_get_mem_type);
> +    if ( rc )
> +        goto out;
> +
> +    rc = -EINVAL;
> +    if ( !is_hvm_domain(d) )
> +        goto out;
> +
> +    /*
> +     * Use get_gfn query as we are interested in the current
> +     * type, not in allocating or unsharing. That'll happen
> +     * on access.
> +     */
> +    get_gfn_query_unlocked(d, a.pfn, &t);
> +    if ( p2m_is_mmio(t) )
> +        a.mem_type =  HVMMEM_mmio_dm;
> +    else if ( t == p2m_mmio_write_dm )
> +        a.mem_type = HVMMEM_mmio_write_dm;
> +    else if ( p2m_is_readonly(t) )
> +        a.mem_type =  HVMMEM_ram_ro;
> +    else if ( p2m_is_ram(t) )
> +        a.mem_type =  HVMMEM_ram_rw;
> +    else if ( p2m_is_pod(t) )
> +        a.mem_type =  HVMMEM_ram_rw;
> +    else if ( p2m_is_grant(t) )
> +        a.mem_type =  HVMMEM_ram_rw;
> +    else
> +        a.mem_type =  HVMMEM_mmio_dm;
> +
> +    rc = -EFAULT;
> +    if ( __copy_to_guest(arg, &a, 1) )
> +        goto out;
> +    rc = 0;
> +
> + out:
> +    rcu_unlock_domain(d);
> +
> +    return rc;
> +}
> +
>  /*
>   * Note that this value is effectively part of the ABI, even if we don't need
>   * to make it a formal part of it: A guest suspended for migration in the
> @@ -6555,6 +6610,105 @@ static int do_altp2m_op(
>   */
>  #define HVMOP_op_mask 0xff
>
> +static bool_t hvm_allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
> +{
> +    if ( p2m_is_ram(old) ||
> +         (p2m_is_hole(old) && new == p2m_mmio_dm) ||
> +         (old == p2m_mmio_write_dm && new == p2m_ram_rw) )
> +        return 1;
> +
> +    return 0;
> +}
> +
> +static int hvmop_set_mem_type(
> +    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_mem_type_t) arg,
> +    unsigned long *iter)
> +{
> +    unsigned long start_iter = *iter;
> +    struct xen_hvm_set_mem_type a;
> +    struct domain *d;
> +    int rc;
> +
> +    /* Interface types to internal p2m types */
> +    static const p2m_type_t memtype[] = {
> +        [HVMMEM_ram_rw]  = p2m_ram_rw,
> +        [HVMMEM_ram_ro]  = p2m_ram_ro,
> +        [HVMMEM_mmio_dm] = p2m_mmio_dm,
> +        [HVMMEM_mmio_write_dm] = p2m_mmio_write_dm
> +    };
> +
> +    if ( copy_from_guest(&a, arg, 1) )
> +        return -EFAULT;
> +
> +    rc = rcu_lock_remote_domain_by_id(a.domid, &d);
> +    if ( rc != 0 )
> +        return rc;
> +
> +    rc = -EINVAL;
> +    if ( !is_hvm_domain(d) )
> +        goto out;
> +
> +    rc = xsm_hvm_control(XSM_DM_PRIV, d, HVMOP_set_mem_type);
> +    if ( rc )
> +        goto out;
> +
> +    rc = -EINVAL;
> +    if ( a.nr < start_iter ||
> +         ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
> +         ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
> +        goto out;
> +
> +    if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
> +        goto out;
> +
> +    while ( a.nr > start_iter )
> +    {
> +        unsigned long pfn = a.first_pfn + start_iter;
> +        p2m_type_t t;
> +
> +        get_gfn_unshare(d, pfn, &t);
> +        if ( p2m_is_paging(t) )
> +        {
> +            put_gfn(d, pfn);
> +            p2m_mem_paging_populate(d, pfn);
> +            rc = -EAGAIN;
> +            goto out;
> +        }
> +        if ( p2m_is_shared(t) )
> +        {
> +            put_gfn(d, pfn);
> +            rc = -EAGAIN;
> +            goto out;
> +        }
> +        if ( !hvm_allow_p2m_type_change(t, memtype[a.hvmmem_type]) )
> +        {
> +            put_gfn(d, pfn);
> +            goto out;
> +        }
> +
> +        rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
> +        put_gfn(d, pfn);
> +
> +        if ( rc )
> +            goto out;
> +
> +        /* Check for continuation if it's not the last iteration */
> +        if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
> +             hypercall_preempt_check() )
> +        {
> +            rc = -ERESTART;
> +            goto out;
> +        }
> +    }
> +    rc = 0;
> +
> + out:
> +    rcu_unlock_domain(d);
> +    *iter = start_iter;
> +
> +    return rc;
> +}
> +
>  long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
>  {
>      unsigned long start_iter, mask;
> @@ -6744,135 +6898,15 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
>      }
>
>      case HVMOP_get_mem_type:
> -    {
> -        struct xen_hvm_get_mem_type a;
> -        struct domain *d;
> -        p2m_type_t t;
> -
> -        if ( copy_from_guest(&a, arg, 1) )
> -            return -EFAULT;
> -
> -        d = rcu_lock_domain_by_any_id(a.domid);
> -        if ( d == NULL )
> -            return -ESRCH;
> -
> -        rc = xsm_hvm_param(XSM_TARGET, d, op);
> -        if ( unlikely(rc) )
> -            /* nothing */;
> -        else if ( likely(is_hvm_domain(d)) )
> -        {
> -            /* Use get_gfn query as we are interested in the current
> -             * type, not in allocating or unsharing. That'll happen
> -             * on access. */
> -            get_gfn_query_unlocked(d, a.pfn, &t);
> -            if ( p2m_is_mmio(t) )
> -                a.mem_type =  HVMMEM_mmio_dm;
> -            else if ( t == p2m_mmio_write_dm )
> -                a.mem_type = HVMMEM_mmio_write_dm;
> -            else if ( p2m_is_readonly(t) )
> -                a.mem_type =  HVMMEM_ram_ro;
> -            else if ( p2m_is_ram(t) )
> -                a.mem_type =  HVMMEM_ram_rw;
> -            else if ( p2m_is_pod(t) )
> -                a.mem_type =  HVMMEM_ram_rw;
> -            else if ( p2m_is_grant(t) )
> -                a.mem_type =  HVMMEM_ram_rw;
> -            else
> -                a.mem_type =  HVMMEM_mmio_dm;
> -            if ( __copy_to_guest(arg, &a, 1) )
> -                rc = -EFAULT;
> -        }
> -        else
> -            rc = -EINVAL;
> -
> -        rcu_unlock_domain(d);
> +        rc = hvmop_get_mem_type(
> +            guest_handle_cast(arg, xen_hvm_get_mem_type_t));
>          break;
> -    }
>
>      case HVMOP_set_mem_type:
> -    {
> -        struct xen_hvm_set_mem_type a;
> -        struct domain *d;
> -
> -        /* Interface types to internal p2m types */
> -        static const p2m_type_t memtype[] = {
> -            [HVMMEM_ram_rw]  = p2m_ram_rw,
> -            [HVMMEM_ram_ro]  = p2m_ram_ro,
> -            [HVMMEM_mmio_dm] = p2m_mmio_dm,
> -            [HVMMEM_mmio_write_dm] = p2m_mmio_write_dm
> -        };
> -
> -        if ( copy_from_guest(&a, arg, 1) )
> -            return -EFAULT;
> -
> -        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
> -        if ( rc != 0 )
> -            return rc;
> -
> -        rc = -EINVAL;
> -        if ( !is_hvm_domain(d) )
> -            goto setmemtype_fail;
> -
> -        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
> -        if ( rc )
> -            goto setmemtype_fail;
> -
> -        rc = -EINVAL;
> -        if ( a.nr < start_iter ||
> -             ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
> -             ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
> -            goto setmemtype_fail;
> -
> -        if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
> -            goto setmemtype_fail;
> -
> -        while ( a.nr > start_iter )
> -        {
> -            unsigned long pfn = a.first_pfn + start_iter;
> -            p2m_type_t t;
> -
> -            get_gfn_unshare(d, pfn, &t);
> -            if ( p2m_is_paging(t) )
> -            {
> -                put_gfn(d, pfn);
> -                p2m_mem_paging_populate(d, pfn);
> -                rc = -EAGAIN;
> -                goto setmemtype_fail;
> -            }
> -            if ( p2m_is_shared(t) )
> -            {
> -                put_gfn(d, pfn);
> -                rc = -EAGAIN;
> -                goto setmemtype_fail;
> -            }
> -            if ( !p2m_is_ram(t) &&
> -                 (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) &&
> -                 (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) )
> -            {
> -                put_gfn(d, pfn);
> -                goto setmemtype_fail;
> -            }
> -
> -            rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
> -            put_gfn(d, pfn);
> -            if ( rc )
> -                goto setmemtype_fail;
> -
> -            /* Check for continuation if it's not the last iteration */
> -            if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
> -                 hypercall_preempt_check() )
> -            {
> -                rc = -ERESTART;
> -                goto setmemtype_fail;
> -            }
> -        }
> -
> -        rc = 0;
> -
> -    setmemtype_fail:
> -        rcu_unlock_domain(d);
> +        rc = hvmop_set_mem_type(
> +            guest_handle_cast(arg, xen_hvm_set_mem_type_t),
> +            &start_iter);
>          break;
> -    }
>
>      case HVMOP_pagetable_dying:
>      {
> --
> 1.9.1
>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

Thread overview: 82+ messages
2016-03-31 10:53 [PATCH v2 0/3] x86/ioreq server: introduce HVMMEM_ioreq_server mem type Yu Zhang
2016-03-31 10:53 ` [PATCH v2 1/3] x86/ioreq server: Add new functions to get/set memory types Yu Zhang
2016-04-05 13:57   ` George Dunlap [this message]
2016-04-05 14:08     ` George Dunlap
2016-04-08 13:25   ` Andrew Cooper
2016-03-31 10:53 ` [PATCH v2 2/3] x86/ioreq server: Rename p2m_mmio_write_dm to p2m_ioreq_server Yu Zhang
2016-04-05 14:38   ` George Dunlap
2016-04-08 13:26   ` Andrew Cooper
2016-04-08 21:48   ` Jan Beulich
2016-04-18  8:41     ` Paul Durrant
2016-04-18  9:10       ` George Dunlap
2016-04-18  9:14         ` Wei Liu
2016-04-18  9:45           ` Paul Durrant
2016-04-18 16:40       ` Jan Beulich
2016-04-18 16:45         ` Paul Durrant
2016-04-18 16:47           ` Jan Beulich
2016-04-18 16:58             ` Paul Durrant
2016-04-19 11:02               ` Yu, Zhang
2016-04-19 11:15                 ` Paul Durrant
2016-04-19 11:38                   ` Yu, Zhang
2016-04-19 11:50                     ` Paul Durrant
2016-04-19 16:51                     ` Jan Beulich
2016-04-20 14:59                       ` Wei Liu
2016-04-20 15:02                 ` George Dunlap
2016-04-20 16:30                   ` George Dunlap
2016-04-20 16:52                     ` Jan Beulich
2016-04-20 16:58                       ` Paul Durrant
2016-04-20 17:06                         ` George Dunlap
2016-04-20 17:09                           ` Paul Durrant
2016-04-21 12:24                           ` Yu, Zhang
2016-04-21 13:31                             ` Paul Durrant
2016-04-21 13:48                               ` Yu, Zhang
2016-04-21 13:56                                 ` Paul Durrant
2016-04-21 14:09                                   ` George Dunlap
2016-04-20 17:08                       ` George Dunlap
2016-04-21 12:04                       ` Yu, Zhang
2016-03-31 10:53 ` [PATCH v2 3/3] x86/ioreq server: Add HVMOP to map guest ram with p2m_ioreq_server to an ioreq server Yu Zhang
     [not found]   ` <20160404082556.GC28633@deinos.phlegethon.org>
2016-04-05  6:01     ` Yu, Zhang
2016-04-06 17:13   ` George Dunlap
2016-04-07  7:01     ` Yu, Zhang
     [not found]       ` <CAFLBxZbLp2zWzCzQTaJNWbanQSmTJ57ZyTh0qaD-+YUn8o8pyQ@mail.gmail.com>
2016-04-08 10:39         ` George Dunlap
     [not found]         ` <5707839F.9060803@linux.intel.com>
2016-04-08 11:01           ` George Dunlap
2016-04-11 11:15             ` Yu, Zhang
2016-04-14 10:45               ` Yu, Zhang
2016-04-18 15:57                 ` Paul Durrant
2016-04-19  9:11                   ` Yu, Zhang
2016-04-19  9:21                     ` Paul Durrant
2016-04-19  9:44                       ` Yu, Zhang
2016-04-19 10:05                         ` Paul Durrant
2016-04-19 11:17                           ` Yu, Zhang
2016-04-19 11:47                             ` Paul Durrant
2016-04-19 11:59                               ` Yu, Zhang
2016-04-20 14:50                                 ` George Dunlap
2016-04-20 14:57                                   ` Paul Durrant
2016-04-20 15:37                                     ` George Dunlap
2016-04-20 16:30                                       ` Paul Durrant
2016-04-20 16:58                                         ` George Dunlap
2016-04-21 13:28                                         ` Yu, Zhang
2016-04-21 13:21                                   ` Yu, Zhang
2016-04-22 11:27                                     ` Wei Liu
2016-04-22 11:30                                       ` George Dunlap
2016-04-19  4:37                 ` Tian, Kevin
2016-04-19  9:21                   ` Yu, Zhang
2016-04-08 13:33   ` Andrew Cooper
2016-04-11 11:14     ` Yu, Zhang
2016-04-11 12:20       ` Andrew Cooper
2016-04-11 16:25         ` Jan Beulich
2016-04-08 22:28   ` Jan Beulich
2016-04-11 11:14     ` Yu, Zhang
2016-04-11 16:31       ` Jan Beulich
2016-04-12  9:37         ` Yu, Zhang
2016-04-12 15:08           ` Jan Beulich
2016-04-14  9:56             ` Yu, Zhang
2016-04-19  4:50               ` Tian, Kevin
2016-04-19  8:46                 ` Paul Durrant
2016-04-19  9:27                   ` Yu, Zhang
2016-04-19  9:40                     ` Paul Durrant
2016-04-19  9:49                       ` Yu, Zhang
2016-04-19 10:01                         ` Paul Durrant
2016-04-19  9:54                           ` Yu, Zhang
2016-04-19  9:15                 ` Yu, Zhang
2016-04-19  9:23                   ` Paul Durrant
