All of lore.kernel.org
 help / color / mirror / Atom feed
From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: "Stefano Stabellini" <sstabellini@kernel.org>,
	"Wei Liu" <wei.liu2@citrix.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Julien Grall" <julien.grall@arm.com>,
	"Paul Durrant" <paul.durrant@citrix.com>,
	"Jan Beulich" <JBeulich@suse.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH 4/5] x86/hvm: Misc non-functional cleanup to the HVM_PARAM infrastructure
Date: Wed, 5 Sep 2018 19:12:03 +0100	[thread overview]
Message-ID: <1536171124-27053-5-git-send-email-andrew.cooper3@citrix.com> (raw)
In-Reply-To: <1536171124-27053-1-git-send-email-andrew.cooper3@citrix.com>

The parameter marshalling and xsm checks are common to both the set and get
paths.  Lift all of the common code out into do_hvm_op() and let
hvmop_{get,set}_param() operate on struct xen_hvm_param directly.

This is somewhat noisy in the functions, as each `a.` reference has to be
changed to `a->`.

In addition, drop an empty default case, insert newlines between cases as
appropriate, and skip updating the IDENT_PT on the fastpath, because the
common path after the switch will make the update anyway.

No functional change, but a net shrink of -328 to do_hvm_op().

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Paul Durrant <paul.durrant@citrix.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien.grall@arm.com>
---
 xen/arch/x86/hvm/hvm.c | 223 +++++++++++++++++++++++--------------------------
 1 file changed, 104 insertions(+), 119 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 408e695..c891269 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4065,11 +4065,7 @@ static int hvm_allow_set_param(struct domain *d,
                                const struct xen_hvm_param *a)
 {
     uint64_t value = d->arch.hvm.params[a->index];
-    int rc;
-
-    rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param);
-    if ( rc )
-        return rc;
+    int rc = 0;
 
     switch ( a->index )
     {
@@ -4133,62 +4129,46 @@ static int hvm_allow_set_param(struct domain *d,
         if ( value != 0 && a->value != value )
             rc = -EEXIST;
         break;
-    default:
-        break;
     }
 
     return rc;
 }
 
-static int hvmop_set_param(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
+static int hvmop_set_param(struct domain *d, struct xen_hvm_param *a)
 {
     struct domain *curr_d = current->domain;
-    struct xen_hvm_param a;
-    struct domain *d;
     struct vcpu *v;
     int rc;
 
-    if ( copy_from_guest(&a, arg, 1) )
-        return -EFAULT;
-
-    d = rcu_lock_domain_by_any_id(a.domid);
-    if ( d == NULL )
-        return -ESRCH;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = hvm_allow_set_param(d, &a);
+    rc = hvm_allow_set_param(d, a);
     if ( rc )
         goto out;
 
-    switch ( a.index )
+    switch ( a->index )
     {
     case HVM_PARAM_CALLBACK_IRQ:
-        hvm_set_callback_via(d, a.value);
+        hvm_set_callback_via(d, a->value);
         hvm_latch_shinfo_size(d);
         break;
+
     case HVM_PARAM_TIMER_MODE:
-        if ( a.value > HVMPTM_one_missed_tick_pending )
+        if ( a->value > HVMPTM_one_missed_tick_pending )
             rc = -EINVAL;
         break;
+
     case HVM_PARAM_VIRIDIAN:
-        if ( (a.value & ~HVMPV_feature_mask) ||
-             !(a.value & HVMPV_base_freq) )
+        if ( (a->value & ~HVMPV_feature_mask) ||
+             !(a->value & HVMPV_base_freq) )
             rc = -EINVAL;
         break;
+
     case HVM_PARAM_IDENT_PT:
         /*
          * Only actually required for VT-x lacking unrestricted_guest
          * capabilities.  Short circuit the pause if possible.
          */
         if ( !paging_mode_hap(d) || !cpu_has_vmx )
-        {
-            d->arch.hvm.params[a.index] = a.value;
             break;
-        }
 
         /*
          * Update GUEST_CR3 in each VMCS to point at identity map.
@@ -4201,117 +4181,123 @@ static int hvmop_set_param(
 
         rc = 0;
         domain_pause(d);
-        d->arch.hvm.params[a.index] = a.value;
+        d->arch.hvm.params[a->index] = a->value;
         for_each_vcpu ( d, v )
             paging_update_cr3(v, false);
         domain_unpause(d);
 
         domctl_lock_release();
         break;
+
     case HVM_PARAM_DM_DOMAIN:
         /* The only value this should ever be set to is DOMID_SELF */
-        if ( a.value != DOMID_SELF )
+        if ( a->value != DOMID_SELF )
             rc = -EINVAL;
 
-        a.value = curr_d->domain_id;
+        a->value = curr_d->domain_id;
         break;
+
     case HVM_PARAM_ACPI_S_STATE:
         rc = 0;
-        if ( a.value == 3 )
+        if ( a->value == 3 )
             hvm_s3_suspend(d);
-        else if ( a.value == 0 )
+        else if ( a->value == 0 )
             hvm_s3_resume(d);
         else
             rc = -EINVAL;
-
         break;
+
     case HVM_PARAM_ACPI_IOPORTS_LOCATION:
-        rc = pmtimer_change_ioport(d, a.value);
+        rc = pmtimer_change_ioport(d, a->value);
         break;
 
     case HVM_PARAM_NESTEDHVM:
         rc = xsm_hvm_param_nested(XSM_PRIV, d);
         if ( rc )
             break;
-        if ( a.value > 1 )
+        if ( a->value > 1 )
             rc = -EINVAL;
         /*
          * Remove the check below once we have
          * shadow-on-shadow.
          */
-        if ( !paging_mode_hap(d) && a.value )
+        if ( !paging_mode_hap(d) && a->value )
             rc = -EINVAL;
-        if ( a.value &&
+        if ( a->value &&
              d->arch.hvm.params[HVM_PARAM_ALTP2M] )
             rc = -EINVAL;
         /* Set up NHVM state for any vcpus that are already up. */
-        if ( a.value &&
+        if ( a->value &&
              !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
             for_each_vcpu(d, v)
                 if ( rc == 0 )
                     rc = nestedhvm_vcpu_initialise(v);
-        if ( !a.value || rc )
+        if ( !a->value || rc )
             for_each_vcpu(d, v)
                 nestedhvm_vcpu_destroy(v);
         break;
+
     case HVM_PARAM_ALTP2M:
         rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d);
         if ( rc )
             break;
-        if ( a.value > XEN_ALTP2M_limited )
+        if ( a->value > XEN_ALTP2M_limited )
             rc = -EINVAL;
-        if ( a.value &&
+        if ( a->value &&
              d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
             rc = -EINVAL;
         break;
 
     case HVM_PARAM_TRIPLE_FAULT_REASON:
-        if ( a.value > SHUTDOWN_MAX )
+        if ( a->value > SHUTDOWN_MAX )
             rc = -EINVAL;
         break;
+
     case HVM_PARAM_IOREQ_SERVER_PFN:
-        d->arch.hvm.ioreq_gfn.base = a.value;
+        d->arch.hvm.ioreq_gfn.base = a->value;
         break;
+
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     {
         unsigned int i;
 
-        if ( a.value == 0 ||
-             a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
+        if ( a->value == 0 ||
+             a->value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
         {
             rc = -EINVAL;
             break;
         }
-        for ( i = 0; i < a.value; i++ )
+        for ( i = 0; i < a->value; i++ )
             set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
 
         break;
     }
+
     case HVM_PARAM_X87_FIP_WIDTH:
-        if ( a.value != 0 && a.value != 4 && a.value != 8 )
+        if ( a->value != 0 && a->value != 4 && a->value != 8 )
         {
             rc = -EINVAL;
             break;
         }
-        d->arch.x87_fip_width = a.value;
+        d->arch.x87_fip_width = a->value;
         break;
 
     case HVM_PARAM_VM86_TSS:
         /* Hardware would silently truncate high bits. */
-        if ( a.value != (uint32_t)a.value )
+        if ( a->value != (uint32_t)a->value )
         {
             if ( d == curr_d )
                 domain_crash(d);
             rc = -EINVAL;
         }
         /* Old hvmloader binaries hardcode the size to 128 bytes. */
-        if ( a.value )
-            a.value |= (128ULL << 32) | VM86_TSS_UPDATED;
-        a.index = HVM_PARAM_VM86_TSS_SIZED;
+        if ( a->value )
+            a->value |= (128ULL << 32) | VM86_TSS_UPDATED;
+        a->index = HVM_PARAM_VM86_TSS_SIZED;
         break;
 
     case HVM_PARAM_VM86_TSS_SIZED:
-        if ( (a.value >> 32) < sizeof(struct tss32) )
+        if ( (a->value >> 32) < sizeof(struct tss32) )
         {
             if ( d == curr_d )
                 domain_crash(d);
@@ -4322,41 +4308,34 @@ static int hvmop_set_param(
          * 256 bits interrupt redirection bitmap + 64k bits I/O bitmap
          * plus one padding byte).
          */
-        if ( (a.value >> 32) > sizeof(struct tss32) +
-                               (0x100 / 8) + (0x10000 / 8) + 1 )
-            a.value = (uint32_t)a.value |
-                      ((sizeof(struct tss32) + (0x100 / 8) +
-                                               (0x10000 / 8) + 1) << 32);
-        a.value |= VM86_TSS_UPDATED;
+        if ( (a->value >> 32) > sizeof(struct tss32) +
+                                (0x100 / 8) + (0x10000 / 8) + 1 )
+            a->value = (uint32_t)a->value |
+                       ((sizeof(struct tss32) + (0x100 / 8) +
+                                                (0x10000 / 8) + 1) << 32);
+        a->value |= VM86_TSS_UPDATED;
         break;
 
     case HVM_PARAM_MCA_CAP:
-        rc = vmce_enable_mca_cap(d, a.value);
+        rc = vmce_enable_mca_cap(d, a->value);
         break;
     }
 
     if ( rc != 0 )
         goto out;
 
-    d->arch.hvm.params[a.index] = a.value;
+    d->arch.hvm.params[a->index] = a->value;
 
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64,
-                a.index, a.value);
+                a->index, a->value);
 
  out:
-    rcu_unlock_domain(d);
     return rc;
 }
 
 static int hvm_allow_get_param(struct domain *d,
                                const struct xen_hvm_param *a)
 {
-    int rc;
-
-    rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_get_param);
-    if ( rc )
-        return rc;
-
     switch ( a->index )
     {
         /* The following parameters can be read by the guest and toolstack. */
@@ -4371,7 +4350,7 @@ static int hvm_allow_get_param(struct domain *d,
     case HVM_PARAM_CONSOLE_EVTCHN:
     case HVM_PARAM_ALTP2M:
     case HVM_PARAM_X87_FIP_WIDTH:
-        break;
+        return 0;
 
         /*
          * The following parameters are intended for toolstack usage only.
@@ -4397,59 +4376,41 @@ static int hvm_allow_get_param(struct domain *d,
     case HVM_PARAM_IOREQ_SERVER_PFN:
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     case HVM_PARAM_MCA_CAP:
-        if ( d == current->domain )
-            rc = -EPERM;
-        break;
+        return d == current->domain ? -EPERM : 0;
 
         /* Hole, deprecated, or out-of-range. */
     default:
-        rc = -EINVAL;
-        break;
+        return -EINVAL;
     }
-
-    return rc;
 }
 
-static int hvmop_get_param(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
+static int hvmop_get_param(struct domain *d, struct xen_hvm_param *a)
 {
-    struct xen_hvm_param a;
-    struct domain *d;
     int rc;
 
-    if ( copy_from_guest(&a, arg, 1) )
-        return -EFAULT;
-
-    d = rcu_lock_domain_by_any_id(a.domid);
-    if ( d == NULL )
-        return -ESRCH;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = hvm_allow_get_param(d, &a);
+    rc = hvm_allow_get_param(d, a);
     if ( rc )
-        goto out;
+        return rc;
 
-    switch ( a.index )
+    switch ( a->index )
     {
     case HVM_PARAM_ACPI_S_STATE:
-        a.value = d->arch.hvm.is_s3_suspended ? 3 : 0;
+        a->value = d->arch.hvm.is_s3_suspended ? 3 : 0;
         break;
 
     case HVM_PARAM_VM86_TSS:
-        a.value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
+        a->value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
         break;
 
     case HVM_PARAM_VM86_TSS_SIZED:
-        a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
-                  ~VM86_TSS_UPDATED;
+        a->value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
+                   ~VM86_TSS_UPDATED;
         break;
 
     case HVM_PARAM_X87_FIP_WIDTH:
-        a.value = d->arch.x87_fip_width;
+        a->value = d->arch.x87_fip_width;
         break;
+
     case HVM_PARAM_IOREQ_PFN:
     case HVM_PARAM_BUFIOREQ_PFN:
     case HVM_PARAM_BUFIOREQ_EVTCHN:
@@ -4465,23 +4426,19 @@ static int hvmop_get_param(
             rc = hvm_create_ioreq_server(d, true,
                                          HVM_IOREQSRV_BUFIOREQ_LEGACY, NULL);
             if ( rc != 0 && rc != -EEXIST )
-                goto out;
+                return rc;
         }
 
-    /*FALLTHRU*/
+        /* Fallthrough */
     default:
-        a.value = d->arch.hvm.params[a.index];
+        a->value = d->arch.hvm.params[a->index];
         break;
     }
 
-    rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64,
-                a.index, a.value);
+                a->index, a->value);
 
- out:
-    rcu_unlock_domain(d);
-    return rc;
+    return 0;
 }
 
 /*
@@ -4896,14 +4853,42 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     
     case HVMOP_set_param:
-        rc = hvmop_set_param(
-            guest_handle_cast(arg, xen_hvm_param_t));
-        break;
-
     case HVMOP_get_param:
-        rc = hvmop_get_param(
-            guest_handle_cast(arg, xen_hvm_param_t));
+    {
+        struct xen_hvm_param a;
+        struct domain *d;
+
+        rc = -EFAULT;
+        if ( copy_from_guest(&a, arg, 1) )
+            break;
+
+        rc = -ESRCH;
+        d = rcu_lock_domain_by_any_id(a.domid);
+        if ( d == NULL )
+            break;
+
+        rc = -EINVAL;
+        if ( !is_hvm_domain(d) )
+            goto param_out;
+
+        rc = xsm_hvm_param(XSM_TARGET, d, op);
+        if ( rc )
+            goto param_out;
+
+        if ( op == HVMOP_set_param )
+            rc = hvmop_set_param(d, &a);
+        else
+        {
+            rc = hvmop_get_param(d, &a);
+
+            if ( !rc && __copy_to_guest(arg, &a, 1) )
+                rc = -EFAULT;
+        }
+
+    param_out:
+        rcu_unlock_domain(d);
         break;
+    }
 
     case HVMOP_flush_tlbs:
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -EINVAL;
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

  parent reply	other threads:[~2018-09-05 18:12 UTC|newest]

Thread overview: 31+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-09-05 18:11 [PATCH 0/5] xen: Fixes and improvements to HVM_PARAM handling Andrew Cooper
2018-09-05 18:12 ` [PATCH 1/5] x86/hvm: Switch hvm_allow_get_param() to use a whitelist Andrew Cooper
2018-09-06  8:56   ` Paul Durrant
2018-09-06 15:21     ` Andrew Cooper
2018-09-07  6:30       ` Jan Beulich
2018-09-07  8:55       ` Jan Beulich
2018-09-07 18:18         ` Andrew Cooper
2018-09-10  9:41           ` Jan Beulich
2018-09-07 15:42   ` Roger Pau Monné
2018-09-05 18:12 ` [PATCH 2/5] x86/hvm: Switch hvm_allow_set_param() " Andrew Cooper
2018-09-06  9:08   ` Paul Durrant
2018-09-06 15:27     ` Andrew Cooper
2018-09-07 16:01   ` Roger Pau Monné
2018-09-07 18:13     ` Andrew Cooper
2018-09-10 14:28       ` Roger Pau Monné
2018-09-05 18:12 ` [PATCH 3/5] x86/hvm: Make HVM_PARAM_{STORE, CONSOLE}_EVTCHN read-only to the guest Andrew Cooper
2018-09-06  9:16   ` Paul Durrant
2018-09-06 15:29     ` Andrew Cooper
2018-09-06 17:28       ` Julien Grall
2018-09-07 16:19         ` Paul Durrant
2018-09-07 16:03   ` Roger Pau Monné
2018-09-05 18:12 ` Andrew Cooper [this message]
2018-09-06  9:26   ` [PATCH 4/5] x86/hvm: Misc non-functional cleanup to the HVM_PARAM infrastructure Paul Durrant
2018-09-07  9:08     ` Jan Beulich
2018-09-07 16:23   ` Roger Pau Monné
2018-09-05 18:12 ` [PATCH 5/5] xen/ARM: Restrict access to most HVM_PARAM's Andrew Cooper
2018-09-06  9:29   ` Paul Durrant
2018-09-06 10:36     ` Julien Grall
2018-09-06 10:40       ` Andrew Cooper
2018-09-06 10:43         ` Paul Durrant
2018-09-06 10:40       ` Paul Durrant

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1536171124-27053-5-git-send-email-andrew.cooper3@citrix.com \
    --to=andrew.cooper3@citrix.com \
    --cc=JBeulich@suse.com \
    --cc=julien.grall@arm.com \
    --cc=paul.durrant@citrix.com \
    --cc=roger.pau@citrix.com \
    --cc=sstabellini@kernel.org \
    --cc=wei.liu2@citrix.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.