From: Paul Durrant <paul@xen.org>
To: xen-devel@lists.xenproject.org
Cc: "Paul Durrant" <pdurrant@amazon.com>, "Wei Liu" <wl@xen.org>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH 06/10] viridian: add ExProcessorMasks variants of the flush hypercalls
Date: Wed, 11 Nov 2020 20:07:17 +0000
Message-ID: <20201111200721.30551-7-paul@xen.org>
In-Reply-To: <20201111200721.30551-1-paul@xen.org>

From: Paul Durrant <pdurrant@amazon.com>

The Microsoft Hypervisor TLFS specifies variants of the already implemented
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE/LIST hypercalls that take a 'Virtual
Processor Set' as an argument rather than a simple 64-bit mask.
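
For reference, the 'Virtual Processor Set' layout used by these
hypercalls (as defined in hyperv-tlfs.h) is essentially the following;
the field comments here are illustrative rather than quoted from the
header:

    enum hv_generic_set_format {
        HV_GENERIC_SET_SPARSE_4K,  /* must be value 0 */
        HV_GENERIC_SET_ALL,
    };

    struct hv_vpset {
        u64 format;           /* HV_GENERIC_SET_* */
        u64 valid_bank_mask;  /* which 64-VP banks are present */
        u64 bank_contents[];  /* one 64-bit VP mask per valid bank */
    };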

This patch adds a new hvcall_flush_ex() function to implement these
(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE/LIST_EX) hypercalls. It makes use
of two new helper functions: hv_vpset_nr_banks(), to determine the
size of the Virtual Processor Set (so that it can be copied from guest
memory), and hv_vpset_to_vpmask(), to parse the set into a
hypercall_vpmask.
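
As a rough standalone illustration of the sizing rule (hypothetical
example code, not part of the patch): with a sparse-4K set whose
valid_bank_mask is 0x5, banks 0 and 2 are present, covering VPs 0-63
and 128-191, so only two 64-bit entries of bank_contents[] need to be
copied from guest memory:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: count the banks present in a sparse-4K set.
     * One uint64_t of bank_contents[] follows the fixed header in
     * guest memory for each bit set in valid_bank_mask. */
    static unsigned int nr_banks(uint64_t valid_bank_mask)
    {
        unsigned int nr = 0;

        for ( ; valid_bank_mask; valid_bank_mask >>= 1 )
            nr += valid_bank_mask & 1;

        return nr;
    }

    int main(void)
    {
        uint64_t mask = 0x5; /* banks 0 and 2 present */

        printf("%u banks (%zu bytes) to copy\n",
               nr_banks(mask), nr_banks(mask) * sizeof(uint64_t));

        return 0;
    }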

NOTE: A guest should not yet issue these hypercalls as 'ExProcessorMasks'
      support needs to be advertised via CPUID. This will be done in a
      subsequent patch.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Wei Liu <wl@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
---
 xen/arch/x86/hvm/viridian/viridian.c | 147 +++++++++++++++++++++++++++
 1 file changed, 147 insertions(+)

diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 765d53016c02..1226e1596a1c 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -553,6 +553,83 @@ static unsigned int vpmask_next(struct hypercall_vpmask *vpmask, unsigned int vp
 	     (vp) < HVM_MAX_VCPUS; \
 	     (vp) = vpmask_next(vpmask, vp))
 
+struct hypercall_vpset {
+    struct hv_vpset set;
+    uint64_t __bank_contents[64];
+};
+
+static DEFINE_PER_CPU(struct hypercall_vpset, hypercall_vpset);
+
+static unsigned int hv_vpset_nr_banks(struct hv_vpset *vpset)
+{
+    uint64_t bank_mask;
+    unsigned int nr = 0;
+
+    for ( bank_mask = vpset->valid_bank_mask; bank_mask; bank_mask >>= 1 )
+        if ( bank_mask & 1 )
+            nr++;
+
+    return nr;
+}
+
+static int hv_vpset_to_vpmask(struct hv_vpset *set, size_t size,
+                              struct hypercall_vpmask *vpmask)
+{
+    switch ( set->format )
+    {
+    case HV_GENERIC_SET_ALL:
+        vpmask_fill(vpmask);
+        return 0;
+
+    case HV_GENERIC_SET_SPARSE_4K:
+    {
+        uint64_t bank_mask;
+        unsigned int bank = 0, vp = 0;
+
+        vpmask_empty(vpmask);
+        for ( bank_mask = set->valid_bank_mask; bank_mask; bank_mask >>= 1 )
+        {
+            /* Make sure we won't dereference past the end of the array */
+            if ( (void *)(set->bank_contents + bank) >=
+                 (void *)set + size )
+            {
+                ASSERT_UNREACHABLE();
+                return -EINVAL;
+            }
+
+            if ( bank_mask & 1 )
+            {
+                uint64_t mask = set->bank_contents[bank];
+                unsigned int i;
+
+                for ( i = 0; i < 64; i++, vp++ )
+                {
+                    if ( mask & 1 )
+                    {
+                        if ( vp >= HVM_MAX_VCPUS )
+                            return -EINVAL;
+
+                        vpmask_set(vpmask, vp);
+                    }
+
+                    mask >>= 1;
+                }
+
+                bank++;
+            }
+            else
+                vp += 64;
+        }
+        return 0;
+    }
+
+    default:
+        break;
+    }
+
+    return -EINVAL;
+}
+
 /*
  * Windows should not issue the hypercalls requiring this callback in the
  * case where vcpu_id would exceed the size of the mask.
@@ -644,6 +721,70 @@ static int hvcall_flush(union hypercall_input *input,
     return 0;
 }
 
+static int hvcall_flush_ex(union hypercall_input *input,
+                           union hypercall_output *output,
+                           unsigned long input_params_gpa,
+                           unsigned long output_params_gpa)
+{
+    struct hypercall_vpmask *vpmask = &this_cpu(hypercall_vpmask);
+    struct {
+        uint64_t address_space;
+        uint64_t flags;
+        struct hv_vpset set;
+    } input_params;
+
+    /* These hypercalls should never use the fast-call convention. */
+    if ( input->fast )
+        return -EINVAL;
+
+    /* Get input parameters. */
+    if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
+                                  sizeof(input_params)) != HVMTRANS_okay )
+        return -EINVAL;
+
+    if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
+        vpmask_fill(vpmask);
+    else
+    {
+        struct hypercall_vpset *vpset = &this_cpu(hypercall_vpset);
+        struct hv_vpset *set = &vpset->set;
+        size_t size;
+        int rc;
+
+        *set = input_params.set;
+        if ( set->format == HV_GENERIC_SET_SPARSE_4K )
+        {
+            unsigned long offset = offsetof(typeof(input_params),
+                                            set.bank_contents);
+
+            size = sizeof(*set->bank_contents) * hv_vpset_nr_banks(set);
+            if ( hvm_copy_from_guest_phys(&set->bank_contents,
+                                          input_params_gpa + offset,
+                                          size) != HVMTRANS_okay )
+                return -EINVAL;
+
+            size += sizeof(*set);
+        }
+        else
+            size = sizeof(*set);
+
+        rc = hv_vpset_to_vpmask(set, size, vpmask);
+        if ( rc )
+            return rc;
+    }
+
+    /*
+     * A false return means that another vcpu is currently trying
+     * a similar operation, so back off.
+     */
+    if ( !paging_flush_tlb(need_flush, vpmask) )
+        return -ERESTART;
+
+    output->rep_complete = input->rep_count;
+
+    return 0;
+}
+
 static void send_ipi(struct hypercall_vpmask *vpmask, uint8_t vector)
 {
     struct domain *currd = current->domain;
@@ -767,6 +908,12 @@ int viridian_hypercall(struct cpu_user_regs *regs)
                           output_params_gpa);
         break;
 
+    case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+    case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+        rc = hvcall_flush_ex(&input, &output, input_params_gpa,
+                             output_params_gpa);
+        break;
+
     case HVCALL_SEND_IPI:
         rc = hvcall_ipi(&input, &output, input_params_gpa,
                         output_params_gpa);
-- 
2.20.1


