From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>,
	Wei Liu <wei.liu2@citrix.com>,
	George Dunlap <George.Dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <Ian.Jackson@eu.citrix.com>, Tim Deegan <tim@xen.org>,
	Julien Grall <julien.grall@arm.com>
Subject: [PATCH 4/6] replace vCPU's dirty CPU mask by numeric ID
Date: Fri, 19 Jan 2018 09:06:02 -0700
Message-ID: <5A62257A02000078001A0791@prv-mh.provo.novell.com>
In-Reply-To: <5A62238B02000078001A0768@prv-mh.provo.novell.com>

At most one bit can ever be set in these masks, so, especially on larger
systems, tracking the information as a full cpumask wastes memory and
processing time. Store the numeric ID of the respective CPU instead, or
NR_CPUS if no dirty state exists.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
ARM adjustments are compile-tested only.
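
For readers skimming the diff: the change swaps a per-vCPU cpumask (of
which at most one bit could ever be set) for a plain CPU number, with
NR_CPUS acting as the "no dirty state" sentinel. A minimal standalone
sketch of that encoding (illustrative only, with hypothetical names and
a stand-in NR_CPUS value; not the Xen code itself):

    #include <stdbool.h>

    #define NR_CPUS 256            /* stand-in for Xen's build-time limit */

    struct vcpu_sketch {
        /* CPU holding this vCPU's state, or NR_CPUS if none. */
        unsigned int dirty_cpu;
    };

    /* was: cpumask_set_cpu(cpu, v->vcpu_dirty_cpumask); */
    static void mark_dirty(struct vcpu_sketch *v, unsigned int cpu)
    {
        v->dirty_cpu = cpu;
    }

    /* was: cpumask_clear_cpu(cpu, v->vcpu_dirty_cpumask); */
    static void mark_clean(struct vcpu_sketch *v)
    {
        v->dirty_cpu = NR_CPUS;
    }

    /* was: !cpumask_empty(v->vcpu_dirty_cpumask) */
    static bool is_dirty(const struct vcpu_sketch *v)
    {
        return v->dirty_cpu != NR_CPUS;
    }

Where a flush is still needed, cpumask_of(v->dirty_cpu) rebuilds a
single-CPU mask on the fly, so the flush_mask() call sites below keep
their interface unchanged.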

--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -330,7 +330,7 @@ void context_switch(struct vcpu *prev, s
 {
     ASSERT(local_irq_is_enabled());
     ASSERT(prev != next);
-    ASSERT(cpumask_empty(next->vcpu_dirty_cpumask));
+    ASSERT(next->dirty_cpu >= nr_cpu_ids);
 
     if ( prev != next )
         update_runstate_area(prev);
@@ -471,7 +471,7 @@ void startup_cpu_idle_loop(void)
     ASSERT(is_idle_vcpu(v));
     /* TODO
        cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
-       cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
+       v->dirty_cpu = v->processor;
     */
 
     reset_stack_and_jump(idle_loop);
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -146,7 +146,7 @@ void startup_cpu_idle_loop(void)
 
     ASSERT(is_idle_vcpu(v));
     cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
-    cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
+    v->dirty_cpu = v->processor;
 
     reset_stack_and_jump(idle_loop);
 }
@@ -1602,7 +1602,7 @@ static void __context_switch(void)
     struct desc_ptr       gdt_desc;
 
     ASSERT(p != n);
-    ASSERT(cpumask_empty(n->vcpu_dirty_cpumask));
+    ASSERT(n->dirty_cpu >= nr_cpu_ids);
 
     if ( !is_idle_domain(pd) )
     {
@@ -1618,7 +1618,7 @@ static void __context_switch(void)
      */
     if ( pd != nd )
         cpumask_set_cpu(cpu, nd->domain_dirty_cpumask);
-    cpumask_set_cpu(cpu, n->vcpu_dirty_cpumask);
+    n->dirty_cpu = cpu;
 
     if ( !is_idle_domain(nd) )
     {
@@ -1674,7 +1674,7 @@ static void __context_switch(void)
 
     if ( pd != nd )
         cpumask_clear_cpu(cpu, pd->domain_dirty_cpumask);
-    cpumask_clear_cpu(cpu, p->vcpu_dirty_cpumask);
+    p->dirty_cpu = NR_CPUS;
 
     per_cpu(curr_vcpu, cpu) = n;
 }
@@ -1684,20 +1684,16 @@ void context_switch(struct vcpu *prev, s
 {
     unsigned int cpu = smp_processor_id();
     const struct domain *prevd = prev->domain, *nextd = next->domain;
-    cpumask_t dirty_mask;
+    unsigned int dirty_cpu = next->dirty_cpu;
 
     ASSERT(local_irq_is_enabled());
 
     get_cpu_info()->xen_cr3 = 0;
 
-    cpumask_copy(&dirty_mask, next->vcpu_dirty_cpumask);
-    /* Allow at most one CPU at a time to be dirty. */
-    ASSERT(cpumask_weight(&dirty_mask) <= 1);
-    if ( unlikely(!cpumask_test_cpu(cpu, &dirty_mask) &&
-                  !cpumask_empty(&dirty_mask)) )
+    if ( unlikely(dirty_cpu != cpu) && dirty_cpu != NR_CPUS )
     {
         /* Other cpus call __sync_local_execstate from flush ipi handler. */
-        flush_mask(&dirty_mask, FLUSH_TLB | FLUSH_STATE);
+        flush_mask(cpumask_of(dirty_cpu), FLUSH_TLB | FLUSH_STATE);
     }
 
     if ( prev != next )
@@ -1802,11 +1798,14 @@ void sync_local_execstate(void)
 
 void sync_vcpu_execstate(struct vcpu *v)
 {
-    if ( cpumask_test_cpu(smp_processor_id(), v->vcpu_dirty_cpumask) )
+    if ( v->dirty_cpu == smp_processor_id() )
         sync_local_execstate();
 
-    /* Other cpus call __sync_local_execstate from flush ipi handler. */
-    flush_mask(v->vcpu_dirty_cpumask, FLUSH_TLB | FLUSH_STATE);
+    if ( v->dirty_cpu != NR_CPUS )
+    {
+        /* Other cpus call __sync_local_execstate from flush ipi handler. */
+        flush_mask(cpumask_of(v->dirty_cpu), FLUSH_TLB | FLUSH_STATE);
+    }
 }
 
 static int relinquish_memory(
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1253,7 +1253,7 @@ void put_page_from_l1e(l1_pgentry_t l1e,
             for_each_vcpu ( pg_owner, v )
             {
                 if ( invalidate_shadow_ldt(v) )
-                    flush_tlb_mask(v->vcpu_dirty_cpumask);
+                    flush_tlb_mask(cpumask_of(v->dirty_cpu));
             }
         }
         put_page(page);
@@ -2978,8 +2978,8 @@ static inline int vcpumask_to_pcpumask(
             vcpu_id += vcpu_bias;
             if ( (vcpu_id >= d->max_vcpus) )
                 return 0;
-            if ( ((v = d->vcpu[vcpu_id]) != NULL) )
-                cpumask_or(pmask, pmask, v->vcpu_dirty_cpumask);
+            if ( ((v = d->vcpu[vcpu_id]) != NULL) && v->dirty_cpu != NR_CPUS )
+                __cpumask_set_cpu(v->dirty_cpu, pmask);
         }
     }
 }
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -135,6 +135,7 @@ struct vcpu *alloc_vcpu(
 
     v->domain = d;
     v->vcpu_id = vcpu_id;
+    v->dirty_cpu = NR_CPUS;
 
     spin_lock_init(&v->virq_lock);
 
@@ -145,8 +146,7 @@ struct vcpu *alloc_vcpu(
     if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
          !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
          !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
-         !zalloc_cpumask_var(&v->cpu_soft_affinity) ||
-         !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
+         !zalloc_cpumask_var(&v->cpu_soft_affinity) )
         goto fail_free;
 
     if ( is_idle_domain(d) )
@@ -175,7 +175,6 @@ struct vcpu *alloc_vcpu(
         free_cpumask_var(v->cpu_hard_affinity_tmp);
         free_cpumask_var(v->cpu_hard_affinity_saved);
         free_cpumask_var(v->cpu_soft_affinity);
-        free_cpumask_var(v->vcpu_dirty_cpumask);
         free_vcpu_struct(v);
         return NULL;
     }
@@ -863,7 +862,6 @@ static void complete_domain_destroy(stru
             free_cpumask_var(v->cpu_hard_affinity_tmp);
             free_cpumask_var(v->cpu_hard_affinity_saved);
             free_cpumask_var(v->cpu_soft_affinity);
-            free_cpumask_var(v->vcpu_dirty_cpumask);
             free_vcpu_struct(v);
         }
 
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -340,8 +340,9 @@ static void dump_domains(unsigned char k
                    v->is_running ? 'T':'F', v->poll_evtchn,
                    vcpu_info(v, evtchn_upcall_pending),
                    !vcpu_event_delivery_is_enabled(v));
-            cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
-            printk("dirty_cpus=%s\n", tmpstr);
+            if ( v->dirty_cpu < nr_cpu_ids )
+                printk("dirty_cpu=%u", v->dirty_cpu);
+            printk("\n");
             cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
             printk("    cpu_hard_affinity=%s ", tmpstr);
             cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_soft_affinity);
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -210,6 +210,8 @@ struct vcpu
     bool             hcall_compat;
 #endif
 
+    /* The CPU, if any, which is holding onto this VCPU's state. */
+    unsigned int     dirty_cpu;
 
     /*
      * > 0: a single port is being polled;
@@ -248,9 +250,6 @@ struct vcpu
     /* Bitmask of CPUs on which this VCPU prefers to run. */
     cpumask_var_t    cpu_soft_affinity;
 
-    /* Bitmask of CPUs which are holding onto this VCPU's state. */
-    cpumask_var_t    vcpu_dirty_cpumask;
-
     /* Tasklet for continue_hypercall_on_cpu(). */
     struct tasklet   continue_hypercall_tasklet;
 



